From 583609c0f392ac85819cfd9930b6ff602d94fdb1 Mon Sep 17 00:00:00 2001
From: Chris Tarazi
Date: Wed, 29 Nov 2023 15:57:15 -0800
Subject: [PATCH 1/5] map: Introduce BatchCursor abstraction

The batch API in the kernel is quite tricky to get right. Passing the
arguments in correctly, especially the "in_batch" and "out_batch"
arguments, requires knowledge of how batch map iteration is implemented
in the kernel.

To avoid pushing this cognitive overhead onto users of the library,
abstract away the details of batch map iteration into an opaque type,
BatchCursor. Users are expected to pass in a reference to a BatchCursor
and the library will handle allocating the underlying buffer when
needed. Specifically, the first time a batch API is called using the
cursor, the underlying buffer will be nil, which signifies that
"in_batch" (aka "prevKey") should be nil to indicate the start of a
batch operation, and "out_batch" (aka "nextKey") is set to a
newly-allocated buffer.

Previously, users were expected to carry the "out_batch" reference in
order to pass it in as "in_batch" on the next call to the batching
operation. With this abstraction, the cursor handles that for the user
by using the underlying buffer for both "in_batch" and "out_batch" in
subsequent calls.

Signed-off-by: Chris Tarazi
Suggested-by: Lorenz Bauer
---
 map.go      | 56 ++++++++++++++++++++++++++++++-----------------------
 map_test.go | 33 +++++++++++++++----------------
 2 files changed, 48 insertions(+), 41 deletions(-)

diff --git a/map.go b/map.go
index dc2524590..ea2db5391 100644
--- a/map.go
+++ b/map.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -963,14 +964,15 @@ func (m *Map) guessNonExistentKey() ([]byte, error) {
 //
 // "keysOut" and "valuesOut" must be of type slice, a pointer
 // to a slice or buffer will not work.
-// "prevKey" is the key to start the batch lookup from, it will
-// *not* be included in the results. Use nil to start at the first key.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
 //
 // ErrKeyNotExist is returned when the batch lookup has reached
 // the end of all possible results, even when partial results
 // are returned. It should be used to evaluate when lookup is "done".
-func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
-	return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+func (m *Map) BatchLookup(cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts)
 }
 
 // BatchLookupAndDelete looks up many elements in a map at once,
@@ -978,17 +980,35 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o
 // It then deletes all those elements.
 // "keysOut" and "valuesOut" must be of type slice, a pointer
 // to a slice or buffer will not work.
-// "prevKey" is the key to start the batch lookup from, it will
-// *not* be included in the results. Use nil to start at the first key.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
// // ErrKeyNotExist is returned when the batch lookup has reached // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) +func (m *Map) BatchLookupAndDelete(cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) } -func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { +// BatchCursor represents a starting point for a batch operation. +type BatchCursor struct { + opaque []byte // len() is max(key_size, 4) to be on the safe side +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + var inBatch []byte + if cursor.opaque == nil { + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + cursor.opaque = make([]byte, int(math.Max(float64(m.keySize), 4))) + } else { + inBatch = cursor.opaque + } + if err := haveBatchAPI(); err != nil { return 0, err } @@ -1011,14 +1031,14 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut keyPtr := sys.NewSlicePointer(keyBuf) valueBuf := make([]byte, count*int(m.fullValueSize)) valuePtr := sys.NewSlicePointer(valueBuf) - nextBuf := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) attr := sys.MapLookupBatchAttr{ MapFd: m.fd.Uint(), Keys: keyPtr, Values: valuePtr, Count: uint32(count), - OutBatch: nextBuf.Pointer(), + InBatch: sys.NewSlicePointer(inBatch), + OutBatch: sys.NewSlicePointer(cursor.opaque), } if opts != nil { @@ -1026,25 +1046,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut attr.Flags = opts.Flags } - var err error - if startKey != nil { - attr.InBatch, err = marshalMapSyscallInput(startKey, int(m.keySize)) - if err != nil { - return 0, err - } - } - _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) sysErr = wrapMapError(sysErr) if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { return 0, sysErr } - err = nextBuf.Unmarshal(nextKeyOut) - if err != nil { - return 0, err - } - err = sysenc.Unmarshal(keysOut, keyBuf) + err := sysenc.Unmarshal(keysOut, keyBuf) if err != nil { return 0, err } diff --git a/map_test.go b/map_test.go index e18a1ac51..639571cbd 100644 --- a/map_test.go +++ b/map_test.go @@ -108,7 +108,6 @@ func TestBatchAPIArray(t *testing.T) { defer m.Close() var ( - nextKey uint32 keys = []uint32{0, 1} values = []uint32{42, 4242} lookupKeys = make([]uint32, 2) @@ -133,16 +132,14 @@ func TestBatchAPIArray(t *testing.T) { t.Error("Want value 42, got", v) } - count, err = m.BatchLookup(nil, &nextKey, lookupKeys, lookupValues, nil) + var cursor BatchCursor + count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) if err != nil { t.Fatalf("BatchLookup: %v", err) } if count != len(lookupKeys) { t.Fatalf("BatchLookup: returned %d results, expected %d", count, len(lookupKeys)) } - if nextKey != lookupKeys[1] { - t.Fatalf("BatchLookup: expected nextKey, %d, to be the same as the lastKey returned, %d", 
nextKey, lookupKeys[1]) - } if !reflect.DeepEqual(keys, lookupKeys) { t.Errorf("BatchUpdate and BatchLookup keys disagree: %v %v", keys, lookupKeys) } @@ -150,7 +147,8 @@ func TestBatchAPIArray(t *testing.T) { t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues) } - _, err = m.BatchLookupAndDelete(nil, &nextKey, deleteKeys, deleteValues, nil) + cursor = BatchCursor{} + _, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) if !errors.Is(err, ErrNotSupported) { t.Fatalf("BatchLookUpDelete: expected error %v, but got %v", ErrNotSupported, err) } @@ -172,7 +170,6 @@ func TestBatchAPIHash(t *testing.T) { defer m.Close() var ( - nextKey uint32 keys = []uint32{0, 1} values = []uint32{42, 4242} lookupKeys = make([]uint32, 2) @@ -197,7 +194,8 @@ func TestBatchAPIHash(t *testing.T) { t.Error("Want value 42, got", v) } - count, err = m.BatchLookup(nil, &nextKey, lookupKeys, lookupValues, nil) + var cursor BatchCursor + count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) if !errors.Is(err, ErrKeyNotExist) { t.Fatalf("BatchLookup: expected %v got %v", ErrKeyNotExist, err) } @@ -213,7 +211,8 @@ func TestBatchAPIHash(t *testing.T) { t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues) } - count, err = m.BatchLookupAndDelete(nil, &nextKey, deleteKeys, deleteValues, nil) + cursor = BatchCursor{} + count, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) if !errors.Is(err, ErrKeyNotExist) { t.Fatalf("BatchLookupAndDelete: expected %v got %v", ErrKeyNotExist, err) } @@ -345,10 +344,10 @@ func TestBatchMapWithLock(t *testing.T) { t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) } - nextKey := uint32(0) + var cursor BatchCursor lookupKeys := make([]uint32, 2) lookupValues := make([]spinLockValue, 2) - count, err = m.BatchLookup(nil, &nextKey, lookupKeys, lookupValues, &BatchOptions{ElemFlags: uint64(LookupLock)}) + count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, &BatchOptions{ElemFlags: uint64(LookupLock)}) if !errors.Is(err, ErrKeyNotExist) { t.Fatalf("BatchLookup: %v", err) } @@ -356,10 +355,10 @@ func TestBatchMapWithLock(t *testing.T) { t.Fatalf("BatchLookup: expected two keys, got %d", count) } - nextKey = uint32(0) + cursor = BatchCursor{} deleteKeys := []uint32{0, 1} deleteValues := make([]spinLockValue, 2) - count, err = m.BatchLookupAndDelete(nil, &nextKey, deleteKeys, deleteValues, nil) + count, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) if !errors.Is(err, ErrKeyNotExist) { t.Fatalf("BatchLookupAndDelete: %v", err) } @@ -2082,9 +2081,9 @@ func BenchmarkIterate(b *testing.B) { b.ReportAllocs() + var cursor BatchCursor for i := 0; i < b.N; i++ { - var next uint32 - _, err := m.BatchLookup(nil, &next, k, v, nil) + _, err := m.BatchLookup(&cursor, k, v, nil) if err != nil && !errors.Is(err, ErrKeyNotExist) { b.Fatal(err) } @@ -2104,8 +2103,8 @@ func BenchmarkIterate(b *testing.B) { } b.StartTimer() - var next uint32 - _, err := m.BatchLookupAndDelete(nil, &next, k, v, nil) + var cursor BatchCursor + _, err := m.BatchLookupAndDelete(&cursor, k, v, nil) if err != nil && !errors.Is(err, ErrKeyNotExist) { b.Fatal(err) } From 68dc4b614d48328d949d54a95ac2264516ba9920 Mon Sep 17 00:00:00 2001 From: Chris Tarazi Date: Thu, 30 Nov 2023 11:13:28 -0800 Subject: [PATCH 2/5] map: Add unit test for batch lookup with chunking Provide an example of how to use the batch lookup API when chunking, i.e. when the buffer is smaller than the full map. 
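For reference, the calling pattern this test exercises looks roughly like the
following from user code. This is only a sketch: imports and map setup are
elided, "m" stands in for any *ebpf.Map with 4-byte keys and values, and the
chunk size of 16 is an arbitrary choice by the caller.

    keys := make([]uint32, 16)   // chunk buffers sized by the caller
    values := make([]uint32, 16)

    var cursor ebpf.BatchCursor // the zero value starts a new batch operation
    for {
        n, err := m.BatchLookup(&cursor, keys, values, nil)
        // Partial results can be returned together with ErrKeyNotExist on the
        // final chunk, so consume the first n entries before checking err.
        for i := 0; i < n; i++ {
            fmt.Printf("key=%d value=%d\n", keys[i], values[i])
        }
        if errors.Is(err, ebpf.ErrKeyNotExist) {
            break // reached the end of the map
        }
        if err != nil {
            // handle the error
            break
        }
    }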
Signed-off-by: Chris Tarazi
---
 map_test.go | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)

diff --git a/map_test.go b/map_test.go
index 639571cbd..a34492c9c 100644
--- a/map_test.go
+++ b/map_test.go
@@ -154,6 +154,77 @@ func TestBatchAPIArray(t *testing.T) {
 	}
 }
 
+// TestBatchAPIArrayChunk tests chunking of the batch lookup. The important bit
+// is that prevKey must be set to nextKey to lookup the next chunk.
+func TestBatchAPIArrayChunk(t *testing.T) {
+	if err := haveBatchAPI(); err != nil {
+		t.Skipf("batch api not available: %v", err)
+	}
+	m, err := NewMap(&MapSpec{
+		Type:       Array,
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 2,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer m.Close()
+
+	var (
+		keys         = []uint32{0, 1}
+		values       = []uint32{42, 4242}
+		lookupKeys   []uint32
+		lookupValues []uint32
+	)
+
+	count, err := m.BatchUpdate(keys, values, nil)
+	if err != nil {
+		t.Fatalf("BatchUpdate: %v", err)
+	}
+	if count != len(keys) {
+		t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys))
+	}
+
+	var v uint32
+	if err := m.Lookup(uint32(0), &v); err != nil {
+		t.Fatal("Can't lookup 0:", err)
+	}
+	if v != 42 {
+		t.Error("Want value 42, got", v)
+	}
+
+	var cursor BatchCursor
+	lookupKeys = make([]uint32, 1) // cut the buffer in half.
+	lookupValues = make([]uint32, 1)
+	count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil)
+	if err != nil {
+		t.Errorf("BatchLookup: %v", err)
+	}
+	if count != 1 {
+		t.Errorf("BatchLookup: returned %d results, expected %d", count, 1)
+	}
+	if k := []uint32(keys[:1]); !reflect.DeepEqual(k, lookupKeys) {
+		t.Errorf("BatchUpdate and BatchLookup keys disagree on first chunk: %v %v", k, lookupKeys)
+	}
+	if v := []uint32(values[:1]); !reflect.DeepEqual(v, lookupValues) {
+		t.Errorf("BatchUpdate and BatchLookup values disagree on first chunk: %v %v", v, lookupValues)
+	}
+	count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil)
+	if err != nil {
+		t.Errorf("BatchLookup: %v", err)
+	}
+	if count != 1 {
+		t.Errorf("BatchLookup: returned %d results, expected %d", count, 1)
+	}
+	if k := []uint32(keys[1:]); !reflect.DeepEqual(k, lookupKeys) {
+		t.Errorf("BatchUpdate and BatchLookup keys disagree on second chunk: %v %v", k, lookupKeys)
+	}
+	if v := []uint32(values[1:]); !reflect.DeepEqual(v, lookupValues) {
+		t.Errorf("BatchUpdate and BatchLookup values disagree on second chunk: %v %v", v, lookupValues)
+	}
+}
+
 func TestBatchAPIHash(t *testing.T) {
 	if err := haveBatchAPI(); err != nil {
 		t.Skipf("batch api not available: %v", err)

From 2a6ff60a1469c2f7b643619e9d62705d8105bc85 Mon Sep 17 00:00:00 2001
From: Chris Tarazi
Date: Thu, 30 Nov 2023 11:25:23 -0800
Subject: [PATCH 3/5] map: Add warning to Batch* API operations

See https://github.com/cilium/ebpf/pull/1223#issuecomment-1832029893 for
more details.

Signed-off-by: Chris Tarazi
---
 map.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/map.go b/map.go
index ea2db5391..434cf91a4 100644
--- a/map.go
+++ b/map.go
@@ -968,6 +968,10 @@ func (m *Map) guessNonExistentKey() ([]byte, error) {
 // "cursor" to subsequent calls of this function to continue the batching
 // operation in the case of chunking.
 //
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details regarding
+// different map type implementations.
+//
 // ErrKeyNotExist is returned when the batch lookup has reached
 // the end of all possible results, even when partial results
 // are returned. It should be used to evaluate when lookup is "done".
@@ -984,6 +988,10 @@ func (m *Map) BatchLookup(cursor *BatchCursor, keysOut, valuesOut interface{}, o
 // "cursor" to subsequent calls of this function to continue the batching
 // operation in the case of chunking.
 //
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details regarding
+// different map type implementations.
+//
 // ErrKeyNotExist is returned when the batch lookup has reached
 // the end of all possible results, even when partial results
 // are returned. It should be used to evaluate when lookup is "done".

From bbcfd461d0a6b8da08b7d526456d7c22c36b974b Mon Sep 17 00:00:00 2001
From: Lorenz Bauer
Date: Fri, 1 Dec 2023 10:01:00 +0000
Subject: [PATCH 4/5] map: reduce duplication in batch API tests

Signed-off-by: Lorenz Bauer
---
 map_test.go | 250 ++++++++++++++--------------------------------------
 1 file changed, 66 insertions(+), 184 deletions(-)

diff --git a/map_test.go b/map_test.go
index a34492c9c..d10761036 100644
--- a/map_test.go
+++ b/map_test.go
@@ -6,7 +6,6 @@ import (
 	"math"
 	"os"
 	"path/filepath"
-	"reflect"
 	"sort"
 	"testing"
 	"unsafe"
@@ -92,215 +91,98 @@ func TestMap(t *testing.T) {
 	}
 }
 
-func TestBatchAPIArray(t *testing.T) {
+func TestMapBatch(t *testing.T) {
 	if err := haveBatchAPI(); err != nil {
 		t.Skipf("batch api not available: %v", err)
 	}
-	m, err := NewMap(&MapSpec{
-		Type:       Array,
-		KeySize:    4,
-		ValueSize:  4,
-		MaxEntries: 2,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer m.Close()
-
-	var (
-		keys         = []uint32{0, 1}
-		values       = []uint32{42, 4242}
-		lookupKeys   = make([]uint32, 2)
-		lookupValues = make([]uint32, 2)
-		deleteKeys   = make([]uint32, 2)
-		deleteValues = make([]uint32, 2)
-	)
 
-	count, err := m.BatchUpdate(keys, values, nil)
-	if err != nil {
-		t.Fatalf("BatchUpdate: %v", err)
-	}
-	if count != len(keys) {
-		t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys))
+	contents := map[uint32]uint32{
+		0: 42, 1: 4242, 2: 23, 3: 2323,
 	}
 
-	var v uint32
-	if err := m.Lookup(uint32(0), &v); err != nil {
-		t.Fatal("Can't lookup 0:", err)
-	}
-	if v != 42 {
-		t.Error("Want value 42, got", v)
-	}
-
-	var cursor BatchCursor
-	count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil)
+	hash, err := NewMap(&MapSpec{
+		Type:      Hash,
+		KeySize:   4,
+		ValueSize: 4,
+		// Make the map large enough to avoid ENOSPC.
+		MaxEntries: uint32(len(contents)) * 10,
+	})
 	if err != nil {
-		t.Fatalf("BatchLookup: %v", err)
-	}
-	if count != len(lookupKeys) {
-		t.Fatalf("BatchLookup: returned %d results, expected %d", count, len(lookupKeys))
-	}
-	if !reflect.DeepEqual(keys, lookupKeys) {
-		t.Errorf("BatchUpdate and BatchLookup keys disagree: %v %v", keys, lookupKeys)
-	}
-	if !reflect.DeepEqual(values, lookupValues) {
-		t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues)
-	}
-
-	cursor = BatchCursor{}
-	_, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil)
-	if !errors.Is(err, ErrNotSupported) {
-		t.Fatalf("BatchLookUpDelete: expected error %v, but got %v", ErrNotSupported, err)
+		t.Fatal(err)
 	}
-}
+	defer hash.Close()
 
-// TestBatchAPIArrayChunk tests chunking of the batch lookup. The important bit
-// is that prevKey must be set to nextKey to lookup the next chunk.
-func TestBatchAPIArrayChunk(t *testing.T) { - if err := haveBatchAPI(); err != nil { - t.Skipf("batch api not available: %v", err) - } - m, err := NewMap(&MapSpec{ + array, err := NewMap(&MapSpec{ Type: Array, KeySize: 4, ValueSize: 4, - MaxEntries: 2, + MaxEntries: 4, }) if err != nil { t.Fatal(err) } - defer m.Close() + defer array.Close() - var ( - keys = []uint32{0, 1} - values = []uint32{42, 4242} - lookupKeys []uint32 - lookupValues []uint32 - ) - - count, err := m.BatchUpdate(keys, values, nil) - if err != nil { - t.Fatalf("BatchUpdate: %v", err) - } - if count != len(keys) { - t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) - } - - var v uint32 - if err := m.Lookup(uint32(0), &v); err != nil { - t.Fatal("Can't lookup 0:", err) - } - if v != 42 { - t.Error("Want value 42, got", v) + var keys, values []uint32 + for key, value := range contents { + keys = append(keys, key) + values = append(values, value) } - var cursor BatchCursor - lookupKeys = make([]uint32, 1) // cut the buffer in half. - lookupValues = make([]uint32, 1) - count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) - if err != nil { - t.Errorf("BatchLookup: %v", err) - } - if count != 1 { - t.Errorf("BatchLookup: returned %d results, expected %d", count, 1) - } - if k := []uint32(keys[:1]); !reflect.DeepEqual(k, lookupKeys) { - t.Errorf("BatchUpdate and BatchLookup keys disagree on first chunk: %v %v", k, lookupKeys) - } - if v := []uint32(values[:1]); !reflect.DeepEqual(v, lookupValues) { - t.Errorf("BatchUpdate and BatchLookup values disagree on first chunk: %v %v", v, lookupValues) - } - count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) - if err != nil { - t.Errorf("BatchLookup: %v", err) - } - if count != 1 { - t.Errorf("BatchLookup: returned %d results, expected %d", count, 1) - } - if k := []uint32(keys[1:]); !reflect.DeepEqual(k, lookupKeys) { - t.Errorf("BatchUpdate and BatchLookup keys disagree on second chunk: %v %v", k, lookupKeys) - } - if v := []uint32(values[1:]); !reflect.DeepEqual(v, lookupValues) { - t.Errorf("BatchUpdate and BatchLookup values disagree on second chunk: %v %v", v, lookupValues) - } -} + for _, m := range []*Map{array, hash} { + t.Run(m.Type().String(), func(t *testing.T) { + count, err := m.BatchUpdate(keys, values, nil) + qt.Assert(t, err, qt.IsNil) + qt.Assert(t, count, qt.Equals, len(contents)) -func TestBatchAPIHash(t *testing.T) { - if err := haveBatchAPI(); err != nil { - t.Skipf("batch api not available: %v", err) - } - m, err := NewMap(&MapSpec{ - Type: Hash, - KeySize: 4, - ValueSize: 4, - MaxEntries: 10, - }) - if err != nil { - t.Fatal(err) - } - defer m.Close() + n := len(contents) / 2 // cut buffer in half + lookupKeys := make([]uint32, n) + lookupValues := make([]uint32, n) - var ( - keys = []uint32{0, 1} - values = []uint32{42, 4242} - lookupKeys = make([]uint32, 2) - lookupValues = make([]uint32, 2) - deleteKeys = make([]uint32, 2) - deleteValues = make([]uint32, 2) - ) + var cursor BatchCursor + var total int + for { + count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) + total += count + if errors.Is(err, ErrKeyNotExist) { + break + } + qt.Assert(t, err, qt.IsNil) - count, err := m.BatchUpdate(keys, values, nil) - if err != nil { - t.Fatalf("BatchUpdate: %v", err) - } - if count != len(keys) { - t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) - } + qt.Assert(t, count, qt.Equals, len(lookupKeys)) + for i, key := range lookupKeys { + value := lookupValues[i] + qt.Assert(t, 
value, qt.Equals, contents[key], qt.Commentf("value for key %d should match", key)) + } + } + qt.Assert(t, total, qt.Equals, len(contents)) - var v uint32 - if err := m.Lookup(uint32(0), &v); err != nil { - t.Fatal("Can't lookup 0:", err) - } - if v != 42 { - t.Error("Want value 42, got", v) - } + if m.Type() == Array { + // Array doesn't support batch delete + return + } - var cursor BatchCursor - count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) - if !errors.Is(err, ErrKeyNotExist) { - t.Fatalf("BatchLookup: expected %v got %v", ErrKeyNotExist, err) - } - if count != len(lookupKeys) { - t.Fatalf("BatchLookup: returned %d results, expected %d", count, len(lookupKeys)) - } - sort.Slice(lookupKeys, func(i, j int) bool { return lookupKeys[i] < lookupKeys[j] }) - if !reflect.DeepEqual(keys, lookupKeys) { - t.Errorf("BatchUpdate and BatchLookup keys disagree: %v %v", keys, lookupKeys) - } - sort.Slice(lookupValues, func(i, j int) bool { return lookupValues[i] < lookupValues[j] }) - if !reflect.DeepEqual(values, lookupValues) { - t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues) - } + cursor = BatchCursor{} + total = 0 + for { + count, err = m.BatchLookupAndDelete(&cursor, lookupKeys, lookupValues, nil) + total += count + if errors.Is(err, ErrKeyNotExist) { + break + } + qt.Assert(t, err, qt.IsNil) - cursor = BatchCursor{} - count, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) - if !errors.Is(err, ErrKeyNotExist) { - t.Fatalf("BatchLookupAndDelete: expected %v got %v", ErrKeyNotExist, err) - } - if count != len(deleteKeys) { - t.Fatalf("BatchLookupAndDelete: returned %d results, expected %d", count, len(deleteKeys)) - } - sort.Slice(deleteKeys, func(i, j int) bool { return deleteKeys[i] < deleteKeys[j] }) - if !reflect.DeepEqual(keys, deleteKeys) { - t.Errorf("BatchUpdate and BatchLookupAndDelete keys disagree: %v %v", keys, deleteKeys) - } - sort.Slice(deleteValues, func(i, j int) bool { return deleteValues[i] < deleteValues[j] }) - if !reflect.DeepEqual(values, deleteValues) { - t.Errorf("BatchUpdate and BatchLookupAndDelete values disagree: %v %v", values, deleteValues) - } + qt.Assert(t, count, qt.Equals, len(lookupKeys)) + for i, key := range lookupKeys { + value := lookupValues[i] + qt.Assert(t, value, qt.Equals, contents[key], qt.Commentf("value for key %d should match", key)) + } + } + qt.Assert(t, total, qt.Equals, len(contents)) - if err := m.Lookup(uint32(0), &v); !errors.Is(err, ErrKeyNotExist) { - t.Fatalf("Lookup should have failed with error, %v, instead error is %v", ErrKeyNotExist, err) + var v uint32 + qt.Assert(t, m.Lookup(uint32(0), &v), qt.ErrorIs, ErrKeyNotExist) + }) } } From b74558d754b85d3ad0824b273e28032f9dc03e3f Mon Sep 17 00:00:00 2001 From: Lorenz Bauer Date: Fri, 1 Dec 2023 10:26:10 +0000 Subject: [PATCH 5/5] map: prevent BatchCursor reuse across maps Signed-off-by: Lorenz Bauer --- map.go | 27 ++++++++++++++++++++------- map_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/map.go b/map.go index 434cf91a4..4b8d9e94a 100644 --- a/map.go +++ b/map.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "math" "math/rand" "os" "path/filepath" @@ -1001,20 +1000,34 @@ func (m *Map) BatchLookupAndDelete(cursor *BatchCursor, keysOut, valuesOut inter // BatchCursor represents a starting point for a batch operation. 
type BatchCursor struct { - opaque []byte // len() is max(key_size, 4) to be on the safe side + m *Map + opaque []byte } func (m *Map) batchLookup(cmd sys.Cmd, cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - var inBatch []byte - if cursor.opaque == nil { + cursorLen := int(m.keySize) + if cursorLen < 4 { // * generic_map_lookup_batch requires that batch_out is key_size bytes. // This is used by array and LPM maps. // // * __htab_map_lookup_and_delete_batch requires u32. This is used by the // various hash maps. - cursor.opaque = make([]byte, int(math.Max(float64(m.keySize), 4))) - } else { - inBatch = cursor.opaque + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. + cursorLen = 4 + } + + inBatch := cursor.opaque + if inBatch == nil { + // This is the first lookup, allocate a buffer to hold the cursor. + cursor.opaque = make([]byte, cursorLen) + cursor.m = m + } else if cursor.m != m { + // Prevent reuse of a cursor across maps. First, it's unlikely to work. + // Second, the maps may require different cursorLen and cursor.opaque + // may therefore be too short. This could lead to the kernel clobbering + // user space memory. + return 0, errors.New("a cursor may not be reused across maps") } if err := haveBatchAPI(); err != nil { diff --git a/map_test.go b/map_test.go index d10761036..bb3ad081c 100644 --- a/map_test.go +++ b/map_test.go @@ -186,6 +186,37 @@ func TestMapBatch(t *testing.T) { } } +func TestMapBatchCursorReuse(t *testing.T) { + spec := &MapSpec{ + Type: Array, + KeySize: 4, + ValueSize: 4, + MaxEntries: 4, + } + + arr1, err := NewMap(spec) + if err != nil { + t.Fatal(err) + } + defer arr1.Close() + + arr2, err := NewMap(spec) + if err != nil { + t.Fatal(err) + } + defer arr2.Close() + + tmp := make([]uint32, 2) + + var cursor BatchCursor + _, err = arr1.BatchLookup(&cursor, tmp, tmp, nil) + testutils.SkipIfNotSupported(t, err) + qt.Assert(t, err, qt.IsNil) + + _, err = arr2.BatchLookup(&cursor, tmp, tmp, nil) + qt.Assert(t, err, qt.IsNotNil) +} + func TestMapLookupKeyTooSmall(t *testing.T) { m := createArray(t) defer m.Close()
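With this guard in place, callers keep one cursor per map. A minimal sketch of
the resulting pattern (imports and error handling elided; "mapA" and "mapB" are
assumed to be *ebpf.Map values with 4-byte keys and values, and the chunk size
of 8 is arbitrary):

    keys := make([]uint32, 8)
    values := make([]uint32, 8)

    var cursorA, cursorB ebpf.BatchCursor // one cursor per map
    if _, err := mapA.BatchLookup(&cursorA, keys, values, nil); err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
        return err
    }
    // Passing &cursorA here instead of &cursorB would now fail with
    // "a cursor may not be reused across maps".
    if _, err := mapB.BatchLookup(&cursorB, keys, values, nil); err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
        return err
    }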