// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Facebook
 */
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <sys/wait.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(4)
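/* Note: LOCAL_FREE_TARGET and PERCPU_FREE_TARGET above are the free-node
 * batch sizes the scenarios below assume.  They are intended to mirror the
 * targets used by the kernel's common-LRU and per-CPU (BPF_F_NO_COMMON_LRU)
 * LRU implementations; if those internal targets change, the comments about
 * exactly which key ranges get evicted no longer hold.
 */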
static int nr_cpus;

static int create_map(int map_type, int map_flags, unsigned int size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
	int map_fd;

	map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
				sizeof(unsigned long long), size, &opts);
	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}
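/* Lookups done through the bpf() syscall do not set an LRU element's
 * reference bit, so they do not protect the element from eviction.  To
 * exercise the "referenced" path, the helper below loads a one-off BPF
 * program that performs bpf_map_lookup_elem() in kernel context (which does
 * set the ref bit), copies the looked-up value into a scratch array map, and
 * returns 42 on a hit (1 on a miss) so the caller can read the value back
 * through the syscall.
 */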
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
					    void *value)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, fd),
		BPF_LD_IMM64(BPF_REG_3, key),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	__u8 data[64] = {};
	int mfd, pfd, ret, zero = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);

	/* Scratch array map that receives the looked-up value */
	mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL);
	if (mfd < 0)
		return -1;

	/* Patch the array map fd into the BPF_LD_MAP_VALUE instruction */
	insns[0].imm = mfd;

	pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL);
	if (pfd < 0) {
		close(mfd);
		return -1;
	}

	ret = bpf_prog_test_run_opts(pfd, &topts);
	if (ret < 0 || topts.retval != 42) {
		ret = -1;
	} else {
		assert(!bpf_map_lookup_elem(mfd, &zero, value));
		ret = 0;
	}

	close(pfd);
	close(mfd);
	return ret;
}
static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup_elem(map1, &next_key, value1));
		ret = bpf_map_lookup_elem(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}
static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
static int sched_next_online(int pid, int *next_to_try)
{
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = 0;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}
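/* Every test first pins itself to one online CPU via sched_next_online().
 * LRU maps keep per-CPU local free lists (and, with BPF_F_NO_COMMON_LRU,
 * fully per-CPU LRU lists), so the eviction behaviour asserted below is only
 * deterministic when all inserts and lookups happen from the same CPU.
 */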
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate map.  Only key=1 and key=3 are found.
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* an unknown flags value is rejected */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* lookup elem key=1 and delete it, then check it doesn't exist */
	key = 1;
	assert(!bpf_map_lookup_and_delete_elem(lru_map_fd, &key, &value));
	assert(value[0] == 1234);

	/* remove the same element from the expected map */
	assert(!bpf_map_delete_elem(expected_map_fd, &key));

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => keys 1+tgt_free/2 to tgt_free will be removed by LRU
 */
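/* Concretely, with the default common-LRU target (tgt_free =
 * LOCAL_FREE_TARGET = 128): the map holds 192 elements; keys 1..128 are
 * inserted; keys 1..64 are looked up from a BPF program so their ref bit is
 * set; inserting keys 129..256 then forces a shrink that evicts the
 * unreferenced keys 65..128.
 */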
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 *   => keys 1+tgt_free/2 to tgt_free will be
	 *      removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Keys 1+tgt_free to tgt_free*3/2
 *      will be removed from the LRU because they have never
 *      been looked up and their ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem will first need to acquire a new
	 * node from the LRU.
	 *
	 * The local list is running out of free nodes.
	 * It gets from the global LRU list which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest keys 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2*tgt_free
 * It tests the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => keys 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* key=1 already exists, so BPF_NOEXIST must fail */
	key = 1;
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* delete keys (tgt_free, 2*tgt_free]; a second delete must fail */
	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by previous CPU can be found */
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}
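/* test_lru_sanity5() below walks the online CPUs: for each CPU it pins itself
 * there with sched_next_online(), forks a child (which inherits the CPU
 * affinity), and lets the child run do_test_lru_sanity5(), so the
 * single-element map is exercised from every CPU in turn.
 */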
/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid = fork();

		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);

	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}
/* Test list rotation for BPF_F_NO_COMMON_LRU map */
static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned int map_size = tgt_free * 2;
	int next_cpu = 0;

	if (!(map_flags & BPF_F_NO_COMMON_LRU))
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus);
	assert(lru_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 2; key++) {
		unsigned long long stable_key;

		/* Make ref bit sticky for key: [1, tgt_free] */
		for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
			/* Mark the ref bit */
			assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
								 stable_key, value));
		}
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 3; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1 (datapath)
 * Lookup key=2 (syscall)
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate map.  Only key=1 and key=3 are found.
 */
static void test_lru_sanity7(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found, but do _not_ mark the ref bit;
	 * it will be evicted on the next update.
	 */
	key = 2;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1 (syscall)
 * Lookup key=2 (datapath)
 * Add key=3
 *   => key=1 will be removed by LRU
 * Iterate map.  Only key=2 and key=3 are found.
 */
static void test_lru_sanity8(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found, but do _not_ mark the ref bit;
	 * it will be evicted on the next update.
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and mark the ref bit to
	 * stop LRU from removing key=2
	 */
	key = 2;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=1 has been removed from the LRU */
	key = 1;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
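/* main() runs every test for each combination of map type (LRU_HASH,
 * LRU_PERCPU_HASH) and map flag (common LRU vs. BPF_F_NO_COMMON_LRU).  With
 * BPF_F_NO_COMMON_LRU each CPU owns its own LRU list, so the tests use the
 * much smaller PERCPU_FREE_TARGET and size the maps by nr_cpus to keep the
 * per-CPU eviction behaviour observable from a single pinned CPU.
 */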
int main(int argc, char **argv)
{
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < ARRAY_SIZE(map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);
			test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity7(map_types[t], map_flags[f]);
			test_lru_sanity8(map_types[t], map_flags[f]);

			printf("\n");
		}
	}

	return 0;
}