// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Facebook
 */
#define _GNU_SOURCE

#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>

#include <sys/wait.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"
#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(4)
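
/* The two free targets above match the kernel's LRU shrink targets of the
 * same names (see kernel/bpf/bpf_lru_list.h); the tests below rely on them
 * when predicting which keys get evicted.
 */

static int nr_cpus;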

static int create_map(int map_type, int map_flags, unsigned int size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
	int map_fd;

	map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
				sizeof(unsigned long long), size, &opts);
	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}
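
/* Look up @key from a BPF program (the "datapath") instead of through the
 * bpf(2) syscall.  A datapath lookup sets the element's LRU ref bit, which
 * protects it from eviction; a syscall lookup does not.  The looked-up value
 * is copied into a one-element array map so user space can read it back.
 * Returns 0 on success, -1 on failure.
 */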
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
					    void *value)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, fd),
		BPF_LD_IMM64(BPF_REG_3, key),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	__u8 data[64] = {};
	int mfd, pfd, ret, zero = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);

	mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL);
	if (mfd < 0)
		return -1;

	insns[0].imm = mfd;

	pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL);
	if (pfd < 0) {
		close(mfd);
		return -1;
	}

	ret = bpf_prog_test_run_opts(pfd, &topts);
	if (ret < 0 || topts.retval != 42) {
		ret = -1;
	} else {
		assert(!bpf_map_lookup_elem(mfd, &zero, value));
		ret = 0;
	}

	close(pfd);
	close(mfd);
	return ret;
}

static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup_elem(map1, &next_key, value1));
		ret = bpf_map_lookup_elem(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}

static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
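
/* Pin the calling process to the next CPU that will take it, starting the
 * search at *next_to_try.  The tests pin themselves to a single CPU so that
 * all map updates go through that CPU's local LRU free list.
 */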
static int sched_next_online(int pid, int *next_to_try)
{
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = 0;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}

/* Size of the LRU map is 2
 * Add key=1, add key=2, lookup key=1, then add key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only key=1 and key=3 should be found.
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	/* insert key=1 element */
	value[0] = 1234;
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);
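
	/* Adding key=3 to the now-full map forces the LRU to evict an
	 * element; key=1 was just referenced (ref bit set), so key=2 is
	 * the victim.
	 */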
	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* lookup elem key=1 and delete it, then check it doesn't exist */
	key = 1;
	assert(!bpf_map_lookup_and_delete_elem(lru_map_fd, &key, &value));
	assert(value[0] == 1234);

	/* remove the same element from the expected map */
	assert(!bpf_map_delete_elem(expected_map_fd, &key));

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => 1+tgt_free/2 to tgt_free will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 * => 1+tgt_free/2 to tgt_free will be
	 * removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Key 1+tgt_free to tgt_free*3/2
 *      will be removed from LRU because it has never
 *      been looked up and its ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem now has to acquire a new node
	 * from the LRU first.
	 *
	 * The local free list is running out of nodes, so it refills
	 * from the global LRU list, which tries to shrink the inactive
	 * list to get tgt_free free nodes.
	 *
	 * Hence, the oldest keys, 1 to tgt_free/2,
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2*tgt_free
 * It tests the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);
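
	/* The lookups below set the ref bit on keys 1 to tgt_free*3/2.
	 * When the extra half-batch is inserted, the shrink rotates those
	 * referenced entries to the active list instead of freeing them,
	 * so only the never-referenced keys 1+tgt_free*3/2 to 2*tgt_free
	 * are evicted.
	 */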

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
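
/* Test deletion: after half of the keys are deleted, new insertions should
 * reuse the freed slots instead of evicting the keys that were looked up
 * (and therefore referenced) earlier.
 */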
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
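
	/* Re-adding an existing key with BPF_NOEXIST must fail */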
	key = 1;
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
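
/* Body run in the forked child of test_lru_sanity5 after it has been pinned
 * to one CPU: the key inserted by the previous CPU must still be visible,
 * and inserting one more key evicts it, since the map holds a single element.
 */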
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by the previous CPU can be found */
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));

	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}

/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}

/* Test list rotation for BPF_F_NO_COMMON_LRU map */
static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned int map_size = tgt_free * 2;
	int next_cpu = 0;

	if (!(map_flags & BPF_F_NO_COMMON_LRU))
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus);
	assert(lru_map_fd != -1);
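
	/* With BPF_F_NO_COMMON_LRU every CPU gets its own LRU list, and
	 * max_entries is split across CPUs, so the map is created with
	 * map_size * nr_cpus slots while the test stays pinned to one CPU.
	 */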

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 2; key++) {
		unsigned long long stable_key;

		/* Make ref bit sticky for key: [1, tgt_free] */
		for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
			/* Mark the ref bit */
			assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
								 stable_key, value));
		}
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 3; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2
 * Add key=1 and key=2
 * Lookup Key=1 (datapath)
 * Lookup Key=2 (syscall)
 * Add key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only key=1 and key=3 should be found.
 */
static void test_lru_sanity7(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);
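
	/* A lookup from a BPF program (datapath) sets the LRU ref bit on the
	 * element; a lookup through the bpf(2) syscall does not.  key=2
	 * therefore stays eviction-eligible even though it is read more
	 * recently than key=1.
	 */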

	/* insert key=1 element */
	value[0] = 1234;
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 2;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2
 * Add key=1 and key=2
 * Lookup Key=1 (syscall)
 * Lookup Key=2 (datapath)
 * Add key=3
 *   => Key=1 will be removed by LRU
 * Iterate map.  Only key=2 and key=3 should be found.
 */
static void test_lru_sanity8(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);
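
	/* Mirror of test_lru_sanity7: this time the syscall lookup hits
	 * key=1 and the datapath lookup hits key=2, so key=1 is the one
	 * without a ref bit and becomes the eviction victim.
	 */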

	/* insert key=1 element */
	value[0] = 1234;
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and mark the ref bit to
	 * stop LRU from removing key=2
	 */
	key = 2;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=1 has been removed from the LRU */
	key = 1;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

int main(int argc, char **argv)
{
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
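
		/* Use the free target that matches the LRU flavour under
		 * test: the shared (common) list shrinks toward
		 * LOCAL_FREE_TARGET, the per-CPU lists toward
		 * PERCPU_FREE_TARGET.
		 */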
		for (t = 0; t < ARRAY_SIZE(map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);
			test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity7(map_types[t], map_flags[f]);
			test_lru_sanity8(map_types[t], map_flags[f]);

			printf("\n");
		}
	}

	printf("test_lru_map:OK\n");

	return 0;
}