// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */

#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})

#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

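/*
 * Both macros are GNU statement expressions that return 0 from the
 * *enclosing* checker on failure; g_line records the source line of the
 * first failed VERIFY(), so the userspace side of the test can read these
 * globals back and pinpoint which assertion tripped.
 */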
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

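/*
 * This struct (and the per-map-type structs below) only mirrors the
 * fields the test reads from the kernel-internal definitions.
 * preserve_access_index makes every field access CO-RE-relocatable, so
 * libbpf rewrites the offsets against the running kernel's BTF at load
 * time instead of trusting this local layout.
 */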
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;

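/*
 * bpf_map_sum_elem_count() is a kfunc, not a UAPI helper; the __ksym
 * declaration is resolved against the kernel's BTF at load time, so this
 * program only loads on kernels that export the kfunc.
 */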
static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
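	/*
	 * 64 is an implementation detail: elem_size is expected to be
	 * sizeof(struct htab_elem) plus the rounded-up key and value
	 * storage for a 4-byte key and 4-byte value, so this check would
	 * break if the kernel's htab_elem layout changes.
	 */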
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	VERIFY(bpf_map_sum_elem_count(map) == 0);

	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);
	VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

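	/*
	 * max_entries is read through the map pointer at run time, so the
	 * extra "i < LOOP_BOUND" conjunct gives the verifier a constant
	 * loop bound; the _Static_assert at the top keeps that bound from
	 * truncating the walk.
	 */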
	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		__u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key_hdr trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

#define INNER_MAX_ENTRIES 1234

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, INNER_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, INNER_MAX_ENTRIES);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

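/*
 * The __array(values, ...) member declares the inner-map template the
 * verifier checks inserted maps against; the initializer above
 * pre-populates slot 0 with inner_map at load time, which is what
 * check_array_of_maps() looks up below.
 */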
static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
	struct bpf_array *inner_map;
	int key = 0;

	VERIFY(check_default(&array_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(array_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
	struct bpf_htab *inner_map;
	int key = 2;

	VERIFY(check_default(&hash_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

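/*
 * Cgroup storage maps take no max_entries: entries exist per attached
 * cgroup, which is why the check below passes 0 for max_entries.
 */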
static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

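/*
 * Queue and stack maps are keyless (elements are pushed and popped in
 * order), so the checks below expect a key_size of 0.
 */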
static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

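/*
 * For ringbuf maps, max_entries is the size of the data area in bytes
 * and must be a page-aligned power of two; userspace sizes the map (and
 * fills in the page_size global above) before loading the program.
 */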
static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

	return 1;
}

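/*
 * Entry point. The userspace test attaches this as a cgroup egress
 * program; each VERIFY_TYPE() sets g_map_type so the shared field checks
 * know which map type is under test, then runs one per-type checker.
 */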
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

char _license[] SEC("license") = "GPL";