// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../test_kmods/bpf_testmod_kfunc.h"
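
/*
 * Value type embedded in every map below. An __kptr_untrusted field can be
 * loaded and stored directly without holding a reference on the object; an
 * __kptr field owns a reference, so the pointer must be moved in and out
 * with bpf_kptr_xchg().
 */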
struct map_value {
        struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
        struct prog_test_ref_kfunc __kptr *ref_ptr;
};
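
/*
 * The same map_value is wired into every map flavor that supports kptrs:
 * array and hash maps (preallocated and BPF_F_NO_PREALLOC), their LRU and
 * per-CPU variants, local-storage maps, and map-in-map combinations.
 */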
struct array_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} array_map SEC(".maps");

struct pcpu_array_map {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} pcpu_array_map SEC(".maps");

struct hash_map {
        __uint(type, BPF_MAP_TYPE_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} hash_map SEC(".maps");

struct pcpu_hash_map {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} pcpu_hash_map SEC(".maps");

struct hash_malloc_map {
        __uint(type, BPF_MAP_TYPE_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
        __uint(map_flags, BPF_F_NO_PREALLOC);
} hash_malloc_map SEC(".maps");

struct pcpu_hash_malloc_map {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
        __uint(map_flags, BPF_F_NO_PREALLOC);
} pcpu_hash_malloc_map SEC(".maps");

struct lru_hash_map {
        __uint(type, BPF_MAP_TYPE_LRU_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} lru_hash_map SEC(".maps");

struct lru_pcpu_hash_map {
        __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
        __type(key, int);
        __type(value, struct map_value);
        __uint(max_entries, 1);
} lru_pcpu_hash_map SEC(".maps");

struct cgrp_ls_map {
        __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct map_value);
} cgrp_ls_map SEC(".maps");

struct task_ls_map {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct map_value);
} task_ls_map SEC(".maps");

struct inode_ls_map {
        __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct map_value);
} inode_ls_map SEC(".maps");

struct sk_ls_map {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct map_value);
} sk_ls_map SEC(".maps");
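
/*
 * Map-in-map combinations: __array(values, ...) declares the inner map
 * type, and the static initializer for slot 0 lets libbpf create and wire
 * up the inner map at load time.
 */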
#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)      \
        struct {                                                \
                __uint(type, map_type);                         \
                __uint(max_entries, 1);                         \
                __uint(key_size, sizeof(int));                  \
                __uint(value_size, sizeof(int));                \
                __array(values, struct inner_map_type);         \
        } name SEC(".maps") = {                                 \
                .values = { [0] = &inner_map_type },            \
        }

DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);
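
/*
 * Local equivalent of the kernel's WRITE_ONCE(): the volatile store keeps
 * the compiler from tearing or dropping the kptr stores under test.
 */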
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))

static void test_kptr_unref(struct map_value *v)
{
        struct prog_test_ref_kfunc *p;

        p = v->unref_ptr;
        /* store untrusted_ptr_or_null_ */
        WRITE_ONCE(v->unref_ptr, p);
        if (!p)
                return;
        if (p->a + p->b > 100)
                return;
        /* store untrusted_ptr_ */
        WRITE_ONCE(v->unref_ptr, p);
        /* store NULL */
        WRITE_ONCE(v->unref_ptr, NULL);
}
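
/*
 * Referenced kptrs: ownership only moves between the program and the map
 * value via bpf_kptr_xchg(), and whatever pointer the program is left
 * holding must be released before returning.
 */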
static void test_kptr_ref(struct map_value *v)
{
        struct prog_test_ref_kfunc *p;

        p = v->ref_ptr;
        /* store ptr_or_null_ */
        WRITE_ONCE(v->unref_ptr, p);
        if (!p)
                return;
        /*
         * p is rcu_ptr_prog_test_ref_kfunc because the prog is
         * non-sleepable and runs in an RCU critical section, so p can be
         * passed to kfuncs that require KF_RCU.
         */
        bpf_kfunc_call_test_ref(p);
        if (p->a + p->b > 100)
                return;
        /* store NULL */
        p = bpf_kptr_xchg(&v->ref_ptr, NULL);
        if (!p)
                return;
        /*
         * p is trusted_ptr_prog_test_ref_kfunc and can likewise be passed
         * to kfuncs that require KF_RCU.
         */
        bpf_kfunc_call_test_ref(p);
        if (p->a + p->b > 100) {
                bpf_kfunc_call_test_release(p);
                return;
        }
        /* store ptr_ */
        WRITE_ONCE(v->unref_ptr, p);
        bpf_kfunc_call_test_release(p);

        p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
        if (!p)
                return;
        /* store ptr_ */
        p = bpf_kptr_xchg(&v->ref_ptr, p);
        if (!p)
                return;
        if (p->a + p->b > 100) {
                bpf_kfunc_call_test_release(p);
                return;
        }
        bpf_kfunc_call_test_release(p);
}
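
/* Exercise both kptr flavors against a single map value. */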
static void test_kptr(struct map_value *v)
{
        test_kptr_unref(v);
        test_kptr_ref(v);
}

SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
        struct map_value *v;
        int key = 0;

#define TEST(map)                                       \
        v = bpf_map_lookup_elem(&map, &key);            \
        if (!v)                                         \
                return 0;                               \
        test_kptr(v)

        TEST(array_map);
        TEST(hash_map);
        TEST(hash_malloc_map);
        TEST(lru_hash_map);
        TEST(pcpu_array_map);
        TEST(pcpu_hash_map);

#undef TEST
        return 0;
}
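
/*
 * The local-storage variants run the same checks; the storage is created
 * on demand via BPF_LOCAL_STORAGE_GET_F_CREATE.
 */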
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
{
        struct map_value *v;

        v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (v)
                test_kptr(v);
        return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
{
        struct task_struct *task;
        struct map_value *v;

        task = bpf_get_current_task_btf();
        if (!task)
                return 0;
        v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (v)
                test_kptr(v);
        return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
{
        struct map_value *v;

        v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (v)
                test_kptr(v);
        return 0;
}

SEC("tc")
int test_sk_map_kptr(struct __sk_buff *ctx)
{
        struct map_value *v;
        struct bpf_sock *sk;

        sk = ctx->sk;
        if (!sk)
                return 0;
        v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (v)
                test_kptr(v);
        return 0;
}
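
/* Same checks again, this time reaching the value through a map-of-maps. */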
SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
        struct map_value *v;
        int key = 0;
        void *map;

#define TEST(map_in_map)                                \
        map = bpf_map_lookup_elem(&map_in_map, &key);   \
        if (!map)                                       \
                return 0;                               \
        v = bpf_map_lookup_elem(map, &key);             \
        if (!v)                                         \
                return 0;                               \
        test_kptr(v)

        TEST(array_of_array_maps);
        TEST(array_of_hash_maps);
        TEST(array_of_hash_malloc_maps);
        TEST(array_of_lru_hash_maps);
        TEST(array_of_pcpu_array_maps);
        TEST(array_of_pcpu_hash_maps);
        TEST(hash_of_array_maps);
        TEST(hash_of_hash_maps);
        TEST(hash_of_hash_malloc_maps);
        TEST(hash_of_lru_hash_maps);
        TEST(hash_of_pcpu_array_maps);
        TEST(hash_of_pcpu_hash_maps);

#undef TEST
        return 0;
}
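
/*
 * The _pre/_post helpers below track the expected refcount of the shared
 * test object in `ref`, so that a later program run can verify that a kptr
 * left in a map still pins exactly one reference.
 */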
int ref = 1;

static __always_inline
int test_map_kptr_ref_pre(struct map_value *v)
{
        struct prog_test_ref_kfunc *p, *p_st;
        unsigned long arg = 0;
        int ret;

        p = bpf_kfunc_call_test_acquire(&arg);
        if (!p)
                return 1;
        ref++;

        p_st = p->next;
        if (p_st->cnt.refs.counter != ref) {
                ret = 2;
                goto end;
        }

        p = bpf_kptr_xchg(&v->ref_ptr, p);
        if (p) {
                ret = 3;
                goto end;
        }
        if (p_st->cnt.refs.counter != ref)
                return 4;

        p = bpf_kptr_xchg(&v->ref_ptr, NULL);
        if (!p)
                return 5;
        bpf_kfunc_call_test_release(p);
        ref--;
        if (p_st->cnt.refs.counter != ref)
                return 6;

        p = bpf_kfunc_call_test_acquire(&arg);
        if (!p)
                return 7;
        ref++;
        p = bpf_kptr_xchg(&v->ref_ptr, p);
        if (p) {
                ret = 8;
                goto end;
        }
        if (p_st->cnt.refs.counter != ref)
                return 9;
        /* Leave the acquired reference in the map for the _post check. */

        return 0;
end:
        ref--;
        bpf_kfunc_call_test_release(p);
        return ret;
}
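
/* Verify the reference test_map_kptr_ref_pre() left behind in the map. */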
static __always_inline
int test_map_kptr_ref_post(struct map_value *v)
{
        struct prog_test_ref_kfunc *p, *p_st;

        p_st = v->ref_ptr;
        if (!p_st || p_st->cnt.refs.counter != ref)
                return 1;

        p = bpf_kptr_xchg(&v->ref_ptr, NULL);
        if (!p)
                return 2;
        if (p_st->cnt.refs.counter != ref) {
                bpf_kfunc_call_test_release(p);
                return 3;
        }

        p = bpf_kptr_xchg(&v->ref_ptr, p);
        if (p) {
                bpf_kfunc_call_test_release(p);
                return 4;
        }
        if (p_st->cnt.refs.counter != ref)
                return 5;

        return 0;
}

#define TEST(map)                                       \
        v = bpf_map_lookup_elem(&map, &key);            \
        if (!v)                                         \
                return -1;                              \
        ret = test_map_kptr_ref_pre(v);                 \
        if (ret)                                        \
                return ret;

#define TEST_PCPU(map)                                  \
        v = bpf_map_lookup_percpu_elem(&map, &key, 0);  \
        if (!v)                                         \
                return -1;                              \
        ret = test_map_kptr_ref_pre(v);                 \
        if (ret)                                        \
                return ret;

SEC("tc")
int test_map_kptr_ref1(struct __sk_buff *ctx)
{
        struct map_value *v, val = {};
        int key = 0, ret;

        bpf_map_update_elem(&hash_map, &key, &val, 0);
        bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
        bpf_map_update_elem(&lru_hash_map, &key, &val, 0);

        bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
        bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
        bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);

        TEST(array_map);
        TEST(hash_map);
        TEST(hash_malloc_map);
        TEST(lru_hash_map);

        TEST_PCPU(pcpu_array_map);
        TEST_PCPU(pcpu_hash_map);
        TEST_PCPU(pcpu_hash_malloc_map);
        TEST_PCPU(lru_pcpu_hash_map);

        return 0;
}

#undef TEST
#undef TEST_PCPU

#define TEST(map)                                       \
        v = bpf_map_lookup_elem(&map, &key);            \
        if (!v)                                         \
                return -1;                              \
        ret = test_map_kptr_ref_post(v);                \
        if (ret)                                        \
                return ret;

#define TEST_PCPU(map)                                  \
        v = bpf_map_lookup_percpu_elem(&map, &key, 0);  \
        if (!v)                                         \
                return -1;                              \
        ret = test_map_kptr_ref_post(v);                \
        if (ret)                                        \
                return ret;

SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
        struct map_value *v;
        int key = 0, ret;

        TEST(array_map);
        TEST(hash_map);
        TEST(hash_malloc_map);
        TEST(lru_hash_map);

        TEST_PCPU(pcpu_array_map);
        TEST_PCPU(pcpu_hash_map);
        TEST_PCPU(pcpu_hash_malloc_map);
        TEST_PCPU(lru_pcpu_hash_map);

        return 0;
}

#undef TEST
#undef TEST_PCPU

SEC("tc")
int test_map_kptr_ref3(struct __sk_buff *ctx)
{
        struct prog_test_ref_kfunc *p;
        unsigned long sp = 0;

        p = bpf_kfunc_call_test_acquire(&sp);
        if (!p)
                return 1;
        ref++;
        if (p->cnt.refs.counter != ref) {
                bpf_kfunc_call_test_release(p);
                return 2;
        }
        bpf_kfunc_call_test_release(p);
        ref--;
        return 0;
}
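
/*
 * Task local storage variants of the pre/post checks, run as syscall
 * programs; the _del variant deletes the storage, which should drop the
 * stashed reference with it.
 */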
SEC("syscall")
int test_ls_map_kptr_ref1(void *ctx)
{
        struct task_struct *current;
        struct map_value *v;

        current = bpf_get_current_task_btf();
        if (!current)
                return 100;
        v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
        if (v)
                return 150;
        v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (!v)
                return 200;
        return test_map_kptr_ref_pre(v);
}

SEC("syscall")
int test_ls_map_kptr_ref2(void *ctx)
{
        struct task_struct *current;
        struct map_value *v;

        current = bpf_get_current_task_btf();
        if (!current)
                return 100;
        v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
        if (!v)
                return 200;
        return test_map_kptr_ref_post(v);
}

SEC("syscall")
int test_ls_map_kptr_ref_del(void *ctx)
{
        struct task_struct *current;
        struct map_value *v;

        current = bpf_get_current_task_btf();
        if (!current)
                return 100;
        v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
        if (!v)
                return 200;
        if (!v->ref_ptr)
                return 300;
        return bpf_task_storage_delete(&task_ls_map, current);
}

char _license[] SEC("license") = "GPL";