// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"
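/* Exercise the BPF memory allocator: allocate and free batches of objects of
 * every supported size through bpf_obj_new_impl()/bpf_obj_drop() and their
 * per-cpu variants, stashing each object in an array map via bpf_kptr_xchg().
 */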
struct generic_map_value {
        void *data;
};

char _license[] SEC("license") = "GPL";
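/* The BTF id arrays are const volatile so the userspace side of the test can
 * write the BTF id of each bin_data_##size type into them before the skeleton
 * is loaded.
 */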
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

int err = 0;
u32 pid = 0;
#define DEFINE_ARRAY_WITH_KPTR(_size) \
        struct bin_data_##_size { \
                char data[_size - sizeof(void *)]; \
        }; \
        /* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */ \
        struct bin_data_##_size *__bin_data_##_size; \
        struct map_value_##_size { \
                struct bin_data_##_size __kptr * data; \
        }; \
        struct { \
                __uint(type, BPF_MAP_TYPE_ARRAY); \
                __type(key, int); \
                __type(value, struct map_value_##_size); \
                __uint(max_entries, 128); \
        } array_##_size SEC(".maps")
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
        struct percpu_bin_data_##_size { \
                char data[_size]; \
        }; \
        struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
        struct map_value_percpu_##_size { \
                struct percpu_bin_data_##_size __percpu_kptr * data; \
        }; \
        struct { \
                __uint(type, BPF_MAP_TYPE_ARRAY); \
                __type(key, int); \
                __type(value, struct map_value_percpu_##_size); \
                __uint(max_entries, 128); \
        } array_percpu_##_size SEC(".maps")
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
        struct generic_map_value *value;
        unsigned int i, key;
        void *old, *new;

        for (i = 0; i < batch; i++) {
                key = i;
                value = bpf_map_lookup_elem(map, &key);
                if (!value) {
                        err = 1;
                        return;
                }
                new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
                if (!new) {
                        err = 2;
                        return;
                }
                /* Each slot starts empty, so the exchanged-out kptr must be NULL */
                old = bpf_kptr_xchg(&value->data, new);
                if (old) {
                        bpf_obj_drop(old);
                        err = 3;
                        return;
                }
        }
}
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
        struct generic_map_value *value;
        unsigned int i, key;
        void *old;

        for (i = 0; i < batch; i++) {
                key = i;
                value = bpf_map_lookup_elem(map, &key);
                if (!value) {
                        err = 4;
                        return;
                }
                /* batch_alloc() must have filled every slot */
                old = bpf_kptr_xchg(&value->data, NULL);
                if (!old) {
                        err = 5;
                        return;
                }
                bpf_obj_drop(old);
        }
}
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
                                               unsigned int idx)
{
        struct generic_map_value *value;
        unsigned int i, key;
        void *old, *new;

        for (i = 0; i < batch; i++) {
                key = i;
                value = bpf_map_lookup_elem(map, &key);
                if (!value) {
                        err = 1;
                        return;
                }
                /* per-cpu allocator may not be able to refill in time */
                new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
                if (!new)
                        continue;

                old = bpf_kptr_xchg(&value->data, new);
                if (old) {
                        bpf_percpu_obj_drop(old);
                        err = 2;
                        return;
                }
        }
}
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
                                              unsigned int idx)
{
        struct generic_map_value *value;
        unsigned int i, key;
        void *old;

        for (i = 0; i < batch; i++) {
                key = i;
                value = bpf_map_lookup_elem(map, &key);
                if (!value) {
                        err = 3;
                        return;
                }
                /* Slot may be empty if the per-cpu allocation was skipped */
                old = bpf_kptr_xchg(&value->data, NULL);
                if (!old)
                        continue;
                bpf_percpu_obj_drop(old);
        }
}
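/* Each wrapper below casts its size-specific array map to a generic
 * struct bpf_map * so a single helper body can service every object size.
 */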
#define CALL_BATCH_ALLOC(size, batch, idx) \
        batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
        do { \
                batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
                batch_free((struct bpf_map *)(&array_##size), batch, idx); \
        } while (0)
#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
        batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)
#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
        do { \
                batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
                batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
        } while (0)
/* kptr doesn't support bin_data_8 which is a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
        if ((u32)bpf_get_current_pid_tgid() != pid)
                return 0;

        /* Alloc 128 16-byte objects in batch to trigger refilling,
         * then free 128 16-byte objects in batch to trigger freeing.
         */
        CALL_BATCH_ALLOC_FREE(16, 128, 0);
        CALL_BATCH_ALLOC_FREE(32, 128, 1);
        CALL_BATCH_ALLOC_FREE(64, 128, 2);
        CALL_BATCH_ALLOC_FREE(96, 128, 3);
        CALL_BATCH_ALLOC_FREE(128, 128, 4);
        CALL_BATCH_ALLOC_FREE(192, 128, 5);
        CALL_BATCH_ALLOC_FREE(256, 128, 6);
        CALL_BATCH_ALLOC_FREE(512, 64, 7);
        CALL_BATCH_ALLOC_FREE(1024, 32, 8);
        CALL_BATCH_ALLOC_FREE(2048, 16, 9);
        CALL_BATCH_ALLOC_FREE(4096, 8, 10);

        return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
        if ((u32)bpf_get_current_pid_tgid() != pid)
                return 0;

        /* Alloc 128 16-byte objects in batch to trigger refilling,
         * then free these objects through map free.
         */
        CALL_BATCH_ALLOC(16, 128, 0);
        CALL_BATCH_ALLOC(32, 128, 1);
        CALL_BATCH_ALLOC(64, 128, 2);
        CALL_BATCH_ALLOC(96, 128, 3);
        CALL_BATCH_ALLOC(128, 128, 4);
        CALL_BATCH_ALLOC(192, 128, 5);
        CALL_BATCH_ALLOC(256, 128, 6);
        CALL_BATCH_ALLOC(512, 64, 7);
        CALL_BATCH_ALLOC(1024, 32, 8);
        CALL_BATCH_ALLOC(2048, 16, 9);
        CALL_BATCH_ALLOC(4096, 8, 10);

        return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
        if ((u32)bpf_get_current_pid_tgid() != pid)
                return 0;

        /* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
         * then free 128 8-byte per-cpu objects in batch to trigger freeing.
         */
        CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
        CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
        CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
        CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
        CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
        CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
        CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
        CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
        CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

        return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
        if ((u32)bpf_get_current_pid_tgid() != pid)
                return 0;

        /* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
         * then free these objects through map free.
         */
        CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
        CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
        CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
        CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
        CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
        CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
        CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
        CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
        CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

        return 0;
}