1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021 Facebook */
7 #include <bpf/bpf_helpers.h>
8 #include <bpf/bpf_tracing.h>
10 char _license[] SEC("license") = "GPL";
/* NOTE(review): this chunk elides several original lines; the two fields
 * below appear to belong to the struct used as the hash-map value type
 * (struct hmap_elem, per the __type(value, ...) lines further down) —
 * the struct's opening line is not visible here; confirm against the
 * full file.
 */
13 struct bpf_timer timer;
14 struct bpf_spin_lock lock; /* unused */
/* Preallocated hashtab holding hmap_elem values (each embeds a bpf_timer). */
18 __uint(type, BPF_MAP_TYPE_HASH);
19 __uint(max_entries, 1000);
21 __type(value, struct hmap_elem);
/* Same value type, but non-preallocated (BPF_F_NO_PREALLOC), so elements
 * are kmalloc'ed on update — exercises the malloc'ed-element timer path.
 */
25 __uint(type, BPF_MAP_TYPE_HASH);
26 __uint(map_flags, BPF_F_NO_PREALLOC);
27 __uint(max_entries, 1000);
29 __type(value, struct hmap_elem);
30 } hmap_malloc SEC(".maps");
/* Array map of struct elem; two slots (keys referenced elsewhere as
 * ARRAY and a second index used for leak checking).
 */
37 __uint(type, BPF_MAP_TYPE_ARRAY);
38 __uint(max_entries, 2);
40 __type(value, struct elem);
/* Small LRU hash — deliberately tiny (4 entries) so timer_cb1 can force
 * eviction of the element that owns a live timer.
 */
44 __uint(type, BPF_MAP_TYPE_LRU_HASH);
45 __uint(max_entries, 4);
47 __type(value, struct elem);
/* Single-slot array template, instantiated four times below for the
 * absolute-timer, pinned-timer and race tests.
 */
51 __uint(type, BPF_MAP_TYPE_ARRAY);
52 __uint(max_entries, 1);
54 __type(value, struct elem);
55 } abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
56 race_array SEC(".maps");
/* Global (BSS/DATA) counters observed by the userspace side of the test.
 * callback_check/callback2_check start at 52 and are presumably decremented
 * by the timer callbacks (the decrements are in lines elided from this
 * chunk — confirm against the full file); pinned_callback_check is
 * incremented by timer_cb_pinned when the callback runs on the pinned CPU.
 */
62 __u64 callback_check = 52;
63 __u64 callback2_check = 52;
64 __u64 pinned_callback_check;
72 /* callback for array and lru timers */
/* NOTE(review): several original lines of this function (opening brace,
 * local declarations, error returns, closing brace) are elided in this
 * chunk; comments below describe only the visible logic.
 */
73 static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
75 /* increment bss variable twice.
76 * Once via array timer callback and once via lru timer callback
80 /* *key == 0 - the callback was called for array timer.
81 * *key == 4 - the callback was called from lru timer.
84 struct bpf_timer *lru_timer;
87 /* rearm array timer to be called again in ~35 seconds */
/* 1ull << 35 ns ~= 34.4 s — far enough out that it will not fire before
 * timer_cb2 cancels it.
 */
88 if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
91 lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
/* arm the lru element's timer to fire immediately, re-entering this
 * same callback with the LRU key
 */
94 bpf_timer_set_callback(lru_timer, timer_cb1);
95 if (bpf_timer_start(lru_timer, 0, 0) != 0)
97 } else if (*key == LRU) {
101 i <= 100 /* for current LRU eviction algorithm this number
102 * should be larger than ~ lru->max_entries * 2
105 struct elem init = {};
107 /* lru_key cannot be used as loop induction variable
108 * otherwise the loop will be unbounded.
112 /* add more elements into lru map to push out current
113 * element and force deletion of this timer
115 bpf_map_update_elem(map, &lru_key, &init, 0);
116 /* look it up to bump it into active list */
117 bpf_map_lookup_elem(map, &lru_key);
119 /* keep adding until *key changes underneath,
120 * which means that key/timer memory was reused
126 /* check that the timer was removed */
/* after LRU eviction freed this element, cancelling its timer must
 * report -EINVAL (timer no longer initialized)
 */
127 if (bpf_timer_cancel(timer) != -EINVAL)
/* Attached to fentry of bpf_fentry_test1: sets up the array and lru
 * timers and kicks off the timer_cb1 chain. (NOTE(review): some original
 * lines — null checks after lookups, the return, the closing brace — are
 * elided in this chunk.)
 */
134 SEC("fentry/bpf_fentry_test1")
135 int BPF_PROG2(test1, int, a)
137 struct bpf_timer *arr_timer, *lru_timer;
138 struct elem init = {};
140 int array_key = ARRAY;
142 arr_timer = bpf_map_lookup_elem(&array, &array_key);
145 bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
/* insert the lru element first so its embedded timer storage exists */
147 bpf_map_update_elem(&lru, &lru_key, &init, 0);
148 lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
151 bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);
153 bpf_timer_set_callback(arr_timer, timer_cb1);
154 bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);
156 /* init more timers to check that array destruction
157 * doesn't leak timer memory.
/* re-init a timer in the array (presumably at a second key — the key
 * assignment line is elided here; confirm against the full file)
 */
160 arr_timer = bpf_map_lookup_elem(&array, &array_key);
163 bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
167 /* callback for prealloc and non-prealloca hashtab timers */
/* Fires val->counter times (re-arming itself each time), then runs the
 * cancellation/deletion checks. NOTE(review): error-return lines and
 * braces between the visible statements are elided in this chunk.
 */
168 static int timer_cb2(void *map, int *key, struct hmap_elem *val)
174 if (val->counter > 0 && --val->counter) {
175 /* re-arm the timer again to execute after 1 usec */
176 bpf_timer_start(&val->timer, 1000, 0);
177 } else if (*key == HTAB) {
178 struct bpf_timer *arr_timer;
179 int array_key = ARRAY;
181 /* cancel arr_timer otherwise bpf_fentry_test1 prog
182 * will stay alive forever.
184 arr_timer = bpf_map_lookup_elem(&array, &array_key);
187 if (bpf_timer_cancel(arr_timer) != 1)
188 /* bpf_timer_cancel should return 1 to indicate
189 * that arr_timer was active at this time
193 /* try to cancel ourself. It shouldn't deadlock. */
194 if (bpf_timer_cancel(&val->timer) != -EDEADLK)
197 /* delete this key and this timer anyway.
198 * It shouldn't deadlock either.
200 bpf_map_delete_elem(map, key);
202 /* in preallocated hashmap both 'key' and 'val' could have been
203 * reused to store another map element (like in LRU above),
204 * but in controlled test environment the below test works.
205 * It's not a use-after-free. The memory is owned by the map.
/* starting a timer on the just-deleted element must fail with -EINVAL */
207 if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
/* remaining path: the non-preallocated (malloc'ed) hashtab element */
211 if (*key != HTAB_MALLOC)
214 /* try to cancel ourself. It shouldn't deadlock. */
215 if (bpf_timer_cancel(&val->timer) != -EDEADLK)
218 /* delete this key and this timer anyway.
219 * It shouldn't deadlock either.
221 bpf_map_delete_elem(map, key);
/* Helper shared by test2: arms a 1-usec CLOCK_BOOTTIME timer (callback
 * timer_cb2) on both the preallocated and the malloc'ed hashtab elements.
 * NOTE(review): null checks after the lookups and the surrounding braces
 * are elided in this chunk.
 */
228 int bpf_timer_test(void)
230 struct hmap_elem *val;
231 int key = HTAB, key_malloc = HTAB_MALLOC;
233 val = bpf_map_lookup_elem(&hmap, &key);
235 if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
237 bpf_timer_set_callback(&val->timer, timer_cb2);
238 bpf_timer_start(&val->timer, 1000, 0);
240 val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
242 if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
244 bpf_timer_set_callback(&val->timer, timer_cb2);
245 bpf_timer_start(&val->timer, 1000, 0);
/* Attached to fentry of bpf_fentry_test2: exercises timer init/free
 * cycles on both hashtabs (update over the same key frees an initialized
 * timer; delete+re-insert must not leak), then arms the real timers via
 * bpf_timer_test(). NOTE(review): null checks after lookups and some
 * braces are elided in this chunk.
 */
251 SEC("fentry/bpf_fentry_test2")
252 int BPF_PROG2(test2, int, a, int, b)
253 struct hmap_elem init = {}, *val;
254 int key = HTAB, key_malloc = HTAB_MALLOC;
256 init.counter = 10; /* number of times to trigger timer_cb2 */
257 bpf_map_update_elem(&hmap, &key, &init, 0);
258 val = bpf_map_lookup_elem(&hmap, &key);
260 bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
261 /* update the same key to free the timer */
262 bpf_map_update_elem(&hmap, &key, &init, 0);
/* same init/free cycle on the non-preallocated hashtab */
264 bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
265 val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
267 bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
268 /* update the same key to free the timer */
269 bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
271 /* init more timers to check that htab operations
272 * don't leak timer memory.
275 bpf_map_update_elem(&hmap, &key, &init, 0);
276 val = bpf_map_lookup_elem(&hmap, &key);
278 bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
/* delete then re-insert: the freed timer must not be leaked or reused */
279 bpf_map_delete_elem(&hmap, &key);
280 bpf_map_update_elem(&hmap, &key, &init, 0);
281 val = bpf_map_lookup_elem(&hmap, &key);
283 bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
285 /* and with non-prealloc htab */
287 bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
288 val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
290 bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
291 bpf_map_delete_elem(&hmap_malloc, &key_malloc);
292 bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
293 val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
295 bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
297 return bpf_timer_test();
300 /* callback for absolute timer */
/* NOTE(review): the branch structure between the two bpf_timer_start
 * calls (and presumably the BPF_F_TIMER_ABS flag argument on the
 * continuation lines) is elided in this chunk — confirm against the
 * full file.
 */
301 static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
/* re-arm with an absolute expiry 1 usec from now */
306 bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
309 /* Re-arm timer ~35 seconds in future */
310 bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
/* Attached to fentry of bpf_fentry_test3: starts the absolute-time timer
 * (callback timer_cb3). NOTE(review): the key declaration, null check,
 * flags continuation line and return are elided in this chunk.
 */
317 SEC("fentry/bpf_fentry_test3")
318 int BPF_PROG2(test3, int, a)
321 struct bpf_timer *timer;
325 timer = bpf_map_lookup_elem(&abs_timer, &key);
327 if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
329 bpf_timer_set_callback(timer, timer_cb3);
/* absolute expiry 1 usec in the future (flag argument on the elided
 * continuation line — presumably BPF_F_TIMER_ABS)
 */
330 bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
337 /* callback for pinned timer */
/* Verifies the callback runs on the CPU recorded in pinned_cpu at arm
 * time; on success bumps pinned_callback_check for userspace to read.
 * NOTE(review): the early-return body of the mismatch branch is elided
 * in this chunk.
 */
338 static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
340 __s32 cpu = bpf_get_smp_processor_id();
342 if (cpu != pinned_cpu)
345 pinned_callback_check++;
/* Arms a CPU-pinned timer (BPF_F_TIMER_CPU_PIN) on the current CPU.
 * soft=true uses the relative-expiry map; soft=false uses the absolute
 * map and additionally sets BPF_F_TIMER_ABS with a boot-time base.
 * NOTE(review): if/else braces, the key declaration, null checks and
 * start_time's soft-path initialization are elided in this chunk.
 */
349 static void test_pinned_timer(bool soft)
353 struct bpf_timer *timer;
354 __u64 flags = BPF_F_TIMER_CPU_PIN;
358 map = &soft_timer_pinned;
361 map = &abs_timer_pinned;
362 start_time = bpf_ktime_get_boot_ns();
363 flags |= BPF_F_TIMER_ABS;
366 timer = bpf_map_lookup_elem(map, &key);
368 if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
370 bpf_timer_set_callback(timer, timer_cb_pinned);
/* record the arming CPU so timer_cb_pinned can verify the pin */
371 pinned_cpu = bpf_get_smp_processor_id();
372 bpf_timer_start(timer, start_time + 1000, flags);
/* Attached to fentry of bpf_fentry_test4: pinned timer, soft (relative)
 * expiry path. (Return/closing brace elided in this chunk.)
 */
378 SEC("fentry/bpf_fentry_test4")
379 int BPF_PROG2(test4, int, a)
382 test_pinned_timer(true);
/* Attached to fentry of bpf_fentry_test5: pinned timer, absolute expiry
 * path. (Return/closing brace elided in this chunk.)
 */
387 SEC("fentry/bpf_fentry_test5")
388 int BPF_PROG2(test5, int, a)
391 test_pinned_timer(false);
/* Callback for the race test: perpetually re-arms itself 1 ms out so a
 * concurrent init/start/cancel loop always has a live timer to race
 * against. (Braces and return elided in this chunk.)
 */
396 static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
398 bpf_timer_start(timer, 1000000, 0);
/* NOTE(review): the enclosing program's SEC()/name/opening lines are not
 * visible in this chunk; this is the body of the race test, which
 * repeatedly init/start/cancels the timer in race_array to race against
 * race_timer_callback re-arming it. -EBUSY from bpf_timer_init is
 * tolerated because the timer may already be initialized.
 */
405 struct bpf_timer *timer;
406 int err, race_key = 0;
409 __builtin_memset(&init, 0, sizeof(struct elem));
410 bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
412 timer = bpf_map_lookup_elem(&race_array, &race_key);
416 err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
417 if (err && err != -EBUSY)
420 bpf_timer_set_callback(timer, race_timer_callback);
421 bpf_timer_start(timer, 0, 0);
422 bpf_timer_cancel(timer);