// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

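/* With CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS, the trampoline is attached
 * through a per-trampoline ftrace_ops. ftrace uses the ops_func callback
 * below to negotiate with BPF when another IPMODIFY user (e.g. a live
 * patch) wants to share the same function.
 */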
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
	struct bpf_trampoline *tr = ops->private;
	int ret = 0;

	if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
		/* This is called inside register_ftrace_direct_multi(), so
		 * tr->mutex is already locked.
		 */
		lockdep_assert_held_once(&tr->mutex);

		/* Instead of updating the trampoline here, we propagate
		 * -EAGAIN to register_ftrace_direct_multi(). Then we can
		 * retry register_ftrace_direct_multi() after updating the
		 * trampoline.
		 */
		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
			if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
				return -EBUSY;

			tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
			return -EAGAIN;
		}

		return 0;
	}

	/* The normal locking order is
	 *    tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
	 *
	 * The following two commands are called from
	 *
	 *   prepare_direct_functions_for_ipmodify
	 *   cleanup_direct_functions_after_ipmodify
	 *
	 * In both cases, direct_mutex is already locked. Use
	 * mutex_trylock(&tr->mutex) to avoid deadlocking in the race where
	 * something else is making changes to this same trampoline.
	 */
	if (!mutex_trylock(&tr->mutex)) {
		/* sleep 1 ms to make sure whatever is holding tr->mutex makes
		 * some progress.
		 */
		msleep(1);
		return -EAGAIN;
	}

	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;

		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;

		if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&tr->mutex);
	return ret;
}
#endif

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

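/* Look up the trampoline for @key, allocating a new one on first use.
 * Returns with a reference held, or NULL on allocation failure; the
 * reference is dropped via bpf_trampoline_put().
 */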
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		kfree(tr);
		tr = NULL;
		goto out;
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

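/* If the attach address lives in a module, take a reference on that
 * module so its text cannot go away while the trampoline is attached.
 */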
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

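/* The three helpers below patch the target function's entry: via the
 * ftrace direct-call API when the function is ftrace-managed, or via
 * bpf_arch_text_poke() otherwise.
 */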
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
			 bool lock_direct_mutex)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	}
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr) {
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	}

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	}

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

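/* Collect the links currently attached to @tr into a kcalloc'd
 * bpf_tramp_links array, one slot per BPF_TRAMP_* kind, for
 * arch_prepare_bpf_trampoline(). *total is the overall link count and
 * *ip_arg reports whether any prog needs the IP argument.
 */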
static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

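/* Final teardown step: runs from a workqueue so the executable image
 * can be freed from process context rather than an RCU callback.
 */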
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

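/* Allocate a one-page trampoline image: charge JIT memory, grab an
 * executable page, initialize the percpu ref that tracks in-flight
 * callers, and publish the image under a bpf_trampoline_<key>_<idx>
 * ksym so it shows up in kallsyms and perf.
 */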
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		goto out_uncharge;
	set_vm_flush_reset_perms(image);

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

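/* Rebuild the trampoline for the current set of links and atomically
 * switch the target function over to the new image. Rough sketch of
 * the generated code in the CALL_ORIG case (the exact layout is
 * architecture specific):
 *
 *	save registers
 *	call __bpf_tramp_enter		// percpu_ref_get(im)
 *	run fentry/fmod_ret progs
 *	call original function		// im->ip_after_call points right after this
 *	run fexit progs
 *	call __bpf_tramp_exit		// percpu_ref_put(im)
 *	restore registers
 */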
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 orig_flags = tr->flags;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	/* clear all bits except SHARE_IPMODIFY */
	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
		/* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
		 * should not be set together.
		 */
		tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
	} else {
		tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
	}

	if (ip_arg)
		tr->flags |= BPF_TRAMP_F_IP_ARG;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
again:
	if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
	    (tr->flags & BPF_TRAMP_F_CALL_ORIG))
		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out;

	set_memory_ro((long)im->image, 1);
	set_memory_x((long)im->image, 1);

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	if (err == -EAGAIN) {
		/* -EAGAIN from bpf_tramp_ftrace_ops_func. Now that
		 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
		 * trampoline again and retry the registration.
		 */
		/* reset fops->func and fops->trampoline for re-register */
		tr->fops->func = NULL;
		tr->fops->trampoline = 0;
		goto again;
	}
#endif
	if (err)
		goto out;

	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	/* If any error happens, restore previous flags */
	if (err)
		tr->flags = orig_flags;
	kfree(tlinks);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

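/* Link @link into @tr and regenerate the trampoline. Caller must hold
 * tr->mutex; see bpf_trampoline_link_prog() below for the locked wrapper.
 */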
static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		return -EBUSY;

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt)
			return -EBUSY;
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		/* prog already linked */
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		return -EBUSY;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
	return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

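/* For BPF_LSM_CGROUP, a shim program (an in-kernel stub whose bpf_func
 * is the cgroup dispatch function) is attached to the LSM hook's
 * trampoline and shared, via its link refcount, by all cgroup programs
 * targeting that hook.
 */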
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		return;

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
	bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_prog *p;

	shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
	if (!shim_link)
		return NULL;

	p = bpf_prog_alloc(1, 0);
	if (!p) {
		kfree(shim_link);
		return NULL;
	}

	p->jited = false;
	p->bpf_func = bpf_func;

	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	bpf_prog_inc(p);
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      &bpf_shim_tramp_link_lops, p);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

	return shim_link;
}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{
	struct bpf_tramp_link *link;
	int kind;

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			struct bpf_prog *p = link->link.prog;

			if (p->bpf_func == bpf_func)
				return container_of(link, struct bpf_shim_tramp_link, link);
		}
	}

	return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_attach_target_info tgt_info = {};
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;
	int err;

	err = bpf_check_attach_target(NULL, prog, NULL,
				      prog->aux->attach_btf_id,
				      &tgt_info);
	if (err)
		return err;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	mutex_lock(&tr->mutex);

	shim_link = cgroup_shim_find(tr, bpf_func);
	if (shim_link) {
		/* Reuse the existing shim attached by another program. */
		bpf_link_inc(&shim_link->link.link);

		mutex_unlock(&tr->mutex);
		bpf_trampoline_put(tr); /* bpf_trampoline_get above */
		return 0;
	}

	/* Allocate and install new shim. */

	shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
	if (!shim_link) {
		err = -ENOMEM;
		goto err;
	}

	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
	if (err)
		goto err;

	shim_link->trampoline = tr;
	/* note, we're still holding tr refcnt from above */

	mutex_unlock(&tr->mutex);

	return 0;
err:
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	/* have to release tr while _not_ holding its mutex */
	bpf_trampoline_put(tr); /* bpf_trampoline_get above */

	return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_lookup(key);
	if (WARN_ON_ONCE(!tr))
		return;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	if (tr->fops) {
		ftrace_free_filter(tr->fops);
		kfree(tr->fops);
	}
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

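/* Runtime enter/exit helpers, called from the trampoline around each
 * prog. Timestamps are only taken when the bpf_stats_enabled static
 * key is on; NO_START_TIME (1) is the "no stats" sentinel, since
 * sched_clock() can legitimately return 0.
 */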
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	migrate_enable();
	rcu_read_unlock();
}

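/* Called from the trampoline's generated prologue/epilogue to hold the
 * image's percpu ref across the call to the original function.
 */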
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

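/* Weak default, overridden by each JIT that implements trampolines
 * (e.g. x86-64 in arch/x86/net/bpf_jit_comp.c).
 */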
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);