Commit | Line | Data |
---|---|---|
179a0cc4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2541517c | 2 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com |
0515e599 | 3 | * Copyright (c) 2016 Facebook |
2541517c AS |
4 | */ |
5 | #include <linux/kernel.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/bpf.h> | |
4279adb0 | 9 | #include <linux/bpf_verifier.h> |
0515e599 | 10 | #include <linux/bpf_perf_event.h> |
c4d0bfb4 | 11 | #include <linux/btf.h> |
2541517c AS |
12 | #include <linux/filter.h> |
13 | #include <linux/uaccess.h> | |
9c959c86 | 14 | #include <linux/ctype.h> |
9802d865 | 15 | #include <linux/kprobes.h> |
ac5a72ea | 16 | #include <linux/spinlock.h> |
41bdc4b4 | 17 | #include <linux/syscalls.h> |
540adea3 | 18 | #include <linux/error-injection.h> |
c9a0f3b8 | 19 | #include <linux/btf_ids.h> |
6f100640 | 20 | #include <linux/bpf_lsm.h> |
0dcac272 | 21 | #include <linux/fprobe.h> |
ca74823c JO |
22 | #include <linux/bsearch.h> |
23 | #include <linux/sort.h> | |
f3cf4134 RS |
24 | #include <linux/key.h> |
25 | #include <linux/verification.h> | |
89ae89f5 | 26 | #include <linux/namei.h> |
6f100640 | 27 | |
8e4597c6 | 28 | #include <net/bpf_sk_storage.h> |
9802d865 | 29 | |
c4d0bfb4 AM |
30 | #include <uapi/linux/bpf.h> |
31 | #include <uapi/linux/btf.h> | |
32 | ||
c7b6f29b NA |
33 | #include <asm/tlb.h> |
34 | ||
9802d865 | 35 | #include "trace_probe.h" |
2541517c AS |
36 | #include "trace.h" |
37 | ||
ac5a72ea AM |
38 | #define CREATE_TRACE_POINTS |
39 | #include "bpf_trace.h" | |
40 | ||
e672db03 SF |
41 | #define bpf_event_rcu_dereference(p) \ |
42 | rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) | |
43 | ||
a38d1107 MM |
44 | #ifdef CONFIG_MODULES |
45 | struct bpf_trace_module { | |
46 | struct module *module; | |
47 | struct list_head list; | |
48 | }; | |
49 | ||
50 | static LIST_HEAD(bpf_trace_modules); | |
51 | static DEFINE_MUTEX(bpf_module_mutex); | |
52 | ||
53 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
54 | { | |
55 | struct bpf_raw_event_map *btp, *ret = NULL; | |
56 | struct bpf_trace_module *btm; | |
57 | unsigned int i; | |
58 | ||
59 | mutex_lock(&bpf_module_mutex); | |
60 | list_for_each_entry(btm, &bpf_trace_modules, list) { | |
61 | for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { | |
62 | btp = &btm->module->bpf_raw_events[i]; | |
63 | if (!strcmp(btp->tp->name, name)) { | |
64 | if (try_module_get(btm->module)) | |
65 | ret = btp; | |
66 | goto out; | |
67 | } | |
68 | } | |
69 | } | |
70 | out: | |
71 | mutex_unlock(&bpf_module_mutex); | |
72 | return ret; | |
73 | } | |
74 | #else | |
75 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
76 | { | |
77 | return NULL; | |
78 | } | |
79 | #endif /* CONFIG_MODULES */ | |
80 | ||
035226b9 | 81 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
c195651e | 82 | u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
035226b9 | 83 | |
eb411377 AM |
84 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, |
85 | u64 flags, const struct btf **btf, | |
86 | s32 *btf_id); | |
f7098690 JO |
87 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); |
88 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); | |
eb411377 | 89 | |
0b779b61 | 90 | static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx); |
686328d8 | 91 | static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx); |
0b779b61 | 92 | |
2541517c AS |
93 | /** |
94 | * trace_call_bpf - invoke BPF program | |
e87c6bc3 | 95 | * @call: tracepoint event |
2541517c AS |
96 | * @ctx: opaque context pointer |
97 | * | |
98 | * kprobe handlers execute BPF programs via this helper. | |
99 | * Can be used from static tracepoints in the future. | |
100 | * | |
101 | * Return: BPF programs always return an integer which is interpreted by | |
102 | * kprobe handler as: | |
103 | * 0 - return from kprobe (event is filtered out) | |
104 | * 1 - store kprobe event into ring buffer | |
105 | * Other values are reserved and currently alias to 1 | |
106 | */ | |
e87c6bc3 | 107 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
2541517c AS |
108 | { |
109 | unsigned int ret; | |
110 | ||
b0a81b94 | 111 | cant_sleep(); |
2541517c AS |
112 | |
113 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
114 | /* | |
115 | * Since a BPF program is already running on this CPU, don't |
116 | * call into another BPF program (same or different), and |
117 | * don't send a kprobe event into the ring buffer; |
118 | * return zero here instead. |
119 | */ | |
dd865789 JO |
120 | rcu_read_lock(); |
121 | bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array)); | |
122 | rcu_read_unlock(); | |
2541517c AS |
123 | ret = 0; |
124 | goto out; | |
125 | } | |
126 | ||
e87c6bc3 YS |
127 | /* |
128 | * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock | |
129 | * to all call sites, we did a bpf_prog_array_valid() there to check | |
130 | * whether call->prog_array is empty or not, which is | |
2b5894cc | 131 | * a heuristic to speed up execution. |
e87c6bc3 YS |
132 | * |
133 | * If the prog_array fetched by bpf_prog_array_valid() was |
134 | * non-NULL, we go into trace_call_bpf() and do the actual |
135 | * proper rcu_dereference() under the RCU lock. |
136 | * If it turns out that prog_array is NULL, then we bail out. |
137 | * Conversely, if the pointer fetched by bpf_prog_array_valid() |
138 | * was NULL, we skip the prog_array and risk missing out on |
139 | * events attached in between that check and the |
140 | * rcu_dereference(), which is an accepted risk. |
141 | */ | |
055eb955 SF |
142 | rcu_read_lock(); |
143 | ret = bpf_prog_run_array(rcu_dereference(call->prog_array), | |
144 | ctx, bpf_prog_run); | |
145 | rcu_read_unlock(); | |
2541517c AS |
146 | |
147 | out: | |
148 | __this_cpu_dec(bpf_prog_active); | |
2541517c AS |
149 | |
150 | return ret; | |
151 | } | |
2541517c | 152 | |
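The 0/1 contract documented above is what an attached program actually implements. A minimal sketch, assuming libbpf's `vmlinux.h`, `bpf_helpers.h`, and `bpf_tracing.h`; the attach point and the filtered pid are illustrative: returning 0 drops the kprobe event, returning 1 records it.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(filter_opens)
{
	/* trace_call_bpf() interprets our return value: 0 filters the
	 * event out, 1 stores it into the perf ring buffer. */
	u32 pid = bpf_get_current_pid_tgid() >> 32;

	return pid == 1234 ? 1 : 0;	/* 1234 is a made-up pid */
}

char LICENSE[] SEC("license") = "GPL";
```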
9802d865 JB |
153 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
154 | BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) | |
155 | { | |
9802d865 | 156 | regs_set_return_value(regs, rc); |
540adea3 | 157 | override_function_with_return(regs); |
9802d865 JB |
158 | return 0; |
159 | } | |
160 | ||
161 | static const struct bpf_func_proto bpf_override_return_proto = { | |
162 | .func = bpf_override_return, | |
163 | .gpl_only = true, | |
164 | .ret_type = RET_INTEGER, | |
165 | .arg1_type = ARG_PTR_TO_CTX, | |
166 | .arg2_type = ARG_ANYTHING, | |
167 | }; | |
168 | #endif | |
169 | ||
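A hedged sketch of how a program would use the helper above, assuming CONFIG_BPF_KPROBE_OVERRIDE and a kprobe on a function marked with ALLOW_ERROR_INJECTION (should_failslab is one such function); illustrative only, not a recipe:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/should_failslab")
int BPF_KPROBE(inject_enomem)
{
	/* Make the probed function return -ENOMEM to its caller;
	 * regs_set_return_value() + override_function_with_return()
	 * above do the actual redirection. */
	bpf_override_return(ctx, -12 /* -ENOMEM */);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```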
8d92db5c CH |
170 | static __always_inline int |
171 | bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) | |
2541517c | 172 | { |
8d92db5c | 173 | int ret; |
2541517c | 174 | |
c0ee37e8 | 175 | ret = copy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
176 | if (unlikely(ret < 0)) |
177 | memset(dst, 0, size); | |
6ae08ae3 DB |
178 | return ret; |
179 | } | |
180 | ||
8d92db5c CH |
181 | BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, |
182 | const void __user *, unsafe_ptr) | |
183 | { | |
184 | return bpf_probe_read_user_common(dst, size, unsafe_ptr); | |
185 | } | |
186 | ||
f470378c | 187 | const struct bpf_func_proto bpf_probe_read_user_proto = { |
6ae08ae3 DB |
188 | .func = bpf_probe_read_user, |
189 | .gpl_only = true, | |
190 | .ret_type = RET_INTEGER, | |
191 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
192 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
193 | .arg3_type = ARG_ANYTHING, | |
194 | }; | |
195 | ||
8d92db5c CH |
196 | static __always_inline int |
197 | bpf_probe_read_user_str_common(void *dst, u32 size, | |
198 | const void __user *unsafe_ptr) | |
6ae08ae3 | 199 | { |
8d92db5c | 200 | int ret; |
6ae08ae3 | 201 | |
6fa6d280 DX |
202 | /* |
203 | * NB: We rely on strncpy_from_user() not copying junk past the NUL | |
204 | * terminator into `dst`. | |
205 | * | |
206 | * strncpy_from_user() does long-sized strides in the fast path. If the | |
207 | * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, | |
208 | * then there could be junk after the NUL in `dst`. If user takes `dst` | |
209 | * and keys a hash map with it, then semantically identical strings can | |
210 | * occupy multiple entries in the map. | |
211 | */ | |
8d92db5c | 212 | ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
213 | if (unlikely(ret < 0)) |
214 | memset(dst, 0, size); | |
6ae08ae3 DB |
215 | return ret; |
216 | } | |
217 | ||
8d92db5c CH |
218 | BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, |
219 | const void __user *, unsafe_ptr) | |
220 | { | |
221 | return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); | |
222 | } | |
223 | ||
f470378c | 224 | const struct bpf_func_proto bpf_probe_read_user_str_proto = { |
6ae08ae3 DB |
225 | .func = bpf_probe_read_user_str, |
226 | .gpl_only = true, | |
227 | .ret_type = RET_INTEGER, | |
228 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
229 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
230 | .arg3_type = ARG_ANYTHING, | |
231 | }; | |
232 | ||
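The NUL-termination subtlety called out in the comment above matters to callers: on success the helper does not clear bytes past the string, so a map key built from `dst` must start out zeroed or semantically identical strings can occupy multiple entries. A small sketch under those assumptions (the map and attach point are illustrative):

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, char[64]);
	__type(value, u64);
} opened_paths SEC(".maps");

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(count_opens, int dfd, const char *filename)
{
	char key[64] = {};	/* zeroed, so bytes past the NUL are stable */
	u64 one = 1, *cnt;

	if (bpf_probe_read_user_str(key, sizeof(key), filename) < 0)
		return 0;

	cnt = bpf_map_lookup_elem(&opened_paths, &key);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	else
		bpf_map_update_elem(&opened_paths, &key, &one, BPF_ANY);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```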
6ae08ae3 DB |
233 | BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, |
234 | const void *, unsafe_ptr) | |
235 | { | |
8d92db5c | 236 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
237 | } |
238 | ||
f470378c | 239 | const struct bpf_func_proto bpf_probe_read_kernel_proto = { |
6ae08ae3 DB |
240 | .func = bpf_probe_read_kernel, |
241 | .gpl_only = true, | |
242 | .ret_type = RET_INTEGER, | |
243 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
244 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
245 | .arg3_type = ARG_ANYTHING, | |
246 | }; | |
247 | ||
6ae08ae3 | 248 | static __always_inline int |
8d92db5c | 249 | bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) |
6ae08ae3 | 250 | { |
ff40e510 | 251 | int ret; |
8d92db5c | 252 | |
6ae08ae3 | 253 | /* |
8d92db5c CH |
254 | * The strncpy_from_kernel_nofault() call will likely not fill the |
255 | * entire buffer, but that's okay in this circumstance as we're probing | |
6ae08ae3 DB |
256 | * arbitrary memory anyway, similar to bpf_probe_read_*(), and might |
257 | * as well probe the stack. Thus, memory is explicitly cleared |
258 | * only in the error case, so that improper users who ignore the |
259 | * return code altogether don't copy garbage; otherwise the length |
260 | * of the string is returned for use with bpf_perf_event_output() et al. |
261 | */ | |
8d92db5c | 262 | ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); |
6ae08ae3 | 263 | if (unlikely(ret < 0)) |
ff40e510 | 264 | memset(dst, 0, size); |
074f528e | 265 | return ret; |
2541517c AS |
266 | } |
267 | ||
6ae08ae3 DB |
268 | BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, |
269 | const void *, unsafe_ptr) | |
270 | { | |
8d92db5c | 271 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
272 | } |
273 | ||
f470378c | 274 | const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { |
6ae08ae3 DB |
275 | .func = bpf_probe_read_kernel_str, |
276 | .gpl_only = true, | |
277 | .ret_type = RET_INTEGER, | |
278 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
279 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
280 | .arg3_type = ARG_ANYTHING, | |
281 | }; | |
282 | ||
8d92db5c CH |
283 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
284 | BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, | |
285 | const void *, unsafe_ptr) | |
286 | { | |
287 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { | |
288 | return bpf_probe_read_user_common(dst, size, | |
289 | (__force void __user *)unsafe_ptr); | |
290 | } | |
291 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); | |
292 | } | |
293 | ||
294 | static const struct bpf_func_proto bpf_probe_read_compat_proto = { | |
295 | .func = bpf_probe_read_compat, | |
296 | .gpl_only = true, | |
297 | .ret_type = RET_INTEGER, | |
298 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
299 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
300 | .arg3_type = ARG_ANYTHING, | |
301 | }; | |
302 | ||
6ae08ae3 DB |
303 | BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, |
304 | const void *, unsafe_ptr) | |
305 | { | |
8d92db5c CH |
306 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { |
307 | return bpf_probe_read_user_str_common(dst, size, | |
308 | (__force void __user *)unsafe_ptr); | |
309 | } | |
310 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); | |
6ae08ae3 DB |
311 | } |
312 | ||
313 | static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { | |
314 | .func = bpf_probe_read_compat_str, | |
2541517c AS |
315 | .gpl_only = true, |
316 | .ret_type = RET_INTEGER, | |
39f19ebb | 317 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
9c019e2b | 318 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
2541517c AS |
319 | .arg3_type = ARG_ANYTHING, |
320 | }; | |
8d92db5c | 321 | #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ |
2541517c | 322 | |
eb1b6688 | 323 | BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, |
f3694e00 | 324 | u32, size) |
96ae5227 | 325 | { |
96ae5227 SD |
326 | /* |
327 | * Ensure we're in a user context, in which it is safe for the |
328 | * helper to run. This helper has no business in a kthread. |
329 | * | |
330 | * access_ok() should prevent writing to non-user memory, but in | |
331 | * some situations (nommu, temporary switch, etc) access_ok() does | |
332 | * not provide enough validation, hence the check on KERNEL_DS. | |
c7b6f29b NA |
333 | * |
334 | * nmi_uaccess_okay() ensures the probe is not run in an interim | |
335 | * state, when the task or mm are switched. This is specifically | |
336 | * required to prevent the use of temporary mm. | |
96ae5227 SD |
337 | */ |
338 | ||
339 | if (unlikely(in_interrupt() || | |
340 | current->flags & (PF_KTHREAD | PF_EXITING))) | |
341 | return -EPERM; | |
c7b6f29b NA |
342 | if (unlikely(!nmi_uaccess_okay())) |
343 | return -EPERM; | |
96ae5227 | 344 | |
c0ee37e8 | 345 | return copy_to_user_nofault(unsafe_ptr, src, size); |
96ae5227 SD |
346 | } |
347 | ||
348 | static const struct bpf_func_proto bpf_probe_write_user_proto = { | |
349 | .func = bpf_probe_write_user, | |
350 | .gpl_only = true, | |
351 | .ret_type = RET_INTEGER, | |
352 | .arg1_type = ARG_ANYTHING, | |
216e3cd2 | 353 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
39f19ebb | 354 | .arg3_type = ARG_CONST_SIZE, |
96ae5227 SD |
355 | }; |
356 | ||
357 | static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |
358 | { | |
2c78ee89 AS |
359 | if (!capable(CAP_SYS_ADMIN)) |
360 | return NULL; | |
361 | ||
96ae5227 SD |
362 | pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", |
363 | current->comm, task_pid_nr(current)); | |
364 | ||
365 | return &bpf_probe_write_user_proto; | |
366 | } | |
367 | ||
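For completeness, a deliberately contrived sketch of the helper gated above; the program loads only with CAP_SYS_ADMIN and triggers the ratelimited warning. The uprobe target, its argument, and the BPF_UPROBE macro (recent libbpf) are assumptions:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Hypothetical uprobe: first argument is a writable user buffer. */
SEC("uprobe//usr/bin/example:process_buf")
int BPF_UPROBE(scribble, char *user_buf)
{
	char tag[4] = "bpf";

	/* Fails with -EPERM from irq context, kthreads, or while the mm
	 * is in an interim state, per the checks in the helper above. */
	bpf_probe_write_user(user_buf, tag, sizeof(tag));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```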
d9c9e4db FR |
368 | #define MAX_TRACE_PRINTK_VARARGS 3 |
369 | #define BPF_TRACE_PRINTK_SIZE 1024 | |
ac5a72ea | 370 | |
d9c9e4db FR |
371 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
372 | u64, arg2, u64, arg3) | |
ac5a72ea | 373 | { |
d9c9e4db | 374 | u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; |
78aa1cc9 JO |
375 | struct bpf_bprintf_data data = { |
376 | .get_bin_args = true, | |
e2bb9e01 | 377 | .get_buf = true, |
78aa1cc9 | 378 | }; |
ac5a72ea AM |
379 | int ret; |
380 | ||
78aa1cc9 JO |
381 | ret = bpf_bprintf_prepare(fmt, fmt_size, args, |
382 | MAX_TRACE_PRINTK_VARARGS, &data); | |
d9c9e4db FR |
383 | if (ret < 0) |
384 | return ret; | |
385 | ||
e2bb9e01 | 386 | ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); |
d9c9e4db | 387 | |
e2bb9e01 | 388 | trace_bpf_trace_printk(data.buf); |
ac5a72ea | 389 | |
f19a4050 | 390 | bpf_bprintf_cleanup(&data); |
9c959c86 | 391 | |
d9c9e4db | 392 | return ret; |
9c959c86 AS |
393 | } |
394 | ||
395 | static const struct bpf_func_proto bpf_trace_printk_proto = { | |
396 | .func = bpf_trace_printk, | |
397 | .gpl_only = true, | |
398 | .ret_type = RET_INTEGER, | |
216e3cd2 | 399 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
39f19ebb | 400 | .arg2_type = ARG_CONST_SIZE, |
9c959c86 AS |
401 | }; |
402 | ||
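Usage sketch: from the BPF side the helper takes the format string plus at most MAX_TRACE_PRINTK_VARARGS (3) arguments, and the output lands in the bpf_trace/bpf_trace_printk tracepoint (visible via the trace pipe). libbpf's bpf_printk() macro is the usual wrapper; the attach point here is illustrative.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(log_open)
{
	const char fmt[] = "openat by pid %d uid %d\n";

	/* At most three varargs; a fourth would make the call fail. */
	bpf_trace_printk(fmt, sizeof(fmt),
			 bpf_get_current_pid_tgid() >> 32,
			 bpf_get_current_uid_gid() & 0xffffffff);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```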
10aceb62 | 403 | static void __set_printk_clr_event(void) |
0756ea3e AS |
404 | { |
405 | /* | |
ac5a72ea AM |
406 | * This program might be calling bpf_trace_printk, |
407 | * so enable the associated bpf_trace/bpf_trace_printk event. | |
408 | * Repeat this each time, as it is possible a user has |
409 | * disabled bpf_trace_printk events. By loading a program |
410 | * that calls bpf_trace_printk(), however, the user has |
411 | * expressed the intent to see such events. |
0756ea3e | 412 | */ |
ac5a72ea AM |
413 | if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) |
414 | pr_warn_ratelimited("could not enable bpf_trace_printk events"); | |
10aceb62 | 415 | } |
0756ea3e | 416 | |
10aceb62 DM |
417 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void) |
418 | { | |
419 | __set_printk_clr_event(); | |
0756ea3e AS |
420 | return &bpf_trace_printk_proto; |
421 | } | |
422 | ||
78aa1cc9 | 423 | BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args, |
10aceb62 DM |
424 | u32, data_len) |
425 | { | |
78aa1cc9 JO |
426 | struct bpf_bprintf_data data = { |
427 | .get_bin_args = true, | |
e2bb9e01 | 428 | .get_buf = true, |
78aa1cc9 | 429 | }; |
10aceb62 | 430 | int ret, num_args; |
10aceb62 DM |
431 | |
432 | if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || | |
78aa1cc9 | 433 | (data_len && !args)) |
10aceb62 DM |
434 | return -EINVAL; |
435 | num_args = data_len / 8; | |
436 | ||
78aa1cc9 | 437 | ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); |
10aceb62 DM |
438 | if (ret < 0) |
439 | return ret; | |
440 | ||
e2bb9e01 | 441 | ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); |
10aceb62 | 442 | |
e2bb9e01 | 443 | trace_bpf_trace_printk(data.buf); |
10aceb62 | 444 | |
f19a4050 | 445 | bpf_bprintf_cleanup(&data); |
10aceb62 DM |
446 | |
447 | return ret; | |
448 | } | |
449 | ||
450 | static const struct bpf_func_proto bpf_trace_vprintk_proto = { | |
451 | .func = bpf_trace_vprintk, | |
452 | .gpl_only = true, | |
453 | .ret_type = RET_INTEGER, | |
216e3cd2 | 454 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
10aceb62 | 455 | .arg2_type = ARG_CONST_SIZE, |
216e3cd2 | 456 | .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, |
10aceb62 DM |
457 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, |
458 | }; | |
459 | ||
460 | const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) | |
461 | { | |
462 | __set_printk_clr_event(); | |
463 | return &bpf_trace_vprintk_proto; | |
464 | } | |
465 | ||
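The vprintk variant above removes the three-argument cap by taking its varargs as an array of u64: per the check in the helper, data_len must be a multiple of 8 and at most MAX_BPRINTF_VARARGS * 8. A sketch with an illustrative attach point:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_nanosleep")
int BPF_KPROBE(log_many)
{
	const char fmt[] = "%d + %d + %d + %d = %d\n";
	u64 args[] = { 1, 2, 3, 4, 10 };	/* five args are fine here */

	/* data_len (40) is a multiple of 8, as the helper requires */
	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```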
492e639f | 466 | BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, |
78aa1cc9 | 467 | const void *, args, u32, data_len) |
492e639f | 468 | { |
78aa1cc9 JO |
469 | struct bpf_bprintf_data data = { |
470 | .get_bin_args = true, | |
471 | }; | |
d9c9e4db | 472 | int err, num_args; |
492e639f | 473 | |
335ff499 | 474 | if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || |
78aa1cc9 | 475 | (data_len && !args)) |
d9c9e4db | 476 | return -EINVAL; |
492e639f YS |
477 | num_args = data_len / 8; |
478 | ||
78aa1cc9 | 479 | err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); |
d9c9e4db FR |
480 | if (err < 0) |
481 | return err; | |
492e639f | 482 | |
78aa1cc9 | 483 | seq_bprintf(m, fmt, data.bin_args); |
48cac3f4 | 484 | |
f19a4050 | 485 | bpf_bprintf_cleanup(&data); |
d9c9e4db FR |
486 | |
487 | return seq_has_overflowed(m) ? -EOVERFLOW : 0; | |
492e639f YS |
488 | } |
489 | ||
9436ef6e | 490 | BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) |
c9a0f3b8 | 491 | |
492e639f YS |
492 | static const struct bpf_func_proto bpf_seq_printf_proto = { |
493 | .func = bpf_seq_printf, | |
494 | .gpl_only = true, | |
495 | .ret_type = RET_INTEGER, | |
496 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 497 | .arg1_btf_id = &btf_seq_file_ids[0], |
216e3cd2 | 498 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 499 | .arg3_type = ARG_CONST_SIZE, |
216e3cd2 | 500 | .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, |
492e639f | 501 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
492e639f YS |
502 | }; |
503 | ||
504 | BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) | |
505 | { | |
506 | return seq_write(m, data, len) ? -EOVERFLOW : 0; | |
507 | } | |
508 | ||
492e639f YS |
509 | static const struct bpf_func_proto bpf_seq_write_proto = { |
510 | .func = bpf_seq_write, | |
511 | .gpl_only = true, | |
512 | .ret_type = RET_INTEGER, | |
513 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 514 | .arg1_btf_id = &btf_seq_file_ids[0], |
216e3cd2 | 515 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 516 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
492e639f YS |
517 | }; |
518 | ||
eb411377 AM |
519 | BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, |
520 | u32, btf_ptr_size, u64, flags) | |
521 | { | |
522 | const struct btf *btf; | |
523 | s32 btf_id; | |
524 | int ret; | |
525 | ||
526 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
527 | if (ret) | |
528 | return ret; | |
529 | ||
530 | return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); | |
531 | } | |
532 | ||
533 | static const struct bpf_func_proto bpf_seq_printf_btf_proto = { | |
534 | .func = bpf_seq_printf_btf, | |
535 | .gpl_only = true, | |
536 | .ret_type = RET_INTEGER, | |
537 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
538 | .arg1_btf_id = &btf_seq_file_ids[0], | |
216e3cd2 | 539 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 540 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
eb411377 | 541 | .arg4_type = ARG_ANYTHING, |
492e639f YS |
542 | }; |
543 | ||
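These seq_file helpers are the output path for BPF iterator programs. A minimal task-iterator sketch, assuming libbpf's BPF_SEQ_PRINTF wrapper from bpf_tracing.h (it packs its varargs into the u64 array bpf_seq_printf() expects); a full seq buffer surfaces as -EOVERFLOW, which tells the iterator core to retry with a larger buffer:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;

	if (!task)	/* NULL marks the end of the iteration */
		return 0;

	BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```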
908432ca YS |
544 | static __always_inline int |
545 | get_map_perf_counter(struct bpf_map *map, u64 flags, | |
546 | u64 *value, u64 *enabled, u64 *running) | |
35578d79 | 547 | { |
35578d79 | 548 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
6816a7ff DB |
549 | unsigned int cpu = smp_processor_id(); |
550 | u64 index = flags & BPF_F_INDEX_MASK; | |
3b1efb19 | 551 | struct bpf_event_entry *ee; |
35578d79 | 552 | |
6816a7ff DB |
553 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
554 | return -EINVAL; | |
555 | if (index == BPF_F_CURRENT_CPU) | |
556 | index = cpu; | |
35578d79 KX |
557 | if (unlikely(index >= array->map.max_entries)) |
558 | return -E2BIG; | |
559 | ||
3b1efb19 | 560 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 561 | if (!ee) |
35578d79 KX |
562 | return -ENOENT; |
563 | ||
908432ca YS |
564 | return perf_event_read_local(ee->event, value, enabled, running); |
565 | } | |
566 | ||
567 | BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |
568 | { | |
569 | u64 value = 0; | |
570 | int err; | |
571 | ||
572 | err = get_map_perf_counter(map, flags, &value, NULL, NULL); | |
35578d79 | 573 | /* |
f91840a3 AS |
574 | * this API is ugly since we lose the [-22..-2] range of valid |
575 | * counter values, but that's UAPI |
35578d79 | 576 | */ |
f91840a3 AS |
577 | if (err) |
578 | return err; | |
579 | return value; | |
35578d79 KX |
580 | } |
581 | ||
62544ce8 | 582 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
35578d79 | 583 | .func = bpf_perf_event_read, |
1075ef59 | 584 | .gpl_only = true, |
35578d79 KX |
585 | .ret_type = RET_INTEGER, |
586 | .arg1_type = ARG_CONST_MAP_PTR, | |
587 | .arg2_type = ARG_ANYTHING, | |
588 | }; | |
589 | ||
908432ca YS |
590 | BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, |
591 | struct bpf_perf_event_value *, buf, u32, size) | |
592 | { | |
593 | int err = -EINVAL; | |
594 | ||
595 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
596 | goto clear; | |
597 | err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, | |
598 | &buf->running); | |
599 | if (unlikely(err)) | |
600 | goto clear; | |
601 | return 0; | |
602 | clear: | |
603 | memset(buf, 0, size); | |
604 | return err; | |
605 | } | |
606 | ||
607 | static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |
608 | .func = bpf_perf_event_read_value, | |
609 | .gpl_only = true, | |
610 | .ret_type = RET_INTEGER, | |
611 | .arg1_type = ARG_CONST_MAP_PTR, | |
612 | .arg2_type = ARG_ANYTHING, | |
613 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
614 | .arg4_type = ARG_CONST_SIZE, | |
615 | }; | |
616 | ||
8e7a3920 DB |
617 | static __always_inline u64 |
618 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |
283ca526 | 619 | u64 flags, struct perf_sample_data *sd) |
a43eec30 | 620 | { |
a43eec30 | 621 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
d7931330 | 622 | unsigned int cpu = smp_processor_id(); |
1e33759c | 623 | u64 index = flags & BPF_F_INDEX_MASK; |
3b1efb19 | 624 | struct bpf_event_entry *ee; |
a43eec30 | 625 | struct perf_event *event; |
a43eec30 | 626 | |
1e33759c | 627 | if (index == BPF_F_CURRENT_CPU) |
d7931330 | 628 | index = cpu; |
a43eec30 AS |
629 | if (unlikely(index >= array->map.max_entries)) |
630 | return -E2BIG; | |
631 | ||
3b1efb19 | 632 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 633 | if (!ee) |
a43eec30 AS |
634 | return -ENOENT; |
635 | ||
3b1efb19 | 636 | event = ee->event; |
a43eec30 AS |
637 | if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || |
638 | event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) | |
639 | return -EINVAL; | |
640 | ||
d7931330 | 641 | if (unlikely(event->oncpu != cpu)) |
a43eec30 AS |
642 | return -EOPNOTSUPP; |
643 | ||
56201969 | 644 | return perf_event_output(event, sd, regs); |
a43eec30 AS |
645 | } |
646 | ||
9594dc3c MM |
647 | /* |
648 | * Support executing tracepoints in normal, irq, and nmi context that each call | |
649 | * bpf_perf_event_output | |
650 | */ | |
651 | struct bpf_trace_sample_data { | |
652 | struct perf_sample_data sds[3]; | |
653 | }; | |
654 | ||
655 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); | |
656 | static DEFINE_PER_CPU(int, bpf_trace_nest_level); | |
f3694e00 DB |
657 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
658 | u64, flags, void *, data, u64, size) | |
8e7a3920 | 659 | { |
f2c67a3e | 660 | struct bpf_trace_sample_data *sds; |
8e7a3920 DB |
661 | struct perf_raw_record raw = { |
662 | .frag = { | |
663 | .size = size, | |
664 | .data = data, | |
665 | }, | |
666 | }; | |
9594dc3c | 667 | struct perf_sample_data *sd; |
f2c67a3e JO |
668 | int nest_level, err; |
669 | ||
670 | preempt_disable(); | |
671 | sds = this_cpu_ptr(&bpf_trace_sds); | |
672 | nest_level = this_cpu_inc_return(bpf_trace_nest_level); | |
8e7a3920 | 673 | |
9594dc3c MM |
674 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { |
675 | err = -EBUSY; | |
676 | goto out; | |
677 | } | |
678 | ||
679 | sd = &sds->sds[nest_level - 1]; | |
680 | ||
681 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { | |
682 | err = -EINVAL; | |
683 | goto out; | |
684 | } | |
8e7a3920 | 685 | |
283ca526 | 686 | perf_sample_data_init(sd, 0, 0); |
0a9081cf | 687 | perf_sample_save_raw_data(sd, &raw); |
283ca526 | 688 | |
9594dc3c | 689 | err = __bpf_perf_event_output(regs, map, flags, sd); |
9594dc3c MM |
690 | out: |
691 | this_cpu_dec(bpf_trace_nest_level); | |
f2c67a3e | 692 | preempt_enable(); |
9594dc3c | 693 | return err; |
8e7a3920 DB |
694 | } |
695 | ||
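The per-CPU sample-data/nest-level machinery above exists so that the following very common BPF-side pattern stays safe even when a program running in normal context is interrupted by irq and nmi programs on the same CPU. A sketch; the event layout and attach point are illustrative:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct event {
	u32 pid;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

SEC("kprobe/do_exit")
int BPF_KPROBE(on_exit)
{
	struct event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e.comm, sizeof(e.comm));

	/* BPF_F_CURRENT_CPU selects this CPU's slot in the event array */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```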
a43eec30 AS |
696 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
697 | .func = bpf_perf_event_output, | |
1075ef59 | 698 | .gpl_only = true, |
a43eec30 AS |
699 | .ret_type = RET_INTEGER, |
700 | .arg1_type = ARG_PTR_TO_CTX, | |
701 | .arg2_type = ARG_CONST_MAP_PTR, | |
702 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 703 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
a60dd35d | 704 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
a43eec30 AS |
705 | }; |
706 | ||
768fb61f AZ |
707 | static DEFINE_PER_CPU(int, bpf_event_output_nest_level); |
708 | struct bpf_nested_pt_regs { | |
709 | struct pt_regs regs[3]; | |
710 | }; | |
711 | static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); | |
712 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); | |
bd570ff9 | 713 | |
555c8a86 DB |
714 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
715 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | |
bd570ff9 | 716 | { |
555c8a86 DB |
717 | struct perf_raw_frag frag = { |
718 | .copy = ctx_copy, | |
719 | .size = ctx_size, | |
720 | .data = ctx, | |
721 | }; | |
722 | struct perf_raw_record raw = { | |
723 | .frag = { | |
183fc153 AM |
724 | { |
725 | .next = ctx_size ? &frag : NULL, | |
726 | }, | |
555c8a86 DB |
727 | .size = meta_size, |
728 | .data = meta, | |
729 | }, | |
730 | }; | |
768fb61f AZ |
731 | struct perf_sample_data *sd; |
732 | struct pt_regs *regs; | |
d62cc390 | 733 | int nest_level; |
768fb61f AZ |
734 | u64 ret; |
735 | ||
d62cc390 JO |
736 | preempt_disable(); |
737 | nest_level = this_cpu_inc_return(bpf_event_output_nest_level); | |
738 | ||
768fb61f AZ |
739 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { |
740 | ret = -EBUSY; | |
741 | goto out; | |
742 | } | |
743 | sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); | |
744 | regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); | |
bd570ff9 DB |
745 | |
746 | perf_fetch_caller_regs(regs); | |
283ca526 | 747 | perf_sample_data_init(sd, 0, 0); |
0a9081cf | 748 | perf_sample_save_raw_data(sd, &raw); |
bd570ff9 | 749 | |
768fb61f AZ |
750 | ret = __bpf_perf_event_output(regs, map, flags, sd); |
751 | out: | |
752 | this_cpu_dec(bpf_event_output_nest_level); | |
d62cc390 | 753 | preempt_enable(); |
768fb61f | 754 | return ret; |
bd570ff9 DB |
755 | } |
756 | ||
f3694e00 | 757 | BPF_CALL_0(bpf_get_current_task) |
606274c5 AS |
758 | { |
759 | return (long) current; | |
760 | } | |
761 | ||
f470378c | 762 | const struct bpf_func_proto bpf_get_current_task_proto = { |
606274c5 AS |
763 | .func = bpf_get_current_task, |
764 | .gpl_only = true, | |
765 | .ret_type = RET_INTEGER, | |
766 | }; | |
767 | ||
3ca1032a KS |
768 | BPF_CALL_0(bpf_get_current_task_btf) |
769 | { | |
770 | return (unsigned long) current; | |
771 | } | |
772 | ||
a396eda5 | 773 | const struct bpf_func_proto bpf_get_current_task_btf_proto = { |
3ca1032a KS |
774 | .func = bpf_get_current_task_btf, |
775 | .gpl_only = true, | |
3f00c523 | 776 | .ret_type = RET_PTR_TO_BTF_ID_TRUSTED, |
d19ddb47 | 777 | .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], |
3ca1032a KS |
778 | }; |
779 | ||
dd6e10fb DX |
780 | BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) |
781 | { | |
782 | return (unsigned long) task_pt_regs(task); | |
783 | } | |
784 | ||
785 | BTF_ID_LIST(bpf_task_pt_regs_ids) | |
786 | BTF_ID(struct, pt_regs) | |
787 | ||
788 | const struct bpf_func_proto bpf_task_pt_regs_proto = { | |
789 | .func = bpf_task_pt_regs, | |
790 | .gpl_only = true, | |
791 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
d19ddb47 | 792 | .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], |
dd6e10fb DX |
793 | .ret_type = RET_PTR_TO_BTF_ID, |
794 | .ret_btf_id = &bpf_task_pt_regs_ids[0], | |
795 | }; | |
796 | ||
f3694e00 | 797 | BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) |
60d20f91 | 798 | { |
60d20f91 SD |
799 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
800 | struct cgroup *cgrp; | |
60d20f91 | 801 | |
60d20f91 SD |
802 | if (unlikely(idx >= array->map.max_entries)) |
803 | return -E2BIG; | |
804 | ||
805 | cgrp = READ_ONCE(array->ptrs[idx]); | |
806 | if (unlikely(!cgrp)) | |
807 | return -EAGAIN; | |
808 | ||
809 | return task_under_cgroup_hierarchy(current, cgrp); | |
810 | } | |
811 | ||
812 | static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { | |
813 | .func = bpf_current_task_under_cgroup, | |
814 | .gpl_only = false, | |
815 | .ret_type = RET_INTEGER, | |
816 | .arg1_type = ARG_CONST_MAP_PTR, | |
817 | .arg2_type = ARG_ANYTHING, | |
818 | }; | |
819 | ||
8b401f9e YS |
820 | struct send_signal_irq_work { |
821 | struct irq_work irq_work; | |
822 | struct task_struct *task; | |
823 | u32 sig; | |
8482941f | 824 | enum pid_type type; |
8b401f9e YS |
825 | }; |
826 | ||
827 | static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); | |
828 | ||
829 | static void do_bpf_send_signal(struct irq_work *entry) | |
830 | { | |
831 | struct send_signal_irq_work *work; | |
832 | ||
833 | work = container_of(entry, struct send_signal_irq_work, irq_work); | |
8482941f | 834 | group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); |
bdb7fdb0 | 835 | put_task_struct(work->task); |
8b401f9e YS |
836 | } |
837 | ||
8482941f | 838 | static int bpf_send_signal_common(u32 sig, enum pid_type type) |
8b401f9e YS |
839 | { |
840 | struct send_signal_irq_work *work = NULL; | |
841 | ||
842 | /* Similar to bpf_probe_write_user, the task needs to be |
843 | * in a sound condition and kernel memory access must be |
844 | * permitted in order to send a signal to the current |
845 | * task. | |
846 | */ | |
847 | if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) | |
848 | return -EPERM; | |
8b401f9e YS |
849 | if (unlikely(!nmi_uaccess_okay())) |
850 | return -EPERM; | |
a3d81bc1 HS |
851 | /* Task should not be pid=1 to avoid kernel panic. */ |
852 | if (unlikely(is_global_init(current))) | |
853 | return -EPERM; | |
8b401f9e | 854 | |
1bc7896e | 855 | if (irqs_disabled()) { |
e1afb702 YS |
856 | /* Do an early check on signal validity. Otherwise, |
857 | * the error is lost in deferred irq_work. | |
858 | */ | |
859 | if (unlikely(!valid_signal(sig))) | |
860 | return -EINVAL; | |
861 | ||
8b401f9e | 862 | work = this_cpu_ptr(&send_signal_work); |
7a9f50a0 | 863 | if (irq_work_is_busy(&work->irq_work)) |
8b401f9e YS |
864 | return -EBUSY; |
865 | ||
866 | /* Add the current task, which is the target of sending signal, | |
867 | * to the irq_work. The current task may change when queued | |
868 | * irq works get executed. | |
869 | */ | |
bdb7fdb0 | 870 | work->task = get_task_struct(current); |
8b401f9e | 871 | work->sig = sig; |
8482941f | 872 | work->type = type; |
8b401f9e YS |
873 | irq_work_queue(&work->irq_work); |
874 | return 0; | |
875 | } | |
876 | ||
8482941f YS |
877 | return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); |
878 | } | |
879 | ||
880 | BPF_CALL_1(bpf_send_signal, u32, sig) | |
881 | { | |
882 | return bpf_send_signal_common(sig, PIDTYPE_TGID); | |
8b401f9e YS |
883 | } |
884 | ||
885 | static const struct bpf_func_proto bpf_send_signal_proto = { | |
886 | .func = bpf_send_signal, | |
887 | .gpl_only = false, | |
888 | .ret_type = RET_INTEGER, | |
889 | .arg1_type = ARG_ANYTHING, | |
890 | }; | |
891 | ||
8482941f YS |
892 | BPF_CALL_1(bpf_send_signal_thread, u32, sig) |
893 | { | |
894 | return bpf_send_signal_common(sig, PIDTYPE_PID); | |
895 | } | |
896 | ||
897 | static const struct bpf_func_proto bpf_send_signal_thread_proto = { | |
898 | .func = bpf_send_signal_thread, | |
899 | .gpl_only = false, | |
900 | .ret_type = RET_INTEGER, | |
901 | .arg1_type = ARG_ANYTHING, | |
902 | }; | |
903 | ||
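Usage sketch for the pair above: bpf_send_signal() signals the whole thread group (PIDTYPE_TGID) while bpf_send_signal_thread() targets only the current thread (PIDTYPE_PID); both refuse kthreads, exiting tasks, and init, per the common checks. The attach point and pid below are made up:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/__x64_sys_ptrace")	/* arch-specific, illustrative target */
int BPF_KPROBE(no_ptrace)
{
	/* Kill a hypothetical pid when it calls ptrace(); SIGKILL == 9 */
	if ((bpf_get_current_pid_tgid() >> 32) == 4242)
		bpf_send_signal(9);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```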
6e22ab9d JO |
904 | BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) |
905 | { | |
f46fab0e | 906 | struct path copy; |
6e22ab9d JO |
907 | long len; |
908 | char *p; | |
909 | ||
910 | if (!sz) | |
911 | return 0; | |
912 | ||
f46fab0e JO |
913 | /* |
914 | * The path pointer is verified as trusted and safe to use, | |
915 | * but let's double-check that it's valid anyway to work around |
916 | * a potentially broken verifier. |
917 | */ | |
918 | len = copy_from_kernel_nofault(©, path, sizeof(*path)); | |
919 | if (len < 0) | |
920 | return len; | |
921 | ||
922 | p = d_path(©, buf, sz); | |
6e22ab9d JO |
923 | if (IS_ERR(p)) { |
924 | len = PTR_ERR(p); | |
925 | } else { | |
926 | len = buf + sz - p; | |
927 | memmove(buf, p, len); | |
928 | } | |
929 | ||
930 | return len; | |
931 | } | |
932 | ||
933 | BTF_SET_START(btf_allowlist_d_path) | |
a8a71796 JO |
934 | #ifdef CONFIG_SECURITY |
935 | BTF_ID(func, security_file_permission) | |
936 | BTF_ID(func, security_inode_getattr) | |
937 | BTF_ID(func, security_file_open) | |
938 | #endif | |
939 | #ifdef CONFIG_SECURITY_PATH | |
940 | BTF_ID(func, security_path_truncate) | |
941 | #endif | |
6e22ab9d JO |
942 | BTF_ID(func, vfs_truncate) |
943 | BTF_ID(func, vfs_fallocate) | |
944 | BTF_ID(func, dentry_open) | |
945 | BTF_ID(func, vfs_getattr) | |
946 | BTF_ID(func, filp_close) | |
947 | BTF_SET_END(btf_allowlist_d_path) | |
948 | ||
949 | static bool bpf_d_path_allowed(const struct bpf_prog *prog) | |
950 | { | |
3d06f34a SL |
951 | if (prog->type == BPF_PROG_TYPE_TRACING && |
952 | prog->expected_attach_type == BPF_TRACE_ITER) | |
953 | return true; | |
954 | ||
6f100640 KS |
955 | if (prog->type == BPF_PROG_TYPE_LSM) |
956 | return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); | |
957 | ||
958 | return btf_id_set_contains(&btf_allowlist_d_path, | |
959 | prog->aux->attach_btf_id); | |
6e22ab9d JO |
960 | } |
961 | ||
9436ef6e | 962 | BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) |
6e22ab9d JO |
963 | |
964 | static const struct bpf_func_proto bpf_d_path_proto = { | |
965 | .func = bpf_d_path, | |
966 | .gpl_only = false, | |
967 | .ret_type = RET_INTEGER, | |
968 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 969 | .arg1_btf_id = &bpf_d_path_btf_ids[0], |
6e22ab9d JO |
970 | .arg2_type = ARG_PTR_TO_MEM, |
971 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
6e22ab9d JO |
972 | .allowed = bpf_d_path_allowed, |
973 | }; | |
974 | ||
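bpf_d_path() is only usable from attach points admitted by bpf_d_path_allowed() above; fentry on the allowlisted vfs_truncate is one such point, and it hands the program a trusted struct path * directly. A sketch, assuming libbpf's BPF_PROG macro:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/vfs_truncate")
int BPF_PROG(on_truncate, const struct path *path)
{
	char buf[256];
	long len;

	len = bpf_d_path((struct path *)path, buf, sizeof(buf));
	if (len > 0)
		bpf_printk("truncate: %s", buf);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```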
c4d0bfb4 AM |
975 | #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ |
976 | BTF_F_PTR_RAW | BTF_F_ZERO) | |
977 | ||
978 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, | |
979 | u64 flags, const struct btf **btf, | |
980 | s32 *btf_id) | |
981 | { | |
982 | const struct btf_type *t; | |
983 | ||
984 | if (unlikely(flags & ~(BTF_F_ALL))) | |
985 | return -EINVAL; | |
986 | ||
987 | if (btf_ptr_size != sizeof(struct btf_ptr)) | |
988 | return -EINVAL; | |
989 | ||
990 | *btf = bpf_get_btf_vmlinux(); | |
991 | ||
992 | if (IS_ERR_OR_NULL(*btf)) | |
abbaa433 | 993 | return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; |
c4d0bfb4 AM |
994 | |
995 | if (ptr->type_id > 0) | |
996 | *btf_id = ptr->type_id; | |
997 | else | |
998 | return -EINVAL; | |
999 | ||
1000 | if (*btf_id > 0) | |
1001 | t = btf_type_by_id(*btf, *btf_id); | |
1002 | if (*btf_id <= 0 || !t) | |
1003 | return -ENOENT; | |
1004 | ||
1005 | return 0; | |
1006 | } | |
1007 | ||
1008 | BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, | |
1009 | u32, btf_ptr_size, u64, flags) | |
1010 | { | |
1011 | const struct btf *btf; | |
1012 | s32 btf_id; | |
1013 | int ret; | |
1014 | ||
1015 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
1016 | if (ret) | |
1017 | return ret; | |
1018 | ||
1019 | return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, | |
1020 | flags); | |
1021 | } | |
1022 | ||
1023 | const struct bpf_func_proto bpf_snprintf_btf_proto = { | |
1024 | .func = bpf_snprintf_btf, | |
1025 | .gpl_only = false, | |
1026 | .ret_type = RET_INTEGER, | |
1027 | .arg1_type = ARG_PTR_TO_MEM, | |
1028 | .arg2_type = ARG_CONST_SIZE, | |
216e3cd2 | 1029 | .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c4d0bfb4 AM |
1030 | .arg4_type = ARG_CONST_SIZE, |
1031 | .arg5_type = ARG_ANYTHING, | |
1032 | }; | |
1033 | ||
9b99edca JO |
1034 | BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) |
1035 | { | |
1036 | /* This helper call is inlined by verifier. */ | |
f92c1e18 | 1037 | return ((u64 *)ctx)[-2]; |
9b99edca JO |
1038 | } |
1039 | ||
1040 | static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { | |
1041 | .func = bpf_get_func_ip_tracing, | |
1042 | .gpl_only = true, | |
1043 | .ret_type = RET_INTEGER, | |
1044 | .arg1_type = ARG_PTR_TO_CTX, | |
1045 | }; | |
1046 | ||
c09eb2e5 JO |
1047 | #ifdef CONFIG_X86_KERNEL_IBT |
1048 | static unsigned long get_entry_ip(unsigned long fentry_ip) | |
1049 | { | |
1050 | u32 instr; | |
1051 | ||
1052 | /* Being extra safe in here in case entry ip is on the page-edge. */ | |
1053 | if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) | |
1054 | return fentry_ip; | |
1055 | if (is_endbr(instr)) | |
1056 | fentry_ip -= ENDBR_INSN_SIZE; | |
1057 | return fentry_ip; | |
1058 | } | |
1059 | #else | |
1060 | #define get_entry_ip(fentry_ip) fentry_ip | |
1061 | #endif | |
1062 | ||
9ffd9f3f JO |
1063 | BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs) |
1064 | { | |
a3c485a5 JO |
1065 | struct bpf_trace_run_ctx *run_ctx __maybe_unused; |
1066 | struct kprobe *kp; | |
1067 | ||
1068 | #ifdef CONFIG_UPROBES | |
1069 | run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); | |
1070 | if (run_ctx->is_uprobe) | |
1071 | return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr; | |
1072 | #endif | |
1073 | ||
1074 | kp = kprobe_running(); | |
9ffd9f3f | 1075 | |
0e253f7e JO |
1076 | if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) |
1077 | return 0; | |
1078 | ||
1079 | return get_entry_ip((uintptr_t)kp->addr); | |
9ffd9f3f JO |
1080 | } |
1081 | ||
1082 | static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { | |
1083 | .func = bpf_get_func_ip_kprobe, | |
1084 | .gpl_only = true, | |
1085 | .ret_type = RET_INTEGER, | |
1086 | .arg1_type = ARG_PTR_TO_CTX, | |
1087 | }; | |
1088 | ||
42a57120 JO |
1089 | BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) |
1090 | { | |
f7098690 | 1091 | return bpf_kprobe_multi_entry_ip(current->bpf_ctx); |
42a57120 JO |
1092 | } |
1093 | ||
1094 | static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { | |
1095 | .func = bpf_get_func_ip_kprobe_multi, | |
1096 | .gpl_only = false, | |
1097 | .ret_type = RET_INTEGER, | |
1098 | .arg1_type = ARG_PTR_TO_CTX, | |
1099 | }; | |
1100 | ||
ca74823c JO |
1101 | BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) |
1102 | { | |
f7098690 | 1103 | return bpf_kprobe_multi_cookie(current->bpf_ctx); |
ca74823c JO |
1104 | } |
1105 | ||
1106 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { | |
1107 | .func = bpf_get_attach_cookie_kprobe_multi, | |
1108 | .gpl_only = false, | |
1109 | .ret_type = RET_INTEGER, | |
1110 | .arg1_type = ARG_PTR_TO_CTX, | |
1111 | }; | |
1112 | ||
686328d8 JO |
1113 | BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs) |
1114 | { | |
1115 | return bpf_uprobe_multi_entry_ip(current->bpf_ctx); | |
1116 | } | |
1117 | ||
1118 | static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = { | |
1119 | .func = bpf_get_func_ip_uprobe_multi, | |
1120 | .gpl_only = false, | |
1121 | .ret_type = RET_INTEGER, | |
1122 | .arg1_type = ARG_PTR_TO_CTX, | |
1123 | }; | |
1124 | ||
0b779b61 JO |
1125 | BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs) |
1126 | { | |
1127 | return bpf_uprobe_multi_cookie(current->bpf_ctx); | |
1128 | } | |
1129 | ||
1130 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = { | |
1131 | .func = bpf_get_attach_cookie_uprobe_multi, | |
1132 | .gpl_only = false, | |
1133 | .ret_type = RET_INTEGER, | |
1134 | .arg1_type = ARG_PTR_TO_CTX, | |
1135 | }; | |
1136 | ||
7adfc6c9 AN |
1137 | BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) |
1138 | { | |
1139 | struct bpf_trace_run_ctx *run_ctx; | |
1140 | ||
1141 | run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); | |
1142 | return run_ctx->bpf_cookie; | |
1143 | } | |
1144 | ||
1145 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = { | |
1146 | .func = bpf_get_attach_cookie_trace, | |
1147 | .gpl_only = false, | |
1148 | .ret_type = RET_INTEGER, | |
1149 | .arg1_type = ARG_PTR_TO_CTX, | |
1150 | }; | |
1151 | ||
1152 | BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx) | |
1153 | { | |
1154 | return ctx->event->bpf_cookie; | |
1155 | } | |
1156 | ||
1157 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { | |
1158 | .func = bpf_get_attach_cookie_pe, | |
1159 | .gpl_only = false, | |
1160 | .ret_type = RET_INTEGER, | |
1161 | .arg1_type = ARG_PTR_TO_CTX, | |
1162 | }; | |
1163 | ||
2fcc8241 KFL |
1164 | BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) |
1165 | { | |
1166 | struct bpf_trace_run_ctx *run_ctx; | |
1167 | ||
1168 | run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); | |
1169 | return run_ctx->bpf_cookie; | |
1170 | } | |
1171 | ||
1172 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { | |
1173 | .func = bpf_get_attach_cookie_tracing, | |
1174 | .gpl_only = false, | |
1175 | .ret_type = RET_INTEGER, | |
1176 | .arg1_type = ARG_PTR_TO_CTX, | |
1177 | }; | |
1178 | ||
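All of the bpf_get_attach_cookie variants above return the u64 the user supplied at attach time (e.g. via bpf_link_create()'s opts), which lets one program attached at many sites tell them apart; each variant just reads the cookie from its flavor of run_ctx. A sketch:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(which_attachment)
{
	/* Returns whatever cookie userspace passed when attaching */
	u64 cookie = bpf_get_attach_cookie(ctx);

	bpf_printk("unlink seen via attachment %llu", cookie);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```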
856c02db SL |
1179 | BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) |
1180 | { | |
1181 | #ifndef CONFIG_X86 | |
1182 | return -ENOENT; | |
1183 | #else | |
1184 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); | |
1185 | u32 entry_cnt = size / br_entry_size; | |
1186 | ||
1187 | entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); | |
1188 | ||
1189 | if (unlikely(flags)) | |
1190 | return -EINVAL; | |
1191 | ||
1192 | if (!entry_cnt) | |
1193 | return -ENOENT; | |
1194 | ||
1195 | return entry_cnt * br_entry_size; | |
1196 | #endif | |
1197 | } | |
1198 | ||
1199 | static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { | |
1200 | .func = bpf_get_branch_snapshot, | |
1201 | .gpl_only = true, | |
1202 | .ret_type = RET_INTEGER, | |
1203 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
1204 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
1205 | }; | |
1206 | ||
f92c1e18 JO |
1207 | BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value) |
1208 | { | |
1209 | /* This helper call is inlined by verifier. */ | |
1210 | u64 nr_args = ((u64 *)ctx)[-1]; | |
1211 | ||
1212 | if ((u64) n >= nr_args) | |
1213 | return -EINVAL; | |
1214 | *value = ((u64 *)ctx)[n]; | |
1215 | return 0; | |
1216 | } | |
1217 | ||
1218 | static const struct bpf_func_proto bpf_get_func_arg_proto = { | |
1219 | .func = get_func_arg, | |
1220 | .ret_type = RET_INTEGER, | |
1221 | .arg1_type = ARG_PTR_TO_CTX, | |
1222 | .arg2_type = ARG_ANYTHING, | |
1223 | .arg3_type = ARG_PTR_TO_LONG, | |
1224 | }; | |
1225 | ||
1226 | BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) | |
1227 | { | |
1228 | /* This helper call is inlined by verifier. */ | |
1229 | u64 nr_args = ((u64 *)ctx)[-1]; | |
1230 | ||
1231 | *value = ((u64 *)ctx)[nr_args]; | |
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | static const struct bpf_func_proto bpf_get_func_ret_proto = { | |
1236 | .func = get_func_ret, | |
1237 | .ret_type = RET_INTEGER, | |
1238 | .arg1_type = ARG_PTR_TO_CTX, | |
1239 | .arg2_type = ARG_PTR_TO_LONG, | |
1240 | }; | |
1241 | ||
1242 | BPF_CALL_1(get_func_arg_cnt, void *, ctx) | |
1243 | { | |
1244 | /* This helper call is inlined by verifier. */ | |
1245 | return ((u64 *)ctx)[-1]; | |
1246 | } | |
1247 | ||
1248 | static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { | |
1249 | .func = get_func_arg_cnt, | |
1250 | .ret_type = RET_INTEGER, | |
1251 | .arg1_type = ARG_PTR_TO_CTX, | |
1252 | }; | |
1253 | ||
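The three inlined helpers above decode the fentry/fexit context layout: ctx[-1] holds the argument count, ctx[0..n-1] the arguments, and ctx[nr_args] the return value (meaningful on fexit). A sketch from an fexit program with an illustrative target:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fexit/do_sys_openat2")
int BPF_PROG(openat_done)
{
	u64 dfd = 0, ret = 0;

	bpf_get_func_arg(ctx, 0, &dfd);		/* ctx[0]        */
	bpf_get_func_ret(ctx, &ret);		/* ctx[nr_args]  */
	bpf_printk("openat2: %lld args, dfd=%lld, ret=%lld",
		   bpf_get_func_arg_cnt(ctx),	/* ctx[-1]       */
		   dfd, ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```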
f3cf4134 | 1254 | #ifdef CONFIG_KEYS |
391145ba | 1255 | __bpf_kfunc_start_defs(); |
f3cf4134 RS |
1256 | |
1257 | /** | |
1258 | * bpf_lookup_user_key - lookup a key by its serial | |
1259 | * @serial: key handle serial number | |
1260 | * @flags: lookup-specific flags | |
1261 | * | |
1262 | * Search a key with a given *serial* and the provided *flags*. | |
1263 | * If found, increment the reference count of the key by one, and | |
1264 | * return it in the bpf_key structure. | |
1265 | * | |
1266 | * The bpf_key structure must be passed to bpf_key_put() when done | |
1267 | * with it, so that the key reference count is decremented and the | |
1268 | * bpf_key structure is freed. | |
1269 | * | |
1270 | * Permission checks are deferred to the time the key is used by | |
1271 | * one of the available key-specific kfuncs. | |
1272 | * | |
1273 | * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested | |
1274 | * special keyring (e.g. session keyring), if it doesn't yet exist. | |
1275 | * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting | |
1276 | * for the key construction, and to retrieve uninstantiated keys (keys | |
1277 | * without data attached to them). | |
1278 | * | |
1279 | * Return: a bpf_key pointer with a valid key pointer if the key is found, a | |
1280 | * NULL pointer otherwise. | |
1281 | */ | |
400031e0 | 1282 | __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) |
f3cf4134 RS |
1283 | { |
1284 | key_ref_t key_ref; | |
1285 | struct bpf_key *bkey; | |
1286 | ||
1287 | if (flags & ~KEY_LOOKUP_ALL) | |
1288 | return NULL; | |
1289 | ||
1290 | /* | |
1291 | * Permission check is deferred until the key is used, as the | |
1292 | * intent of the caller is unknown here. | |
1293 | */ | |
1294 | key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK); | |
1295 | if (IS_ERR(key_ref)) | |
1296 | return NULL; | |
1297 | ||
1298 | bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); | |
1299 | if (!bkey) { | |
1300 | key_put(key_ref_to_ptr(key_ref)); | |
1301 | return NULL; | |
1302 | } | |
1303 | ||
1304 | bkey->key = key_ref_to_ptr(key_ref); | |
1305 | bkey->has_ref = true; | |
1306 | ||
1307 | return bkey; | |
1308 | } | |
1309 | ||
1310 | /** | |
1311 | * bpf_lookup_system_key - lookup a key by a system-defined ID | |
1312 | * @id: key ID | |
1313 | * | |
1314 | * Obtain a bpf_key structure with a key pointer set to the passed key ID. | |
1315 | * The key pointer is marked as invalid, to prevent bpf_key_put() from | |
1316 | * attempting to decrement the key reference count on that pointer. The key | |
1317 | * pointer set in such way is currently understood only by | |
1318 | * verify_pkcs7_signature(). | |
1319 | * | |
1320 | * Set *id* to one of the values defined in include/linux/verification.h: | |
1321 | * 0 for the primary keyring (immutable keyring of system keys); | |
1322 | * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring | |
1323 | * (where keys can be added only if they are vouched for by existing keys | |
1324 | * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform | |
1325 | * keyring (primarily used by the integrity subsystem to verify a kexec'ed | |
1326 | * kernel image and, possibly, the initramfs signature). |
1327 | * | |
1328 | * Return: a bpf_key pointer with an invalid key pointer set from the | |
1329 | * pre-determined ID on success, a NULL pointer otherwise | |
1330 | */ | |
400031e0 | 1331 | __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id) |
f3cf4134 RS |
1332 | { |
1333 | struct bpf_key *bkey; | |
1334 | ||
1335 | if (system_keyring_id_check(id) < 0) | |
1336 | return NULL; | |
1337 | ||
1338 | bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); | |
1339 | if (!bkey) | |
1340 | return NULL; | |
1341 | ||
1342 | bkey->key = (struct key *)(unsigned long)id; | |
1343 | bkey->has_ref = false; | |
1344 | ||
1345 | return bkey; | |
1346 | } | |
1347 | ||
1348 | /** | |
1349 | * bpf_key_put - decrement key reference count if key is valid and free bpf_key | |
1350 | * @bkey: bpf_key structure | |
1351 | * | |
1352 | * Decrement the reference count of the key inside *bkey*, if the pointer | |
1353 | * is valid, and free *bkey*. | |
1354 | */ | |
400031e0 | 1355 | __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) |
f3cf4134 RS |
1356 | { |
1357 | if (bkey->has_ref) | |
1358 | key_put(bkey->key); | |
1359 | ||
1360 | kfree(bkey); | |
1361 | } | |
1362 | ||
865b0566 RS |
1363 | #ifdef CONFIG_SYSTEM_DATA_VERIFICATION |
1364 | /** | |
1365 | * bpf_verify_pkcs7_signature - verify a PKCS#7 signature | |
1366 | * @data_ptr: data to verify | |
1367 | * @sig_ptr: signature of the data | |
1368 | * @trusted_keyring: keyring with keys trusted for signature verification | |
1369 | * | |
1370 | * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* | |
1371 | * with keys in a keyring referenced by *trusted_keyring*. | |
1372 | * | |
1373 | * Return: 0 on success, a negative value on error. | |
1374 | */ | |
400031e0 | 1375 | __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, |
865b0566 RS |
1376 | struct bpf_dynptr_kern *sig_ptr, |
1377 | struct bpf_key *trusted_keyring) | |
1378 | { | |
1379 | int ret; | |
1380 | ||
1381 | if (trusted_keyring->has_ref) { | |
1382 | /* | |
1383 | * Do the permission check deferred in bpf_lookup_user_key(). | |
1384 | * See bpf_lookup_user_key() for more details. | |
1385 | * | |
1386 | * A call to key_task_permission() here would be redundant, as | |
1387 | * it is already done by keyring_search() called by | |
1388 | * find_asymmetric_key(). | |
1389 | */ | |
1390 | ret = key_validate(trusted_keyring->key); | |
1391 | if (ret < 0) | |
1392 | return ret; | |
1393 | } | |
1394 | ||
1395 | return verify_pkcs7_signature(data_ptr->data, | |
26662d73 | 1396 | __bpf_dynptr_size(data_ptr), |
865b0566 | 1397 | sig_ptr->data, |
26662d73 | 1398 | __bpf_dynptr_size(sig_ptr), |
865b0566 RS |
1399 | trusted_keyring->key, |
1400 | VERIFYING_UNSPECIFIED_SIGNATURE, NULL, | |
1401 | NULL); | |
1402 | } | |
1403 | #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ | |
1404 | ||
391145ba | 1405 | __bpf_kfunc_end_defs(); |
f3cf4134 RS |
1406 | |
1407 | BTF_SET8_START(key_sig_kfunc_set) | |
1408 | BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) | |
1409 | BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) | |
1410 | BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) | |
865b0566 RS |
1411 | #ifdef CONFIG_SYSTEM_DATA_VERIFICATION |
1412 | BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) | |
1413 | #endif | |
f3cf4134 RS |
1414 | BTF_SET8_END(key_sig_kfunc_set) |
1415 | ||
1416 | static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { | |
1417 | .owner = THIS_MODULE, | |
1418 | .set = &key_sig_kfunc_set, | |
1419 | }; | |
1420 | ||
1421 | static int __init bpf_key_sig_kfuncs_init(void) | |
1422 | { | |
1423 | return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, | |
1424 | &bpf_key_sig_kfunc_set); | |
1425 | } | |
1426 | ||
1427 | late_initcall(bpf_key_sig_kfuncs_init); | |
1428 | #endif /* CONFIG_KEYS */ | |
1429 | ||
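A hedged sketch of calling the keyring kfuncs registered above from a sleepable program (bpf_lookup_user_key is KF_SLEEPABLE, so a non-sleepable hook would be rejected); the extern declarations mirror the kernel-side signatures, and the key serial is made up:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *bkey) __ksym;

SEC("lsm.s/bpf")	/* sleepable LSM hook */
int BPF_PROG(check_key, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_key *key;

	key = bpf_lookup_user_key(0x12345678, 0);	/* hypothetical serial */
	if (!key)
		return 0;
	/* bpf_verify_pkcs7_signature(&data, &sig, key) would slot in here,
	 * with data/sig dynptrs initialized from map memory. */
	bpf_key_put(key);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```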
7adfc6c9 | 1430 | static const struct bpf_func_proto * |
fc611f47 | 1431 | bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
2541517c AS |
1432 | { |
1433 | switch (func_id) { | |
1434 | case BPF_FUNC_map_lookup_elem: | |
1435 | return &bpf_map_lookup_elem_proto; | |
1436 | case BPF_FUNC_map_update_elem: | |
1437 | return &bpf_map_update_elem_proto; | |
1438 | case BPF_FUNC_map_delete_elem: | |
1439 | return &bpf_map_delete_elem_proto; | |
02a8c817 AC |
1440 | case BPF_FUNC_map_push_elem: |
1441 | return &bpf_map_push_elem_proto; | |
1442 | case BPF_FUNC_map_pop_elem: | |
1443 | return &bpf_map_pop_elem_proto; | |
1444 | case BPF_FUNC_map_peek_elem: | |
1445 | return &bpf_map_peek_elem_proto; | |
07343110 FZ |
1446 | case BPF_FUNC_map_lookup_percpu_elem: |
1447 | return &bpf_map_lookup_percpu_elem_proto; | |
d9847d31 AS |
1448 | case BPF_FUNC_ktime_get_ns: |
1449 | return &bpf_ktime_get_ns_proto; | |
71d19214 MÅ» |
1450 | case BPF_FUNC_ktime_get_boot_ns: |
1451 | return &bpf_ktime_get_boot_ns_proto; | |
04fd61ab AS |
1452 | case BPF_FUNC_tail_call: |
1453 | return &bpf_tail_call_proto; | |
ffeedafb AS |
1454 | case BPF_FUNC_get_current_pid_tgid: |
1455 | return &bpf_get_current_pid_tgid_proto; | |
606274c5 AS |
1456 | case BPF_FUNC_get_current_task: |
1457 | return &bpf_get_current_task_proto; | |
3ca1032a KS |
1458 | case BPF_FUNC_get_current_task_btf: |
1459 | return &bpf_get_current_task_btf_proto; | |
dd6e10fb DX |
1460 | case BPF_FUNC_task_pt_regs: |
1461 | return &bpf_task_pt_regs_proto; | |
ffeedafb AS |
1462 | case BPF_FUNC_get_current_uid_gid: |
1463 | return &bpf_get_current_uid_gid_proto; | |
1464 | case BPF_FUNC_get_current_comm: | |
1465 | return &bpf_get_current_comm_proto; | |
9c959c86 | 1466 | case BPF_FUNC_trace_printk: |
0756ea3e | 1467 | return bpf_get_trace_printk_proto(); |
ab1973d3 AS |
1468 | case BPF_FUNC_get_smp_processor_id: |
1469 | return &bpf_get_smp_processor_id_proto; | |
2d0e30c3 DB |
1470 | case BPF_FUNC_get_numa_node_id: |
1471 | return &bpf_get_numa_node_id_proto; | |
35578d79 KX |
1472 | case BPF_FUNC_perf_event_read: |
1473 | return &bpf_perf_event_read_proto; | |
60d20f91 SD |
1474 | case BPF_FUNC_current_task_under_cgroup: |
1475 | return &bpf_current_task_under_cgroup_proto; | |
8937bd80 AS |
1476 | case BPF_FUNC_get_prandom_u32: |
1477 | return &bpf_get_prandom_u32_proto; | |
51e1bb9e DB |
1478 | case BPF_FUNC_probe_write_user: |
1479 | return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? | |
1480 | NULL : bpf_get_probe_write_proto(); | |
6ae08ae3 DB |
1481 | case BPF_FUNC_probe_read_user: |
1482 | return &bpf_probe_read_user_proto; | |
1483 | case BPF_FUNC_probe_read_kernel: | |
71330842 | 1484 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1485 | NULL : &bpf_probe_read_kernel_proto; |
6ae08ae3 DB |
1486 | case BPF_FUNC_probe_read_user_str: |
1487 | return &bpf_probe_read_user_str_proto; | |
1488 | case BPF_FUNC_probe_read_kernel_str: | |
71330842 | 1489 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1490 | NULL : &bpf_probe_read_kernel_str_proto; |
0ebeea8c DB |
1491 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
1492 | case BPF_FUNC_probe_read: | |
71330842 | 1493 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1494 | NULL : &bpf_probe_read_compat_proto; |
a5e8c070 | 1495 | case BPF_FUNC_probe_read_str: |
71330842 | 1496 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1497 | NULL : &bpf_probe_read_compat_str_proto; |
0ebeea8c | 1498 | #endif |
34ea38ca | 1499 | #ifdef CONFIG_CGROUPS |
c4bcfb38 YS |
1500 | case BPF_FUNC_cgrp_storage_get: |
1501 | return &bpf_cgrp_storage_get_proto; | |
1502 | case BPF_FUNC_cgrp_storage_delete: | |
1503 | return &bpf_cgrp_storage_delete_proto; | |
34ea38ca | 1504 | #endif |
8b401f9e YS |
1505 | case BPF_FUNC_send_signal: |
1506 | return &bpf_send_signal_proto; | |
8482941f YS |
1507 | case BPF_FUNC_send_signal_thread: |
1508 | return &bpf_send_signal_thread_proto; | |
b80b033b SL |
1509 | case BPF_FUNC_perf_event_read_value: |
1510 | return &bpf_perf_event_read_value_proto; | |
b4490c5c CN |
1511 | case BPF_FUNC_get_ns_current_pid_tgid: |
1512 | return &bpf_get_ns_current_pid_tgid_proto; | |
457f4436 AN |
1513 | case BPF_FUNC_ringbuf_output: |
1514 | return &bpf_ringbuf_output_proto; | |
1515 | case BPF_FUNC_ringbuf_reserve: | |
1516 | return &bpf_ringbuf_reserve_proto; | |
1517 | case BPF_FUNC_ringbuf_submit: | |
1518 | return &bpf_ringbuf_submit_proto; | |
1519 | case BPF_FUNC_ringbuf_discard: | |
1520 | return &bpf_ringbuf_discard_proto; | |
1521 | case BPF_FUNC_ringbuf_query: | |
1522 | return &bpf_ringbuf_query_proto; | |
72e2b2b6 YS |
1523 | case BPF_FUNC_jiffies64: |
1524 | return &bpf_jiffies64_proto; | |
fa28dcb8 SL |
1525 | case BPF_FUNC_get_task_stack: |
1526 | return &bpf_get_task_stack_proto; | |
07be4c4a | 1527 | case BPF_FUNC_copy_from_user: |
01685c5b | 1528 | return &bpf_copy_from_user_proto; |
376040e4 | 1529 | case BPF_FUNC_copy_from_user_task: |
01685c5b | 1530 | return &bpf_copy_from_user_task_proto; |
c4d0bfb4 AM |
1531 | case BPF_FUNC_snprintf_btf: |
1532 | return &bpf_snprintf_btf_proto; | |
b7906b70 | 1533 | case BPF_FUNC_per_cpu_ptr: |
eaa6bcb7 | 1534 | return &bpf_per_cpu_ptr_proto; |
b7906b70 | 1535 | case BPF_FUNC_this_cpu_ptr: |
63d9b80d | 1536 | return &bpf_this_cpu_ptr_proto; |
a10787e6 | 1537 | case BPF_FUNC_task_storage_get: |
4279adb0 MKL |
1538 | if (bpf_prog_check_recur(prog)) |
1539 | return &bpf_task_storage_get_recur_proto; | |
a10787e6 SL |
1540 | return &bpf_task_storage_get_proto; |
1541 | case BPF_FUNC_task_storage_delete: | |
8a7dac37 MKL |
1542 | if (bpf_prog_check_recur(prog)) |
1543 | return &bpf_task_storage_delete_recur_proto; | |
a10787e6 | 1544 | return &bpf_task_storage_delete_proto; |
69c087ba YS |
1545 | case BPF_FUNC_for_each_map_elem: |
1546 | return &bpf_for_each_map_elem_proto; | |
7b15523a FR |
1547 | case BPF_FUNC_snprintf: |
1548 | return &bpf_snprintf_proto; | |
9b99edca JO |
1549 | case BPF_FUNC_get_func_ip: |
1550 | return &bpf_get_func_ip_proto_tracing; | |
856c02db SL |
1551 | case BPF_FUNC_get_branch_snapshot: |
1552 | return &bpf_get_branch_snapshot_proto; | |
7c7e3d31 SL |
1553 | case BPF_FUNC_find_vma: |
1554 | return &bpf_find_vma_proto; | |
10aceb62 DM |
1555 | case BPF_FUNC_trace_vprintk: |
1556 | return bpf_get_trace_vprintk_proto(); | |
9fd82b61 | 1557 | default: |
b00628b1 | 1558 | return bpf_base_func_proto(func_id); |
9fd82b61 AS |
1559 | } |
1560 | } | |
1561 | ||
5e43f899 AI |
1562 | static const struct bpf_func_proto * |
1563 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1564 | { |
1565 | switch (func_id) { | |
a43eec30 AS |
1566 | case BPF_FUNC_perf_event_output: |
1567 | return &bpf_perf_event_output_proto; | |
d5a3b1f6 AS |
1568 | case BPF_FUNC_get_stackid: |
1569 | return &bpf_get_stackid_proto; | |
c195651e YS |
1570 | case BPF_FUNC_get_stack: |
1571 | return &bpf_get_stack_proto; | |
9802d865 JB |
1572 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
1573 | case BPF_FUNC_override_return: | |
1574 | return &bpf_override_return_proto; | |
1575 | #endif | |
9ffd9f3f | 1576 | case BPF_FUNC_get_func_ip: |
686328d8 JO |
1577 | if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) |
1578 | return &bpf_get_func_ip_proto_kprobe_multi; | |
1579 | if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) | |
1580 | return &bpf_get_func_ip_proto_uprobe_multi; | |
1581 | return &bpf_get_func_ip_proto_kprobe; | |
7adfc6c9 | 1582 | case BPF_FUNC_get_attach_cookie: |
0b779b61 JO |
1583 | if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) |
1584 | return &bpf_get_attach_cookie_proto_kmulti; | |
1585 | if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) | |
1586 | return &bpf_get_attach_cookie_proto_umulti; | |
1587 | return &bpf_get_attach_cookie_proto_trace; | |
2541517c | 1588 | default: |
fc611f47 | 1589 | return bpf_tracing_func_proto(func_id, prog); |
2541517c AS |
1590 | } |
1591 | } | |
1592 | ||
1593 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | |
19de99f7 | 1594 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 1595 | const struct bpf_prog *prog, |
23994631 | 1596 | struct bpf_insn_access_aux *info) |
2541517c | 1597 | { |
2541517c AS |
1598 | if (off < 0 || off >= sizeof(struct pt_regs)) |
1599 | return false; | |
2541517c AS |
1600 | if (type != BPF_READ) |
1601 | return false; | |
2541517c AS |
1602 | if (off % size != 0) |
1603 | return false; | |
2d071c64 DB |
1604 | /* |
1605 | * Assertion for 32-bit to make sure the last 8-byte access
1606 | * (BPF_DW) to the last 4-byte member is disallowed.
1607 | */ | |
1608 | if (off + size > sizeof(struct pt_regs)) | |
1609 | return false; | |
1610 | ||
2541517c AS |
1611 | return true; |
1612 | } | |
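/*
 * Worked examples of the checks above (a sketch; the exact bound depends
 * on the architecture's sizeof(struct pt_regs)):
 *
 *	(off = 0, size = 8, BPF_READ)	-> true   (aligned, in bounds)
 *	(off = 0, size = 8, BPF_WRITE)	-> false  (ctx is read-only)
 *	(off = 4, size = 8, BPF_READ)	-> false  (off % size != 0)
 *	an 8-byte (BPF_DW) read of the last 4-byte member on 32-bit
 *					-> false  (off + size overruns pt_regs)
 */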
1613 | ||
7de16e3a | 1614 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
2541517c AS |
1615 | .get_func_proto = kprobe_prog_func_proto, |
1616 | .is_valid_access = kprobe_prog_is_valid_access, | |
1617 | }; | |
1618 | ||
7de16e3a JK |
1619 | const struct bpf_prog_ops kprobe_prog_ops = { |
1620 | }; | |
1621 | ||
f3694e00 DB |
1622 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
1623 | u64, flags, void *, data, u64, size) | |
9940d67c | 1624 | { |
f3694e00 DB |
1625 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1626 | ||
9940d67c AS |
1627 | /* |
1628 | * r1 points to the perf tracepoint buffer whose first 8 bytes are hidden
1629 | * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it
f3694e00 | 1630 | * from there and call the same bpf_perf_event_output() helper inline. |
9940d67c | 1631 | */ |
f3694e00 | 1632 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
9940d67c AS |
1633 | } |
1634 | ||
1635 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { | |
1636 | .func = bpf_perf_event_output_tp, | |
1637 | .gpl_only = true, | |
1638 | .ret_type = RET_INTEGER, | |
1639 | .arg1_type = ARG_PTR_TO_CTX, | |
1640 | .arg2_type = ARG_CONST_MAP_PTR, | |
1641 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 1642 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
a60dd35d | 1643 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
9940d67c AS |
1644 | }; |
1645 | ||
f3694e00 DB |
1646 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
1647 | u64, flags) | |
9940d67c | 1648 | { |
f3694e00 | 1649 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
9940d67c | 1650 | |
f3694e00 DB |
1651 | /* |
1652 | * Same comment as in bpf_perf_event_output_tp(), only that this time | |
1653 | * the other helper's function body cannot be inlined due to being | |
1654 | * external, thus we need to call the raw helper function.
1655 | */ | |
1656 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
1657 | flags, 0, 0); | |
9940d67c AS |
1658 | } |
1659 | ||
1660 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { | |
1661 | .func = bpf_get_stackid_tp, | |
1662 | .gpl_only = true, | |
1663 | .ret_type = RET_INTEGER, | |
1664 | .arg1_type = ARG_PTR_TO_CTX, | |
1665 | .arg2_type = ARG_CONST_MAP_PTR, | |
1666 | .arg3_type = ARG_ANYTHING, | |
1667 | }; | |
1668 | ||
c195651e YS |
1669 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
1670 | u64, flags) | |
1671 | { | |
1672 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; | |
1673 | ||
1674 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
1675 | (unsigned long) size, flags, 0); | |
1676 | } | |
1677 | ||
1678 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { | |
1679 | .func = bpf_get_stack_tp, | |
1680 | .gpl_only = true, | |
1681 | .ret_type = RET_INTEGER, | |
1682 | .arg1_type = ARG_PTR_TO_CTX, | |
1683 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1684 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1685 | .arg4_type = ARG_ANYTHING, | |
1686 | }; | |
1687 | ||
5e43f899 AI |
1688 | static const struct bpf_func_proto * |
1689 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
f005afed YS |
1690 | { |
1691 | switch (func_id) { | |
1692 | case BPF_FUNC_perf_event_output: | |
1693 | return &bpf_perf_event_output_proto_tp; | |
1694 | case BPF_FUNC_get_stackid: | |
1695 | return &bpf_get_stackid_proto_tp; | |
c195651e YS |
1696 | case BPF_FUNC_get_stack: |
1697 | return &bpf_get_stack_proto_tp; | |
7adfc6c9 AN |
1698 | case BPF_FUNC_get_attach_cookie: |
1699 | return &bpf_get_attach_cookie_proto_trace; | |
f005afed | 1700 | default: |
fc611f47 | 1701 | return bpf_tracing_func_proto(func_id, prog); |
f005afed YS |
1702 | } |
1703 | } | |
1704 | ||
1705 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | |
5e43f899 | 1706 | const struct bpf_prog *prog, |
f005afed YS |
1707 | struct bpf_insn_access_aux *info) |
1708 | { | |
1709 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | |
1710 | return false; | |
1711 | if (type != BPF_READ) | |
1712 | return false; | |
1713 | if (off % size != 0) | |
1714 | return false; | |
1715 | ||
1716 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); | |
1717 | return true; | |
1718 | } | |
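/*
 * Worked examples on a 64-bit kernel, where the hidden 'struct pt_regs *'
 * occupies the first sizeof(void *) bytes of the buffer (a sketch):
 *
 *	(off =  8, size = 8, BPF_READ)	-> true   (aligned, past hidden ptr)
 *	(off =  0, size = 8, BPF_READ)	-> false  (hidden regs pointer)
 *	(off =  8, size = 8, BPF_WRITE)	-> false  (ctx is read-only)
 *	(off = 12, size = 8, BPF_READ)	-> false  (12 % 8 != 0)
 */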
1719 | ||
1720 | const struct bpf_verifier_ops tracepoint_verifier_ops = { | |
1721 | .get_func_proto = tp_prog_func_proto, | |
1722 | .is_valid_access = tp_prog_is_valid_access, | |
1723 | }; | |
1724 | ||
1725 | const struct bpf_prog_ops tracepoint_prog_ops = { | |
1726 | }; | |
1727 | ||
1728 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, | |
4bebdc7a YS |
1729 | struct bpf_perf_event_value *, buf, u32, size) |
1730 | { | |
1731 | int err = -EINVAL; | |
1732 | ||
1733 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
1734 | goto clear; | |
1735 | err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, | |
1736 | &buf->running); | |
1737 | if (unlikely(err)) | |
1738 | goto clear; | |
1739 | return 0; | |
1740 | clear: | |
1741 | memset(buf, 0, size); | |
1742 | return err; | |
1743 | } | |
1744 | ||
f005afed YS |
1745 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
1746 | .func = bpf_perf_prog_read_value, | |
4bebdc7a YS |
1747 | .gpl_only = true, |
1748 | .ret_type = RET_INTEGER, | |
1749 | .arg1_type = ARG_PTR_TO_CTX, | |
1750 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1751 | .arg3_type = ARG_CONST_SIZE, | |
1752 | }; | |
1753 | ||
fff7b643 DX |
1754 | BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, |
1755 | void *, buf, u32, size, u64, flags) | |
1756 | { | |
fff7b643 DX |
1757 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); |
1758 | struct perf_branch_stack *br_stack = ctx->data->br_stack; | |
1759 | u32 to_copy; | |
1760 | ||
1761 | if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) | |
1762 | return -EINVAL; | |
1763 | ||
cce6a2d7 JO |
1764 | if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) |
1765 | return -ENOENT; | |
1766 | ||
fff7b643 | 1767 | if (unlikely(!br_stack)) |
db52f572 | 1768 | return -ENOENT; |
fff7b643 DX |
1769 | |
1770 | if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) | |
1771 | return br_stack->nr * br_entry_size; | |
1772 | ||
1773 | if (!buf || (size % br_entry_size != 0)) | |
1774 | return -EINVAL; | |
1775 | ||
1776 | to_copy = min_t(u32, br_stack->nr * br_entry_size, size); | |
1777 | memcpy(buf, br_stack->entries, to_copy); | |
1778 | ||
1779 | return to_copy; | |
fff7b643 DX |
1780 | } |
1781 | ||
1782 | static const struct bpf_func_proto bpf_read_branch_records_proto = { | |
1783 | .func = bpf_read_branch_records, | |
1784 | .gpl_only = true, | |
1785 | .ret_type = RET_INTEGER, | |
1786 | .arg1_type = ARG_PTR_TO_CTX, | |
1787 | .arg2_type = ARG_PTR_TO_MEM_OR_NULL, | |
1788 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1789 | .arg4_type = ARG_ANYTHING, | |
1790 | }; | |
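/*
 * A sketch of BPF-side usage from a perf_event program; the program name
 * and the 16-entry buffer size are illustrative assumptions:
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry entries[16] = {};
 *		int sz, copied;
 *
 *		// size probe: buf may be NULL when only the flag is set
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		// copy out: size must be a multiple of the entry size;
 *		// on success 'copied' is the number of bytes written
 *		copied = bpf_read_branch_records(ctx, entries,
 *						 sizeof(entries), 0);
 *		return 0;
 *	}
 */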
1791 | ||
5e43f899 AI |
1792 | static const struct bpf_func_proto * |
1793 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1794 | { |
1795 | switch (func_id) { | |
1796 | case BPF_FUNC_perf_event_output: | |
9940d67c | 1797 | return &bpf_perf_event_output_proto_tp; |
9fd82b61 | 1798 | case BPF_FUNC_get_stackid: |
7b04d6d6 | 1799 | return &bpf_get_stackid_proto_pe; |
c195651e | 1800 | case BPF_FUNC_get_stack: |
7b04d6d6 | 1801 | return &bpf_get_stack_proto_pe; |
4bebdc7a | 1802 | case BPF_FUNC_perf_prog_read_value: |
f005afed | 1803 | return &bpf_perf_prog_read_value_proto; |
fff7b643 DX |
1804 | case BPF_FUNC_read_branch_records: |
1805 | return &bpf_read_branch_records_proto; | |
7adfc6c9 AN |
1806 | case BPF_FUNC_get_attach_cookie: |
1807 | return &bpf_get_attach_cookie_proto_pe; | |
9fd82b61 | 1808 | default: |
fc611f47 | 1809 | return bpf_tracing_func_proto(func_id, prog); |
9fd82b61 AS |
1810 | } |
1811 | } | |
1812 | ||
c4f6699d AS |
1813 | /* |
1814 | * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp | |
1815 | * to avoid a potential recursive reuse issue when/if tracepoints are added
9594dc3c MM |
1816 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. |
1817 | * | |
1818 | * Since raw tracepoints run despite bpf_prog_active, support concurrent usage | |
1819 | * in normal, irq, and nmi context. | |
c4f6699d | 1820 | */ |
9594dc3c MM |
1821 | struct bpf_raw_tp_regs { |
1822 | struct pt_regs regs[3]; | |
1823 | }; | |
1824 | static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); | |
1825 | static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); | |
1826 | static struct pt_regs *get_bpf_raw_tp_regs(void) | |
1827 | { | |
1828 | struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
1829 | int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); | |
1830 | ||
1831 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { | |
1832 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1833 | return ERR_PTR(-EBUSY); | |
1834 | } | |
1835 | ||
1836 | return &tp_regs->regs[nest_level - 1]; | |
1837 | } | |
1838 | ||
1839 | static void put_bpf_raw_tp_regs(void) | |
1840 | { | |
1841 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1842 | } | |
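/*
 * The three regs[] slots give each context level its own scratch pt_regs.
 * A sketch of the accounting in get_bpf_raw_tp_regs():
 *
 *	nest_level after inc:	1 -> regs[0]	(task context)
 *				2 -> regs[1]	(irq interrupting it)
 *				3 -> regs[2]	(nmi interrupting the irq)
 *				4 -> WARN_ON_ONCE + -EBUSY, counter undone
 */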
1843 | ||
c4f6699d AS |
1844 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1845 | struct bpf_map *, map, u64, flags, void *, data, u64, size) | |
1846 | { | |
9594dc3c MM |
1847 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1848 | int ret; | |
1849 | ||
1850 | if (IS_ERR(regs)) | |
1851 | return PTR_ERR(regs); | |
c4f6699d AS |
1852 | |
1853 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1854 | ret = ____bpf_perf_event_output(regs, map, flags, data, size); |
1855 | ||
1856 | put_bpf_raw_tp_regs(); | |
1857 | return ret; | |
c4f6699d AS |
1858 | } |
1859 | ||
1860 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { | |
1861 | .func = bpf_perf_event_output_raw_tp, | |
1862 | .gpl_only = true, | |
1863 | .ret_type = RET_INTEGER, | |
1864 | .arg1_type = ARG_PTR_TO_CTX, | |
1865 | .arg2_type = ARG_CONST_MAP_PTR, | |
1866 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 1867 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c4f6699d AS |
1868 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
1869 | }; | |
1870 | ||
a7658e1a | 1871 | extern const struct bpf_func_proto bpf_skb_output_proto; |
d831ee84 | 1872 | extern const struct bpf_func_proto bpf_xdp_output_proto; |
d9917302 | 1873 | extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; |
a7658e1a | 1874 | |
c4f6699d AS |
1875 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1876 | struct bpf_map *, map, u64, flags) | |
1877 | { | |
9594dc3c MM |
1878 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1879 | int ret; | |
1880 | ||
1881 | if (IS_ERR(regs)) | |
1882 | return PTR_ERR(regs); | |
c4f6699d AS |
1883 | |
1884 | perf_fetch_caller_regs(regs); | |
1885 | /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ | |
9594dc3c MM |
1886 | ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, |
1887 | flags, 0, 0); | |
1888 | put_bpf_raw_tp_regs(); | |
1889 | return ret; | |
c4f6699d AS |
1890 | } |
1891 | ||
1892 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { | |
1893 | .func = bpf_get_stackid_raw_tp, | |
1894 | .gpl_only = true, | |
1895 | .ret_type = RET_INTEGER, | |
1896 | .arg1_type = ARG_PTR_TO_CTX, | |
1897 | .arg2_type = ARG_CONST_MAP_PTR, | |
1898 | .arg3_type = ARG_ANYTHING, | |
1899 | }; | |
1900 | ||
c195651e YS |
1901 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1902 | void *, buf, u32, size, u64, flags) | |
1903 | { | |
9594dc3c MM |
1904 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1905 | int ret; | |
1906 | ||
1907 | if (IS_ERR(regs)) | |
1908 | return PTR_ERR(regs); | |
c195651e YS |
1909 | |
1910 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1911 | ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, |
1912 | (unsigned long) size, flags, 0); | |
1913 | put_bpf_raw_tp_regs(); | |
1914 | return ret; | |
c195651e YS |
1915 | } |
1916 | ||
1917 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { | |
1918 | .func = bpf_get_stack_raw_tp, | |
1919 | .gpl_only = true, | |
1920 | .ret_type = RET_INTEGER, | |
1921 | .arg1_type = ARG_PTR_TO_CTX, | |
216e3cd2 | 1922 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c195651e YS |
1923 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
1924 | .arg4_type = ARG_ANYTHING, | |
1925 | }; | |
1926 | ||
5e43f899 AI |
1927 | static const struct bpf_func_proto * |
1928 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
c4f6699d AS |
1929 | { |
1930 | switch (func_id) { | |
1931 | case BPF_FUNC_perf_event_output: | |
1932 | return &bpf_perf_event_output_proto_raw_tp; | |
1933 | case BPF_FUNC_get_stackid: | |
1934 | return &bpf_get_stackid_proto_raw_tp; | |
c195651e YS |
1935 | case BPF_FUNC_get_stack: |
1936 | return &bpf_get_stack_proto_raw_tp; | |
c4f6699d | 1937 | default: |
fc611f47 | 1938 | return bpf_tracing_func_proto(func_id, prog); |
c4f6699d AS |
1939 | } |
1940 | } | |
1941 | ||
958a3f2d | 1942 | const struct bpf_func_proto * |
f1b9509c AS |
1943 | tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1944 | { | |
3cee6fb8 MKL |
1945 | const struct bpf_func_proto *fn; |
1946 | ||
f1b9509c AS |
1947 | switch (func_id) { |
1948 | #ifdef CONFIG_NET | |
1949 | case BPF_FUNC_skb_output: | |
1950 | return &bpf_skb_output_proto; | |
d831ee84 EC |
1951 | case BPF_FUNC_xdp_output: |
1952 | return &bpf_xdp_output_proto; | |
af7ec138 YS |
1953 | case BPF_FUNC_skc_to_tcp6_sock: |
1954 | return &bpf_skc_to_tcp6_sock_proto; | |
478cfbdf YS |
1955 | case BPF_FUNC_skc_to_tcp_sock: |
1956 | return &bpf_skc_to_tcp_sock_proto; | |
1957 | case BPF_FUNC_skc_to_tcp_timewait_sock: | |
1958 | return &bpf_skc_to_tcp_timewait_sock_proto; | |
1959 | case BPF_FUNC_skc_to_tcp_request_sock: | |
1960 | return &bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e YS |
1961 | case BPF_FUNC_skc_to_udp6_sock: |
1962 | return &bpf_skc_to_udp6_sock_proto; | |
9eeb3aa3 HC |
1963 | case BPF_FUNC_skc_to_unix_sock: |
1964 | return &bpf_skc_to_unix_sock_proto; | |
3bc253c2 GT |
1965 | case BPF_FUNC_skc_to_mptcp_sock: |
1966 | return &bpf_skc_to_mptcp_sock_proto; | |
8e4597c6 MKL |
1967 | case BPF_FUNC_sk_storage_get: |
1968 | return &bpf_sk_storage_get_tracing_proto; | |
1969 | case BPF_FUNC_sk_storage_delete: | |
1970 | return &bpf_sk_storage_delete_tracing_proto; | |
b60da495 FR |
1971 | case BPF_FUNC_sock_from_file: |
1972 | return &bpf_sock_from_file_proto; | |
c5dbb89f FR |
1973 | case BPF_FUNC_get_socket_cookie: |
1974 | return &bpf_get_socket_ptr_cookie_proto; | |
d9917302 EC |
1975 | case BPF_FUNC_xdp_get_buff_len: |
1976 | return &bpf_xdp_get_buff_len_trace_proto; | |
f1b9509c | 1977 | #endif |
492e639f YS |
1978 | case BPF_FUNC_seq_printf: |
1979 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1980 | &bpf_seq_printf_proto : | |
1981 | NULL; | |
1982 | case BPF_FUNC_seq_write: | |
1983 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1984 | &bpf_seq_write_proto : | |
1985 | NULL; | |
eb411377 AM |
1986 | case BPF_FUNC_seq_printf_btf: |
1987 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1988 | &bpf_seq_printf_btf_proto : | |
1989 | NULL; | |
6e22ab9d JO |
1990 | case BPF_FUNC_d_path: |
1991 | return &bpf_d_path_proto; | |
f92c1e18 JO |
1992 | case BPF_FUNC_get_func_arg: |
1993 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; | |
1994 | case BPF_FUNC_get_func_ret: | |
1995 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; | |
1996 | case BPF_FUNC_get_func_arg_cnt: | |
1997 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; | |
2fcc8241 KFL |
1998 | case BPF_FUNC_get_attach_cookie: |
1999 | return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; | |
f1b9509c | 2000 | default: |
3cee6fb8 MKL |
2001 | fn = raw_tp_prog_func_proto(func_id, prog); |
2002 | if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) | |
2003 | fn = bpf_iter_get_func_proto(func_id, prog); | |
2004 | return fn; | |
f1b9509c AS |
2005 | } |
2006 | } | |
2007 | ||
c4f6699d AS |
2008 | static bool raw_tp_prog_is_valid_access(int off, int size, |
2009 | enum bpf_access_type type, | |
5e43f899 | 2010 | const struct bpf_prog *prog, |
c4f6699d AS |
2011 | struct bpf_insn_access_aux *info) |
2012 | { | |
35346ab6 | 2013 | return bpf_tracing_ctx_access(off, size, type); |
f1b9509c AS |
2014 | } |
2015 | ||
2016 | static bool tracing_prog_is_valid_access(int off, int size, | |
2017 | enum bpf_access_type type, | |
2018 | const struct bpf_prog *prog, | |
2019 | struct bpf_insn_access_aux *info) | |
2020 | { | |
35346ab6 | 2021 | return bpf_tracing_btf_ctx_access(off, size, type, prog, info); |
c4f6699d AS |
2022 | } |
2023 | ||
3e7c67d9 KS |
2024 | int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2025 | const union bpf_attr *kattr, | |
2026 | union bpf_attr __user *uattr) | |
2027 | { | |
2028 | return -ENOTSUPP; | |
2029 | } | |
2030 | ||
c4f6699d AS |
2031 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { |
2032 | .get_func_proto = raw_tp_prog_func_proto, | |
2033 | .is_valid_access = raw_tp_prog_is_valid_access, | |
2034 | }; | |
2035 | ||
2036 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { | |
ebfb4d40 | 2037 | #ifdef CONFIG_NET |
1b4d60ec | 2038 | .test_run = bpf_prog_test_run_raw_tp, |
ebfb4d40 | 2039 | #endif |
c4f6699d AS |
2040 | }; |
2041 | ||
f1b9509c AS |
2042 | const struct bpf_verifier_ops tracing_verifier_ops = { |
2043 | .get_func_proto = tracing_prog_func_proto, | |
2044 | .is_valid_access = tracing_prog_is_valid_access, | |
2045 | }; | |
2046 | ||
2047 | const struct bpf_prog_ops tracing_prog_ops = { | |
da00d2f1 | 2048 | .test_run = bpf_prog_test_run_tracing, |
f1b9509c AS |
2049 | }; |
2050 | ||
9df1c28b MM |
2051 | static bool raw_tp_writable_prog_is_valid_access(int off, int size, |
2052 | enum bpf_access_type type, | |
2053 | const struct bpf_prog *prog, | |
2054 | struct bpf_insn_access_aux *info) | |
2055 | { | |
2056 | if (off == 0) { | |
2057 | if (size != sizeof(u64) || type != BPF_READ) | |
2058 | return false; | |
2059 | info->reg_type = PTR_TO_TP_BUFFER; | |
2060 | } | |
2061 | return raw_tp_prog_is_valid_access(off, size, type, prog, info); | |
2062 | } | |
2063 | ||
2064 | const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { | |
2065 | .get_func_proto = raw_tp_prog_func_proto, | |
2066 | .is_valid_access = raw_tp_writable_prog_is_valid_access, | |
2067 | }; | |
2068 | ||
2069 | const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { | |
2070 | }; | |
2071 | ||
0515e599 | 2072 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 2073 | const struct bpf_prog *prog, |
23994631 | 2074 | struct bpf_insn_access_aux *info) |
0515e599 | 2075 | { |
95da0cdb | 2076 | const int size_u64 = sizeof(u64); |
31fd8581 | 2077 | |
0515e599 AS |
2078 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
2079 | return false; | |
2080 | if (type != BPF_READ) | |
2081 | return false; | |
bc23105c DB |
2082 | if (off % size != 0) { |
2083 | if (sizeof(unsigned long) != 4) | |
2084 | return false; | |
2085 | if (size != 8) | |
2086 | return false; | |
2087 | if (off % size != 4) | |
2088 | return false; | |
2089 | } | |
31fd8581 | 2090 | |
f96da094 DB |
2091 | switch (off) { |
2092 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | |
95da0cdb TQ |
2093 | bpf_ctx_record_field_size(info, size_u64); |
2094 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
2095 | return false; | |
2096 | break; | |
2097 | case bpf_ctx_range(struct bpf_perf_event_data, addr): | |
2098 | bpf_ctx_record_field_size(info, size_u64); | |
2099 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
23994631 | 2100 | return false; |
f96da094 DB |
2101 | break; |
2102 | default: | |
0515e599 AS |
2103 | if (size != sizeof(long)) |
2104 | return false; | |
2105 | } | |
f96da094 | 2106 | |
0515e599 AS |
2107 | return true; |
2108 | } | |
2109 | ||
6b8cc1d1 DB |
2110 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
2111 | const struct bpf_insn *si, | |
0515e599 | 2112 | struct bpf_insn *insn_buf, |
f96da094 | 2113 | struct bpf_prog *prog, u32 *target_size) |
0515e599 AS |
2114 | { |
2115 | struct bpf_insn *insn = insn_buf; | |
2116 | ||
6b8cc1d1 | 2117 | switch (si->off) { |
0515e599 | 2118 | case offsetof(struct bpf_perf_event_data, sample_period): |
f035a515 | 2119 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 2120 | data), si->dst_reg, si->src_reg, |
0515e599 | 2121 | offsetof(struct bpf_perf_event_data_kern, data)); |
6b8cc1d1 | 2122 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
f96da094 DB |
2123 | bpf_target_off(struct perf_sample_data, period, 8, |
2124 | target_size)); | |
0515e599 | 2125 | break; |
95da0cdb TQ |
2126 | case offsetof(struct bpf_perf_event_data, addr): |
2127 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | |
2128 | data), si->dst_reg, si->src_reg, | |
2129 | offsetof(struct bpf_perf_event_data_kern, data)); | |
2130 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | |
2131 | bpf_target_off(struct perf_sample_data, addr, 8, | |
2132 | target_size)); | |
2133 | break; | |
0515e599 | 2134 | default: |
f035a515 | 2135 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 2136 | regs), si->dst_reg, si->src_reg, |
0515e599 | 2137 | offsetof(struct bpf_perf_event_data_kern, regs)); |
6b8cc1d1 DB |
2138 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
2139 | si->off); | |
0515e599 AS |
2140 | break; |
2141 | } | |
2142 | ||
2143 | return insn - insn_buf; | |
2144 | } | |
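/*
 * Roughly, a program's 8-byte load of ctx->sample_period:
 *
 *	r2 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period))
 *
 * is rewritten by the above into a pointer chase through the kernel-side
 * struct:
 *
 *	r2 = *(ptr *)(r1 + offsetof(struct bpf_perf_event_data_kern, data))
 *	r2 = *(u64 *)(r2 + offsetof(struct perf_sample_data, period))
 *
 * 'addr' is handled the same way; any other offset is redirected through
 * the 'regs' pointer instead.
 */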
2145 | ||
7de16e3a | 2146 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
f005afed | 2147 | .get_func_proto = pe_prog_func_proto, |
0515e599 AS |
2148 | .is_valid_access = pe_prog_is_valid_access, |
2149 | .convert_ctx_access = pe_prog_convert_ctx_access, | |
2150 | }; | |
7de16e3a JK |
2151 | |
2152 | const struct bpf_prog_ops perf_event_prog_ops = { | |
2153 | }; | |
e87c6bc3 YS |
2154 | |
2155 | static DEFINE_MUTEX(bpf_event_mutex); | |
2156 | ||
c8c088ba YS |
2157 | #define BPF_TRACE_MAX_PROGS 64 |
2158 | ||
e87c6bc3 | 2159 | int perf_event_attach_bpf_prog(struct perf_event *event, |
82e6b1ee AN |
2160 | struct bpf_prog *prog, |
2161 | u64 bpf_cookie) | |
e87c6bc3 | 2162 | { |
e672db03 | 2163 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
2164 | struct bpf_prog_array *new_array; |
2165 | int ret = -EEXIST; | |
2166 | ||
9802d865 | 2167 | /* |
b4da3340 MH |
2168 | * Kprobe override only works if the probe is on the function entry,
2169 | * and only if the function is on the error-injection opt-in list.
9802d865 JB |
2170 | */ |
2171 | if (prog->kprobe_override && | |
b4da3340 | 2172 | (!trace_kprobe_on_func_entry(event->tp_event) || |
9802d865 JB |
2173 | !trace_kprobe_error_injectable(event->tp_event))) |
2174 | return -EINVAL; | |
2175 | ||
e87c6bc3 YS |
2176 | mutex_lock(&bpf_event_mutex); |
2177 | ||
2178 | if (event->prog) | |
07c41a29 | 2179 | goto unlock; |
e87c6bc3 | 2180 | |
e672db03 | 2181 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
c8c088ba YS |
2182 | if (old_array && |
2183 | bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { | |
2184 | ret = -E2BIG; | |
2185 | goto unlock; | |
2186 | } | |
2187 | ||
82e6b1ee | 2188 | ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); |
e87c6bc3 | 2189 | if (ret < 0) |
07c41a29 | 2190 | goto unlock; |
e87c6bc3 YS |
2191 | |
2192 | /* set the new array to event->tp_event and set event->prog */ | |
2193 | event->prog = prog; | |
82e6b1ee | 2194 | event->bpf_cookie = bpf_cookie; |
e87c6bc3 | 2195 | rcu_assign_pointer(event->tp_event->prog_array, new_array); |
8c7dcb84 | 2196 | bpf_prog_array_free_sleepable(old_array); |
e87c6bc3 | 2197 | |
07c41a29 | 2198 | unlock: |
e87c6bc3 YS |
2199 | mutex_unlock(&bpf_event_mutex); |
2200 | return ret; | |
2201 | } | |
2202 | ||
2203 | void perf_event_detach_bpf_prog(struct perf_event *event) | |
2204 | { | |
e672db03 | 2205 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
2206 | struct bpf_prog_array *new_array; |
2207 | int ret; | |
2208 | ||
2209 | mutex_lock(&bpf_event_mutex); | |
2210 | ||
2211 | if (!event->prog) | |
07c41a29 | 2212 | goto unlock; |
e87c6bc3 | 2213 | |
e672db03 | 2214 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
82e6b1ee | 2215 | ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); |
170a7e3e SY |
2216 | if (ret == -ENOENT) |
2217 | goto unlock; | |
e87c6bc3 YS |
2218 | if (ret < 0) { |
2219 | bpf_prog_array_delete_safe(old_array, event->prog); | |
2220 | } else { | |
2221 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
8c7dcb84 | 2222 | bpf_prog_array_free_sleepable(old_array); |
e87c6bc3 YS |
2223 | } |
2224 | ||
2225 | bpf_prog_put(event->prog); | |
2226 | event->prog = NULL; | |
2227 | ||
07c41a29 | 2228 | unlock: |
e87c6bc3 YS |
2229 | mutex_unlock(&bpf_event_mutex); |
2230 | } | |
f371b304 | 2231 | |
f4e2298e | 2232 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
f371b304 YS |
2233 | { |
2234 | struct perf_event_query_bpf __user *uquery = info; | |
2235 | struct perf_event_query_bpf query = {}; | |
e672db03 | 2236 | struct bpf_prog_array *progs; |
3a38bb98 | 2237 | u32 *ids, prog_cnt, ids_len; |
f371b304 YS |
2238 | int ret; |
2239 | ||
031258da | 2240 | if (!perfmon_capable()) |
f371b304 YS |
2241 | return -EPERM; |
2242 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | |
2243 | return -EINVAL; | |
2244 | if (copy_from_user(&query, uquery, sizeof(query))) | |
2245 | return -EFAULT; | |
3a38bb98 YS |
2246 | |
2247 | ids_len = query.ids_len; | |
2248 | if (ids_len > BPF_TRACE_MAX_PROGS) | |
9c481b90 | 2249 | return -E2BIG; |
3a38bb98 YS |
2250 | ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); |
2251 | if (!ids) | |
2252 | return -ENOMEM; | |
2253 | /* | |
2254 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which | |
2255 | * is required when the user only wants to check uquery->prog_cnt.
2256 | * There is no need to check for it since the case is handled | |
2257 | * gracefully in bpf_prog_array_copy_info. | |
2258 | */ | |
f371b304 YS |
2259 | |
2260 | mutex_lock(&bpf_event_mutex); | |
e672db03 SF |
2261 | progs = bpf_event_rcu_dereference(event->tp_event->prog_array); |
2262 | ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); | |
f371b304 YS |
2263 | mutex_unlock(&bpf_event_mutex); |
2264 | ||
3a38bb98 YS |
2265 | if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || |
2266 | copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) | |
2267 | ret = -EFAULT; | |
2268 | ||
2269 | kfree(ids); | |
f371b304 YS |
2270 | return ret; |
2271 | } | |
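/*
 * A userspace sketch of driving this query via the perf ioctl; the 64-slot
 * sizing and 'perf_fd' (a tracepoint perf event fd; the caller needs
 * perfmon_capable()) are illustrative assumptions:
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		printf("%u prog(s) attached\n", query->prog_cnt);
 */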
c4f6699d AS |
2272 | |
2273 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; | |
2274 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; | |
2275 | ||
a38d1107 | 2276 | struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) |
c4f6699d AS |
2277 | { |
2278 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; | |
2279 | ||
2280 | for (; btp < __stop__bpf_raw_tp; btp++) { | |
2281 | if (!strcmp(btp->tp->name, name)) | |
2282 | return btp; | |
2283 | } | |
a38d1107 MM |
2284 | |
2285 | return bpf_get_raw_tracepoint_module(name); | |
2286 | } | |
2287 | ||
2288 | void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) | |
2289 | { | |
12cc126d | 2290 | struct module *mod; |
a38d1107 | 2291 | |
12cc126d AN |
2292 | preempt_disable(); |
2293 | mod = __module_address((unsigned long)btp); | |
2294 | module_put(mod); | |
2295 | preempt_enable(); | |
c4f6699d AS |
2296 | } |
2297 | ||
2298 | static __always_inline | |
2299 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) | |
2300 | { | |
f03efe49 | 2301 | cant_sleep(); |
05b24ff9 JO |
2302 | if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { |
2303 | bpf_prog_inc_misses_counter(prog); | |
2304 | goto out; | |
2305 | } | |
c4f6699d | 2306 | rcu_read_lock(); |
fb7dd8bc | 2307 | (void) bpf_prog_run(prog, args); |
c4f6699d | 2308 | rcu_read_unlock(); |
05b24ff9 JO |
2309 | out: |
2310 | this_cpu_dec(*(prog->active)); | |
c4f6699d AS |
2311 | } |
2312 | ||
2313 | #define UNPACK(...) __VA_ARGS__ | |
2314 | #define REPEAT_1(FN, DL, X, ...) FN(X) | |
2315 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) | |
2316 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) | |
2317 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) | |
2318 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) | |
2319 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) | |
2320 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) | |
2321 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) | |
2322 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) | |
2323 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) | |
2324 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) | |
2325 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) | |
2326 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) | |
2327 | ||
2328 | #define SARG(X) u64 arg##X | |
2329 | #define COPY(X) args[X] = arg##X | |
2330 | ||
2331 | #define __DL_COM (,) | |
2332 | #define __DL_SEM (;) | |
2333 | ||
2334 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 | |
2335 | ||
2336 | #define BPF_TRACE_DEFN_x(x) \ | |
2337 | void bpf_trace_run##x(struct bpf_prog *prog, \ | |
2338 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ | |
2339 | { \ | |
2340 | u64 args[x]; \ | |
2341 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ | |
2342 | __bpf_trace_run(prog, args); \ | |
2343 | } \ | |
2344 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) | |
2345 | BPF_TRACE_DEFN_x(1); | |
2346 | BPF_TRACE_DEFN_x(2); | |
2347 | BPF_TRACE_DEFN_x(3); | |
2348 | BPF_TRACE_DEFN_x(4); | |
2349 | BPF_TRACE_DEFN_x(5); | |
2350 | BPF_TRACE_DEFN_x(6); | |
2351 | BPF_TRACE_DEFN_x(7); | |
2352 | BPF_TRACE_DEFN_x(8); | |
2353 | BPF_TRACE_DEFN_x(9); | |
2354 | BPF_TRACE_DEFN_x(10); | |
2355 | BPF_TRACE_DEFN_x(11); | |
2356 | BPF_TRACE_DEFN_x(12); | |
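/*
 * For reference, BPF_TRACE_DEFN_x(2) expands (modulo whitespace) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */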
2357 | ||
2358 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2359 | { | |
2360 | struct tracepoint *tp = btp->tp; | |
2361 | ||
2362 | /* | |
2363 | * check that the program doesn't access arguments beyond what's
2364 | * available in this tracepoint | |
2365 | */ | |
2366 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) | |
2367 | return -EINVAL; | |
2368 | ||
9df1c28b MM |
2369 | if (prog->aux->max_tp_access > btp->writable_size) |
2370 | return -EINVAL; | |
2371 | ||
9913d574 SRV |
2372 | return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, |
2373 | prog); | |
c4f6699d AS |
2374 | } |
2375 | ||
2376 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2377 | { | |
e16ec340 | 2378 | return __bpf_probe_register(btp, prog); |
c4f6699d AS |
2379 | } |
2380 | ||
2381 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2382 | { | |
e16ec340 | 2383 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
c4f6699d | 2384 | } |
41bdc4b4 YS |
2385 | |
2386 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | |
2387 | u32 *fd_type, const char **buf, | |
3acf8ace JO |
2388 | u64 *probe_offset, u64 *probe_addr, |
2389 | unsigned long *missed) | |
41bdc4b4 YS |
2390 | { |
2391 | bool is_tracepoint, is_syscall_tp; | |
2392 | struct bpf_prog *prog; | |
2393 | int flags, err = 0; | |
2394 | ||
2395 | prog = event->prog; | |
2396 | if (!prog) | |
2397 | return -ENOENT; | |
2398 | ||
2399 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ | |
2400 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) | |
2401 | return -EOPNOTSUPP; | |
2402 | ||
2403 | *prog_id = prog->aux->id; | |
2404 | flags = event->tp_event->flags; | |
2405 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; | |
2406 | is_syscall_tp = is_syscall_trace_event(event->tp_event); | |
2407 | ||
2408 | if (is_tracepoint || is_syscall_tp) { | |
2409 | *buf = is_tracepoint ? event->tp_event->tp->name | |
2410 | : event->tp_event->name; | |
1b715e1b YS |
2411 | /* We allow NULL pointer for tracepoint */ |
2412 | if (fd_type) | |
2413 | *fd_type = BPF_FD_TYPE_TRACEPOINT; | |
2414 | if (probe_offset) | |
2415 | *probe_offset = 0x0; | |
2416 | if (probe_addr) | |
2417 | *probe_addr = 0x0; | |
41bdc4b4 YS |
2418 | } else { |
2419 | /* kprobe/uprobe */ | |
2420 | err = -EOPNOTSUPP; | |
2421 | #ifdef CONFIG_KPROBE_EVENTS | |
2422 | if (flags & TRACE_EVENT_FL_KPROBE) | |
2423 | err = bpf_get_kprobe_info(event, fd_type, buf, | |
3acf8ace | 2424 | probe_offset, probe_addr, missed, |
41bdc4b4 YS |
2425 | event->attr.type == PERF_TYPE_TRACEPOINT); |
2426 | #endif | |
2427 | #ifdef CONFIG_UPROBE_EVENTS | |
2428 | if (flags & TRACE_EVENT_FL_UPROBE) | |
2429 | err = bpf_get_uprobe_info(event, fd_type, buf, | |
5125e757 | 2430 | probe_offset, probe_addr, |
41bdc4b4 YS |
2431 | event->attr.type == PERF_TYPE_TRACEPOINT); |
2432 | #endif | |
2433 | } | |
2434 | ||
2435 | return err; | |
2436 | } | |
a38d1107 | 2437 | |
9db1ff0a YS |
2438 | static int __init send_signal_irq_work_init(void) |
2439 | { | |
2440 | int cpu; | |
2441 | struct send_signal_irq_work *work; | |
2442 | ||
2443 | for_each_possible_cpu(cpu) { | |
2444 | work = per_cpu_ptr(&send_signal_work, cpu); | |
2445 | init_irq_work(&work->irq_work, do_bpf_send_signal); | |
2446 | } | |
2447 | return 0; | |
2448 | } | |
2449 | ||
2450 | subsys_initcall(send_signal_irq_work_init); | |
2451 | ||
a38d1107 | 2452 | #ifdef CONFIG_MODULES |
390e99cf SF |
2453 | static int bpf_event_notify(struct notifier_block *nb, unsigned long op, |
2454 | void *module) | |
a38d1107 MM |
2455 | { |
2456 | struct bpf_trace_module *btm, *tmp; | |
2457 | struct module *mod = module; | |
0340a6b7 | 2458 | int ret = 0; |
a38d1107 MM |
2459 | |
2460 | if (mod->num_bpf_raw_events == 0 || | |
2461 | (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) | |
0340a6b7 | 2462 | goto out; |
a38d1107 MM |
2463 | |
2464 | mutex_lock(&bpf_module_mutex); | |
2465 | ||
2466 | switch (op) { | |
2467 | case MODULE_STATE_COMING: | |
2468 | btm = kzalloc(sizeof(*btm), GFP_KERNEL); | |
2469 | if (btm) { | |
2470 | btm->module = module; | |
2471 | list_add(&btm->list, &bpf_trace_modules); | |
0340a6b7 PZ |
2472 | } else { |
2473 | ret = -ENOMEM; | |
a38d1107 MM |
2474 | } |
2475 | break; | |
2476 | case MODULE_STATE_GOING: | |
2477 | list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { | |
2478 | if (btm->module == module) { | |
2479 | list_del(&btm->list); | |
2480 | kfree(btm); | |
2481 | break; | |
2482 | } | |
2483 | } | |
2484 | break; | |
2485 | } | |
2486 | ||
2487 | mutex_unlock(&bpf_module_mutex); | |
2488 | ||
0340a6b7 PZ |
2489 | out: |
2490 | return notifier_from_errno(ret); | |
a38d1107 MM |
2491 | } |
2492 | ||
2493 | static struct notifier_block bpf_module_nb = { | |
2494 | .notifier_call = bpf_event_notify, | |
2495 | }; | |
2496 | ||
390e99cf | 2497 | static int __init bpf_event_init(void) |
a38d1107 MM |
2498 | { |
2499 | register_module_notifier(&bpf_module_nb); | |
2500 | return 0; | |
2501 | } | |
2502 | ||
2503 | fs_initcall(bpf_event_init); | |
2504 | #endif /* CONFIG_MODULES */ | |
0dcac272 JO |
2505 | |
2506 | #ifdef CONFIG_FPROBE | |
2507 | struct bpf_kprobe_multi_link { | |
2508 | struct bpf_link link; | |
2509 | struct fprobe fp; | |
2510 | unsigned long *addrs; | |
ca74823c JO |
2511 | u64 *cookies; |
2512 | u32 cnt; | |
e22061b2 JO |
2513 | u32 mods_cnt; |
2514 | struct module **mods; | |
7ac8d0d2 | 2515 | u32 flags; |
0dcac272 JO |
2516 | }; |
2517 | ||
f7098690 JO |
2518 | struct bpf_kprobe_multi_run_ctx { |
2519 | struct bpf_run_ctx run_ctx; | |
2520 | struct bpf_kprobe_multi_link *link; | |
2521 | unsigned long entry_ip; | |
2522 | }; | |
2523 | ||
0236fec5 JO |
2524 | struct user_syms { |
2525 | const char **syms; | |
2526 | char *buf; | |
2527 | }; | |
2528 | ||
2529 | static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) | |
2530 | { | |
2531 | unsigned long __user usymbol; | |
2532 | const char **syms = NULL; | |
2533 | char *buf = NULL, *p; | |
2534 | int err = -ENOMEM; | |
2535 | unsigned int i; | |
2536 | ||
fd58f7df | 2537 | syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); |
0236fec5 JO |
2538 | if (!syms) |
2539 | goto error; | |
2540 | ||
fd58f7df | 2541 | buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); |
0236fec5 JO |
2542 | if (!buf) |
2543 | goto error; | |
2544 | ||
2545 | for (p = buf, i = 0; i < cnt; i++) { | |
2546 | if (__get_user(usymbol, usyms + i)) { | |
2547 | err = -EFAULT; | |
2548 | goto error; | |
2549 | } | |
2550 | err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); | |
2551 | if (err == KSYM_NAME_LEN) | |
2552 | err = -E2BIG; | |
2553 | if (err < 0) | |
2554 | goto error; | |
2555 | syms[i] = p; | |
2556 | p += err + 1; | |
2557 | } | |
2558 | ||
2559 | us->syms = syms; | |
2560 | us->buf = buf; | |
2561 | return 0; | |
2562 | ||
2563 | error: | |
2564 | if (err) { | |
2565 | kvfree(syms); | |
2566 | kvfree(buf); | |
2567 | } | |
2568 | return err; | |
2569 | } | |
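/*
 * Resulting layout for cnt == 3 (a sketch with made-up symbol names):
 *
 *	buf:  "vfs_read\0vfs_write\0ksys_read\0<unused tail>"
 *	       ^         ^          ^
 *	syms: [0]       [1]        [2]
 *
 * buf is sized for the worst case (cnt * KSYM_NAME_LEN) but strings are
 * packed back to back; syms[] holds borrowed pointers into buf, so the
 * two are freed together by free_user_syms() below.
 */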
2570 | ||
e22061b2 JO |
2571 | static void kprobe_multi_put_modules(struct module **mods, u32 cnt) |
2572 | { | |
2573 | u32 i; | |
2574 | ||
2575 | for (i = 0; i < cnt; i++) | |
2576 | module_put(mods[i]); | |
2577 | } | |
2578 | ||
0236fec5 JO |
2579 | static void free_user_syms(struct user_syms *us) |
2580 | { | |
2581 | kvfree(us->syms); | |
2582 | kvfree(us->buf); | |
2583 | } | |
2584 | ||
0dcac272 JO |
2585 | static void bpf_kprobe_multi_link_release(struct bpf_link *link) |
2586 | { | |
2587 | struct bpf_kprobe_multi_link *kmulti_link; | |
2588 | ||
2589 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); | |
2590 | unregister_fprobe(&kmulti_link->fp); | |
e22061b2 | 2591 | kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); |
0dcac272 JO |
2592 | } |
2593 | ||
2594 | static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) | |
2595 | { | |
2596 | struct bpf_kprobe_multi_link *kmulti_link; | |
2597 | ||
2598 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); | |
2599 | kvfree(kmulti_link->addrs); | |
ca74823c | 2600 | kvfree(kmulti_link->cookies); |
e22061b2 | 2601 | kfree(kmulti_link->mods); |
0dcac272 JO |
2602 | kfree(kmulti_link); |
2603 | } | |
2604 | ||
7ac8d0d2 YS |
2605 | static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, |
2606 | struct bpf_link_info *info) | |
2607 | { | |
2608 | u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); | |
2609 | struct bpf_kprobe_multi_link *kmulti_link; | |
2610 | u32 ucount = info->kprobe_multi.count; | |
2611 | int err = 0, i; | |
2612 | ||
2613 | if (!uaddrs ^ !ucount) | |
2614 | return -EINVAL; | |
2615 | ||
2616 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); | |
2617 | info->kprobe_multi.count = kmulti_link->cnt; | |
2618 | info->kprobe_multi.flags = kmulti_link->flags; | |
e2b2cd59 | 2619 | info->kprobe_multi.missed = kmulti_link->fp.nmissed; |
7ac8d0d2 YS |
2620 | |
2621 | if (!uaddrs) | |
2622 | return 0; | |
2623 | if (ucount < kmulti_link->cnt) | |
2624 | err = -ENOSPC; | |
2625 | else | |
2626 | ucount = kmulti_link->cnt; | |
2627 | ||
2628 | if (kallsyms_show_value(current_cred())) { | |
2629 | if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) | |
2630 | return -EFAULT; | |
2631 | } else { | |
2632 | for (i = 0; i < ucount; i++) { | |
2633 | if (put_user(0, uaddrs + i)) | |
2634 | return -EFAULT; | |
2635 | } | |
2636 | } | |
2637 | return err; | |
2638 | } | |
2639 | ||
0dcac272 JO |
2640 | static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { |
2641 | .release = bpf_kprobe_multi_link_release, | |
2642 | .dealloc = bpf_kprobe_multi_link_dealloc, | |
7ac8d0d2 | 2643 | .fill_link_info = bpf_kprobe_multi_link_fill_link_info, |
0dcac272 JO |
2644 | }; |
2645 | ||
ca74823c JO |
2646 | static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) |
2647 | { | |
2648 | const struct bpf_kprobe_multi_link *link = priv; | |
2649 | unsigned long *addr_a = a, *addr_b = b; | |
2650 | u64 *cookie_a, *cookie_b; | |
ca74823c JO |
2651 | |
2652 | cookie_a = link->cookies + (addr_a - link->addrs); | |
2653 | cookie_b = link->cookies + (addr_b - link->addrs); | |
2654 | ||
2655 | /* swap addr_a/addr_b and cookie_a/cookie_b values */ | |
11e17ae4 JC |
2656 | swap(*addr_a, *addr_b); |
2657 | swap(*cookie_a, *cookie_b); | |
ca74823c JO |
2658 | } |
2659 | ||
1a1b0716 | 2660 | static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) |
ca74823c JO |
2661 | { |
2662 | const unsigned long *addr_a = a, *addr_b = b; | |
2663 | ||
2664 | if (*addr_a == *addr_b) | |
2665 | return 0; | |
2666 | return *addr_a < *addr_b ? -1 : 1; | |
2667 | } | |
2668 | ||
2669 | static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) | |
2670 | { | |
1a1b0716 | 2671 | return bpf_kprobe_multi_addrs_cmp(a, b); |
ca74823c JO |
2672 | } |
2673 | ||
f7098690 | 2674 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) |
ca74823c | 2675 | { |
f7098690 | 2676 | struct bpf_kprobe_multi_run_ctx *run_ctx; |
ca74823c | 2677 | struct bpf_kprobe_multi_link *link; |
f7098690 | 2678 | u64 *cookie, entry_ip; |
ca74823c | 2679 | unsigned long *addr; |
ca74823c JO |
2680 | |
2681 | if (WARN_ON_ONCE(!ctx)) | |
2682 | return 0; | |
f7098690 JO |
2683 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); |
2684 | link = run_ctx->link; | |
ca74823c JO |
2685 | if (!link->cookies) |
2686 | return 0; | |
f7098690 JO |
2687 | entry_ip = run_ctx->entry_ip; |
2688 | addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), | |
1a1b0716 | 2689 | bpf_kprobe_multi_addrs_cmp); |
ca74823c JO |
2690 | if (!addr) |
2691 | return 0; | |
2692 | cookie = link->cookies + (addr - link->addrs); | |
2693 | return *cookie; | |
2694 | } | |
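/*
 * Example (a sketch): with link->addrs sorted as {0x1000, 0x2000, 0x3000}
 * and link->cookies kept in lockstep as {7, 8, 9} by
 * bpf_kprobe_multi_cookie_swap(), a hit with entry_ip == 0x2000 bsearches
 * to index 1 and returns cookie 8; an entry_ip not in addrs returns 0.
 */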
2695 | ||
f7098690 JO |
2696 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) |
2697 | { | |
2698 | struct bpf_kprobe_multi_run_ctx *run_ctx; | |
2699 | ||
2700 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); | |
2701 | return run_ctx->entry_ip; | |
2702 | } | |
2703 | ||
0dcac272 JO |
2704 | static int |
2705 | kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, | |
f7098690 | 2706 | unsigned long entry_ip, struct pt_regs *regs) |
0dcac272 | 2707 | { |
f7098690 JO |
2708 | struct bpf_kprobe_multi_run_ctx run_ctx = { |
2709 | .link = link, | |
2710 | .entry_ip = entry_ip, | |
2711 | }; | |
ca74823c | 2712 | struct bpf_run_ctx *old_run_ctx; |
0dcac272 JO |
2713 | int err; |
2714 | ||
2715 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
f915fcb3 | 2716 | bpf_prog_inc_misses_counter(link->link.prog); |
0dcac272 JO |
2717 | err = 0; |
2718 | goto out; | |
2719 | } | |
2720 | ||
2721 | migrate_disable(); | |
2722 | rcu_read_lock(); | |
f7098690 | 2723 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
0dcac272 | 2724 | err = bpf_prog_run(link->link.prog, regs); |
ca74823c | 2725 | bpf_reset_run_ctx(old_run_ctx); |
0dcac272 JO |
2726 | rcu_read_unlock(); |
2727 | migrate_enable(); | |
2728 | ||
2729 | out: | |
2730 | __this_cpu_dec(bpf_prog_active); | |
2731 | return err; | |
2732 | } | |
2733 | ||
39d95420 | 2734 | static int |
c09eb2e5 | 2735 | kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, |
cb16330d MHG |
2736 | unsigned long ret_ip, struct pt_regs *regs, |
2737 | void *data) | |
0dcac272 | 2738 | { |
0dcac272 JO |
2739 | struct bpf_kprobe_multi_link *link; |
2740 | ||
39d95420 MHG |
2741 | link = container_of(fp, struct bpf_kprobe_multi_link, fp); |
2742 | kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); | |
2743 | return 0; | |
2744 | } | |
2745 | ||
2746 | static void | |
2747 | kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip, | |
cb16330d MHG |
2748 | unsigned long ret_ip, struct pt_regs *regs, |
2749 | void *data) | |
0dcac272 | 2750 | { |
0dcac272 JO |
2751 | struct bpf_kprobe_multi_link *link; |
2752 | ||
0dcac272 | 2753 | link = container_of(fp, struct bpf_kprobe_multi_link, fp); |
c09eb2e5 | 2754 | kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); |
0dcac272 JO |
2755 | } |
2756 | ||
eb5fb032 | 2757 | static int symbols_cmp_r(const void *a, const void *b, const void *priv) |
0dcac272 | 2758 | { |
0236fec5 JO |
2759 | const char **str_a = (const char **) a; |
2760 | const char **str_b = (const char **) b; | |
0dcac272 | 2761 | |
0236fec5 | 2762 | return strcmp(*str_a, *str_b); |
0dcac272 JO |
2763 | } |
2764 | ||
eb5fb032 JO |
2765 | struct multi_symbols_sort { |
2766 | const char **funcs; | |
2767 | u64 *cookies; | |
2768 | }; | |
2769 | ||
2770 | static void symbols_swap_r(void *a, void *b, int size, const void *priv) | |
2771 | { | |
2772 | const struct multi_symbols_sort *data = priv; | |
2773 | const char **name_a = a, **name_b = b; | |
2774 | ||
2775 | swap(*name_a, *name_b); | |
2776 | ||
2777 | /* If defined, swap also related cookies. */ | |
2778 | if (data->cookies) { | |
2779 | u64 *cookie_a, *cookie_b; | |
2780 | ||
2781 | cookie_a = data->cookies + (name_a - data->funcs); | |
2782 | cookie_b = data->cookies + (name_b - data->funcs); | |
2783 | swap(*cookie_a, *cookie_b); | |
2784 | } | |
2785 | } | |
2786 | ||
6a5f2d6e | 2787 | struct modules_array { |
e22061b2 JO |
2788 | struct module **mods; |
2789 | int mods_cnt; | |
2790 | int mods_cap; | |
2791 | }; | |
2792 | ||
6a5f2d6e | 2793 | static int add_module(struct modules_array *arr, struct module *mod) |
e22061b2 | 2794 | { |
e22061b2 JO |
2795 | struct module **mods; |
2796 | ||
6a5f2d6e JO |
2797 | if (arr->mods_cnt == arr->mods_cap) { |
2798 | arr->mods_cap = max(16, arr->mods_cap * 3 / 2); | |
2799 | mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); | |
e22061b2 JO |
2800 | if (!mods) |
2801 | return -ENOMEM; | |
6a5f2d6e | 2802 | arr->mods = mods; |
e22061b2 JO |
2803 | } |
2804 | ||
6a5f2d6e JO |
2805 | arr->mods[arr->mods_cnt] = mod; |
2806 | arr->mods_cnt++; | |
e22061b2 JO |
2807 | return 0; |
2808 | } | |
2809 | ||
6a5f2d6e JO |
2810 | static bool has_module(struct modules_array *arr, struct module *mod) |
2811 | { | |
2812 | int i; | |
2813 | ||
2814 | for (i = arr->mods_cnt - 1; i >= 0; i--) { | |
2815 | if (arr->mods[i] == mod) | |
2816 | return true; | |
2817 | } | |
2818 | return false; | |
2819 | } | |
2820 | ||
e22061b2 JO |
2821 | static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) |
2822 | { | |
6a5f2d6e JO |
2823 | struct modules_array arr = {}; |
2824 | u32 i, err = 0; | |
2825 | ||
2826 | for (i = 0; i < addrs_cnt; i++) { | |
2827 | struct module *mod; | |
2828 | ||
2829 | preempt_disable(); | |
2830 | mod = __module_address(addrs[i]); | |
2831 | /* Either no module or it's already stored */
2832 | if (!mod || has_module(&arr, mod)) { | |
2833 | preempt_enable(); | |
2834 | continue; | |
2835 | } | |
2836 | if (!try_module_get(mod)) | |
2837 | err = -EINVAL; | |
2838 | preempt_enable(); | |
2839 | if (err) | |
2840 | break; | |
2841 | err = add_module(&arr, mod); | |
2842 | if (err) { | |
2843 | module_put(mod); | |
2844 | break; | |
2845 | } | |
2846 | } | |
e22061b2 JO |
2847 | |
2848 | /* We return either err < 0 in case of error, ... */ | |
e22061b2 | 2849 | if (err) { |
6a5f2d6e JO |
2850 | kprobe_multi_put_modules(arr.mods, arr.mods_cnt); |
2851 | kfree(arr.mods); | |
e22061b2 JO |
2852 | return err; |
2853 | } | |
2854 | ||
2855 | /* or number of modules found if everything is ok. */ | |
6a5f2d6e JO |
2856 | *mods = arr.mods; |
2857 | return arr.mods_cnt; | |
e22061b2 JO |
2858 | } |
2859 | ||
41bc46c1 JO |
2860 | static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) |
2861 | { | |
2862 | u32 i; | |
2863 | ||
2864 | for (i = 0; i < cnt; i++) { | |
2865 | if (!within_error_injection_list(addrs[i])) | |
2866 | return -EINVAL; | |
2867 | } | |
2868 | return 0; | |
2869 | } | |
2870 | ||
0dcac272 JO |
2871 | int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) |
2872 | { | |
2873 | struct bpf_kprobe_multi_link *link = NULL; | |
2874 | struct bpf_link_primer link_primer; | |
ca74823c | 2875 | void __user *ucookies; |
0dcac272 JO |
2876 | unsigned long *addrs; |
2877 | u32 flags, cnt, size; | |
2878 | void __user *uaddrs; | |
ca74823c | 2879 | u64 *cookies = NULL; |
0dcac272 JO |
2880 | void __user *usyms; |
2881 | int err; | |
2882 | ||
2883 | /* no support for 32-bit archs yet */
2884 | if (sizeof(u64) != sizeof(void *)) | |
2885 | return -EOPNOTSUPP; | |
2886 | ||
2887 | if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) | |
2888 | return -EINVAL; | |
2889 | ||
2890 | flags = attr->link_create.kprobe_multi.flags; | |
2891 | if (flags & ~BPF_F_KPROBE_MULTI_RETURN) | |
2892 | return -EINVAL; | |
2893 | ||
2894 | uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); | |
2895 | usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); | |
2896 | if (!!uaddrs == !!usyms) | |
2897 | return -EINVAL; | |
2898 | ||
2899 | cnt = attr->link_create.kprobe_multi.cnt; | |
2900 | if (!cnt) | |
2901 | return -EINVAL; | |
2902 | ||
2903 | size = cnt * sizeof(*addrs); | |
fd58f7df | 2904 | addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); |
0dcac272 JO |
2905 | if (!addrs) |
2906 | return -ENOMEM; | |
2907 | ||
eb5fb032 JO |
2908 | ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); |
2909 | if (ucookies) { | |
2910 | cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); | |
2911 | if (!cookies) { | |
2912 | err = -ENOMEM; | |
2913 | goto error; | |
2914 | } | |
2915 | if (copy_from_user(cookies, ucookies, size)) { | |
2916 | err = -EFAULT; | |
2917 | goto error; | |
2918 | } | |
2919 | } | |
2920 | ||
0dcac272 JO |
2921 | if (uaddrs) { |
2922 | if (copy_from_user(addrs, uaddrs, size)) { | |
2923 | err = -EFAULT; | |
2924 | goto error; | |
2925 | } | |
2926 | } else { | |
eb5fb032 JO |
2927 | struct multi_symbols_sort data = { |
2928 | .cookies = cookies, | |
2929 | }; | |
0236fec5 JO |
2930 | struct user_syms us; |
2931 | ||
2932 | err = copy_user_syms(&us, usyms, cnt); | |
2933 | if (err) | |
2934 | goto error; | |
2935 | ||
eb5fb032 JO |
2936 | if (cookies) |
2937 | data.funcs = us.syms; | |
2938 | ||
2939 | sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, | |
2940 | symbols_swap_r, &data); | |
2941 | ||
0236fec5 JO |
2942 | err = ftrace_lookup_symbols(us.syms, cnt, addrs); |
2943 | free_user_syms(&us); | |
0dcac272 JO |
2944 | if (err) |
2945 | goto error; | |
2946 | } | |
2947 | ||
41bc46c1 JO |
2948 | if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { |
2949 | err = -EINVAL; | |
2950 | goto error; | |
2951 | } | |
2952 | ||
0dcac272 JO |
2953 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
2954 | if (!link) { | |
2955 | err = -ENOMEM; | |
2956 | goto error; | |
2957 | } | |
2958 | ||
2959 | bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, | |
2960 | &bpf_kprobe_multi_link_lops, prog); | |
2961 | ||
2962 | err = bpf_link_prime(&link->link, &link_primer); | |
2963 | if (err) | |
2964 | goto error; | |
2965 | ||
2966 | if (flags & BPF_F_KPROBE_MULTI_RETURN) | |
39d95420 | 2967 | link->fp.exit_handler = kprobe_multi_link_exit_handler; |
0dcac272 JO |
2968 | else |
2969 | link->fp.entry_handler = kprobe_multi_link_handler; | |
2970 | ||
2971 | link->addrs = addrs; | |
ca74823c JO |
2972 | link->cookies = cookies; |
2973 | link->cnt = cnt; | |
7ac8d0d2 | 2974 | link->flags = flags; |
ca74823c JO |
2975 | |
2976 | if (cookies) { | |
2977 | /* | |
2978 | * Sorting addresses will trigger sorting cookies as well | |
2979 | * (check bpf_kprobe_multi_cookie_swap). This way we can | |
2980 | * find cookie based on the address in bpf_get_attach_cookie | |
2981 | * helper. | |
2982 | */ | |
2983 | sort_r(addrs, cnt, sizeof(*addrs), | |
2984 | bpf_kprobe_multi_cookie_cmp, | |
2985 | bpf_kprobe_multi_cookie_swap, | |
2986 | link); | |
e22061b2 JO |
2987 | } |
2988 | ||
2989 | err = get_modules_for_addrs(&link->mods, addrs, cnt); | |
2990 | if (err < 0) { | |
2991 | bpf_link_cleanup(&link_primer); | |
2992 | return err; | |
ca74823c | 2993 | } |
e22061b2 | 2994 | link->mods_cnt = err; |
0dcac272 JO |
2995 | |
2996 | err = register_fprobe_ips(&link->fp, addrs, cnt); | |
2997 | if (err) { | |
e22061b2 | 2998 | kprobe_multi_put_modules(link->mods, link->mods_cnt); |
0dcac272 JO |
2999 | bpf_link_cleanup(&link_primer); |
3000 | return err; | |
3001 | } | |
3002 | ||
3003 | return bpf_link_settle(&link_primer); | |
3004 | ||
3005 | error: | |
3006 | kfree(link); | |
3007 | kvfree(addrs); | |
ca74823c | 3008 | kvfree(cookies); |
0dcac272 JO |
3009 | return err; |
3010 | } | |
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif
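
/*
 * From user space this path is usually exercised via libbpf; a minimal
 * sketch (assuming libbpf's bpf_kprobe_multi_opts API, which is not part
 * of this file) looks roughly like:
 *
 *	const char *syms[] = { "vfs_read", "vfs_write" };
 *	__u64 cookies[] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.syms = syms,
 *		.cookies = cookies,
 *		.cnt = 2,
 *	);
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 *
 * which ends up in bpf_kprobe_multi_link_attach() above with usyms and
 * ucookies set and uaddrs == NULL.
 */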

#ifdef CONFIG_UPROBES
struct bpf_uprobe_multi_link;
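
/*
 * One bpf_uprobe (and hence one uprobe_consumer) is created per probed
 * offset; each carries its user-supplied cookie and points back to the
 * owning multi-link.
 */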
struct bpf_uprobe {
	struct bpf_uprobe_multi_link *link;
	loff_t offset;
	u64 cookie;
	struct uprobe_consumer consumer;
};

struct bpf_uprobe_multi_link {
	struct path path;
	struct bpf_link link;
	u32 cnt;
	struct bpf_uprobe *uprobes;
	struct task_struct *task;
};

struct bpf_uprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	unsigned long entry_ip;
	struct bpf_uprobe *uprobe;
};
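
/*
 * Unregister the first @cnt consumers; used both on link release and to
 * roll back a partially completed attach.
 */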
static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
				  u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
				  &uprobes[i].consumer);
	}
}

static void bpf_uprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_uprobe_multi_link *umulti_link;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
}

static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_uprobe_multi_link *umulti_link;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	if (umulti_link->task)
		put_task_struct(umulti_link->task);
	path_put(&umulti_link->path);
	kvfree(umulti_link->uprobes);
	kfree(umulti_link);
}

static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
	.release = bpf_uprobe_multi_link_release,
	.dealloc = bpf_uprobe_multi_link_dealloc,
};
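
/*
 * Run the link's program for one uprobe hit. Sleepable programs are
 * protected by rcu_read_lock_trace(), non-sleepable ones by plain RCU;
 * when the link is bound to a task, hits in other tasks are ignored.
 */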
static int uprobe_prog_run(struct bpf_uprobe *uprobe,
			   unsigned long entry_ip,
			   struct pt_regs *regs)
{
	struct bpf_uprobe_multi_link *link = uprobe->link;
	struct bpf_uprobe_multi_run_ctx run_ctx = {
		.entry_ip = entry_ip,
		.uprobe = uprobe,
	};
	struct bpf_prog *prog = link->link.prog;
	bool sleepable = prog->aux->sleepable;
	struct bpf_run_ctx *old_run_ctx;
	int err = 0;

	if (link->task && current != link->task)
		return 0;

	if (sleepable)
		rcu_read_lock_trace();
	else
		rcu_read_lock();

	migrate_disable();

	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(prog, regs);
	bpf_reset_run_ctx(old_run_ctx);

	migrate_enable();

	if (sleepable)
		rcu_read_unlock_trace();
	else
		rcu_read_unlock();
	return err;
}
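
/*
 * When a pid was supplied at attach time, this filter makes the uprobe
 * layer install breakpoints only into the target task's address space.
 */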
static bool
uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
			 struct mm_struct *mm)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe->link->task->mm == mm;
}

static int
uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
}

static int
uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe_prog_run(uprobe, func, regs);
}
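
/*
 * Back bpf_get_func_ip() and bpf_get_attach_cookie() for uprobe-multi
 * programs: both recover the per-hit bpf_uprobe_multi_run_ctx from
 * current->bpf_ctx via container_of().
 */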
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
	return run_ctx->uprobe->cookie;
}
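
/*
 * Attach a BPF_TRACE_UPROBE_MULTI program to many offsets within one
 * file: resolve the path, optionally pin the target task, copy per-probe
 * offsets, ref-counter offsets and cookies from user space, and register
 * one uprobe consumer per offset before settling the link.
 */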
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_uprobe_multi_link *link = NULL;
	unsigned long __user *uref_ctr_offsets;
	unsigned long *ref_ctr_offsets = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_uprobe *uprobes = NULL;
	struct task_struct *task = NULL;
	unsigned long __user *uoffsets;
	u64 __user *ucookies;
	void __user *upath;
	u32 flags, cnt, i;
	struct path path;
	char *name;
	pid_t pid;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.uprobe_multi.flags;
	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
		return -EINVAL;

	/*
	 * path, offsets and cnt are mandatory,
	 * ref_ctr_offsets and cookies are optional
	 */
	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
	cnt = attr->link_create.uprobe_multi.cnt;

	if (!upath || !uoffsets || !cnt)
		return -EINVAL;

	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);

	name = strndup_user(upath, PATH_MAX);
	if (IS_ERR(name)) {
		err = PTR_ERR(name);
		return err;
	}

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	kfree(name);
	if (err)
		return err;

	if (!d_is_reg(path.dentry)) {
		err = -EBADF;
		goto error_path_put;
	}

	pid = attr->link_create.uprobe_multi.pid;
	if (pid) {
		rcu_read_lock();
		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
		rcu_read_unlock();
		if (!task) {
			err = -ESRCH;
			goto error_path_put;
		}
	}

	err = -ENOMEM;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);

	if (!uprobes || !link)
		goto error_free;

	if (uref_ctr_offsets) {
		ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
		if (!ref_ctr_offsets)
			goto error_free;
	}

	for (i = 0; i < cnt; i++) {
		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (__get_user(uprobes[i].offset, uoffsets + i)) {
			err = -EFAULT;
			goto error_free;
		}

		uprobes[i].link = link;

		if (flags & BPF_F_UPROBE_MULTI_RETURN)
			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
		else
			uprobes[i].consumer.handler = uprobe_multi_link_handler;

		if (pid)
			uprobes[i].consumer.filter = uprobe_multi_link_filter;
	}

	link->cnt = cnt;
	link->uprobes = uprobes;
	link->path = path;
	link->task = task;

	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
		      &bpf_uprobe_multi_link_lops, prog);

	for (i = 0; i < cnt; i++) {
		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
					     uprobes[i].offset,
					     ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
					     &uprobes[i].consumer);
		if (err) {
			bpf_uprobe_unregister(&path, uprobes, i);
			goto error_free;
		}
	}

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error_free;

	kvfree(ref_ctr_offsets);
	return bpf_link_settle(&link_primer);

error_free:
	kvfree(ref_ctr_offsets);
	kvfree(uprobes);
	kfree(link);
	if (task)
		put_task_struct(task);
error_path_put:
	path_put(&path);
	return err;
}
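
/*
 * As with kprobe-multi, user space typically reaches this through
 * libbpf; a minimal sketch (assuming libbpf's
 * bpf_program__attach_uprobe_multi(), which is not part of this file)
 * might be:
 *
 *	link = bpf_program__attach_uprobe_multi(prog, target_pid,
 *						"/usr/lib/libc.so.6",
 *						"malloc", NULL);
 *
 * which lands in bpf_uprobe_multi_link_attach() with path, offsets and
 * cnt filled in (the offsets resolved from the ELF file by libbpf).
 */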
#else /* !CONFIG_UPROBES */
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif /* CONFIG_UPROBES */