Commit | Line | Data |
---|---|---|
179a0cc4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2541517c | 2 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com |
0515e599 | 3 | * Copyright (c) 2016 Facebook |
2541517c AS |
4 | */ |
5 | #include <linux/kernel.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/bpf.h> | |
0515e599 | 9 | #include <linux/bpf_perf_event.h> |
2541517c AS |
10 | #include <linux/filter.h> |
11 | #include <linux/uaccess.h> | |
9c959c86 | 12 | #include <linux/ctype.h> |
9802d865 | 13 | #include <linux/kprobes.h> |
41bdc4b4 | 14 | #include <linux/syscalls.h> |
540adea3 | 15 | #include <linux/error-injection.h> |
9802d865 JB |
16 | |
17 | #include "trace_probe.h" | |
2541517c AS |
18 | #include "trace.h" |
19 | ||
035226b9 | 20 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
c195651e | 21 | u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
035226b9 | 22 | |
2541517c AS |
23 | /** |
24 | * trace_call_bpf - invoke BPF program | |
e87c6bc3 | 25 | * @call: tracepoint event |
2541517c AS |
26 | * @ctx: opaque context pointer |
27 | * | |
28 | * kprobe handlers execute BPF programs via this helper. | |
29 | * Can be used from static tracepoints in the future. | |
30 | * | |
31 | * Return: BPF programs always return an integer which is interpreted by | |
32 | * kprobe handler as: | |
33 | * 0 - return from kprobe (event is filtered out) | |
34 | * 1 - store kprobe event into ring buffer | |
35 | * Other values are reserved and currently alias to 1 | |
36 | */ | |
e87c6bc3 | 37 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
2541517c AS |
38 | { |
39 | unsigned int ret; | |
40 | ||
41 | if (in_nmi()) /* not supported yet */ | |
42 | return 1; | |
43 | ||
44 | preempt_disable(); | |
45 | ||
46 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
47 | /* | |
48 | * since some bpf program is already running on this cpu, | |
49 | * don't call into another bpf program (same or different) | |
50 | * and don't send kprobe event into ring-buffer, | |
51 | * so return zero here | |
52 | */ | |
53 | ret = 0; | |
54 | goto out; | |
55 | } | |
56 | ||
e87c6bc3 YS |
57 | /* |
58 | * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock | |
59 | * to all call sites, we did a bpf_prog_array_valid() there to check | |
60 | * whether call->prog_array is empty or not, which is | |
61 | * a heuristic to speed up execution. |
62 | * | |
63 | * If the prog_array fetched by bpf_prog_array_valid() was |
64 | * non-NULL, we go into trace_call_bpf() and do the actual |
65 | * rcu_dereference() under the RCU lock. |
66 | * If it turns out that prog_array is NULL, we bail out. |
67 | * Conversely, if the pointer fetched by bpf_prog_array_valid() |
68 | * was NULL, you'll skip the prog_array with the risk of missing |
69 | * out on events attached in between that check and the |
70 | * rcu_dereference(), which is an accepted risk. |
71 | */ | |
72 | ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); | |
2541517c AS |
73 | |
74 | out: | |
75 | __this_cpu_dec(bpf_prog_active); | |
76 | preempt_enable(); | |
77 | ||
78 | return ret; | |
79 | } | |
80 | EXPORT_SYMBOL_GPL(trace_call_bpf); | |
81 | ||
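As a side note for readers of the listing: the other half of this contract lives in the BPF program itself. Below is a hedged sketch of a minimal kprobe program (restricted C; the SEC() macro, helper prototypes, probe target and PID are assumptions of this sketch following the usual libbpf/samples conventions, not part of this file) whose return value trace_call_bpf() interprets exactly as the comment above describes.

```c
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"		/* SEC(), bpf_get_current_pid_tgid(), ... */

SEC("kprobe/do_sys_open")		/* illustrative probe target */
int filter_open(struct pt_regs *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	if (pid != 1234)		/* hypothetical PID of interest */
		return 0;		/* event is filtered out */
	return 1;			/* store the kprobe event into the ring buffer */
}

char _license[] SEC("license") = "GPL";
```
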
9802d865 JB |
82 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
83 | BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) | |
84 | { | |
9802d865 | 85 | regs_set_return_value(regs, rc); |
540adea3 | 86 | override_function_with_return(regs); |
9802d865 JB |
87 | return 0; |
88 | } | |
89 | ||
90 | static const struct bpf_func_proto bpf_override_return_proto = { | |
91 | .func = bpf_override_return, | |
92 | .gpl_only = true, | |
93 | .ret_type = RET_INTEGER, | |
94 | .arg1_type = ARG_PTR_TO_CTX, | |
95 | .arg2_type = ARG_ANYTHING, | |
96 | }; | |
97 | #endif | |
98 | ||
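When CONFIG_BPF_KPROBE_OVERRIDE is enabled, a kprobe program can use the helper above to force an early, injected return from the probed function. A hedged sketch under the same libbpf/samples assumptions as before; the probed function name is illustrative and must be on the error-injection opt-in list, as enforced in perf_event_attach_bpf_prog() further down in this file.

```c
SEC("kprobe/open_ctree")		/* illustrative, assumed error-injectable */
int inject_failure(struct pt_regs *ctx)
{
	/* make the probed function return -12 (-ENOMEM) to its caller */
	bpf_override_return(ctx, -12);
	return 0;
}
```
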
f3694e00 | 99 | BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) |
2541517c | 100 | { |
eb33f2cc | 101 | int ret; |
2541517c | 102 | |
074f528e DB |
103 | ret = probe_kernel_read(dst, unsafe_ptr, size); |
104 | if (unlikely(ret < 0)) | |
105 | memset(dst, 0, size); | |
106 | ||
107 | return ret; | |
2541517c AS |
108 | } |
109 | ||
110 | static const struct bpf_func_proto bpf_probe_read_proto = { | |
111 | .func = bpf_probe_read, | |
112 | .gpl_only = true, | |
113 | .ret_type = RET_INTEGER, | |
39f19ebb | 114 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
9c019e2b | 115 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
2541517c AS |
116 | .arg3_type = ARG_ANYTHING, |
117 | }; | |
118 | ||
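A hedged usage sketch from the program side: copying a value from an arbitrary (unsafe) kernel address into BPF stack memory. PT_REGS_PARM1() is the usual per-architecture accessor from the libbpf/samples headers and, like the probe target, is an assumption of this sketch.

```c
SEC("kprobe/some_kernel_func")		/* illustrative probe target */
int read_first_arg(struct pt_regs *ctx)
{
	const void *unsafe_ptr = (const void *)PT_REGS_PARM1(ctx);
	__u64 val = 0;

	/* on failure the helper has already zeroed 'val' */
	if (bpf_probe_read(&val, sizeof(val), unsafe_ptr) < 0)
		return 0;
	return 1;
}
```
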
f3694e00 DB |
119 | BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, |
120 | u32, size) | |
96ae5227 | 121 | { |
96ae5227 SD |
122 | /* |
123 | * Ensure we're in user context which is safe for the helper to | |
124 | * run. This helper has no business in a kthread. | |
125 | * | |
126 | * access_ok() should prevent writing to non-user memory, but in | |
127 | * some situations (nommu, temporary switch, etc) access_ok() does | |
128 | * not provide enough validation, hence the check on KERNEL_DS. | |
129 | */ | |
130 | ||
131 | if (unlikely(in_interrupt() || | |
132 | current->flags & (PF_KTHREAD | PF_EXITING))) | |
133 | return -EPERM; | |
db68ce10 | 134 | if (unlikely(uaccess_kernel())) |
96ae5227 SD |
135 | return -EPERM; |
136 | if (!access_ok(VERIFY_WRITE, unsafe_ptr, size)) | |
137 | return -EPERM; | |
138 | ||
139 | return probe_kernel_write(unsafe_ptr, src, size); | |
140 | } | |
141 | ||
142 | static const struct bpf_func_proto bpf_probe_write_user_proto = { | |
143 | .func = bpf_probe_write_user, | |
144 | .gpl_only = true, | |
145 | .ret_type = RET_INTEGER, | |
146 | .arg1_type = ARG_ANYTHING, | |
39f19ebb AS |
147 | .arg2_type = ARG_PTR_TO_MEM, |
148 | .arg3_type = ARG_CONST_SIZE, | |
96ae5227 SD |
149 | }; |
150 | ||
151 | static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |
152 | { | |
153 | pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", | |
154 | current->comm, task_pid_nr(current)); | |
155 | ||
156 | return &bpf_probe_write_user_proto; | |
157 | } | |
158 | ||
9c959c86 | 159 | /* |
7bda4b40 JF |
160 | * Only limited trace_printk() conversion specifiers allowed: |
161 | * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s | |
9c959c86 | 162 | */ |
f3694e00 DB |
163 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
164 | u64, arg2, u64, arg3) | |
9c959c86 | 165 | { |
8d3b7dce | 166 | bool str_seen = false; |
9c959c86 AS |
167 | int mod[3] = {}; |
168 | int fmt_cnt = 0; | |
8d3b7dce AS |
169 | u64 unsafe_addr; |
170 | char buf[64]; | |
9c959c86 AS |
171 | int i; |
172 | ||
173 | /* | |
174 | * bpf_check()->check_func_arg()->check_stack_boundary() | |
175 | * guarantees that fmt points to bpf program stack, | |
176 | * fmt_size bytes of it were initialized and fmt_size > 0 | |
177 | */ | |
178 | if (fmt[--fmt_size] != 0) | |
179 | return -EINVAL; | |
180 | ||
181 | /* check format string for allowed specifiers */ | |
182 | for (i = 0; i < fmt_size; i++) { | |
183 | if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) | |
184 | return -EINVAL; | |
185 | ||
186 | if (fmt[i] != '%') | |
187 | continue; | |
188 | ||
189 | if (fmt_cnt >= 3) | |
190 | return -EINVAL; | |
191 | ||
192 | /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ | |
193 | i++; | |
194 | if (fmt[i] == 'l') { | |
195 | mod[fmt_cnt]++; | |
196 | i++; | |
8d3b7dce | 197 | } else if (fmt[i] == 'p' || fmt[i] == 's') { |
9c959c86 | 198 | mod[fmt_cnt]++; |
1efb6ee3 MP |
199 | /* disallow any further format extensions */ |
200 | if (fmt[i + 1] != 0 && | |
201 | !isspace(fmt[i + 1]) && | |
202 | !ispunct(fmt[i + 1])) | |
9c959c86 AS |
203 | return -EINVAL; |
204 | fmt_cnt++; | |
1efb6ee3 | 205 | if (fmt[i] == 's') { |
8d3b7dce AS |
206 | if (str_seen) |
207 | /* allow only one '%s' per fmt string */ | |
208 | return -EINVAL; | |
209 | str_seen = true; | |
210 | ||
211 | switch (fmt_cnt) { | |
212 | case 1: | |
f3694e00 DB |
213 | unsafe_addr = arg1; |
214 | arg1 = (long) buf; | |
8d3b7dce AS |
215 | break; |
216 | case 2: | |
f3694e00 DB |
217 | unsafe_addr = arg2; |
218 | arg2 = (long) buf; | |
8d3b7dce AS |
219 | break; |
220 | case 3: | |
f3694e00 DB |
221 | unsafe_addr = arg3; |
222 | arg3 = (long) buf; | |
8d3b7dce AS |
223 | break; |
224 | } | |
225 | buf[0] = 0; | |
226 | strncpy_from_unsafe(buf, | |
227 | (void *) (long) unsafe_addr, | |
228 | sizeof(buf)); | |
229 | } | |
9c959c86 AS |
230 | continue; |
231 | } | |
232 | ||
233 | if (fmt[i] == 'l') { | |
234 | mod[fmt_cnt]++; | |
235 | i++; | |
236 | } | |
237 | ||
7bda4b40 JF |
238 | if (fmt[i] != 'i' && fmt[i] != 'd' && |
239 | fmt[i] != 'u' && fmt[i] != 'x') | |
9c959c86 AS |
240 | return -EINVAL; |
241 | fmt_cnt++; | |
242 | } | |
243 | ||
88a5c690 DB |
244 | /* Horrid workaround for getting va_list handling working with different |
245 | * argument type combinations generically for 32 and 64 bit archs. | |
246 | */ | |
247 | #define __BPF_TP_EMIT() __BPF_ARG3_TP() | |
248 | #define __BPF_TP(...) \ | |
eefa864a | 249 | __trace_printk(0 /* Fake ip */, \ |
88a5c690 DB |
250 | fmt, ##__VA_ARGS__) |
251 | ||
252 | #define __BPF_ARG1_TP(...) \ | |
253 | ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ | |
254 | ? __BPF_TP(arg1, ##__VA_ARGS__) \ | |
255 | : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ | |
256 | ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ | |
257 | : __BPF_TP((u32)arg1, ##__VA_ARGS__))) | |
258 | ||
259 | #define __BPF_ARG2_TP(...) \ | |
260 | ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ | |
261 | ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ | |
262 | : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ | |
263 | ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ | |
264 | : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) | |
265 | ||
266 | #define __BPF_ARG3_TP(...) \ | |
267 | ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ | |
268 | ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ | |
269 | : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ | |
270 | ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ | |
271 | : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) | |
272 | ||
273 | return __BPF_TP_EMIT(); | |
9c959c86 AS |
274 | } |
275 | ||
276 | static const struct bpf_func_proto bpf_trace_printk_proto = { | |
277 | .func = bpf_trace_printk, | |
278 | .gpl_only = true, | |
279 | .ret_type = RET_INTEGER, | |
39f19ebb AS |
280 | .arg1_type = ARG_PTR_TO_MEM, |
281 | .arg2_type = ARG_CONST_SIZE, | |
9c959c86 AS |
282 | }; |
283 | ||
0756ea3e AS |
284 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void) |
285 | { | |
286 | /* | |
287 | * this program might be calling bpf_trace_printk, | |
288 | * so allocate per-cpu printk buffers | |
289 | */ | |
290 | trace_printk_init_buffers(); | |
291 | ||
292 | return &bpf_trace_printk_proto; | |
293 | } | |
294 | ||
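A hedged sketch of typical bpf_trace_printk() usage that stays within the limits enforced above: the format string on the BPF stack, at most three arguments, a single %s, and only the listed conversion specifiers. 'filename' stands for some unsafe kernel string pointer obtained elsewhere in the program and is an assumption of this sketch; output ends up in the trace_pipe buffers set up by trace_printk_init_buffers().

```c
char fmt[] = "pid %d opened %s\n";	/* must live on the BPF program stack */
__u32 pid = bpf_get_current_pid_tgid() >> 32;

bpf_trace_printk(fmt, sizeof(fmt), pid, filename);
```
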
908432ca YS |
295 | static __always_inline int |
296 | get_map_perf_counter(struct bpf_map *map, u64 flags, | |
297 | u64 *value, u64 *enabled, u64 *running) | |
35578d79 | 298 | { |
35578d79 | 299 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
6816a7ff DB |
300 | unsigned int cpu = smp_processor_id(); |
301 | u64 index = flags & BPF_F_INDEX_MASK; | |
3b1efb19 | 302 | struct bpf_event_entry *ee; |
35578d79 | 303 | |
6816a7ff DB |
304 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
305 | return -EINVAL; | |
306 | if (index == BPF_F_CURRENT_CPU) | |
307 | index = cpu; | |
35578d79 KX |
308 | if (unlikely(index >= array->map.max_entries)) |
309 | return -E2BIG; | |
310 | ||
3b1efb19 | 311 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 312 | if (!ee) |
35578d79 KX |
313 | return -ENOENT; |
314 | ||
908432ca YS |
315 | return perf_event_read_local(ee->event, value, enabled, running); |
316 | } | |
317 | ||
318 | BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |
319 | { | |
320 | u64 value = 0; | |
321 | int err; | |
322 | ||
323 | err = get_map_perf_counter(map, flags, &value, NULL, NULL); | |
35578d79 | 324 | /* |
f91840a3 AS |
325 | * this api is ugly since we miss [-22..-2] range of valid |
326 | * counter values, but that's uapi | |
35578d79 | 327 | */ |
f91840a3 AS |
328 | if (err) |
329 | return err; | |
330 | return value; | |
35578d79 KX |
331 | } |
332 | ||
62544ce8 | 333 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
35578d79 | 334 | .func = bpf_perf_event_read, |
1075ef59 | 335 | .gpl_only = true, |
35578d79 KX |
336 | .ret_type = RET_INTEGER, |
337 | .arg1_type = ARG_CONST_MAP_PTR, | |
338 | .arg2_type = ARG_ANYTHING, | |
339 | }; | |
340 | ||
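A hedged program-side sketch; 'counters' is an illustrative BPF_MAP_TYPE_PERF_EVENT_ARRAY map that user space has populated with perf event fds.

```c
__s64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

/* per the comment above, values in [-22..-2] cannot be told apart from
 * error codes; treat the whole negative range as an error here */
if (cnt < 0)
	return 0;
```
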
908432ca YS |
341 | BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, |
342 | struct bpf_perf_event_value *, buf, u32, size) | |
343 | { | |
344 | int err = -EINVAL; | |
345 | ||
346 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
347 | goto clear; | |
348 | err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, | |
349 | &buf->running); | |
350 | if (unlikely(err)) | |
351 | goto clear; | |
352 | return 0; | |
353 | clear: | |
354 | memset(buf, 0, size); | |
355 | return err; | |
356 | } | |
357 | ||
358 | static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |
359 | .func = bpf_perf_event_read_value, | |
360 | .gpl_only = true, | |
361 | .ret_type = RET_INTEGER, | |
362 | .arg1_type = ARG_CONST_MAP_PTR, | |
363 | .arg2_type = ARG_ANYTHING, | |
364 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
365 | .arg4_type = ARG_CONST_SIZE, | |
366 | }; | |
367 | ||
283ca526 | 368 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); |
20b9d7ac | 369 | |
8e7a3920 DB |
370 | static __always_inline u64 |
371 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |
283ca526 | 372 | u64 flags, struct perf_sample_data *sd) |
a43eec30 | 373 | { |
a43eec30 | 374 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
d7931330 | 375 | unsigned int cpu = smp_processor_id(); |
1e33759c | 376 | u64 index = flags & BPF_F_INDEX_MASK; |
3b1efb19 | 377 | struct bpf_event_entry *ee; |
a43eec30 | 378 | struct perf_event *event; |
a43eec30 | 379 | |
1e33759c | 380 | if (index == BPF_F_CURRENT_CPU) |
d7931330 | 381 | index = cpu; |
a43eec30 AS |
382 | if (unlikely(index >= array->map.max_entries)) |
383 | return -E2BIG; | |
384 | ||
3b1efb19 | 385 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 386 | if (!ee) |
a43eec30 AS |
387 | return -ENOENT; |
388 | ||
3b1efb19 | 389 | event = ee->event; |
a43eec30 AS |
390 | if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || |
391 | event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) | |
392 | return -EINVAL; | |
393 | ||
d7931330 | 394 | if (unlikely(event->oncpu != cpu)) |
a43eec30 AS |
395 | return -EOPNOTSUPP; |
396 | ||
20b9d7ac | 397 | perf_event_output(event, sd, regs); |
a43eec30 AS |
398 | return 0; |
399 | } | |
400 | ||
f3694e00 DB |
401 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
402 | u64, flags, void *, data, u64, size) | |
8e7a3920 | 403 | { |
283ca526 | 404 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); |
8e7a3920 DB |
405 | struct perf_raw_record raw = { |
406 | .frag = { | |
407 | .size = size, | |
408 | .data = data, | |
409 | }, | |
410 | }; | |
411 | ||
412 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) | |
413 | return -EINVAL; | |
414 | ||
283ca526 DB |
415 | perf_sample_data_init(sd, 0, 0); |
416 | sd->raw = &raw; | |
417 | ||
418 | return __bpf_perf_event_output(regs, map, flags, sd); | |
8e7a3920 DB |
419 | } |
420 | ||
a43eec30 AS |
421 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
422 | .func = bpf_perf_event_output, | |
1075ef59 | 423 | .gpl_only = true, |
a43eec30 AS |
424 | .ret_type = RET_INTEGER, |
425 | .arg1_type = ARG_PTR_TO_CTX, | |
426 | .arg2_type = ARG_CONST_MAP_PTR, | |
427 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 428 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 429 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
a43eec30 AS |
430 | }; |
431 | ||
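A hedged sketch of the common pattern built on this helper: streaming a fixed-size sample to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map on the current CPU. The 'events' map, the sample layout and 'ctx' (the kprobe's struct pt_regs pointer) are illustrative.

```c
struct sample {
	__u32 pid;
	__u64 ts;
} data = {
	.pid = bpf_get_current_pid_tgid() >> 32,
	.ts  = bpf_ktime_get_ns(),
};

bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &data, sizeof(data));
```
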
bd570ff9 | 432 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); |
283ca526 | 433 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); |
bd570ff9 | 434 | |
555c8a86 DB |
435 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
436 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | |
bd570ff9 | 437 | { |
283ca526 | 438 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); |
bd570ff9 | 439 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); |
555c8a86 DB |
440 | struct perf_raw_frag frag = { |
441 | .copy = ctx_copy, | |
442 | .size = ctx_size, | |
443 | .data = ctx, | |
444 | }; | |
445 | struct perf_raw_record raw = { | |
446 | .frag = { | |
183fc153 AM |
447 | { |
448 | .next = ctx_size ? &frag : NULL, | |
449 | }, | |
555c8a86 DB |
450 | .size = meta_size, |
451 | .data = meta, | |
452 | }, | |
453 | }; | |
bd570ff9 DB |
454 | |
455 | perf_fetch_caller_regs(regs); | |
283ca526 DB |
456 | perf_sample_data_init(sd, 0, 0); |
457 | sd->raw = &raw; | |
bd570ff9 | 458 | |
283ca526 | 459 | return __bpf_perf_event_output(regs, map, flags, sd); |
bd570ff9 DB |
460 | } |
461 | ||
f3694e00 | 462 | BPF_CALL_0(bpf_get_current_task) |
606274c5 AS |
463 | { |
464 | return (long) current; | |
465 | } | |
466 | ||
467 | static const struct bpf_func_proto bpf_get_current_task_proto = { | |
468 | .func = bpf_get_current_task, | |
469 | .gpl_only = true, | |
470 | .ret_type = RET_INTEGER, | |
471 | }; | |
472 | ||
f3694e00 | 473 | BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) |
60d20f91 | 474 | { |
60d20f91 SD |
475 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
476 | struct cgroup *cgrp; | |
60d20f91 | 477 | |
60d20f91 SD |
478 | if (unlikely(idx >= array->map.max_entries)) |
479 | return -E2BIG; | |
480 | ||
481 | cgrp = READ_ONCE(array->ptrs[idx]); | |
482 | if (unlikely(!cgrp)) | |
483 | return -EAGAIN; | |
484 | ||
485 | return task_under_cgroup_hierarchy(current, cgrp); | |
486 | } | |
487 | ||
488 | static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { | |
489 | .func = bpf_current_task_under_cgroup, | |
490 | .gpl_only = false, | |
491 | .ret_type = RET_INTEGER, | |
492 | .arg1_type = ARG_CONST_MAP_PTR, | |
493 | .arg2_type = ARG_ANYTHING, | |
494 | }; | |
495 | ||
a5e8c070 GB |
496 | BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, |
497 | const void *, unsafe_ptr) | |
498 | { | |
499 | int ret; | |
500 | ||
501 | /* | |
502 | * The strncpy_from_unsafe() call will likely not fill the entire | |
503 | * buffer, but that's okay in this circumstance as we're probing | |
504 | * arbitrary memory anyway similar to bpf_probe_read() and might | |
505 | * as well probe the stack. Thus, memory is explicitly cleared | |
506 | * only in the error case, so that improper users ignoring the |
507 | * return code altogether don't copy garbage; otherwise the length |
508 | * of the string is returned and can be used for bpf_perf_event_output() et al. |
509 | */ | |
510 | ret = strncpy_from_unsafe(dst, unsafe_ptr, size); | |
511 | if (unlikely(ret < 0)) | |
512 | memset(dst, 0, size); | |
513 | ||
514 | return ret; | |
515 | } | |
516 | ||
517 | static const struct bpf_func_proto bpf_probe_read_str_proto = { | |
518 | .func = bpf_probe_read_str, | |
519 | .gpl_only = true, | |
520 | .ret_type = RET_INTEGER, | |
521 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
5c4e1201 | 522 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
a5e8c070 GB |
523 | .arg3_type = ARG_ANYTHING, |
524 | }; | |
525 | ||
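A hedged sketch of the pattern the comment above alludes to: copy a NUL-terminated string and reuse the returned length for bpf_perf_event_output(). 'filename_ptr', 'events' and 'ctx' are illustrative; the mask on the length is the usual trick to give the verifier an explicit upper bound.

```c
char buf[64];
int len = bpf_probe_read_str(buf, sizeof(buf), filename_ptr);

if (len > 0)
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, buf,
			      len & (sizeof(buf) - 1));
```
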
5e43f899 AI |
526 | static const struct bpf_func_proto * |
527 | tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
2541517c AS |
528 | { |
529 | switch (func_id) { | |
530 | case BPF_FUNC_map_lookup_elem: | |
531 | return &bpf_map_lookup_elem_proto; | |
532 | case BPF_FUNC_map_update_elem: | |
533 | return &bpf_map_update_elem_proto; | |
534 | case BPF_FUNC_map_delete_elem: | |
535 | return &bpf_map_delete_elem_proto; | |
536 | case BPF_FUNC_probe_read: | |
537 | return &bpf_probe_read_proto; | |
d9847d31 AS |
538 | case BPF_FUNC_ktime_get_ns: |
539 | return &bpf_ktime_get_ns_proto; | |
04fd61ab AS |
540 | case BPF_FUNC_tail_call: |
541 | return &bpf_tail_call_proto; | |
ffeedafb AS |
542 | case BPF_FUNC_get_current_pid_tgid: |
543 | return &bpf_get_current_pid_tgid_proto; | |
606274c5 AS |
544 | case BPF_FUNC_get_current_task: |
545 | return &bpf_get_current_task_proto; | |
ffeedafb AS |
546 | case BPF_FUNC_get_current_uid_gid: |
547 | return &bpf_get_current_uid_gid_proto; | |
548 | case BPF_FUNC_get_current_comm: | |
549 | return &bpf_get_current_comm_proto; | |
9c959c86 | 550 | case BPF_FUNC_trace_printk: |
0756ea3e | 551 | return bpf_get_trace_printk_proto(); |
ab1973d3 AS |
552 | case BPF_FUNC_get_smp_processor_id: |
553 | return &bpf_get_smp_processor_id_proto; | |
2d0e30c3 DB |
554 | case BPF_FUNC_get_numa_node_id: |
555 | return &bpf_get_numa_node_id_proto; | |
35578d79 KX |
556 | case BPF_FUNC_perf_event_read: |
557 | return &bpf_perf_event_read_proto; | |
96ae5227 SD |
558 | case BPF_FUNC_probe_write_user: |
559 | return bpf_get_probe_write_proto(); | |
60d20f91 SD |
560 | case BPF_FUNC_current_task_under_cgroup: |
561 | return &bpf_current_task_under_cgroup_proto; | |
8937bd80 AS |
562 | case BPF_FUNC_get_prandom_u32: |
563 | return &bpf_get_prandom_u32_proto; | |
a5e8c070 GB |
564 | case BPF_FUNC_probe_read_str: |
565 | return &bpf_probe_read_str_proto; | |
34ea38ca | 566 | #ifdef CONFIG_CGROUPS |
bf6fa2c8 YS |
567 | case BPF_FUNC_get_current_cgroup_id: |
568 | return &bpf_get_current_cgroup_id_proto; | |
34ea38ca | 569 | #endif |
9fd82b61 AS |
570 | default: |
571 | return NULL; | |
572 | } | |
573 | } | |
574 | ||
5e43f899 AI |
575 | static const struct bpf_func_proto * |
576 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
577 | { |
578 | switch (func_id) { | |
a43eec30 AS |
579 | case BPF_FUNC_perf_event_output: |
580 | return &bpf_perf_event_output_proto; | |
d5a3b1f6 AS |
581 | case BPF_FUNC_get_stackid: |
582 | return &bpf_get_stackid_proto; | |
c195651e YS |
583 | case BPF_FUNC_get_stack: |
584 | return &bpf_get_stack_proto; | |
908432ca YS |
585 | case BPF_FUNC_perf_event_read_value: |
586 | return &bpf_perf_event_read_value_proto; | |
9802d865 JB |
587 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
588 | case BPF_FUNC_override_return: | |
589 | return &bpf_override_return_proto; | |
590 | #endif | |
2541517c | 591 | default: |
5e43f899 | 592 | return tracing_func_proto(func_id, prog); |
2541517c AS |
593 | } |
594 | } | |
595 | ||
596 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | |
19de99f7 | 597 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 598 | const struct bpf_prog *prog, |
23994631 | 599 | struct bpf_insn_access_aux *info) |
2541517c | 600 | { |
2541517c AS |
601 | if (off < 0 || off >= sizeof(struct pt_regs)) |
602 | return false; | |
2541517c AS |
603 | if (type != BPF_READ) |
604 | return false; | |
2541517c AS |
605 | if (off % size != 0) |
606 | return false; | |
2d071c64 DB |
607 | /* |
608 | * Assertion for 32 bit to make sure last 8 byte access | |
609 | * (BPF_DW) to the last 4 byte member is disallowed. | |
610 | */ | |
611 | if (off + size > sizeof(struct pt_regs)) | |
612 | return false; | |
613 | ||
2541517c AS |
614 | return true; |
615 | } | |
616 | ||
7de16e3a | 617 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
2541517c AS |
618 | .get_func_proto = kprobe_prog_func_proto, |
619 | .is_valid_access = kprobe_prog_is_valid_access, | |
620 | }; | |
621 | ||
7de16e3a JK |
622 | const struct bpf_prog_ops kprobe_prog_ops = { |
623 | }; | |
624 | ||
f3694e00 DB |
625 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
626 | u64, flags, void *, data, u64, size) | |
9940d67c | 627 | { |
f3694e00 DB |
628 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
629 | ||
9940d67c AS |
630 | /* |
631 | * r1 points to perf tracepoint buffer where first 8 bytes are hidden | |
632 | * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it | |
f3694e00 | 633 | * from there and call the same bpf_perf_event_output() helper inline. |
9940d67c | 634 | */ |
f3694e00 | 635 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
9940d67c AS |
636 | } |
637 | ||
638 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { | |
639 | .func = bpf_perf_event_output_tp, | |
640 | .gpl_only = true, | |
641 | .ret_type = RET_INTEGER, | |
642 | .arg1_type = ARG_PTR_TO_CTX, | |
643 | .arg2_type = ARG_CONST_MAP_PTR, | |
644 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 645 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 646 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
9940d67c AS |
647 | }; |
648 | ||
f3694e00 DB |
649 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
650 | u64, flags) | |
9940d67c | 651 | { |
f3694e00 | 652 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
9940d67c | 653 | |
f3694e00 DB |
654 | /* |
655 | * Same comment as in bpf_perf_event_output_tp(), only that this time | |
656 | * the other helper's function body cannot be inlined due to being | |
657 | * external, thus we need to call raw helper function. | |
658 | */ | |
659 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
660 | flags, 0, 0); | |
9940d67c AS |
661 | } |
662 | ||
663 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { | |
664 | .func = bpf_get_stackid_tp, | |
665 | .gpl_only = true, | |
666 | .ret_type = RET_INTEGER, | |
667 | .arg1_type = ARG_PTR_TO_CTX, | |
668 | .arg2_type = ARG_CONST_MAP_PTR, | |
669 | .arg3_type = ARG_ANYTHING, | |
670 | }; | |
671 | ||
c195651e YS |
672 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
673 | u64, flags) | |
674 | { | |
675 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; | |
676 | ||
677 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
678 | (unsigned long) size, flags, 0); | |
679 | } | |
680 | ||
681 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { | |
682 | .func = bpf_get_stack_tp, | |
683 | .gpl_only = true, | |
684 | .ret_type = RET_INTEGER, | |
685 | .arg1_type = ARG_PTR_TO_CTX, | |
686 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
687 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
688 | .arg4_type = ARG_ANYTHING, | |
689 | }; | |
690 | ||
5e43f899 AI |
691 | static const struct bpf_func_proto * |
692 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
f005afed YS |
693 | { |
694 | switch (func_id) { | |
695 | case BPF_FUNC_perf_event_output: | |
696 | return &bpf_perf_event_output_proto_tp; | |
697 | case BPF_FUNC_get_stackid: | |
698 | return &bpf_get_stackid_proto_tp; | |
c195651e YS |
699 | case BPF_FUNC_get_stack: |
700 | return &bpf_get_stack_proto_tp; | |
f005afed | 701 | default: |
5e43f899 | 702 | return tracing_func_proto(func_id, prog); |
f005afed YS |
703 | } |
704 | } | |
705 | ||
706 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | |
5e43f899 | 707 | const struct bpf_prog *prog, |
f005afed YS |
708 | struct bpf_insn_access_aux *info) |
709 | { | |
710 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | |
711 | return false; | |
712 | if (type != BPF_READ) | |
713 | return false; | |
714 | if (off % size != 0) | |
715 | return false; | |
716 | ||
717 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); | |
718 | return true; | |
719 | } | |
720 | ||
721 | const struct bpf_verifier_ops tracepoint_verifier_ops = { | |
722 | .get_func_proto = tp_prog_func_proto, | |
723 | .is_valid_access = tp_prog_is_valid_access, | |
724 | }; | |
725 | ||
726 | const struct bpf_prog_ops tracepoint_prog_ops = { | |
727 | }; | |
728 | ||
729 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, | |
4bebdc7a YS |
730 | struct bpf_perf_event_value *, buf, u32, size) |
731 | { | |
732 | int err = -EINVAL; | |
733 | ||
734 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
735 | goto clear; | |
736 | err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, | |
737 | &buf->running); | |
738 | if (unlikely(err)) | |
739 | goto clear; | |
740 | return 0; | |
741 | clear: | |
742 | memset(buf, 0, size); | |
743 | return err; | |
744 | } | |
745 | ||
f005afed YS |
746 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
747 | .func = bpf_perf_prog_read_value, | |
4bebdc7a YS |
748 | .gpl_only = true, |
749 | .ret_type = RET_INTEGER, | |
750 | .arg1_type = ARG_PTR_TO_CTX, | |
751 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
752 | .arg3_type = ARG_CONST_SIZE, | |
753 | }; | |
754 | ||
5e43f899 AI |
755 | static const struct bpf_func_proto * |
756 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
757 | { |
758 | switch (func_id) { | |
759 | case BPF_FUNC_perf_event_output: | |
9940d67c | 760 | return &bpf_perf_event_output_proto_tp; |
9fd82b61 | 761 | case BPF_FUNC_get_stackid: |
9940d67c | 762 | return &bpf_get_stackid_proto_tp; |
c195651e YS |
763 | case BPF_FUNC_get_stack: |
764 | return &bpf_get_stack_proto_tp; | |
4bebdc7a | 765 | case BPF_FUNC_perf_prog_read_value: |
f005afed | 766 | return &bpf_perf_prog_read_value_proto; |
9fd82b61 | 767 | default: |
5e43f899 | 768 | return tracing_func_proto(func_id, prog); |
9fd82b61 AS |
769 | } |
770 | } | |
771 | ||
c4f6699d AS |
772 | /* |
773 | * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp | |
774 | * to avoid potential recursive reuse issue when/if tracepoints are added | |
c195651e | 775 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack |
c4f6699d AS |
776 | */ |
777 | static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs); | |
778 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, | |
779 | struct bpf_map *, map, u64, flags, void *, data, u64, size) | |
780 | { | |
781 | struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
782 | ||
783 | perf_fetch_caller_regs(regs); | |
784 | return ____bpf_perf_event_output(regs, map, flags, data, size); | |
785 | } | |
786 | ||
787 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { | |
788 | .func = bpf_perf_event_output_raw_tp, | |
789 | .gpl_only = true, | |
790 | .ret_type = RET_INTEGER, | |
791 | .arg1_type = ARG_PTR_TO_CTX, | |
792 | .arg2_type = ARG_CONST_MAP_PTR, | |
793 | .arg3_type = ARG_ANYTHING, | |
794 | .arg4_type = ARG_PTR_TO_MEM, | |
795 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, | |
796 | }; | |
797 | ||
798 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, | |
799 | struct bpf_map *, map, u64, flags) | |
800 | { | |
801 | struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
802 | ||
803 | perf_fetch_caller_regs(regs); | |
804 | /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ | |
805 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
806 | flags, 0, 0); | |
807 | } | |
808 | ||
809 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { | |
810 | .func = bpf_get_stackid_raw_tp, | |
811 | .gpl_only = true, | |
812 | .ret_type = RET_INTEGER, | |
813 | .arg1_type = ARG_PTR_TO_CTX, | |
814 | .arg2_type = ARG_CONST_MAP_PTR, | |
815 | .arg3_type = ARG_ANYTHING, | |
816 | }; | |
817 | ||
c195651e YS |
818 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
819 | void *, buf, u32, size, u64, flags) | |
820 | { | |
821 | struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
822 | ||
823 | perf_fetch_caller_regs(regs); | |
824 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
825 | (unsigned long) size, flags, 0); | |
826 | } | |
827 | ||
828 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { | |
829 | .func = bpf_get_stack_raw_tp, | |
830 | .gpl_only = true, | |
831 | .ret_type = RET_INTEGER, | |
832 | .arg1_type = ARG_PTR_TO_CTX, | |
833 | .arg2_type = ARG_PTR_TO_MEM, | |
834 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
835 | .arg4_type = ARG_ANYTHING, | |
836 | }; | |
837 | ||
5e43f899 AI |
838 | static const struct bpf_func_proto * |
839 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
c4f6699d AS |
840 | { |
841 | switch (func_id) { | |
842 | case BPF_FUNC_perf_event_output: | |
843 | return &bpf_perf_event_output_proto_raw_tp; | |
844 | case BPF_FUNC_get_stackid: | |
845 | return &bpf_get_stackid_proto_raw_tp; | |
c195651e YS |
846 | case BPF_FUNC_get_stack: |
847 | return &bpf_get_stack_proto_raw_tp; | |
c4f6699d | 848 | default: |
5e43f899 | 849 | return tracing_func_proto(func_id, prog); |
c4f6699d AS |
850 | } |
851 | } | |
852 | ||
853 | static bool raw_tp_prog_is_valid_access(int off, int size, | |
854 | enum bpf_access_type type, | |
5e43f899 | 855 | const struct bpf_prog *prog, |
c4f6699d AS |
856 | struct bpf_insn_access_aux *info) |
857 | { | |
858 | /* largest tracepoint in the kernel has 12 args */ | |
859 | if (off < 0 || off >= sizeof(__u64) * 12) | |
860 | return false; | |
861 | if (type != BPF_READ) | |
862 | return false; | |
863 | if (off % size != 0) | |
864 | return false; | |
865 | return true; | |
866 | } | |
867 | ||
868 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { | |
869 | .get_func_proto = raw_tp_prog_func_proto, | |
870 | .is_valid_access = raw_tp_prog_is_valid_access, | |
871 | }; | |
872 | ||
873 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { | |
874 | }; | |
875 | ||
0515e599 | 876 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 877 | const struct bpf_prog *prog, |
23994631 | 878 | struct bpf_insn_access_aux *info) |
0515e599 | 879 | { |
95da0cdb | 880 | const int size_u64 = sizeof(u64); |
31fd8581 | 881 | |
0515e599 AS |
882 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
883 | return false; | |
884 | if (type != BPF_READ) | |
885 | return false; | |
bc23105c DB |
886 | if (off % size != 0) { |
887 | if (sizeof(unsigned long) != 4) | |
888 | return false; | |
889 | if (size != 8) | |
890 | return false; | |
891 | if (off % size != 4) | |
892 | return false; | |
893 | } | |
31fd8581 | 894 | |
f96da094 DB |
895 | switch (off) { |
896 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | |
95da0cdb TQ |
897 | bpf_ctx_record_field_size(info, size_u64); |
898 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
899 | return false; | |
900 | break; | |
901 | case bpf_ctx_range(struct bpf_perf_event_data, addr): | |
902 | bpf_ctx_record_field_size(info, size_u64); | |
903 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
23994631 | 904 | return false; |
f96da094 DB |
905 | break; |
906 | default: | |
0515e599 AS |
907 | if (size != sizeof(long)) |
908 | return false; | |
909 | } | |
f96da094 | 910 | |
0515e599 AS |
911 | return true; |
912 | } | |
913 | ||
6b8cc1d1 DB |
914 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
915 | const struct bpf_insn *si, | |
0515e599 | 916 | struct bpf_insn *insn_buf, |
f96da094 | 917 | struct bpf_prog *prog, u32 *target_size) |
0515e599 AS |
918 | { |
919 | struct bpf_insn *insn = insn_buf; | |
920 | ||
6b8cc1d1 | 921 | switch (si->off) { |
0515e599 | 922 | case offsetof(struct bpf_perf_event_data, sample_period): |
f035a515 | 923 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 924 | data), si->dst_reg, si->src_reg, |
0515e599 | 925 | offsetof(struct bpf_perf_event_data_kern, data)); |
6b8cc1d1 | 926 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
f96da094 DB |
927 | bpf_target_off(struct perf_sample_data, period, 8, |
928 | target_size)); | |
0515e599 | 929 | break; |
95da0cdb TQ |
930 | case offsetof(struct bpf_perf_event_data, addr): |
931 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | |
932 | data), si->dst_reg, si->src_reg, | |
933 | offsetof(struct bpf_perf_event_data_kern, data)); | |
934 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | |
935 | bpf_target_off(struct perf_sample_data, addr, 8, | |
936 | target_size)); | |
937 | break; | |
0515e599 | 938 | default: |
f035a515 | 939 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 940 | regs), si->dst_reg, si->src_reg, |
0515e599 | 941 | offsetof(struct bpf_perf_event_data_kern, regs)); |
6b8cc1d1 DB |
942 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
943 | si->off); | |
0515e599 AS |
944 | break; |
945 | } | |
946 | ||
947 | return insn - insn_buf; | |
948 | } | |
949 | ||
7de16e3a | 950 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
f005afed | 951 | .get_func_proto = pe_prog_func_proto, |
0515e599 AS |
952 | .is_valid_access = pe_prog_is_valid_access, |
953 | .convert_ctx_access = pe_prog_convert_ctx_access, | |
954 | }; | |
7de16e3a JK |
955 | |
956 | const struct bpf_prog_ops perf_event_prog_ops = { | |
957 | }; | |
e87c6bc3 YS |
958 | |
959 | static DEFINE_MUTEX(bpf_event_mutex); | |
960 | ||
c8c088ba YS |
961 | #define BPF_TRACE_MAX_PROGS 64 |
962 | ||
e87c6bc3 YS |
963 | int perf_event_attach_bpf_prog(struct perf_event *event, |
964 | struct bpf_prog *prog) | |
965 | { | |
966 | struct bpf_prog_array __rcu *old_array; | |
967 | struct bpf_prog_array *new_array; | |
968 | int ret = -EEXIST; | |
969 | ||
9802d865 | 970 | /* |
b4da3340 MH |
971 | * Kprobe override only works if the probe is on the function entry, |
972 | * and only if the function is on the error-injection opt-in list. |
9802d865 JB |
973 | */ |
974 | if (prog->kprobe_override && | |
b4da3340 | 975 | (!trace_kprobe_on_func_entry(event->tp_event) || |
9802d865 JB |
976 | !trace_kprobe_error_injectable(event->tp_event))) |
977 | return -EINVAL; | |
978 | ||
e87c6bc3 YS |
979 | mutex_lock(&bpf_event_mutex); |
980 | ||
981 | if (event->prog) | |
07c41a29 | 982 | goto unlock; |
e87c6bc3 | 983 | |
07c41a29 | 984 | old_array = event->tp_event->prog_array; |
c8c088ba YS |
985 | if (old_array && |
986 | bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { | |
987 | ret = -E2BIG; | |
988 | goto unlock; | |
989 | } | |
990 | ||
e87c6bc3 YS |
991 | ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); |
992 | if (ret < 0) | |
07c41a29 | 993 | goto unlock; |
e87c6bc3 YS |
994 | |
995 | /* set the new array to event->tp_event and set event->prog */ | |
996 | event->prog = prog; | |
997 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
998 | bpf_prog_array_free(old_array); | |
999 | ||
07c41a29 | 1000 | unlock: |
e87c6bc3 YS |
1001 | mutex_unlock(&bpf_event_mutex); |
1002 | return ret; | |
1003 | } | |
1004 | ||
1005 | void perf_event_detach_bpf_prog(struct perf_event *event) | |
1006 | { | |
1007 | struct bpf_prog_array __rcu *old_array; | |
1008 | struct bpf_prog_array *new_array; | |
1009 | int ret; | |
1010 | ||
1011 | mutex_lock(&bpf_event_mutex); | |
1012 | ||
1013 | if (!event->prog) | |
07c41a29 | 1014 | goto unlock; |
e87c6bc3 | 1015 | |
07c41a29 | 1016 | old_array = event->tp_event->prog_array; |
e87c6bc3 | 1017 | ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); |
170a7e3e SY |
1018 | if (ret == -ENOENT) |
1019 | goto unlock; | |
e87c6bc3 YS |
1020 | if (ret < 0) { |
1021 | bpf_prog_array_delete_safe(old_array, event->prog); | |
1022 | } else { | |
1023 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
1024 | bpf_prog_array_free(old_array); | |
1025 | } | |
1026 | ||
1027 | bpf_prog_put(event->prog); | |
1028 | event->prog = NULL; | |
1029 | ||
07c41a29 | 1030 | unlock: |
e87c6bc3 YS |
1031 | mutex_unlock(&bpf_event_mutex); |
1032 | } | |
f371b304 | 1033 | |
f4e2298e | 1034 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
f371b304 YS |
1035 | { |
1036 | struct perf_event_query_bpf __user *uquery = info; | |
1037 | struct perf_event_query_bpf query = {}; | |
3a38bb98 | 1038 | u32 *ids, prog_cnt, ids_len; |
f371b304 YS |
1039 | int ret; |
1040 | ||
1041 | if (!capable(CAP_SYS_ADMIN)) | |
1042 | return -EPERM; | |
1043 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | |
1044 | return -EINVAL; | |
1045 | if (copy_from_user(&query, uquery, sizeof(query))) | |
1046 | return -EFAULT; | |
3a38bb98 YS |
1047 | |
1048 | ids_len = query.ids_len; | |
1049 | if (ids_len > BPF_TRACE_MAX_PROGS) | |
9c481b90 | 1050 | return -E2BIG; |
3a38bb98 YS |
1051 | ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); |
1052 | if (!ids) | |
1053 | return -ENOMEM; | |
1054 | /* | |
1055 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which | |
1056 | * is required when user only wants to check for uquery->prog_cnt. | |
1057 | * There is no need to check for it since the case is handled | |
1058 | * gracefully in bpf_prog_array_copy_info. | |
1059 | */ | |
f371b304 YS |
1060 | |
1061 | mutex_lock(&bpf_event_mutex); | |
1062 | ret = bpf_prog_array_copy_info(event->tp_event->prog_array, | |
3a38bb98 YS |
1063 | ids, |
1064 | ids_len, | |
1065 | &prog_cnt); | |
f371b304 YS |
1066 | mutex_unlock(&bpf_event_mutex); |
1067 | ||
3a38bb98 YS |
1068 | if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || |
1069 | copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) | |
1070 | ret = -EFAULT; | |
1071 | ||
1072 | kfree(ids); | |
f371b304 YS |
1073 | return ret; |
1074 | } | |
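From user space, this function backs the PERF_EVENT_IOC_QUERY_BPF ioctl. A hedged sketch ('perf_fd' is assumed to be an already-opened tracepoint perf event fd with programs attached; error handling omitted; 64 mirrors the kernel-side BPF_TRACE_MAX_PROGS cap):

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

void query_attached_progs(int perf_fd)
{
	struct {
		struct perf_event_query_bpf hdr;
		__u32 ids[64];
	} q;

	memset(&q, 0, sizeof(q));
	q.hdr.ids_len = 64;

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &q.hdr) == 0)
		printf("%u program(s) attached, first id %u\n",
		       q.hdr.prog_cnt, q.hdr.prog_cnt ? q.ids[0] : 0);
}
```
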
c4f6699d AS |
1075 | |
1076 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; | |
1077 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; | |
1078 | ||
1079 | struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) | |
1080 | { | |
1081 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; | |
1082 | ||
1083 | for (; btp < __stop__bpf_raw_tp; btp++) { | |
1084 | if (!strcmp(btp->tp->name, name)) | |
1085 | return btp; | |
1086 | } | |
1087 | return NULL; | |
1088 | } | |
1089 | ||
1090 | static __always_inline | |
1091 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) | |
1092 | { | |
1093 | rcu_read_lock(); | |
1094 | preempt_disable(); | |
1095 | (void) BPF_PROG_RUN(prog, args); | |
1096 | preempt_enable(); | |
1097 | rcu_read_unlock(); | |
1098 | } | |
1099 | ||
1100 | #define UNPACK(...) __VA_ARGS__ | |
1101 | #define REPEAT_1(FN, DL, X, ...) FN(X) | |
1102 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) | |
1103 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) | |
1104 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) | |
1105 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) | |
1106 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) | |
1107 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) | |
1108 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) | |
1109 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) | |
1110 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) | |
1111 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) | |
1112 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) | |
1113 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) | |
1114 | ||
1115 | #define SARG(X) u64 arg##X | |
1116 | #define COPY(X) args[X] = arg##X | |
1117 | ||
1118 | #define __DL_COM (,) | |
1119 | #define __DL_SEM (;) | |
1120 | ||
1121 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 | |
1122 | ||
1123 | #define BPF_TRACE_DEFN_x(x) \ | |
1124 | void bpf_trace_run##x(struct bpf_prog *prog, \ | |
1125 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ | |
1126 | { \ | |
1127 | u64 args[x]; \ | |
1128 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ | |
1129 | __bpf_trace_run(prog, args); \ | |
1130 | } \ | |
1131 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) | |
1132 | BPF_TRACE_DEFN_x(1); | |
1133 | BPF_TRACE_DEFN_x(2); | |
1134 | BPF_TRACE_DEFN_x(3); | |
1135 | BPF_TRACE_DEFN_x(4); | |
1136 | BPF_TRACE_DEFN_x(5); | |
1137 | BPF_TRACE_DEFN_x(6); | |
1138 | BPF_TRACE_DEFN_x(7); | |
1139 | BPF_TRACE_DEFN_x(8); | |
1140 | BPF_TRACE_DEFN_x(9); | |
1141 | BPF_TRACE_DEFN_x(10); | |
1142 | BPF_TRACE_DEFN_x(11); | |
1143 | BPF_TRACE_DEFN_x(12); | |
1144 | ||
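For readability, here is what one of these definitions, BPF_TRACE_DEFN_x(2), expands to once the REPEAT()/SARG()/COPY() machinery above is applied (expansion written out by hand, whitespace added):

```c
void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
{
	u64 args[2];

	args[0] = arg0;
	args[1] = arg1;
	__bpf_trace_run(prog, args);
}
EXPORT_SYMBOL_GPL(bpf_trace_run2);
```
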
1145 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1146 | { | |
1147 | struct tracepoint *tp = btp->tp; | |
1148 | ||
1149 | /* | |
1150 | * check that program doesn't access arguments beyond what's | |
1151 | * available in this tracepoint | |
1152 | */ | |
1153 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) | |
1154 | return -EINVAL; | |
1155 | ||
1156 | return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); | |
1157 | } | |
1158 | ||
1159 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1160 | { | |
1161 | int err; | |
1162 | ||
1163 | mutex_lock(&bpf_event_mutex); | |
1164 | err = __bpf_probe_register(btp, prog); | |
1165 | mutex_unlock(&bpf_event_mutex); | |
1166 | return err; | |
1167 | } | |
1168 | ||
1169 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1170 | { | |
1171 | int err; | |
1172 | ||
1173 | mutex_lock(&bpf_event_mutex); | |
1174 | err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); | |
1175 | mutex_unlock(&bpf_event_mutex); | |
1176 | return err; | |
1177 | } | |
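These register/unregister paths are reached from the BPF_RAW_TRACEPOINT_OPEN command of the bpf(2) syscall; closing the fd that command returns detaches the program again. A hedged user-space sketch ('prog_fd' is assumed to be a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program and 'sched_switch' an existing tracepoint; error handling omitted):

```c
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int attach_raw_tp(int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
```
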
41bdc4b4 YS |
1178 | |
1179 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | |
1180 | u32 *fd_type, const char **buf, | |
1181 | u64 *probe_offset, u64 *probe_addr) | |
1182 | { | |
1183 | bool is_tracepoint, is_syscall_tp; | |
1184 | struct bpf_prog *prog; | |
1185 | int flags, err = 0; | |
1186 | ||
1187 | prog = event->prog; | |
1188 | if (!prog) | |
1189 | return -ENOENT; | |
1190 | ||
1191 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ | |
1192 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) | |
1193 | return -EOPNOTSUPP; | |
1194 | ||
1195 | *prog_id = prog->aux->id; | |
1196 | flags = event->tp_event->flags; | |
1197 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; | |
1198 | is_syscall_tp = is_syscall_trace_event(event->tp_event); | |
1199 | ||
1200 | if (is_tracepoint || is_syscall_tp) { | |
1201 | *buf = is_tracepoint ? event->tp_event->tp->name | |
1202 | : event->tp_event->name; | |
1203 | *fd_type = BPF_FD_TYPE_TRACEPOINT; | |
1204 | *probe_offset = 0x0; | |
1205 | *probe_addr = 0x0; | |
1206 | } else { | |
1207 | /* kprobe/uprobe */ | |
1208 | err = -EOPNOTSUPP; | |
1209 | #ifdef CONFIG_KPROBE_EVENTS | |
1210 | if (flags & TRACE_EVENT_FL_KPROBE) | |
1211 | err = bpf_get_kprobe_info(event, fd_type, buf, | |
1212 | probe_offset, probe_addr, | |
1213 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1214 | #endif | |
1215 | #ifdef CONFIG_UPROBE_EVENTS | |
1216 | if (flags & TRACE_EVENT_FL_UPROBE) | |
1217 | err = bpf_get_uprobe_info(event, fd_type, buf, | |
1218 | probe_offset, | |
1219 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1220 | #endif | |
1221 | } | |
1222 | ||
1223 | return err; | |
1224 | } |