Git Repo - linux.git/blame - kernel/trace/bpf_trace.c
bpf: Add PTR_TO_BTF_ID_OR_NULL support
179a0cc4 1// SPDX-License-Identifier: GPL-2.0
2541517c 2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
0515e599 3 * Copyright (c) 2016 Facebook
2541517c
AS
4 */
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/slab.h>
8#include <linux/bpf.h>
0515e599 9#include <linux/bpf_perf_event.h>
2541517c
AS
10#include <linux/filter.h>
11#include <linux/uaccess.h>
9c959c86 12#include <linux/ctype.h>
9802d865 13#include <linux/kprobes.h>
41bdc4b4 14#include <linux/syscalls.h>
540adea3 15#include <linux/error-injection.h>
9802d865 16
c7b6f29b
NA
17#include <asm/tlb.h>
18
9802d865 19#include "trace_probe.h"
2541517c
AS
20#include "trace.h"
21
e672db03
SF
22#define bpf_event_rcu_dereference(p) \
23 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
24
a38d1107
MM
25#ifdef CONFIG_MODULES
26struct bpf_trace_module {
27 struct module *module;
28 struct list_head list;
29};
30
31static LIST_HEAD(bpf_trace_modules);
32static DEFINE_MUTEX(bpf_module_mutex);
33
34static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
35{
36 struct bpf_raw_event_map *btp, *ret = NULL;
37 struct bpf_trace_module *btm;
38 unsigned int i;
39
40 mutex_lock(&bpf_module_mutex);
41 list_for_each_entry(btm, &bpf_trace_modules, list) {
42 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 btp = &btm->module->bpf_raw_events[i];
44 if (!strcmp(btp->tp->name, name)) {
45 if (try_module_get(btm->module))
46 ret = btp;
47 goto out;
48 }
49 }
50 }
51out:
52 mutex_unlock(&bpf_module_mutex);
53 return ret;
54}
55#else
56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57{
58 return NULL;
59}
60#endif /* CONFIG_MODULES */
61
035226b9 62u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
c195651e 63u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
035226b9 64
2541517c
AS
65/**
66 * trace_call_bpf - invoke BPF program
e87c6bc3 67 * @call: tracepoint event
2541517c
AS
68 * @ctx: opaque context pointer
69 *
70 * kprobe handlers execute BPF programs via this helper.
71 * Can be used from static tracepoints in the future.
72 *
73 * Return: BPF programs always return an integer which is interpreted by
74 * kprobe handler as:
75 * 0 - return from kprobe (event is filtered out)
76 * 1 - store kprobe event into ring buffer
77 * Other values are reserved and currently alias to 1
78 */
e87c6bc3 79unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
2541517c
AS
80{
81 unsigned int ret;
82
83 if (in_nmi()) /* not supported yet */
84 return 1;
85
b0a81b94 86 cant_sleep();
2541517c
AS
87
88 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
89 /*
90 * since some bpf program is already running on this cpu,
91 * don't call into another bpf program (same or different)
92 * and don't send kprobe event into ring-buffer,
93 * so return zero here
94 */
95 ret = 0;
96 goto out;
97 }
98
e87c6bc3
YS
99 /*
100 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
101 * to all call sites, we did a bpf_prog_array_valid() there to check
102 * whether call->prog_array is empty or not, which is
 103 * a heuristic to speed up execution.
104 *
105 * If bpf_prog_array_valid() fetched prog_array was
106 * non-NULL, we go into trace_call_bpf() and do the actual
107 * proper rcu_dereference() under RCU lock.
108 * If it turns out that prog_array is NULL then, we bail out.
109 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 110 * was NULL, you'll skip the prog_array with the risk of missing
 111 * out on events when it was updated in between this and the
 112 * rcu_dereference(), which is an accepted risk.
113 */
114 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
2541517c
AS
115
116 out:
117 __this_cpu_dec(bpf_prog_active);
2541517c
AS
118
119 return ret;
120}
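
For context, the probe side consumes this return value roughly as in the following minimal sketch, modeled on kprobe_perf_func() in trace_kprobe.c (the handler name here is hypothetical):

	static void example_kprobe_handler(struct trace_event_call *call,
					   struct pt_regs *regs)
	{
		/* bpf_prog_array_valid() is the cheap pre-check described
		 * in the comment inside trace_call_bpf() above */
		if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
			return;	/* 0: event filtered out, nothing recorded */

		/* non-zero: store the kprobe event into the ring buffer */
	}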
2541517c 121
9802d865
JB
122#ifdef CONFIG_BPF_KPROBE_OVERRIDE
123BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
124{
9802d865 125 regs_set_return_value(regs, rc);
540adea3 126 override_function_with_return(regs);
9802d865
JB
127 return 0;
128}
129
130static const struct bpf_func_proto bpf_override_return_proto = {
131 .func = bpf_override_return,
132 .gpl_only = true,
133 .ret_type = RET_INTEGER,
134 .arg1_type = ARG_PTR_TO_CTX,
135 .arg2_type = ARG_ANYTHING,
136};
137#endif
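
A minimal BPF-side sketch of bpf_override_return(), assuming the usual clang/libbpf build and that the attach target is on the kernel's error-injection allow list (the function chosen here is illustrative):

	SEC("kprobe/should_failslab")
	int override_ret(struct pt_regs *ctx)
	{
		/* make the probed function return -ENOMEM to its caller */
		bpf_override_return(ctx, -ENOMEM);
		return 0;
	}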
138
6ae08ae3
DB
139BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
140 const void __user *, unsafe_ptr)
2541517c 141{
6ae08ae3 142 int ret = probe_user_read(dst, unsafe_ptr, size);
2541517c 143
6ae08ae3
DB
144 if (unlikely(ret < 0))
145 memset(dst, 0, size);
146
147 return ret;
148}
149
150static const struct bpf_func_proto bpf_probe_read_user_proto = {
151 .func = bpf_probe_read_user,
152 .gpl_only = true,
153 .ret_type = RET_INTEGER,
154 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
155 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
156 .arg3_type = ARG_ANYTHING,
157};
158
159BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
160 const void __user *, unsafe_ptr)
161{
162 int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
163
164 if (unlikely(ret < 0))
165 memset(dst, 0, size);
166
167 return ret;
168}
169
170static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
171 .func = bpf_probe_read_user_str,
172 .gpl_only = true,
173 .ret_type = RET_INTEGER,
174 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
175 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
176 .arg3_type = ARG_ANYTHING,
177};
178
179static __always_inline int
180bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
181 const bool compat)
182{
183 int ret = security_locked_down(LOCKDOWN_BPF_READ);
9d1f8be5 184
6ae08ae3
DB
185 if (unlikely(ret < 0))
186 goto out;
187 ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
188 probe_kernel_read_strict(dst, unsafe_ptr, size);
074f528e 189 if (unlikely(ret < 0))
9d1f8be5 190out:
074f528e 191 memset(dst, 0, size);
6ae08ae3
DB
192 return ret;
193}
074f528e 194
6ae08ae3
DB
195BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
196 const void *, unsafe_ptr)
197{
198 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
199}
200
201static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
202 .func = bpf_probe_read_kernel,
203 .gpl_only = true,
204 .ret_type = RET_INTEGER,
205 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
206 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
207 .arg3_type = ARG_ANYTHING,
208};
209
210BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
211 const void *, unsafe_ptr)
212{
213 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
214}
215
216static const struct bpf_func_proto bpf_probe_read_compat_proto = {
217 .func = bpf_probe_read_compat,
218 .gpl_only = true,
219 .ret_type = RET_INTEGER,
220 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
221 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
222 .arg3_type = ARG_ANYTHING,
223};
224
225static __always_inline int
226bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
227 const bool compat)
228{
229 int ret = security_locked_down(LOCKDOWN_BPF_READ);
230
231 if (unlikely(ret < 0))
232 goto out;
233 /*
234 * The strncpy_from_unsafe_*() call will likely not fill the entire
235 * buffer, but that's okay in this circumstance as we're probing
236 * arbitrary memory anyway similar to bpf_probe_read_*() and might
237 * as well probe the stack. Thus, memory is explicitly cleared
238 * only in error case, so that improper users ignoring return
 239 * code altogether don't copy garbage; otherwise the length of the
 240 * string is returned, which can be used for bpf_perf_event_output() et al.
241 */
242 ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
243 strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
244 if (unlikely(ret < 0))
245out:
246 memset(dst, 0, size);
074f528e 247 return ret;
2541517c
AS
248}
249
6ae08ae3
DB
250BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
251 const void *, unsafe_ptr)
252{
253 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
254}
255
256static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
257 .func = bpf_probe_read_kernel_str,
258 .gpl_only = true,
259 .ret_type = RET_INTEGER,
260 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
261 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
262 .arg3_type = ARG_ANYTHING,
263};
264
265BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
266 const void *, unsafe_ptr)
267{
268 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
269}
270
271static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
272 .func = bpf_probe_read_compat_str,
2541517c
AS
273 .gpl_only = true,
274 .ret_type = RET_INTEGER,
39f19ebb 275 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
9c019e2b 276 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
2541517c
AS
277 .arg3_type = ARG_ANYTHING,
278};
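
For illustration, a BPF-side sketch of the user/kernel split helpers (assuming clang + libbpf and its PT_REGS_PARM macros; do_sys_open()'s second argument is the user-space filename pointer):

	SEC("kprobe/do_sys_open")
	int trace_open(struct pt_regs *ctx)
	{
		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
		char fname[64];

		/* returns string length on success, < 0 (buffer zeroed) on error */
		if (bpf_probe_read_user_str(fname, sizeof(fname), uptr) < 0)
			return 0;
		/* fname now holds a NUL-terminated copy of the user string */
		return 0;
	}

bpf_probe_read_kernel() and bpf_probe_read_kernel_str() follow the same pattern for kernel pointers; the legacy bpf_probe_read() maps to the _compat variants above.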
279
eb1b6688 280BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
f3694e00 281 u32, size)
96ae5227 282{
96ae5227
SD
283 /*
284 * Ensure we're in user context which is safe for the helper to
285 * run. This helper has no business in a kthread.
286 *
287 * access_ok() should prevent writing to non-user memory, but in
288 * some situations (nommu, temporary switch, etc) access_ok() does
289 * not provide enough validation, hence the check on KERNEL_DS.
c7b6f29b
NA
290 *
291 * nmi_uaccess_okay() ensures the probe is not run in an interim
292 * state, when the task or mm are switched. This is specifically
293 * required to prevent the use of temporary mm.
96ae5227
SD
294 */
295
296 if (unlikely(in_interrupt() ||
297 current->flags & (PF_KTHREAD | PF_EXITING)))
298 return -EPERM;
db68ce10 299 if (unlikely(uaccess_kernel()))
96ae5227 300 return -EPERM;
c7b6f29b
NA
301 if (unlikely(!nmi_uaccess_okay()))
302 return -EPERM;
96ae5227 303
eb1b6688 304 return probe_user_write(unsafe_ptr, src, size);
96ae5227
SD
305}
306
307static const struct bpf_func_proto bpf_probe_write_user_proto = {
308 .func = bpf_probe_write_user,
309 .gpl_only = true,
310 .ret_type = RET_INTEGER,
311 .arg1_type = ARG_ANYTHING,
39f19ebb
AS
312 .arg2_type = ARG_PTR_TO_MEM,
313 .arg3_type = ARG_CONST_SIZE,
96ae5227
SD
314};
315
316static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
317{
318 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
319 current->comm, task_pid_nr(current));
320
321 return &bpf_probe_write_user_proto;
322}
323
9c959c86 324/*
7bda4b40
JF
325 * Only limited trace_printk() conversion specifiers allowed:
326 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
9c959c86 327 */
f3694e00
DB
328BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
329 u64, arg2, u64, arg3)
9c959c86 330{
8d3b7dce 331 bool str_seen = false;
9c959c86
AS
332 int mod[3] = {};
333 int fmt_cnt = 0;
8d3b7dce
AS
334 u64 unsafe_addr;
335 char buf[64];
9c959c86
AS
336 int i;
337
338 /*
339 * bpf_check()->check_func_arg()->check_stack_boundary()
340 * guarantees that fmt points to bpf program stack,
341 * fmt_size bytes of it were initialized and fmt_size > 0
342 */
343 if (fmt[--fmt_size] != 0)
344 return -EINVAL;
345
346 /* check format string for allowed specifiers */
347 for (i = 0; i < fmt_size; i++) {
348 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
349 return -EINVAL;
350
351 if (fmt[i] != '%')
352 continue;
353
354 if (fmt_cnt >= 3)
355 return -EINVAL;
356
357 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
358 i++;
359 if (fmt[i] == 'l') {
360 mod[fmt_cnt]++;
361 i++;
8d3b7dce 362 } else if (fmt[i] == 'p' || fmt[i] == 's') {
9c959c86 363 mod[fmt_cnt]++;
1efb6ee3
MP
364 /* disallow any further format extensions */
365 if (fmt[i + 1] != 0 &&
366 !isspace(fmt[i + 1]) &&
367 !ispunct(fmt[i + 1]))
9c959c86
AS
368 return -EINVAL;
369 fmt_cnt++;
1efb6ee3 370 if (fmt[i] == 's') {
8d3b7dce
AS
371 if (str_seen)
372 /* allow only one '%s' per fmt string */
373 return -EINVAL;
374 str_seen = true;
375
376 switch (fmt_cnt) {
377 case 1:
f3694e00
DB
378 unsafe_addr = arg1;
379 arg1 = (long) buf;
8d3b7dce
AS
380 break;
381 case 2:
f3694e00
DB
382 unsafe_addr = arg2;
383 arg2 = (long) buf;
8d3b7dce
AS
384 break;
385 case 3:
f3694e00
DB
386 unsafe_addr = arg3;
387 arg3 = (long) buf;
8d3b7dce
AS
388 break;
389 }
390 buf[0] = 0;
391 strncpy_from_unsafe(buf,
392 (void *) (long) unsafe_addr,
393 sizeof(buf));
394 }
9c959c86
AS
395 continue;
396 }
397
398 if (fmt[i] == 'l') {
399 mod[fmt_cnt]++;
400 i++;
401 }
402
7bda4b40
JF
403 if (fmt[i] != 'i' && fmt[i] != 'd' &&
404 fmt[i] != 'u' && fmt[i] != 'x')
9c959c86
AS
405 return -EINVAL;
406 fmt_cnt++;
407 }
408
88a5c690
DB
409/* Horrid workaround for getting va_list handling working with different
410 * argument type combinations generically for 32 and 64 bit archs.
411 */
412#define __BPF_TP_EMIT() __BPF_ARG3_TP()
413#define __BPF_TP(...) \
eefa864a 414 __trace_printk(0 /* Fake ip */, \
88a5c690
DB
415 fmt, ##__VA_ARGS__)
416
417#define __BPF_ARG1_TP(...) \
418 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
419 ? __BPF_TP(arg1, ##__VA_ARGS__) \
420 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
421 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
422 : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
423
424#define __BPF_ARG2_TP(...) \
425 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
426 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
427 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
428 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
429 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
430
431#define __BPF_ARG3_TP(...) \
432 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
433 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
434 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
435 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
436 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
437
438 return __BPF_TP_EMIT();
9c959c86
AS
439}
440
441static const struct bpf_func_proto bpf_trace_printk_proto = {
442 .func = bpf_trace_printk,
443 .gpl_only = true,
444 .ret_type = RET_INTEGER,
39f19ebb
AS
445 .arg1_type = ARG_PTR_TO_MEM,
446 .arg2_type = ARG_CONST_SIZE,
9c959c86
AS
447};
448
0756ea3e
AS
449const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
450{
451 /*
452 * this program might be calling bpf_trace_printk,
453 * so allocate per-cpu printk buffers
454 */
455 trace_printk_init_buffers();
456
457 return &bpf_trace_printk_proto;
458}
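
A minimal sketch of BPF-side usage under the restrictions enforced above (at most three arguments, only one %s, format string on the program stack); output lands in /sys/kernel/debug/tracing/trace_pipe:

	SEC("kprobe/do_nanosleep")
	int probe(struct pt_regs *ctx)
	{
		char fmt[] = "pid %d comm %s\n";	/* must live on the BPF stack */
		char comm[16];

		bpf_get_current_comm(comm, sizeof(comm));
		bpf_trace_printk(fmt, sizeof(fmt),
				 bpf_get_current_pid_tgid() >> 32, comm);
		return 0;
	}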
459
908432ca
YS
460static __always_inline int
461get_map_perf_counter(struct bpf_map *map, u64 flags,
462 u64 *value, u64 *enabled, u64 *running)
35578d79 463{
35578d79 464 struct bpf_array *array = container_of(map, struct bpf_array, map);
6816a7ff
DB
465 unsigned int cpu = smp_processor_id();
466 u64 index = flags & BPF_F_INDEX_MASK;
3b1efb19 467 struct bpf_event_entry *ee;
35578d79 468
6816a7ff
DB
469 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
470 return -EINVAL;
471 if (index == BPF_F_CURRENT_CPU)
472 index = cpu;
35578d79
KX
473 if (unlikely(index >= array->map.max_entries))
474 return -E2BIG;
475
3b1efb19 476 ee = READ_ONCE(array->ptrs[index]);
1ca1cc98 477 if (!ee)
35578d79
KX
478 return -ENOENT;
479
908432ca
YS
480 return perf_event_read_local(ee->event, value, enabled, running);
481}
482
483BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
484{
485 u64 value = 0;
486 int err;
487
488 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
35578d79 489 /*
f91840a3
AS
490 * this api is ugly since we miss [-22..-2] range of valid
491 * counter values, but that's uapi
35578d79 492 */
f91840a3
AS
493 if (err)
494 return err;
495 return value;
35578d79
KX
496}
497
62544ce8 498static const struct bpf_func_proto bpf_perf_event_read_proto = {
35578d79 499 .func = bpf_perf_event_read,
1075ef59 500 .gpl_only = true,
35578d79
KX
501 .ret_type = RET_INTEGER,
502 .arg1_type = ARG_CONST_MAP_PTR,
503 .arg2_type = ARG_ANYTHING,
504};
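
Sketch of usage, assuming user space has opened one hardware counter per CPU and stored the fds in a BPF_MAP_TYPE_PERF_EVENT_ARRAY (map name, attach point, and legacy bpf_map_def style are illustrative):

	struct bpf_map_def SEC("maps") counters = {
		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size = sizeof(int),
		.value_size = sizeof(__u32),
		.max_entries = 64,	/* >= number of possible CPUs */
	};

	SEC("kprobe/finish_task_switch")
	int on_switch(struct pt_regs *ctx)
	{
		__s64 val = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

		/* per the comment above, [-22..-2] is ambiguous uapi:
		 * could be an error or an unlucky counter value */
		if (val >= -22 && val <= -2)
			return 0;
		/* otherwise val is the counter reading for this CPU */
		return 0;
	}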
505
908432ca
YS
506BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
507 struct bpf_perf_event_value *, buf, u32, size)
508{
509 int err = -EINVAL;
510
511 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
512 goto clear;
513 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
514 &buf->running);
515 if (unlikely(err))
516 goto clear;
517 return 0;
518clear:
519 memset(buf, 0, size);
520 return err;
521}
522
523static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
524 .func = bpf_perf_event_read_value,
525 .gpl_only = true,
526 .ret_type = RET_INTEGER,
527 .arg1_type = ARG_CONST_MAP_PTR,
528 .arg2_type = ARG_ANYTHING,
529 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
530 .arg4_type = ARG_CONST_SIZE,
531};
532
8e7a3920
DB
533static __always_inline u64
534__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
283ca526 535 u64 flags, struct perf_sample_data *sd)
a43eec30 536{
a43eec30 537 struct bpf_array *array = container_of(map, struct bpf_array, map);
d7931330 538 unsigned int cpu = smp_processor_id();
1e33759c 539 u64 index = flags & BPF_F_INDEX_MASK;
3b1efb19 540 struct bpf_event_entry *ee;
a43eec30 541 struct perf_event *event;
a43eec30 542
1e33759c 543 if (index == BPF_F_CURRENT_CPU)
d7931330 544 index = cpu;
a43eec30
AS
545 if (unlikely(index >= array->map.max_entries))
546 return -E2BIG;
547
3b1efb19 548 ee = READ_ONCE(array->ptrs[index]);
1ca1cc98 549 if (!ee)
a43eec30
AS
550 return -ENOENT;
551
3b1efb19 552 event = ee->event;
a43eec30
AS
553 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
554 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
555 return -EINVAL;
556
d7931330 557 if (unlikely(event->oncpu != cpu))
a43eec30
AS
558 return -EOPNOTSUPP;
559
56201969 560 return perf_event_output(event, sd, regs);
a43eec30
AS
561}
562
9594dc3c
MM
563/*
564 * Support executing tracepoints in normal, irq, and nmi context that each call
565 * bpf_perf_event_output
566 */
567struct bpf_trace_sample_data {
568 struct perf_sample_data sds[3];
569};
570
571static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
572static DEFINE_PER_CPU(int, bpf_trace_nest_level);
f3694e00
DB
573BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
574 u64, flags, void *, data, u64, size)
8e7a3920 575{
9594dc3c
MM
576 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
577 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
8e7a3920
DB
578 struct perf_raw_record raw = {
579 .frag = {
580 .size = size,
581 .data = data,
582 },
583 };
9594dc3c
MM
584 struct perf_sample_data *sd;
585 int err;
8e7a3920 586
9594dc3c
MM
587 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
588 err = -EBUSY;
589 goto out;
590 }
591
592 sd = &sds->sds[nest_level - 1];
593
594 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
595 err = -EINVAL;
596 goto out;
597 }
8e7a3920 598
283ca526
DB
599 perf_sample_data_init(sd, 0, 0);
600 sd->raw = &raw;
601
9594dc3c
MM
602 err = __bpf_perf_event_output(regs, map, flags, sd);
603
604out:
605 this_cpu_dec(bpf_trace_nest_level);
606 return err;
8e7a3920
DB
607}
608
a43eec30
AS
609static const struct bpf_func_proto bpf_perf_event_output_proto = {
610 .func = bpf_perf_event_output,
1075ef59 611 .gpl_only = true,
a43eec30
AS
612 .ret_type = RET_INTEGER,
613 .arg1_type = ARG_PTR_TO_CTX,
614 .arg2_type = ARG_CONST_MAP_PTR,
615 .arg3_type = ARG_ANYTHING,
39f19ebb 616 .arg4_type = ARG_PTR_TO_MEM,
a60dd35d 617 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
a43eec30
AS
618};
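
A minimal BPF-side sketch pushing a fixed-size sample to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY (event layout, map and section names are illustrative):

	struct event {
		__u32 pid;
		__u64 ts;
	};

	struct bpf_map_def SEC("maps") events = {
		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size = sizeof(int),
		.value_size = sizeof(__u32),
		.max_entries = 64,	/* >= number of possible CPUs */
	};

	SEC("kprobe/do_nanosleep")
	int emit(struct pt_regs *ctx)
	{
		struct event e = {
			.pid = bpf_get_current_pid_tgid() >> 32,
			.ts = bpf_ktime_get_ns(),
		};

		/* BPF_F_CURRENT_CPU selects this CPU's ring buffer slot */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      &e, sizeof(e));
		return 0;
	}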
619
768fb61f
AZ
620static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
621struct bpf_nested_pt_regs {
622 struct pt_regs regs[3];
623};
624static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
625static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
bd570ff9 626
555c8a86
DB
627u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
628 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
bd570ff9 629{
768fb61f 630 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
555c8a86
DB
631 struct perf_raw_frag frag = {
632 .copy = ctx_copy,
633 .size = ctx_size,
634 .data = ctx,
635 };
636 struct perf_raw_record raw = {
637 .frag = {
183fc153
AM
638 {
639 .next = ctx_size ? &frag : NULL,
640 },
555c8a86
DB
641 .size = meta_size,
642 .data = meta,
643 },
644 };
768fb61f
AZ
645 struct perf_sample_data *sd;
646 struct pt_regs *regs;
647 u64 ret;
648
649 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
650 ret = -EBUSY;
651 goto out;
652 }
653 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
654 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
bd570ff9
DB
655
656 perf_fetch_caller_regs(regs);
283ca526
DB
657 perf_sample_data_init(sd, 0, 0);
658 sd->raw = &raw;
bd570ff9 659
768fb61f
AZ
660 ret = __bpf_perf_event_output(regs, map, flags, sd);
661out:
662 this_cpu_dec(bpf_event_output_nest_level);
663 return ret;
bd570ff9
DB
664}
665
f3694e00 666BPF_CALL_0(bpf_get_current_task)
606274c5
AS
667{
668 return (long) current;
669}
670
671static const struct bpf_func_proto bpf_get_current_task_proto = {
672 .func = bpf_get_current_task,
673 .gpl_only = true,
674 .ret_type = RET_INTEGER,
675};
676
f3694e00 677BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
60d20f91 678{
60d20f91
SD
679 struct bpf_array *array = container_of(map, struct bpf_array, map);
680 struct cgroup *cgrp;
60d20f91 681
60d20f91
SD
682 if (unlikely(idx >= array->map.max_entries))
683 return -E2BIG;
684
685 cgrp = READ_ONCE(array->ptrs[idx]);
686 if (unlikely(!cgrp))
687 return -EAGAIN;
688
689 return task_under_cgroup_hierarchy(current, cgrp);
690}
691
692static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
693 .func = bpf_current_task_under_cgroup,
694 .gpl_only = false,
695 .ret_type = RET_INTEGER,
696 .arg1_type = ARG_CONST_MAP_PTR,
697 .arg2_type = ARG_ANYTHING,
698};
699
8b401f9e
YS
700struct send_signal_irq_work {
701 struct irq_work irq_work;
702 struct task_struct *task;
703 u32 sig;
8482941f 704 enum pid_type type;
8b401f9e
YS
705};
706
707static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
708
709static void do_bpf_send_signal(struct irq_work *entry)
710{
711 struct send_signal_irq_work *work;
712
713 work = container_of(entry, struct send_signal_irq_work, irq_work);
8482941f 714 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
8b401f9e
YS
715}
716
8482941f 717static int bpf_send_signal_common(u32 sig, enum pid_type type)
8b401f9e
YS
718{
719 struct send_signal_irq_work *work = NULL;
720
 721 /* Similar to bpf_probe_write_user, the task needs to be
 722 * in a sound condition and kernel memory access must be
 723 * permitted in order to send a signal to the current
 724 * task.
725 */
726 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
727 return -EPERM;
728 if (unlikely(uaccess_kernel()))
729 return -EPERM;
730 if (unlikely(!nmi_uaccess_okay()))
731 return -EPERM;
732
1bc7896e 733 if (irqs_disabled()) {
e1afb702
YS
734 /* Do an early check on signal validity. Otherwise,
735 * the error is lost in deferred irq_work.
736 */
737 if (unlikely(!valid_signal(sig)))
738 return -EINVAL;
739
8b401f9e 740 work = this_cpu_ptr(&send_signal_work);
153bedba 741 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
8b401f9e
YS
742 return -EBUSY;
743
744 /* Add the current task, which is the target of sending signal,
745 * to the irq_work. The current task may change when queued
746 * irq works get executed.
747 */
748 work->task = current;
749 work->sig = sig;
8482941f 750 work->type = type;
8b401f9e
YS
751 irq_work_queue(&work->irq_work);
752 return 0;
753 }
754
8482941f
YS
755 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
756}
757
758BPF_CALL_1(bpf_send_signal, u32, sig)
759{
760 return bpf_send_signal_common(sig, PIDTYPE_TGID);
8b401f9e
YS
761}
762
763static const struct bpf_func_proto bpf_send_signal_proto = {
764 .func = bpf_send_signal,
765 .gpl_only = false,
766 .ret_type = RET_INTEGER,
767 .arg1_type = ARG_ANYTHING,
768};
769
8482941f
YS
770BPF_CALL_1(bpf_send_signal_thread, u32, sig)
771{
772 return bpf_send_signal_common(sig, PIDTYPE_PID);
773}
774
775static const struct bpf_func_proto bpf_send_signal_thread_proto = {
776 .func = bpf_send_signal_thread,
777 .gpl_only = false,
778 .ret_type = RET_INTEGER,
779 .arg1_type = ARG_ANYTHING,
780};
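
BPF-side sketch; the two variants differ only in scope: bpf_send_signal() targets the whole thread group (PIDTYPE_TGID), bpf_send_signal_thread() just the current thread (PIDTYPE_PID). Attach point and signal number are illustrative:

	SEC("kprobe/__x64_sys_mmap")
	int notify(struct pt_regs *ctx)
	{
		bpf_send_signal(10);	/* SIGUSR1 on x86 */
		/* or: bpf_send_signal_thread(10); */
		return 0;
	}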
781
fc611f47
KS
782const struct bpf_func_proto *
783bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2541517c
AS
784{
785 switch (func_id) {
786 case BPF_FUNC_map_lookup_elem:
787 return &bpf_map_lookup_elem_proto;
788 case BPF_FUNC_map_update_elem:
789 return &bpf_map_update_elem_proto;
790 case BPF_FUNC_map_delete_elem:
791 return &bpf_map_delete_elem_proto;
02a8c817
AC
792 case BPF_FUNC_map_push_elem:
793 return &bpf_map_push_elem_proto;
794 case BPF_FUNC_map_pop_elem:
795 return &bpf_map_pop_elem_proto;
796 case BPF_FUNC_map_peek_elem:
797 return &bpf_map_peek_elem_proto;
d9847d31
AS
798 case BPF_FUNC_ktime_get_ns:
799 return &bpf_ktime_get_ns_proto;
71d19214
MÅ»
800 case BPF_FUNC_ktime_get_boot_ns:
801 return &bpf_ktime_get_boot_ns_proto;
04fd61ab
AS
802 case BPF_FUNC_tail_call:
803 return &bpf_tail_call_proto;
ffeedafb
AS
804 case BPF_FUNC_get_current_pid_tgid:
805 return &bpf_get_current_pid_tgid_proto;
606274c5
AS
806 case BPF_FUNC_get_current_task:
807 return &bpf_get_current_task_proto;
ffeedafb
AS
808 case BPF_FUNC_get_current_uid_gid:
809 return &bpf_get_current_uid_gid_proto;
810 case BPF_FUNC_get_current_comm:
811 return &bpf_get_current_comm_proto;
9c959c86 812 case BPF_FUNC_trace_printk:
0756ea3e 813 return bpf_get_trace_printk_proto();
ab1973d3
AS
814 case BPF_FUNC_get_smp_processor_id:
815 return &bpf_get_smp_processor_id_proto;
2d0e30c3
DB
816 case BPF_FUNC_get_numa_node_id:
817 return &bpf_get_numa_node_id_proto;
35578d79
KX
818 case BPF_FUNC_perf_event_read:
819 return &bpf_perf_event_read_proto;
96ae5227
SD
820 case BPF_FUNC_probe_write_user:
821 return bpf_get_probe_write_proto();
60d20f91
SD
822 case BPF_FUNC_current_task_under_cgroup:
823 return &bpf_current_task_under_cgroup_proto;
8937bd80
AS
824 case BPF_FUNC_get_prandom_u32:
825 return &bpf_get_prandom_u32_proto;
6ae08ae3
DB
826 case BPF_FUNC_probe_read_user:
827 return &bpf_probe_read_user_proto;
828 case BPF_FUNC_probe_read_kernel:
829 return &bpf_probe_read_kernel_proto;
830 case BPF_FUNC_probe_read:
831 return &bpf_probe_read_compat_proto;
832 case BPF_FUNC_probe_read_user_str:
833 return &bpf_probe_read_user_str_proto;
834 case BPF_FUNC_probe_read_kernel_str:
835 return &bpf_probe_read_kernel_str_proto;
a5e8c070 836 case BPF_FUNC_probe_read_str:
6ae08ae3 837 return &bpf_probe_read_compat_str_proto;
34ea38ca 838#ifdef CONFIG_CGROUPS
bf6fa2c8
YS
839 case BPF_FUNC_get_current_cgroup_id:
840 return &bpf_get_current_cgroup_id_proto;
34ea38ca 841#endif
8b401f9e
YS
842 case BPF_FUNC_send_signal:
843 return &bpf_send_signal_proto;
8482941f
YS
844 case BPF_FUNC_send_signal_thread:
845 return &bpf_send_signal_thread_proto;
b80b033b
SL
846 case BPF_FUNC_perf_event_read_value:
847 return &bpf_perf_event_read_value_proto;
b4490c5c
CN
848 case BPF_FUNC_get_ns_current_pid_tgid:
849 return &bpf_get_ns_current_pid_tgid_proto;
9fd82b61
AS
850 default:
851 return NULL;
852 }
853}
854
5e43f899
AI
855static const struct bpf_func_proto *
856kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
857{
858 switch (func_id) {
a43eec30
AS
859 case BPF_FUNC_perf_event_output:
860 return &bpf_perf_event_output_proto;
d5a3b1f6
AS
861 case BPF_FUNC_get_stackid:
862 return &bpf_get_stackid_proto;
c195651e
YS
863 case BPF_FUNC_get_stack:
864 return &bpf_get_stack_proto;
9802d865
JB
865#ifdef CONFIG_BPF_KPROBE_OVERRIDE
866 case BPF_FUNC_override_return:
867 return &bpf_override_return_proto;
868#endif
2541517c 869 default:
fc611f47 870 return bpf_tracing_func_proto(func_id, prog);
2541517c
AS
871 }
872}
873
874/* bpf+kprobe programs can access fields of 'struct pt_regs' */
19de99f7 875static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 876 const struct bpf_prog *prog,
23994631 877 struct bpf_insn_access_aux *info)
2541517c 878{
2541517c
AS
879 if (off < 0 || off >= sizeof(struct pt_regs))
880 return false;
2541517c
AS
881 if (type != BPF_READ)
882 return false;
2541517c
AS
883 if (off % size != 0)
884 return false;
2d071c64
DB
885 /*
886 * Assertion for 32 bit to make sure last 8 byte access
887 * (BPF_DW) to the last 4 byte member is disallowed.
888 */
889 if (off + size > sizeof(struct pt_regs))
890 return false;
891
2541517c
AS
892 return true;
893}
894
7de16e3a 895const struct bpf_verifier_ops kprobe_verifier_ops = {
2541517c
AS
896 .get_func_proto = kprobe_prog_func_proto,
897 .is_valid_access = kprobe_prog_is_valid_access,
898};
899
7de16e3a
JK
900const struct bpf_prog_ops kprobe_prog_ops = {
901};
902
f3694e00
DB
903BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
904 u64, flags, void *, data, u64, size)
9940d67c 905{
f3694e00
DB
906 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
907
9940d67c
AS
908 /*
909 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
910 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
f3694e00 911 * from there and call the same bpf_perf_event_output() helper inline.
9940d67c 912 */
f3694e00 913 return ____bpf_perf_event_output(regs, map, flags, data, size);
9940d67c
AS
914}
915
916static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
917 .func = bpf_perf_event_output_tp,
918 .gpl_only = true,
919 .ret_type = RET_INTEGER,
920 .arg1_type = ARG_PTR_TO_CTX,
921 .arg2_type = ARG_CONST_MAP_PTR,
922 .arg3_type = ARG_ANYTHING,
39f19ebb 923 .arg4_type = ARG_PTR_TO_MEM,
a60dd35d 924 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
9940d67c
AS
925};
926
f3694e00
DB
927BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
928 u64, flags)
9940d67c 929{
f3694e00 930 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
9940d67c 931
f3694e00
DB
932 /*
933 * Same comment as in bpf_perf_event_output_tp(), only that this time
934 * the other helper's function body cannot be inlined due to being
 935 * external, thus we need to call the raw helper function.
936 */
937 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
938 flags, 0, 0);
9940d67c
AS
939}
940
941static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
942 .func = bpf_get_stackid_tp,
943 .gpl_only = true,
944 .ret_type = RET_INTEGER,
945 .arg1_type = ARG_PTR_TO_CTX,
946 .arg2_type = ARG_CONST_MAP_PTR,
947 .arg3_type = ARG_ANYTHING,
948};
949
c195651e
YS
950BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
951 u64, flags)
952{
953 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
954
955 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
956 (unsigned long) size, flags, 0);
957}
958
959static const struct bpf_func_proto bpf_get_stack_proto_tp = {
960 .func = bpf_get_stack_tp,
961 .gpl_only = true,
962 .ret_type = RET_INTEGER,
963 .arg1_type = ARG_PTR_TO_CTX,
964 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
965 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
966 .arg4_type = ARG_ANYTHING,
967};
968
5e43f899
AI
969static const struct bpf_func_proto *
970tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
f005afed
YS
971{
972 switch (func_id) {
973 case BPF_FUNC_perf_event_output:
974 return &bpf_perf_event_output_proto_tp;
975 case BPF_FUNC_get_stackid:
976 return &bpf_get_stackid_proto_tp;
c195651e
YS
977 case BPF_FUNC_get_stack:
978 return &bpf_get_stack_proto_tp;
f005afed 979 default:
fc611f47 980 return bpf_tracing_func_proto(func_id, prog);
f005afed
YS
981 }
982}
983
984static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 985 const struct bpf_prog *prog,
f005afed
YS
986 struct bpf_insn_access_aux *info)
987{
988 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
989 return false;
990 if (type != BPF_READ)
991 return false;
992 if (off % size != 0)
993 return false;
994
995 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
996 return true;
997}
998
999const struct bpf_verifier_ops tracepoint_verifier_ops = {
1000 .get_func_proto = tp_prog_func_proto,
1001 .is_valid_access = tp_prog_is_valid_access,
1002};
1003
1004const struct bpf_prog_ops tracepoint_prog_ops = {
1005};
1006
1007BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
4bebdc7a
YS
1008 struct bpf_perf_event_value *, buf, u32, size)
1009{
1010 int err = -EINVAL;
1011
1012 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1013 goto clear;
1014 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1015 &buf->running);
1016 if (unlikely(err))
1017 goto clear;
1018 return 0;
1019clear:
1020 memset(buf, 0, size);
1021 return err;
1022}
1023
f005afed
YS
1024static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1025 .func = bpf_perf_prog_read_value,
4bebdc7a
YS
1026 .gpl_only = true,
1027 .ret_type = RET_INTEGER,
1028 .arg1_type = ARG_PTR_TO_CTX,
1029 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1030 .arg3_type = ARG_CONST_SIZE,
1031};
1032
fff7b643
DX
1033BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1034 void *, buf, u32, size, u64, flags)
1035{
1036#ifndef CONFIG_X86
1037 return -ENOENT;
1038#else
1039 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1040 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1041 u32 to_copy;
1042
1043 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1044 return -EINVAL;
1045
1046 if (unlikely(!br_stack))
1047 return -EINVAL;
1048
1049 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1050 return br_stack->nr * br_entry_size;
1051
1052 if (!buf || (size % br_entry_size != 0))
1053 return -EINVAL;
1054
1055 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1056 memcpy(buf, br_stack->entries, to_copy);
1057
1058 return to_copy;
1059#endif
1060}
1061
1062static const struct bpf_func_proto bpf_read_branch_records_proto = {
1063 .func = bpf_read_branch_records,
1064 .gpl_only = true,
1065 .ret_type = RET_INTEGER,
1066 .arg1_type = ARG_PTR_TO_CTX,
1067 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1068 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1069 .arg4_type = ARG_ANYTHING,
1070};
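
Sketch of a perf_event program using the helper: first query the required size, then copy the records (mirroring the two-call pattern the flag check above enables):

	SEC("perf_event")
	int branches(struct bpf_perf_event_data *ctx)
	{
		struct perf_branch_entry entries[16] = {};
		int total;

		/* with BPF_F_GET_BRANCH_RECORDS_SIZE, only the size is returned */
		total = bpf_read_branch_records(ctx, NULL, 0,
						BPF_F_GET_BRANCH_RECORDS_SIZE);
		if (total < 0)
			return 0;

		/* size must be a multiple of sizeof(struct perf_branch_entry) */
		bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
		return 0;
	}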
1071
5e43f899
AI
1072static const struct bpf_func_proto *
1073pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1074{
1075 switch (func_id) {
1076 case BPF_FUNC_perf_event_output:
9940d67c 1077 return &bpf_perf_event_output_proto_tp;
9fd82b61 1078 case BPF_FUNC_get_stackid:
9940d67c 1079 return &bpf_get_stackid_proto_tp;
c195651e
YS
1080 case BPF_FUNC_get_stack:
1081 return &bpf_get_stack_proto_tp;
4bebdc7a 1082 case BPF_FUNC_perf_prog_read_value:
f005afed 1083 return &bpf_perf_prog_read_value_proto;
fff7b643
DX
1084 case BPF_FUNC_read_branch_records:
1085 return &bpf_read_branch_records_proto;
9fd82b61 1086 default:
fc611f47 1087 return bpf_tracing_func_proto(func_id, prog);
9fd82b61
AS
1088 }
1089}
1090
c4f6699d
AS
1091/*
1092 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1093 * to avoid potential recursive reuse issue when/if tracepoints are added
9594dc3c
MM
1094 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1095 *
1096 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1097 * in normal, irq, and nmi context.
c4f6699d 1098 */
9594dc3c
MM
1099struct bpf_raw_tp_regs {
1100 struct pt_regs regs[3];
1101};
1102static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1103static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1104static struct pt_regs *get_bpf_raw_tp_regs(void)
1105{
1106 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1107 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1108
1109 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1110 this_cpu_dec(bpf_raw_tp_nest_level);
1111 return ERR_PTR(-EBUSY);
1112 }
1113
1114 return &tp_regs->regs[nest_level - 1];
1115}
1116
1117static void put_bpf_raw_tp_regs(void)
1118{
1119 this_cpu_dec(bpf_raw_tp_nest_level);
1120}
1121
c4f6699d
AS
1122BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1123 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1124{
9594dc3c
MM
1125 struct pt_regs *regs = get_bpf_raw_tp_regs();
1126 int ret;
1127
1128 if (IS_ERR(regs))
1129 return PTR_ERR(regs);
c4f6699d
AS
1130
1131 perf_fetch_caller_regs(regs);
9594dc3c
MM
1132 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1133
1134 put_bpf_raw_tp_regs();
1135 return ret;
c4f6699d
AS
1136}
1137
1138static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1139 .func = bpf_perf_event_output_raw_tp,
1140 .gpl_only = true,
1141 .ret_type = RET_INTEGER,
1142 .arg1_type = ARG_PTR_TO_CTX,
1143 .arg2_type = ARG_CONST_MAP_PTR,
1144 .arg3_type = ARG_ANYTHING,
1145 .arg4_type = ARG_PTR_TO_MEM,
1146 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1147};
1148
a7658e1a 1149extern const struct bpf_func_proto bpf_skb_output_proto;
d831ee84 1150extern const struct bpf_func_proto bpf_xdp_output_proto;
a7658e1a 1151
c4f6699d
AS
1152BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1153 struct bpf_map *, map, u64, flags)
1154{
9594dc3c
MM
1155 struct pt_regs *regs = get_bpf_raw_tp_regs();
1156 int ret;
1157
1158 if (IS_ERR(regs))
1159 return PTR_ERR(regs);
c4f6699d
AS
1160
1161 perf_fetch_caller_regs(regs);
1162 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
9594dc3c
MM
1163 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1164 flags, 0, 0);
1165 put_bpf_raw_tp_regs();
1166 return ret;
c4f6699d
AS
1167}
1168
1169static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1170 .func = bpf_get_stackid_raw_tp,
1171 .gpl_only = true,
1172 .ret_type = RET_INTEGER,
1173 .arg1_type = ARG_PTR_TO_CTX,
1174 .arg2_type = ARG_CONST_MAP_PTR,
1175 .arg3_type = ARG_ANYTHING,
1176};
1177
c195651e
YS
1178BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1179 void *, buf, u32, size, u64, flags)
1180{
9594dc3c
MM
1181 struct pt_regs *regs = get_bpf_raw_tp_regs();
1182 int ret;
1183
1184 if (IS_ERR(regs))
1185 return PTR_ERR(regs);
c195651e
YS
1186
1187 perf_fetch_caller_regs(regs);
9594dc3c
MM
1188 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1189 (unsigned long) size, flags, 0);
1190 put_bpf_raw_tp_regs();
1191 return ret;
c195651e
YS
1192}
1193
1194static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1195 .func = bpf_get_stack_raw_tp,
1196 .gpl_only = true,
1197 .ret_type = RET_INTEGER,
1198 .arg1_type = ARG_PTR_TO_CTX,
1199 .arg2_type = ARG_PTR_TO_MEM,
1200 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1201 .arg4_type = ARG_ANYTHING,
1202};
1203
5e43f899
AI
1204static const struct bpf_func_proto *
1205raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
c4f6699d
AS
1206{
1207 switch (func_id) {
1208 case BPF_FUNC_perf_event_output:
1209 return &bpf_perf_event_output_proto_raw_tp;
1210 case BPF_FUNC_get_stackid:
1211 return &bpf_get_stackid_proto_raw_tp;
c195651e
YS
1212 case BPF_FUNC_get_stack:
1213 return &bpf_get_stack_proto_raw_tp;
c4f6699d 1214 default:
fc611f47 1215 return bpf_tracing_func_proto(func_id, prog);
c4f6699d
AS
1216 }
1217}
1218
f1b9509c
AS
1219static const struct bpf_func_proto *
1220tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1221{
1222 switch (func_id) {
1223#ifdef CONFIG_NET
1224 case BPF_FUNC_skb_output:
1225 return &bpf_skb_output_proto;
d831ee84
EC
1226 case BPF_FUNC_xdp_output:
1227 return &bpf_xdp_output_proto;
f1b9509c
AS
1228#endif
1229 default:
1230 return raw_tp_prog_func_proto(func_id, prog);
1231 }
1232}
1233
c4f6699d
AS
1234static bool raw_tp_prog_is_valid_access(int off, int size,
1235 enum bpf_access_type type,
5e43f899 1236 const struct bpf_prog *prog,
c4f6699d
AS
1237 struct bpf_insn_access_aux *info)
1238{
f1b9509c
AS
1239 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1240 return false;
1241 if (type != BPF_READ)
1242 return false;
1243 if (off % size != 0)
1244 return false;
1245 return true;
1246}
1247
1248static bool tracing_prog_is_valid_access(int off, int size,
1249 enum bpf_access_type type,
1250 const struct bpf_prog *prog,
1251 struct bpf_insn_access_aux *info)
1252{
1253 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
c4f6699d
AS
1254 return false;
1255 if (type != BPF_READ)
1256 return false;
1257 if (off % size != 0)
1258 return false;
9e15db66 1259 return btf_ctx_access(off, size, type, prog, info);
c4f6699d
AS
1260}
1261
3e7c67d9
KS
1262int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1263 const union bpf_attr *kattr,
1264 union bpf_attr __user *uattr)
1265{
1266 return -ENOTSUPP;
1267}
1268
c4f6699d
AS
1269const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1270 .get_func_proto = raw_tp_prog_func_proto,
1271 .is_valid_access = raw_tp_prog_is_valid_access,
1272};
1273
1274const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1275};
1276
f1b9509c
AS
1277const struct bpf_verifier_ops tracing_verifier_ops = {
1278 .get_func_proto = tracing_prog_func_proto,
1279 .is_valid_access = tracing_prog_is_valid_access,
1280};
1281
1282const struct bpf_prog_ops tracing_prog_ops = {
da00d2f1 1283 .test_run = bpf_prog_test_run_tracing,
f1b9509c
AS
1284};
1285
9df1c28b
MM
1286static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1287 enum bpf_access_type type,
1288 const struct bpf_prog *prog,
1289 struct bpf_insn_access_aux *info)
1290{
1291 if (off == 0) {
1292 if (size != sizeof(u64) || type != BPF_READ)
1293 return false;
1294 info->reg_type = PTR_TO_TP_BUFFER;
1295 }
1296 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1297}
1298
1299const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1300 .get_func_proto = raw_tp_prog_func_proto,
1301 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1302};
1303
1304const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1305};
1306
0515e599 1307static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1308 const struct bpf_prog *prog,
23994631 1309 struct bpf_insn_access_aux *info)
0515e599 1310{
95da0cdb 1311 const int size_u64 = sizeof(u64);
31fd8581 1312
0515e599
AS
1313 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1314 return false;
1315 if (type != BPF_READ)
1316 return false;
bc23105c
DB
1317 if (off % size != 0) {
1318 if (sizeof(unsigned long) != 4)
1319 return false;
1320 if (size != 8)
1321 return false;
1322 if (off % size != 4)
1323 return false;
1324 }
31fd8581 1325
f96da094
DB
1326 switch (off) {
1327 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
95da0cdb
TQ
1328 bpf_ctx_record_field_size(info, size_u64);
1329 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1330 return false;
1331 break;
1332 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1333 bpf_ctx_record_field_size(info, size_u64);
1334 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
23994631 1335 return false;
f96da094
DB
1336 break;
1337 default:
0515e599
AS
1338 if (size != sizeof(long))
1339 return false;
1340 }
f96da094 1341
0515e599
AS
1342 return true;
1343}
1344
6b8cc1d1
DB
1345static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1346 const struct bpf_insn *si,
0515e599 1347 struct bpf_insn *insn_buf,
f96da094 1348 struct bpf_prog *prog, u32 *target_size)
0515e599
AS
1349{
1350 struct bpf_insn *insn = insn_buf;
1351
6b8cc1d1 1352 switch (si->off) {
0515e599 1353 case offsetof(struct bpf_perf_event_data, sample_period):
f035a515 1354 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
6b8cc1d1 1355 data), si->dst_reg, si->src_reg,
0515e599 1356 offsetof(struct bpf_perf_event_data_kern, data));
6b8cc1d1 1357 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
f96da094
DB
1358 bpf_target_off(struct perf_sample_data, period, 8,
1359 target_size));
0515e599 1360 break;
95da0cdb
TQ
1361 case offsetof(struct bpf_perf_event_data, addr):
1362 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1363 data), si->dst_reg, si->src_reg,
1364 offsetof(struct bpf_perf_event_data_kern, data));
1365 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1366 bpf_target_off(struct perf_sample_data, addr, 8,
1367 target_size));
1368 break;
0515e599 1369 default:
f035a515 1370 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
6b8cc1d1 1371 regs), si->dst_reg, si->src_reg,
0515e599 1372 offsetof(struct bpf_perf_event_data_kern, regs));
6b8cc1d1
DB
1373 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1374 si->off);
0515e599
AS
1375 break;
1376 }
1377
1378 return insn - insn_buf;
1379}
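
Concretely, the conversion above turns one program-visible load into two kernel-side loads; a rough sketch for the sample_period case:

	/* the program writes:
	 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data,
	 *				    sample_period));
	 * pe_prog_convert_ctx_access() rewrites this into:
	 *	r0 = *(data ptr)(r1 + offsetof(struct bpf_perf_event_data_kern,
	 *				       data));
	 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
	 */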
1380
7de16e3a 1381const struct bpf_verifier_ops perf_event_verifier_ops = {
f005afed 1382 .get_func_proto = pe_prog_func_proto,
0515e599
AS
1383 .is_valid_access = pe_prog_is_valid_access,
1384 .convert_ctx_access = pe_prog_convert_ctx_access,
1385};
7de16e3a
JK
1386
1387const struct bpf_prog_ops perf_event_prog_ops = {
1388};
e87c6bc3
YS
1389
1390static DEFINE_MUTEX(bpf_event_mutex);
1391
c8c088ba
YS
1392#define BPF_TRACE_MAX_PROGS 64
1393
e87c6bc3
YS
1394int perf_event_attach_bpf_prog(struct perf_event *event,
1395 struct bpf_prog *prog)
1396{
e672db03 1397 struct bpf_prog_array *old_array;
e87c6bc3
YS
1398 struct bpf_prog_array *new_array;
1399 int ret = -EEXIST;
1400
9802d865 1401 /*
b4da3340
MH
 1402 * Kprobe override only works if the probe is on the function entry,
 1403 * and only if the target function is on the error-injection opt-in list.
9802d865
JB
1404 */
1405 if (prog->kprobe_override &&
b4da3340 1406 (!trace_kprobe_on_func_entry(event->tp_event) ||
9802d865
JB
1407 !trace_kprobe_error_injectable(event->tp_event)))
1408 return -EINVAL;
1409
e87c6bc3
YS
1410 mutex_lock(&bpf_event_mutex);
1411
1412 if (event->prog)
07c41a29 1413 goto unlock;
e87c6bc3 1414
e672db03 1415 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
c8c088ba
YS
1416 if (old_array &&
1417 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1418 ret = -E2BIG;
1419 goto unlock;
1420 }
1421
e87c6bc3
YS
1422 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1423 if (ret < 0)
07c41a29 1424 goto unlock;
e87c6bc3
YS
1425
1426 /* set the new array to event->tp_event and set event->prog */
1427 event->prog = prog;
1428 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1429 bpf_prog_array_free(old_array);
1430
07c41a29 1431unlock:
e87c6bc3
YS
1432 mutex_unlock(&bpf_event_mutex);
1433 return ret;
1434}
1435
1436void perf_event_detach_bpf_prog(struct perf_event *event)
1437{
e672db03 1438 struct bpf_prog_array *old_array;
e87c6bc3
YS
1439 struct bpf_prog_array *new_array;
1440 int ret;
1441
1442 mutex_lock(&bpf_event_mutex);
1443
1444 if (!event->prog)
07c41a29 1445 goto unlock;
e87c6bc3 1446
e672db03 1447 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
e87c6bc3 1448 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
170a7e3e
SY
1449 if (ret == -ENOENT)
1450 goto unlock;
e87c6bc3
YS
1451 if (ret < 0) {
1452 bpf_prog_array_delete_safe(old_array, event->prog);
1453 } else {
1454 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1455 bpf_prog_array_free(old_array);
1456 }
1457
1458 bpf_prog_put(event->prog);
1459 event->prog = NULL;
1460
07c41a29 1461unlock:
e87c6bc3
YS
1462 mutex_unlock(&bpf_event_mutex);
1463}
f371b304 1464
f4e2298e 1465int perf_event_query_prog_array(struct perf_event *event, void __user *info)
f371b304
YS
1466{
1467 struct perf_event_query_bpf __user *uquery = info;
1468 struct perf_event_query_bpf query = {};
e672db03 1469 struct bpf_prog_array *progs;
3a38bb98 1470 u32 *ids, prog_cnt, ids_len;
f371b304
YS
1471 int ret;
1472
1473 if (!capable(CAP_SYS_ADMIN))
1474 return -EPERM;
1475 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1476 return -EINVAL;
1477 if (copy_from_user(&query, uquery, sizeof(query)))
1478 return -EFAULT;
3a38bb98
YS
1479
1480 ids_len = query.ids_len;
1481 if (ids_len > BPF_TRACE_MAX_PROGS)
9c481b90 1482 return -E2BIG;
3a38bb98
YS
1483 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1484 if (!ids)
1485 return -ENOMEM;
1486 /*
1487 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1488 * is required when user only wants to check for uquery->prog_cnt.
1489 * There is no need to check for it since the case is handled
1490 * gracefully in bpf_prog_array_copy_info.
1491 */
f371b304
YS
1492
1493 mutex_lock(&bpf_event_mutex);
e672db03
SF
1494 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1495 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
f371b304
YS
1496 mutex_unlock(&bpf_event_mutex);
1497
3a38bb98
YS
1498 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1499 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1500 ret = -EFAULT;
1501
1502 kfree(ids);
f371b304
YS
1503 return ret;
1504}
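
User-space sketch of the query path, assuming perf_fd came from perf_event_open() on a tracepoint event (error handling elided):

	#include <linux/perf_event.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	static void query_progs(int perf_fd)
	{
		__u32 ids_len = 16;
		struct perf_event_query_bpf *query;

		query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
		query->ids_len = ids_len;

		if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
			printf("%u program(s) attached, first id %u\n",
			       query->prog_cnt,
			       query->prog_cnt ? query->ids[0] : 0);
		free(query);
	}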
c4f6699d
AS
1505
1506extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1507extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1508
a38d1107 1509struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
c4f6699d
AS
1510{
1511 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1512
1513 for (; btp < __stop__bpf_raw_tp; btp++) {
1514 if (!strcmp(btp->tp->name, name))
1515 return btp;
1516 }
a38d1107
MM
1517
1518 return bpf_get_raw_tracepoint_module(name);
1519}
1520
1521void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1522{
1523 struct module *mod = __module_address((unsigned long)btp);
1524
1525 if (mod)
1526 module_put(mod);
c4f6699d
AS
1527}
1528
1529static __always_inline
1530void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1531{
f03efe49 1532 cant_sleep();
c4f6699d 1533 rcu_read_lock();
c4f6699d 1534 (void) BPF_PROG_RUN(prog, args);
c4f6699d
AS
1535 rcu_read_unlock();
1536}
1537
1538#define UNPACK(...) __VA_ARGS__
1539#define REPEAT_1(FN, DL, X, ...) FN(X)
1540#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1541#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1542#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1543#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1544#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1545#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1546#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1547#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1548#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1549#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1550#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1551#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
1552
1553#define SARG(X) u64 arg##X
1554#define COPY(X) args[X] = arg##X
1555
1556#define __DL_COM (,)
1557#define __DL_SEM (;)
1558
1559#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1560
1561#define BPF_TRACE_DEFN_x(x) \
1562 void bpf_trace_run##x(struct bpf_prog *prog, \
1563 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
1564 { \
1565 u64 args[x]; \
1566 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
1567 __bpf_trace_run(prog, args); \
1568 } \
1569 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1570BPF_TRACE_DEFN_x(1);
1571BPF_TRACE_DEFN_x(2);
1572BPF_TRACE_DEFN_x(3);
1573BPF_TRACE_DEFN_x(4);
1574BPF_TRACE_DEFN_x(5);
1575BPF_TRACE_DEFN_x(6);
1576BPF_TRACE_DEFN_x(7);
1577BPF_TRACE_DEFN_x(8);
1578BPF_TRACE_DEFN_x(9);
1579BPF_TRACE_DEFN_x(10);
1580BPF_TRACE_DEFN_x(11);
1581BPF_TRACE_DEFN_x(12);
1582
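
For readability, BPF_TRACE_DEFN_x(2) expands to (modulo whitespace):

	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
	{
		u64 args[2];
		args[0] = arg0;
		args[1] = arg1;
		__bpf_trace_run(prog, args);
	}
	EXPORT_SYMBOL_GPL(bpf_trace_run2);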
1583static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1584{
1585 struct tracepoint *tp = btp->tp;
1586
1587 /*
1588 * check that program doesn't access arguments beyond what's
1589 * available in this tracepoint
1590 */
1591 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1592 return -EINVAL;
1593
9df1c28b
MM
1594 if (prog->aux->max_tp_access > btp->writable_size)
1595 return -EINVAL;
1596
c4f6699d
AS
1597 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1598}
1599
1600int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1601{
e16ec340 1602 return __bpf_probe_register(btp, prog);
c4f6699d
AS
1603}
1604
1605int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1606{
e16ec340 1607 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
c4f6699d 1608}
41bdc4b4
YS
1609
1610int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1611 u32 *fd_type, const char **buf,
1612 u64 *probe_offset, u64 *probe_addr)
1613{
1614 bool is_tracepoint, is_syscall_tp;
1615 struct bpf_prog *prog;
1616 int flags, err = 0;
1617
1618 prog = event->prog;
1619 if (!prog)
1620 return -ENOENT;
1621
1622 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1623 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1624 return -EOPNOTSUPP;
1625
1626 *prog_id = prog->aux->id;
1627 flags = event->tp_event->flags;
1628 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1629 is_syscall_tp = is_syscall_trace_event(event->tp_event);
1630
1631 if (is_tracepoint || is_syscall_tp) {
1632 *buf = is_tracepoint ? event->tp_event->tp->name
1633 : event->tp_event->name;
1634 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1635 *probe_offset = 0x0;
1636 *probe_addr = 0x0;
1637 } else {
1638 /* kprobe/uprobe */
1639 err = -EOPNOTSUPP;
1640#ifdef CONFIG_KPROBE_EVENTS
1641 if (flags & TRACE_EVENT_FL_KPROBE)
1642 err = bpf_get_kprobe_info(event, fd_type, buf,
1643 probe_offset, probe_addr,
1644 event->attr.type == PERF_TYPE_TRACEPOINT);
1645#endif
1646#ifdef CONFIG_UPROBE_EVENTS
1647 if (flags & TRACE_EVENT_FL_UPROBE)
1648 err = bpf_get_uprobe_info(event, fd_type, buf,
1649 probe_offset,
1650 event->attr.type == PERF_TYPE_TRACEPOINT);
1651#endif
1652 }
1653
1654 return err;
1655}
a38d1107 1656
9db1ff0a
YS
1657static int __init send_signal_irq_work_init(void)
1658{
1659 int cpu;
1660 struct send_signal_irq_work *work;
1661
1662 for_each_possible_cpu(cpu) {
1663 work = per_cpu_ptr(&send_signal_work, cpu);
1664 init_irq_work(&work->irq_work, do_bpf_send_signal);
1665 }
1666 return 0;
1667}
1668
1669subsys_initcall(send_signal_irq_work_init);
1670
a38d1107 1671#ifdef CONFIG_MODULES
390e99cf
SF
1672static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1673 void *module)
a38d1107
MM
1674{
1675 struct bpf_trace_module *btm, *tmp;
1676 struct module *mod = module;
1677
1678 if (mod->num_bpf_raw_events == 0 ||
1679 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1680 return 0;
1681
1682 mutex_lock(&bpf_module_mutex);
1683
1684 switch (op) {
1685 case MODULE_STATE_COMING:
1686 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1687 if (btm) {
1688 btm->module = module;
1689 list_add(&btm->list, &bpf_trace_modules);
1690 }
1691 break;
1692 case MODULE_STATE_GOING:
1693 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1694 if (btm->module == module) {
1695 list_del(&btm->list);
1696 kfree(btm);
1697 break;
1698 }
1699 }
1700 break;
1701 }
1702
1703 mutex_unlock(&bpf_module_mutex);
1704
1705 return 0;
1706}
1707
1708static struct notifier_block bpf_module_nb = {
1709 .notifier_call = bpf_event_notify,
1710};
1711
390e99cf 1712static int __init bpf_event_init(void)
a38d1107
MM
1713{
1714 register_module_notifier(&bpf_module_nb);
1715 return 0;
1716}
1717
1718fs_initcall(bpf_event_init);
1719#endif /* CONFIG_MODULES */