/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
union bpf_attr;

/* Used for event string fields when they are NULL */
#define EVENT_NULL_STR		"(null)"

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
__printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);

/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
	u16	len;
	u16	offset;
#else
	u16	offset;
	u16	len;
#endif
} __packed;

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* Set when the file is closed to prevent new waiters */
	bool			closed;

	/* Set when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

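/*
 * Illustrative sketch (not part of this header): a trace_event output
 * callback typically builds its line in iter->seq and lets
 * trace_handle_return() map the trace_seq state to a print_line_t,
 * returning TRACE_TYPE_PARTIAL_LINE if the trace_seq overflowed and
 * TRACE_TYPE_HANDLED otherwise:
 *
 *	static enum print_line_t foo_output(struct trace_iterator *iter,
 *					    int flags,
 *					    struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */
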
static inline void tracing_generic_entry_update(struct trace_entry *entry,
						unsigned short type,
						unsigned int trace_ctx)
{
	entry->preempt_count	= trace_ctx & 0xff;
	entry->pid		= current->pid;
	entry->type		= type;
	entry->flags		= trace_ctx >> 16;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
	TRACE_FLAG_BH_OFF		= 0x80,
};

static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}

static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}

static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}

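/*
 * Layout sketch of the trace_ctx word built above, derived from
 * tracing_generic_entry_update(): bits 0-7 carry the preemption
 * count, bits 16 and up carry the trace_flag_type flags:
 *
 *	unsigned int trace_ctx = tracing_gen_ctx();
 *	unsigned char preempt_count = trace_ctx & 0xff;
 *	unsigned char flags = trace_ctx >> 16;
 */
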
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
	__printf(3, 4);

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

#define TRACE_FUNCTION_TYPE ((const char *)~0UL)

struct trace_event_fields {
	const char *type;
	union {
		struct {
			const char	*name;
			const int	size;
			const int	align;
			const int	is_signed;
			const int	filter_type;
			const int	len;
		};
		int (*define_fields)(struct trace_event_call *);
	};
};

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct trace_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned int			trace_ctx;
	struct pt_regs			*regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

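/*
 * Minimal usage sketch, assuming a hypothetical entry type with a
 * single 'value' field; this is roughly what the TRACE_EVENT()
 * generated probes do with reserve/commit:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_event_entry *entry;	(hypothetical)
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = 42;
 *	trace_event_buffer_commit(&fbuffer);
 */
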
enum {
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_DYNAMIC_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
	TRACE_EVENT_FL_EPROBE_BIT,
	TRACE_EVENT_FL_FPROBE_BIT,
	TRACE_EVENT_FL_CUSTOM_BIT,
};

/*
 * Event flags:
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  DYNAMIC	  - Event is a dynamic event (created at run time)
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 *  EPROBE	  - Event is an event probe
 *  FPROBE	  - Event is a function probe
 *  CUSTOM	  - Event is a custom event (to be attached to an existing tracepoint)
 *		    This is set when the custom event has not been attached
 *		    to a tracepoint yet, then it is cleared when it is.
 */

enum {
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void			*module;
		atomic_t		refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif

static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);
	else
		return try_module_get(call->module);
}

static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		trace_event_dyn_put_ref(call);
	else
		module_put(call->module);
}

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif

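/*
 * Typical use of the heuristic, sketched from the perf tracepoint
 * path: test cheaply outside the RCU section first, then make the
 * real RCU-protected call (trace_call_bpf() returning 0 means the
 * BPF program filtered the event out):
 *
 *	if (bpf_prog_array_valid(call)) {
 *		if (!trace_call_bpf(call, ctx))
 *			return;
 *	}
 */
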
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_CUSTOM)
		return call->name;
	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
	EVENT_FILE_FL_FREED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
						     const char *system,
						     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);

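/*
 * Usage sketch: look up the sched_switch event file in the top-level
 * trace instance (NULL instance). The returned file is refcounted and
 * must be released with trace_put_event_file():
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	trace_put_event_file(file);
 */
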
#define MAX_DYNEVENT_CMD_LEN	(2048)

enum dynevent_type {
	DYNEVENT_TYPE_SYNTH = 1,
	DYNEVENT_TYPE_KPROBE,
	DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
	struct seq_buf		seq;
	const char		*event_name;
	unsigned int		n_fields;
	enum dynevent_type	type;
	dynevent_create_fn_t	run_command;
	void			*private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
				 char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
				       const char *name,
				       struct module *mod, ...);

#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)

struct synth_field_desc {
	const char *type;
	const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
					   const char *name,
					   struct module *mod,
					   struct synth_field_desc *fields,
					   unsigned int n_fields);
extern int synth_event_create(const char *name,
			      struct synth_field_desc *fields,
			      unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
				 const char *type,
				 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
				     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
				  struct synth_field_desc *fields,
				  unsigned int n_fields);

#define synth_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)

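/*
 * Sketch of generating a synthetic event from a module, following the
 * in-kernel dynamic event API documentation (event and field names are
 * illustrative):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid_field",
 *					"u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "unsigned int", "irq_field");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */
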
struct synth_event;

struct synth_event_trace_state {
	struct trace_event_buffer fbuffer;
	struct synth_trace_event *entry;
	struct trace_buffer *buffer;
	struct synth_event *event;
	unsigned int cur_field;
	unsigned int n_u64;
	bool disabled;
	bool add_next;
	bool add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
			     unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
				   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
				   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
				    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
			       struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);

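/*
 * Sketch of tracing a synthetic event piecewise (field names are
 * illustrative, and 'file' is assumed to come from
 * trace_get_event_file()); each call returns 0 on success:
 *
 *	struct synth_event_trace_state trace_state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &trace_state);
 *	if (!ret)
 *		ret = synth_event_add_val("next_pid_field", 777, &trace_state);
 *	if (!ret)
 *		ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
 *	if (!ret)
 *		ret = synth_event_trace_end(&trace_state);
 */
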
extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
				  char *buf, int maxlen);

#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)

extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
					bool kretprobe,
					const char *name,
					const char *loc, ...);

#define kprobe_event_add_fields(cmd, ...)	\
	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)	\
	__kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);

#define kprobe_event_gen_cmd_end(cmd)		\
	dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)

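/*
 * Sketch of generating a kprobe event, following the in-kernel dynamic
 * event API documentation (probe name, location and fetch args are
 * illustrative):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx",
 *					      "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */
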
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 *  FREED	  - File descriptor is freed, all fields should be considered invalid
 */

enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct eventfs_inode		*ei;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		event enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	refcount_t		ref;	/* ref count for opened files */
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);

#define PERF_MAX_TRACE_SIZE	8192

#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
	ETT_EVENT_EPROBE	= (1 << 6),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

bool __trace_trigger_soft_disabled(struct trace_event_file *file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	return __trace_trigger_soft_disabled(file);
}

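/*
 * Usage sketch: the probes generated for trace events call this before
 * reserving ring buffer space, bailing out when the event is soft
 * disabled:
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 */
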
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);

struct bpf_raw_tp_link;
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);

#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}

struct bpf_raw_tp_link;
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr, unsigned long *missed)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_EVENTS */

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_RDYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_CPUMASK,
	FILTER_COMM,
	FILTER_CPU,
	FILTER_STACKTRACE,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __section("__trace_printk_fmt") =			\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       unsigned long *missed,
			       bool perf_type_tracepoint);
#endif

#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
			     unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif

extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);

void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif /* CONFIG_PERF_EVENTS */

#define TRACE_EVENT_STR_MAX	512

/*
 * gcc warns that you can not use a va_list in an inlined
 * function, but a macro works around that. :-/
 */
#define __trace_event_vstr_len(fmt, va)			\
({							\
	va_list __ap;					\
	int __ret;					\
							\
	va_copy(__ap, *(va));				\
	__ret = vsnprintf(NULL, 0, fmt, __ap) + 1;	\
	va_end(__ap);					\
							\
	min(__ret, TRACE_EVENT_STR_MAX);		\
})

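/*
 * Usage sketch, assuming a varargs helper that needs to know how many
 * bytes a format would occupy before copying it into an event:
 *
 *	va_list ap;
 *	int len;
 *
 *	va_start(ap, fmt);
 *	len = __trace_event_vstr_len(fmt, &ap);
 *	va_end(ap);
 */
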
#endif /* _LINUX_TRACE_EVENT_H */

/*
 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
 * This is due to the way trace custom events work. If a file includes two
 * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
 * will override the TRACE_CUSTOM_EVENT and break the second include.
 */

#ifndef TRACE_CUSTOM_EVENT

#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)

#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */