1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
8 * Originally taken from the RT patch by:
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
57 #ifdef CONFIG_FTRACE_STARTUP_TEST
59 * We need to change this state when a selftest is running.
60 * A selftest will look into the ring buffer to count the
61 * entries inserted during the selftest, but concurrent
62 * insertions into the ring buffer, such as trace_printk(), could occur
63 * at the same time, giving false positive or negative results.
65 static bool __read_mostly tracing_selftest_running;
68 * If boot-time tracing (including tracers/events set via the kernel cmdline)
69 * is running, we do not want to run the selftests.
71 bool __read_mostly tracing_selftest_disabled;
73 void __init disable_tracing_selftest(const char *reason)
75 if (!tracing_selftest_disabled) {
76 tracing_selftest_disabled = true;
77 pr_info("Ftrace startup test is disabled due to %s\n", reason);
81 #define tracing_selftest_running 0
82 #define tracing_selftest_disabled 0
85 /* Pipe tracepoints to printk */
86 static struct trace_iterator *tracepoint_print_iter;
87 int tracepoint_printk;
88 static bool tracepoint_printk_stop_on_boot __initdata;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
103 * To prevent the comm cache from being overwritten when no
104 * tracing is active, only save the comm when a trace event
107 DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 * Kill all tracing for good (never come back).
111 * It is initialized to 1 but will turn to zero if the initialization
112 * of the tracer is successful. But that is the only place that sets
115 static int tracing_disabled = 1;
117 cpumask_var_t __read_mostly tracing_buffer_mask;
120 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123 * is set, then ftrace_dump is called. This will output the contents
124 * of the ftrace buffers to the console. This is very useful for
125 * capturing traces that lead to crashes and outputting them to a
128 * It is off by default, but you can enable it either by specifying
129 * "ftrace_dump_on_oops" on the kernel command line, or by setting
130 * /proc/sys/kernel/ftrace_dump_on_oops.
131 * Set 1 if you want to dump the buffers of all CPUs
132 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
133 * Set an instance name if you want to dump a specific trace instance
134 * Multiple instance dumps are also supported, and instances are separated
137 /* Defaults to the string "0", which means disabled */
138 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
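/*
 * Examples (illustrative, matching the description above):
 *   ftrace_dump_on_oops          - kernel command line, dump all CPUs
 *   ftrace_dump_on_oops=2        - dump only the CPU that triggered the oops
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   - enable at run time
 */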
140 /* When set, tracing will stop when a WARN*() is hit */
141 int __disable_trace_on_warning;
143 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
144 /* Map of enums to their values, for "eval_map" file */
145 struct trace_eval_map_head {
147 unsigned long length;
150 union trace_eval_map_item;
152 struct trace_eval_map_tail {
154 * "end" is first and points to NULL as it must be different
155 * than "mod" or "eval_string"
157 union trace_eval_map_item *next;
158 const char *end; /* points to NULL */
161 static DEFINE_MUTEX(trace_eval_mutex);
164 * The trace_eval_maps are saved in an array with two extra elements,
165 * one at the beginning, and one at the end. The beginning item contains
166 * the count of the saved maps (head.length), and the module they
167 * belong to if not built in (head.mod). The ending item contains a
168 * pointer to the next array of saved eval_map items.
170 union trace_eval_map_item {
171 struct trace_eval_map map;
172 struct trace_eval_map_head head;
173 struct trace_eval_map_tail tail;
176 static union trace_eval_map_item *trace_eval_maps;
177 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
179 int tracing_set_tracer(struct trace_array *tr, const char *buf);
180 static void ftrace_trace_userstack(struct trace_array *tr,
181 struct trace_buffer *buffer,
182 unsigned int trace_ctx);
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
190 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
191 static int boot_instance_index;
193 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
194 static int boot_snapshot_index;
196 static int __init set_cmdline_ftrace(char *str)
198 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
199 default_bootup_tracer = bootup_tracer_buf;
200 /* We are using ftrace early, expand it */
201 trace_set_ring_buffer_expanded(NULL);
204 __setup("ftrace=", set_cmdline_ftrace);
206 int ftrace_dump_on_oops_enabled(void)
208 if (!strcmp("0", ftrace_dump_on_oops))
214 static int __init set_ftrace_dump_on_oops(char *str)
217 strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
222 strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
223 strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
228 strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
234 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
236 static int __init stop_trace_on_warning(char *str)
238 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
239 __disable_trace_on_warning = 1;
242 __setup("traceoff_on_warning", stop_trace_on_warning);
244 static int __init boot_alloc_snapshot(char *str)
246 char *slot = boot_snapshot_info + boot_snapshot_index;
247 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
252 if (strlen(str) >= left)
255 ret = snprintf(slot, left, "%s\t", str);
256 boot_snapshot_index += ret;
258 allocate_snapshot = true;
259 /* We also need the main ring buffer expanded */
260 trace_set_ring_buffer_expanded(NULL);
264 __setup("alloc_snapshot", boot_alloc_snapshot);
267 static int __init boot_snapshot(char *str)
269 snapshot_at_boot = true;
270 boot_alloc_snapshot(str);
273 __setup("ftrace_boot_snapshot", boot_snapshot);
276 static int __init boot_instance(char *str)
278 char *slot = boot_instance_info + boot_instance_index;
279 int left = sizeof(boot_instance_info) - boot_instance_index;
282 if (strlen(str) >= left)
285 ret = snprintf(slot, left, "%s\t", str);
286 boot_instance_index += ret;
290 __setup("trace_instance=", boot_instance);
293 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
295 static int __init set_trace_boot_options(char *str)
297 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
300 __setup("trace_options=", set_trace_boot_options);
302 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
303 static char *trace_boot_clock __initdata;
305 static int __init set_trace_boot_clock(char *str)
307 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
308 trace_boot_clock = trace_boot_clock_buf;
311 __setup("trace_clock=", set_trace_boot_clock);
313 static int __init set_tracepoint_printk(char *str)
315 /* Ignore the "tp_printk_stop_on_boot" param */
319 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
320 tracepoint_printk = 1;
323 __setup("tp_printk", set_tracepoint_printk);
325 static int __init set_tracepoint_printk_stop(char *str)
327 tracepoint_printk_stop_on_boot = true;
330 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
332 unsigned long long ns2usecs(u64 nsec)
340 trace_process_export(struct trace_export *export,
341 struct ring_buffer_event *event, int flag)
343 struct trace_entry *entry;
344 unsigned int size = 0;
346 if (export->flags & flag) {
347 entry = ring_buffer_event_data(event);
348 size = ring_buffer_event_length(event);
349 export->write(export, entry, size);
353 static DEFINE_MUTEX(ftrace_export_lock);
355 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
357 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
358 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
359 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
361 static inline void ftrace_exports_enable(struct trace_export *export)
363 if (export->flags & TRACE_EXPORT_FUNCTION)
364 static_branch_inc(&trace_function_exports_enabled);
366 if (export->flags & TRACE_EXPORT_EVENT)
367 static_branch_inc(&trace_event_exports_enabled);
369 if (export->flags & TRACE_EXPORT_MARKER)
370 static_branch_inc(&trace_marker_exports_enabled);
373 static inline void ftrace_exports_disable(struct trace_export *export)
375 if (export->flags & TRACE_EXPORT_FUNCTION)
376 static_branch_dec(&trace_function_exports_enabled);
378 if (export->flags & TRACE_EXPORT_EVENT)
379 static_branch_dec(&trace_event_exports_enabled);
381 if (export->flags & TRACE_EXPORT_MARKER)
382 static_branch_dec(&trace_marker_exports_enabled);
385 static void ftrace_exports(struct ring_buffer_event *event, int flag)
387 struct trace_export *export;
389 preempt_disable_notrace();
391 export = rcu_dereference_raw_check(ftrace_exports_list);
393 trace_process_export(export, event, flag);
394 export = rcu_dereference_raw_check(export->next);
397 preempt_enable_notrace();
401 add_trace_export(struct trace_export **list, struct trace_export *export)
403 rcu_assign_pointer(export->next, *list);
405 * We are adding the export to the list, but another
406 * CPU might be walking that list. We need to make sure
407 * the export->next pointer is valid before another CPU sees
408 * the export pointer inserted into the list.
410 rcu_assign_pointer(*list, export);
414 rm_trace_export(struct trace_export **list, struct trace_export *export)
416 struct trace_export **p;
418 for (p = list; *p != NULL; p = &(*p)->next)
425 rcu_assign_pointer(*p, (*p)->next);
431 add_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ftrace_exports_enable(export);
435 add_trace_export(list, export);
439 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
443 ret = rm_trace_export(list, export);
444 ftrace_exports_disable(export);
449 int register_ftrace_export(struct trace_export *export)
451 if (WARN_ON_ONCE(!export->write))
454 mutex_lock(&ftrace_export_lock);
456 add_ftrace_export(&ftrace_exports_list, export);
458 mutex_unlock(&ftrace_export_lock);
462 EXPORT_SYMBOL_GPL(register_ftrace_export);
464 int unregister_ftrace_export(struct trace_export *export)
468 mutex_lock(&ftrace_export_lock);
470 ret = rm_ftrace_export(&ftrace_exports_list, export);
472 mutex_unlock(&ftrace_export_lock);
476 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
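/*
 * Illustrative sketch (hypothetical module code, not part of this file):
 * a minimal trace_export that forwards function-trace entries to an
 * external sink through its ->write() callback.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* push @size bytes of the raw @entry to the external consumer */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_FUNCTION,
};

/* Pair register_ftrace_export(&example_export) with unregister_ftrace_export(). */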
478 /* trace_flags holds trace_options default values */
479 #define TRACE_DEFAULT_FLAGS \
480 (FUNCTION_DEFAULT_FLAGS | \
481 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
482 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
483 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
484 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
487 /* trace_options that are only supported by global_trace */
488 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
489 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
491 /* trace_flags that are default zero for instances */
492 #define ZEROED_TRACE_FLAGS \
493 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
496 * The global_trace is the descriptor that holds the top-level tracing
497 * buffers for the live tracing.
499 static struct trace_array global_trace = {
500 .trace_flags = TRACE_DEFAULT_FLAGS,
503 void trace_set_ring_buffer_expanded(struct trace_array *tr)
507 tr->ring_buffer_expanded = true;
510 LIST_HEAD(ftrace_trace_arrays);
512 int trace_array_get(struct trace_array *this_tr)
514 struct trace_array *tr;
517 mutex_lock(&trace_types_lock);
518 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
525 mutex_unlock(&trace_types_lock);
530 static void __trace_array_put(struct trace_array *this_tr)
532 WARN_ON(!this_tr->ref);
537 * trace_array_put - Decrement the reference counter for this trace array.
538 * @this_tr : pointer to the trace array
540 * NOTE: Use this when we no longer need the trace array returned by
541 * trace_array_get_by_name(). This ensures the trace array can be later
545 void trace_array_put(struct trace_array *this_tr)
550 mutex_lock(&trace_types_lock);
551 __trace_array_put(this_tr);
552 mutex_unlock(&trace_types_lock);
554 EXPORT_SYMBOL_GPL(trace_array_put);
556 int tracing_check_open_get_tr(struct trace_array *tr)
560 ret = security_locked_down(LOCKDOWN_TRACEFS);
564 if (tracing_disabled)
567 if (tr && trace_array_get(tr) < 0)
573 int call_filter_check_discard(struct trace_event_call *call, void *rec,
574 struct trace_buffer *buffer,
575 struct ring_buffer_event *event)
577 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
578 !filter_match_preds(call->filter, rec)) {
579 __trace_event_discard_commit(buffer, event);
587 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
588 * @filtered_pids: The list of pids to check
589 * @search_pid: The PID to find in @filtered_pids
591 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
594 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
596 return trace_pid_list_is_set(filtered_pids, search_pid);
600 * trace_ignore_this_task - should a task be ignored for tracing
601 * @filtered_pids: The list of pids to check
602 * @filtered_no_pids: The list of pids not to be traced
603 * @task: The task that should be ignored if not filtered
605 * Checks if @task should be traced or not from @filtered_pids.
606 * Returns true if @task should *NOT* be traced.
607 * Returns false if @task should be traced.
610 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
611 struct trace_pid_list *filtered_no_pids,
612 struct task_struct *task)
615 * If filtered_no_pids is not empty, and the task's pid is listed
616 * in filtered_no_pids, then return true.
617 * Otherwise, if filtered_pids is empty, that means we can
618 * trace all tasks. If it has content, then only trace pids
619 * within filtered_pids.
622 return (filtered_pids &&
623 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
625 trace_find_filtered_pid(filtered_no_pids, task->pid));
629 * trace_filter_add_remove_task - Add or remove a task from a pid_list
630 * @pid_list: The list to modify
631 * @self: The current task for fork or NULL for exit
632 * @task: The task to add or remove
634 * When adding a task, if @self is defined, the task is only added if @self
635 * is also included in @pid_list. This happens on fork and tasks should
636 * only be added when the parent is listed. If @self is NULL, then the
637 * @task pid will be removed from the list, which would happen on exit
640 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
641 struct task_struct *self,
642 struct task_struct *task)
647 /* For forks, we only add if the forking task is listed */
649 if (!trace_find_filtered_pid(pid_list, self->pid))
653 /* "self" is set for forks, and NULL for exits */
655 trace_pid_list_set(pid_list, task->pid);
657 trace_pid_list_clear(pid_list, task->pid);
661 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
662 * @pid_list: The pid list to show
663 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
664 * @pos: The position of the file
666 * This is used by the seq_file "next" operation to iterate the pids
667 * listed in a trace_pid_list structure.
669 * Returns the pid+1 as we want to display pid of zero, but NULL would
670 * stop the iteration.
672 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
674 long pid = (unsigned long)v;
679 /* pid already is +1 of the actual previous bit */
680 if (trace_pid_list_next(pid_list, pid, &next) < 0)
685 /* Return pid + 1 to allow zero to be represented */
686 return (void *)(pid + 1);
690 * trace_pid_start - Used for seq_file to start reading pid lists
691 * @pid_list: The pid list to show
692 * @pos: The position of the file
694 * This is used by seq_file "start" operation to start the iteration
697 * Returns the pid+1 as we want to display pid of zero, but NULL would
698 * stop the iteration.
700 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
706 if (trace_pid_list_first(pid_list, &first) < 0)
711 /* Return pid + 1 so that zero can be the exit value */
712 for (pid++; pid && l < *pos;
713 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
719 * trace_pid_show - show the current pid in seq_file processing
720 * @m: The seq_file structure to write into
721 * @v: A void pointer of the pid (+1) value to display
723 * Can be directly used by seq_file operations to display the current
726 int trace_pid_show(struct seq_file *m, void *v)
728 unsigned long pid = (unsigned long)v - 1;
730 seq_printf(m, "%lu\n", pid);
734 /* 128 should be much more than enough */
735 #define PID_BUF_SIZE 127
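/*
 * Illustrative sketch (hypothetical wrappers, not part of this file):
 * how the trace_pid_start/next/show helpers above can be wired into
 * seq_operations. Real users fetch the pid list under RCU; here it is
 * assumed to be stashed in m->private.
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};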
737 int trace_pid_write(struct trace_pid_list *filtered_pids,
738 struct trace_pid_list **new_pid_list,
739 const char __user *ubuf, size_t cnt)
741 struct trace_pid_list *pid_list;
742 struct trace_parser parser;
750 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
754 * Always recreate a new array. The write is an all-or-nothing
755 * operation: a new array is created whenever the user adds new
756 * pids. If the operation fails, then the current list is
759 pid_list = trace_pid_list_alloc();
761 trace_parser_put(&parser);
766 /* copy the current bits to the new max */
767 ret = trace_pid_list_first(filtered_pids, &pid);
769 trace_pid_list_set(pid_list, pid);
770 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
780 ret = trace_get_user(&parser, ubuf, cnt, &pos);
788 if (!trace_parser_loaded(&parser))
792 if (kstrtoul(parser.buffer, 0, &val))
797 if (trace_pid_list_set(pid_list, pid) < 0) {
803 trace_parser_clear(&parser);
806 trace_parser_put(&parser);
809 trace_pid_list_free(pid_list);
814 /* Cleared the list of pids */
815 trace_pid_list_free(pid_list);
819 *new_pid_list = pid_list;
824 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
828 /* Early boot up does not have a buffer yet */
830 return trace_clock_local();
832 ts = ring_buffer_time_stamp(buf->buffer);
833 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
838 u64 ftrace_now(int cpu)
840 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
844 * tracing_is_enabled - Show if global_trace has been enabled
846 * Shows if the global trace has been enabled or not. It uses the
847 * mirror flag "buffer_disabled", which can be used in fast paths such
848 * as by the irqsoff tracer. But it may be inaccurate due to races. If you
849 * need to know the accurate state, use tracing_is_on() which is a little
850 * slower, but accurate.
852 int tracing_is_enabled(void)
855 * For quick access (irqsoff uses this in fast path), just
856 * return the mirror variable of the state of the ring buffer.
857 * It's a little racy, but we don't really care.
860 return !global_trace.buffer_disabled;
864 * trace_buf_size is the size in bytes that is allocated
865 * for a buffer. Note, the number of bytes is always rounded
868 * This number is purposely set to a low number of 16384.
869 * If a dump on oops happens, it is much appreciated not to
870 * have to wait for all that output. In any case, this is
871 * configurable at both boot time and run time.
873 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
875 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
877 /* trace_types holds a link list of available tracers. */
878 static struct tracer *trace_types __read_mostly;
881 * trace_types_lock is used to protect the trace_types list.
883 DEFINE_MUTEX(trace_types_lock);
886 * Serialize access to the ring buffer.
888 * The ring buffer serializes readers, but that is only low-level protection.
889 * The validity of the events (returned by ring_buffer_peek() etc.)
890 * is not protected by the ring buffer.
892 * The content of events may become garbage if we allow other processes to consume
893 * these events concurrently:
894 * A) the page of the consumed events may become a normal page
895 * (not a reader page) in the ring buffer, and this page will be rewritten
896 * by the event producer.
897 * B) the page of the consumed events may become a page for splice_read,
898 * and this page will be returned to the system.
900 * These primitives allow multiple processes to access different CPU ring buffers
903 * These primitives don't distinguish between read-only and read-consume access.
904 * Multiple read-only accesses are also serialized.
908 static DECLARE_RWSEM(all_cpu_access_lock);
909 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
911 static inline void trace_access_lock(int cpu)
913 if (cpu == RING_BUFFER_ALL_CPUS) {
914 /* gain it for accessing the whole ring buffer. */
915 down_write(&all_cpu_access_lock);
917 /* gain it for accessing a cpu ring buffer. */
919 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
920 down_read(&all_cpu_access_lock);
922 /* Secondly block other access to this @cpu ring buffer. */
923 mutex_lock(&per_cpu(cpu_access_lock, cpu));
927 static inline void trace_access_unlock(int cpu)
929 if (cpu == RING_BUFFER_ALL_CPUS) {
930 up_write(&all_cpu_access_lock);
932 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
933 up_read(&all_cpu_access_lock);
937 static inline void trace_access_lock_init(void)
941 for_each_possible_cpu(cpu)
942 mutex_init(&per_cpu(cpu_access_lock, cpu));
947 static DEFINE_MUTEX(access_lock);
949 static inline void trace_access_lock(int cpu)
952 mutex_lock(&access_lock);
955 static inline void trace_access_unlock(int cpu)
958 mutex_unlock(&access_lock);
961 static inline void trace_access_lock_init(void)
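/*
 * Illustrative sketch (hypothetical reader, not part of this file):
 * consuming events from a single CPU buffer under the access-locking
 * scheme above, so concurrent readers cannot corrupt the event pages.
 */
static void example_consume_one_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	trace_access_lock(cpu);
	event = ring_buffer_consume(tr->array_buffer.buffer, cpu, &ts, NULL);
	if (event)
		pr_debug("consumed an event stamped %llu\n",
			 (unsigned long long)ts);
	trace_access_unlock(cpu);
}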
967 #ifdef CONFIG_STACKTRACE
968 static void __ftrace_trace_stack(struct trace_buffer *buffer,
969 unsigned int trace_ctx,
970 int skip, struct pt_regs *regs);
971 static inline void ftrace_trace_stack(struct trace_array *tr,
972 struct trace_buffer *buffer,
973 unsigned int trace_ctx,
974 int skip, struct pt_regs *regs);
977 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
978 unsigned int trace_ctx,
979 int skip, struct pt_regs *regs)
982 static inline void ftrace_trace_stack(struct trace_array *tr,
983 struct trace_buffer *buffer,
984 unsigned long trace_ctx,
985 int skip, struct pt_regs *regs)
991 static __always_inline void
992 trace_event_setup(struct ring_buffer_event *event,
993 int type, unsigned int trace_ctx)
995 struct trace_entry *ent = ring_buffer_event_data(event);
997 tracing_generic_entry_update(ent, type, trace_ctx);
1000 static __always_inline struct ring_buffer_event *
1001 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1004 unsigned int trace_ctx)
1006 struct ring_buffer_event *event;
1008 event = ring_buffer_lock_reserve(buffer, len);
1010 trace_event_setup(event, type, trace_ctx);
1015 void tracer_tracing_on(struct trace_array *tr)
1017 if (tr->array_buffer.buffer)
1018 ring_buffer_record_on(tr->array_buffer.buffer);
1020 * This flag is looked at when buffers haven't been allocated
1021 * yet, or by some tracers (like irqsoff) that just want to
1022 * know if the ring buffer has been disabled, but can handle
1023 * races where it gets disabled while we still do a record.
1024 * As the check is in the fast path of the tracers, it is more
1025 * important to be fast than accurate.
1027 tr->buffer_disabled = 0;
1028 /* Make the flag seen by readers */
1033 * tracing_on - enable tracing buffers
1035 * This function enables tracing buffers that may have been
1036 * disabled with tracing_off.
1038 void tracing_on(void)
1040 tracer_tracing_on(&global_trace);
1042 EXPORT_SYMBOL_GPL(tracing_on);
1045 static __always_inline void
1046 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1048 __this_cpu_write(trace_taskinfo_save, true);
1050 /* If this is the temp buffer, we need to commit fully */
1051 if (this_cpu_read(trace_buffered_event) == event) {
1052 /* Length is in event->array[0] */
1053 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1054 /* Release the temp buffer */
1055 this_cpu_dec(trace_buffered_event_cnt);
1056 /* ring_buffer_unlock_commit() enables preemption */
1057 preempt_enable_notrace();
1059 ring_buffer_unlock_commit(buffer);
1062 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1063 const char *str, int size)
1065 struct ring_buffer_event *event;
1066 struct trace_buffer *buffer;
1067 struct print_entry *entry;
1068 unsigned int trace_ctx;
1071 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1074 if (unlikely(tracing_selftest_running && tr == &global_trace))
1077 if (unlikely(tracing_disabled))
1080 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1082 trace_ctx = tracing_gen_ctx();
1083 buffer = tr->array_buffer.buffer;
1084 ring_buffer_nest_start(buffer);
1085 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1092 entry = ring_buffer_event_data(event);
1095 memcpy(&entry->buf, str, size);
1097 /* Add a newline if necessary */
1098 if (entry->buf[size - 1] != '\n') {
1099 entry->buf[size] = '\n';
1100 entry->buf[size + 1] = '\0';
1102 entry->buf[size] = '\0';
1104 __buffer_unlock_commit(buffer, event);
1105 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1107 ring_buffer_nest_end(buffer);
1110 EXPORT_SYMBOL_GPL(__trace_array_puts);
1113 * __trace_puts - write a constant string into the trace buffer.
1114 * @ip: The address of the caller
1115 * @str: The constant string to write
1116 * @size: The size of the string.
1118 int __trace_puts(unsigned long ip, const char *str, int size)
1120 return __trace_array_puts(&global_trace, ip, str, size);
1122 EXPORT_SYMBOL_GPL(__trace_puts);
1125 * __trace_bputs - write the pointer to a constant string into trace buffer
1126 * @ip: The address of the caller
1127 * @str: The constant string to write to the buffer
1129 int __trace_bputs(unsigned long ip, const char *str)
1131 struct ring_buffer_event *event;
1132 struct trace_buffer *buffer;
1133 struct bputs_entry *entry;
1134 unsigned int trace_ctx;
1135 int size = sizeof(struct bputs_entry);
1138 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1141 if (unlikely(tracing_selftest_running || tracing_disabled))
1144 trace_ctx = tracing_gen_ctx();
1145 buffer = global_trace.array_buffer.buffer;
1147 ring_buffer_nest_start(buffer);
1148 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1153 entry = ring_buffer_event_data(event);
1157 __buffer_unlock_commit(buffer, event);
1158 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1162 ring_buffer_nest_end(buffer);
1165 EXPORT_SYMBOL_GPL(__trace_bputs);
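/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * callers normally reach __trace_puts()/__trace_bputs() through the
 * trace_puts() macro, which uses the cheaper bputs form (pointer only)
 * when the string is a literal.
 */
static void example_mark_progress(void)
{
	trace_puts("example: reached the interesting point\n");
}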
1167 #ifdef CONFIG_TRACER_SNAPSHOT
1168 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1171 struct tracer *tracer = tr->current_trace;
1172 unsigned long flags;
1175 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1176 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1180 if (!tr->allocated_snapshot) {
1181 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1182 trace_array_puts(tr, "*** stopping trace here! ***\n");
1183 tracer_tracing_off(tr);
1187 /* Note, snapshot can not be used when the tracer uses it */
1188 if (tracer->use_max_tr) {
1189 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1190 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1195 trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1196 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1200 local_irq_save(flags);
1201 update_max_tr(tr, current, smp_processor_id(), cond_data);
1202 local_irq_restore(flags);
1205 void tracing_snapshot_instance(struct trace_array *tr)
1207 tracing_snapshot_instance_cond(tr, NULL);
1211 * tracing_snapshot - take a snapshot of the current buffer.
1213 * This causes a swap between the snapshot buffer and the current live
1214 * tracing buffer. You can use this to take snapshots of the live
1215 * trace when some condition is triggered, but continue to trace.
1217 * Note, make sure to allocate the snapshot with either
1218 * a tracing_snapshot_alloc(), or by doing it manually
1219 * with: echo 1 > /sys/kernel/tracing/snapshot
1221 * If the snapshot buffer is not allocated, it will stop tracing.
1222 * Basically making a permanent snapshot.
1224 void tracing_snapshot(void)
1226 struct trace_array *tr = &global_trace;
1228 tracing_snapshot_instance(tr);
1230 EXPORT_SYMBOL_GPL(tracing_snapshot);
1233 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1234 * @tr: The tracing instance to snapshot
1235 * @cond_data: The data to be tested conditionally, and possibly saved
1237 * This is the same as tracing_snapshot() except that the snapshot is
1238 * conditional - the snapshot will only happen if the
1239 * cond_snapshot.update() implementation receiving the cond_data
1240 * returns true, which means that the trace array's cond_snapshot
1241 * update() operation used the cond_data to determine whether the
1242 * snapshot should be taken, and if it was, presumably saved it along
1243 * with the snapshot.
1245 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1247 tracing_snapshot_instance_cond(tr, cond_data);
1249 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1252 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1253 * @tr: The tracing instance
1255 * When the user enables a conditional snapshot using
1256 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1257 * with the snapshot. This accessor is used to retrieve it.
1259 * Should not be called from cond_snapshot.update(), since it takes
1260 * the tr->max_lock lock, which the code calling
1261 * cond_snapshot.update() has already taken.
1263 * Returns the cond_data associated with the trace array's snapshot.
1265 void *tracing_cond_snapshot_data(struct trace_array *tr)
1267 void *cond_data = NULL;
1269 local_irq_disable();
1270 arch_spin_lock(&tr->max_lock);
1272 if (tr->cond_snapshot)
1273 cond_data = tr->cond_snapshot->cond_data;
1275 arch_spin_unlock(&tr->max_lock);
1280 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1282 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1283 struct array_buffer *size_buf, int cpu_id);
1284 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1286 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1291 if (!tr->allocated_snapshot) {
1293 /* Make the snapshot buffer have the same order as main buffer */
1294 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1295 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1299 /* allocate spare buffer */
1300 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1301 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1305 tr->allocated_snapshot = true;
1311 static void free_snapshot(struct trace_array *tr)
1314 * We don't free the ring buffer; instead, we resize it because
1315 * the max_tr ring buffer has some state (e.g. ring->clock) and
1316 * we want to preserve it.
1318 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1319 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1320 set_buffer_entries(&tr->max_buffer, 1);
1321 tracing_reset_online_cpus(&tr->max_buffer);
1322 tr->allocated_snapshot = false;
1325 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1329 lockdep_assert_held(&trace_types_lock);
1331 spin_lock(&tr->snapshot_trigger_lock);
1332 if (tr->snapshot == UINT_MAX || tr->mapped) {
1333 spin_unlock(&tr->snapshot_trigger_lock);
1338 spin_unlock(&tr->snapshot_trigger_lock);
1340 ret = tracing_alloc_snapshot_instance(tr);
1342 spin_lock(&tr->snapshot_trigger_lock);
1344 spin_unlock(&tr->snapshot_trigger_lock);
1350 int tracing_arm_snapshot(struct trace_array *tr)
1354 mutex_lock(&trace_types_lock);
1355 ret = tracing_arm_snapshot_locked(tr);
1356 mutex_unlock(&trace_types_lock);
1361 void tracing_disarm_snapshot(struct trace_array *tr)
1363 spin_lock(&tr->snapshot_trigger_lock);
1364 if (!WARN_ON(!tr->snapshot))
1366 spin_unlock(&tr->snapshot_trigger_lock);
1370 * tracing_alloc_snapshot - allocate snapshot buffer.
1372 * This only allocates the snapshot buffer if it isn't already
1373 * allocated - it doesn't also take a snapshot.
1375 * This is meant to be used in cases where the snapshot buffer needs
1376 * to be set up for events that can't sleep but need to be able to
1377 * trigger a snapshot.
1379 int tracing_alloc_snapshot(void)
1381 struct trace_array *tr = &global_trace;
1384 ret = tracing_alloc_snapshot_instance(tr);
1389 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1392 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1394 * This is similar to tracing_snapshot(), but it will allocate the
1395 * snapshot buffer if it isn't already allocated. Use this only
1396 * where it is safe to sleep, as the allocation may sleep.
1398 * This causes a swap between the snapshot buffer and the current live
1399 * tracing buffer. You can use this to take snapshots of the live
1400 * trace when some condition is triggered, but continue to trace.
1402 void tracing_snapshot_alloc(void)
1406 ret = tracing_alloc_snapshot();
1412 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
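/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * allocate the spare buffer once from a context that may sleep, then
 * trigger the swap from the hot path when the condition of interest
 * is hit.
 */
static int example_snapshot_setup(void)
{
	return tracing_alloc_snapshot();
}

static void example_hit_condition(void)
{
	tracing_snapshot();	/* swap the live buffer with the spare one */
}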
1415 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1416 * @tr: The tracing instance
1417 * @cond_data: User data to associate with the snapshot
1418 * @update: Implementation of the cond_snapshot update function
1420 * Check whether the conditional snapshot for the given instance has
1421 * already been enabled, or if the current tracer is already using a
1422 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1423 * save the cond_data and update function inside.
1425 * Returns 0 if successful, error otherwise.
1427 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1428 cond_update_fn_t update)
1430 struct cond_snapshot *cond_snapshot;
1433 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1437 cond_snapshot->cond_data = cond_data;
1438 cond_snapshot->update = update;
1440 mutex_lock(&trace_types_lock);
1442 if (tr->current_trace->use_max_tr) {
1448 * The cond_snapshot can only change to NULL without the
1449 * trace_types_lock. We don't care if we race with it going
1450 * to NULL, but we want to make sure that it's not set to
1451 * something other than NULL when we get here, which we can
1452 * do safely with only holding the trace_types_lock and not
1453 * having to take the max_lock.
1455 if (tr->cond_snapshot) {
1460 ret = tracing_arm_snapshot_locked(tr);
1464 local_irq_disable();
1465 arch_spin_lock(&tr->max_lock);
1466 tr->cond_snapshot = cond_snapshot;
1467 arch_spin_unlock(&tr->max_lock);
1470 mutex_unlock(&trace_types_lock);
1475 mutex_unlock(&trace_types_lock);
1476 kfree(cond_snapshot);
1479 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
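/*
 * Illustrative sketch (hypothetical user, not part of this file): an
 * update() callback that inspects the cond_data handed to
 * tracing_snapshot_cond() and only allows the snapshot when a
 * threshold is crossed.
 */
struct example_cond_data {
	u64	threshold;
	u64	value;
};

static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	struct example_cond_data *data = cond_data;

	return data && data->value > data->threshold;
}

/* Armed with: tracing_snapshot_cond_enable(tr, &example_data, example_cond_update); */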
1482 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1483 * @tr: The tracing instance
1485 * Check whether the conditional snapshot for the given instance is
1486 * enabled; if so, free the cond_snapshot associated with it,
1487 * otherwise return -EINVAL.
1489 * Returns 0 if successful, error otherwise.
1491 int tracing_snapshot_cond_disable(struct trace_array *tr)
1495 local_irq_disable();
1496 arch_spin_lock(&tr->max_lock);
1498 if (!tr->cond_snapshot)
1501 kfree(tr->cond_snapshot);
1502 tr->cond_snapshot = NULL;
1505 arch_spin_unlock(&tr->max_lock);
1508 tracing_disarm_snapshot(tr);
1512 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1514 void tracing_snapshot(void)
1516 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1518 EXPORT_SYMBOL_GPL(tracing_snapshot);
1519 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1521 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1523 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1524 int tracing_alloc_snapshot(void)
1526 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1529 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1530 void tracing_snapshot_alloc(void)
1535 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1536 void *tracing_cond_snapshot_data(struct trace_array *tr)
1540 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1541 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1545 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1546 int tracing_snapshot_cond_disable(struct trace_array *tr)
1550 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1551 #define free_snapshot(tr) do { } while (0)
1552 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1553 #endif /* CONFIG_TRACER_SNAPSHOT */
1555 void tracer_tracing_off(struct trace_array *tr)
1557 if (tr->array_buffer.buffer)
1558 ring_buffer_record_off(tr->array_buffer.buffer);
1560 * This flag is looked at when buffers haven't been allocated
1561 * yet, or by some tracers (like irqsoff) that just want to
1562 * know if the ring buffer has been disabled, but can handle
1563 * races where it gets disabled while we still do a record.
1564 * As the check is in the fast path of the tracers, it is more
1565 * important to be fast than accurate.
1567 tr->buffer_disabled = 1;
1568 /* Make the flag seen by readers */
1573 * tracing_off - turn off tracing buffers
1575 * This function stops the tracing buffers from recording data.
1576 * It does not disable any overhead the tracers themselves may
1577 * be causing. This function simply causes all recording to
1578 * the ring buffers to fail.
1580 void tracing_off(void)
1582 tracer_tracing_off(&global_trace);
1584 EXPORT_SYMBOL_GPL(tracing_off);
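/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * freeze the ring buffers the moment a bad condition is seen so the
 * events leading up to it are preserved for later inspection.
 */
static void example_check_status(int status)
{
	if (WARN_ON_ONCE(status < 0))
		tracing_off();
}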
1586 void disable_trace_on_warning(void)
1588 if (__disable_trace_on_warning) {
1589 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1590 "Disabling tracing due to warning\n");
1596 * tracer_tracing_is_on - show real state of ring buffer enabled
1597 * @tr: the trace array to check if its ring buffer is enabled
1599 * Shows real state of the ring buffer if it is enabled or not.
1601 bool tracer_tracing_is_on(struct trace_array *tr)
1603 if (tr->array_buffer.buffer)
1604 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1605 return !tr->buffer_disabled;
1609 * tracing_is_on - show state of ring buffers enabled
1611 int tracing_is_on(void)
1613 return tracer_tracing_is_on(&global_trace);
1615 EXPORT_SYMBOL_GPL(tracing_is_on);
1617 static int __init set_buf_size(char *str)
1619 unsigned long buf_size;
1623 buf_size = memparse(str, &str);
1625 * nr_entries can not be zero and the startup
1626 * tests require some buffer space. Therefore
1627 * ensure we have at least 4096 bytes of buffer.
1629 trace_buf_size = max(4096UL, buf_size);
1632 __setup("trace_buf_size=", set_buf_size);
1634 static int __init set_tracing_thresh(char *str)
1636 unsigned long threshold;
1641 ret = kstrtoul(str, 0, &threshold);
1644 tracing_thresh = threshold * 1000;
1647 __setup("tracing_thresh=", set_tracing_thresh);
1649 unsigned long nsecs_to_usecs(unsigned long nsecs)
1651 return nsecs / 1000;
1655 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1656 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1657 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1658 * of strings in the order that the evals (enum) were defined.
1663 /* These must match the bit positions in trace_iterator_flags */
1664 static const char *trace_options[] = {
1672 int in_ns; /* is this clock in nanoseconds? */
1673 } trace_clocks[] = {
1674 { trace_clock_local, "local", 1 },
1675 { trace_clock_global, "global", 1 },
1676 { trace_clock_counter, "counter", 0 },
1677 { trace_clock_jiffies, "uptime", 0 },
1678 { trace_clock, "perf", 1 },
1679 { ktime_get_mono_fast_ns, "mono", 1 },
1680 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1681 { ktime_get_boot_fast_ns, "boot", 1 },
1682 { ktime_get_tai_fast_ns, "tai", 1 },
1686 bool trace_clock_in_ns(struct trace_array *tr)
1688 if (trace_clocks[tr->clock_id].in_ns)
1695 * trace_parser_get_init - gets the buffer for trace parser
1697 int trace_parser_get_init(struct trace_parser *parser, int size)
1699 memset(parser, 0, sizeof(*parser));
1701 parser->buffer = kmalloc(size, GFP_KERNEL);
1702 if (!parser->buffer)
1705 parser->size = size;
1710 * trace_parser_put - frees the buffer for trace parser
1712 void trace_parser_put(struct trace_parser *parser)
1714 kfree(parser->buffer);
1715 parser->buffer = NULL;
1719 * trace_get_user - reads the user input string separated by space
1720 * (matched by isspace(ch))
1722 * For each string found the 'struct trace_parser' is updated,
1723 * and the function returns.
1725 * Returns number of bytes read.
1727 * See kernel/trace/trace.h for 'struct trace_parser' details.
1729 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1730 size_t cnt, loff_t *ppos)
1737 trace_parser_clear(parser);
1739 ret = get_user(ch, ubuf++);
1747 * The parser is not finished with the last write,
1748 * continue reading the user input without skipping spaces.
1750 if (!parser->cont) {
1751 /* skip white space */
1752 while (cnt && isspace(ch)) {
1753 ret = get_user(ch, ubuf++);
1762 /* only spaces were written */
1763 if (isspace(ch) || !ch) {
1770 /* read the non-space input */
1771 while (cnt && !isspace(ch) && ch) {
1772 if (parser->idx < parser->size - 1)
1773 parser->buffer[parser->idx++] = ch;
1778 ret = get_user(ch, ubuf++);
1785 /* We either got finished input or we have to wait for another call. */
1786 if (isspace(ch) || !ch) {
1787 parser->buffer[parser->idx] = 0;
1788 parser->cont = false;
1789 } else if (parser->idx < parser->size - 1) {
1790 parser->cont = true;
1791 parser->buffer[parser->idx++] = ch;
1792 /* Make sure the parsed string always terminates with '\0'. */
1793 parser->buffer[parser->idx] = 0;
1806 /* TODO add a seq_buf_to_buffer() */
1807 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1811 if (trace_seq_used(s) <= s->readpos)
1814 len = trace_seq_used(s) - s->readpos;
1817 memcpy(buf, s->buffer + s->readpos, cnt);
1823 unsigned long __read_mostly tracing_thresh;
1825 #ifdef CONFIG_TRACER_MAX_TRACE
1826 static const struct file_operations tracing_max_lat_fops;
1828 #ifdef LATENCY_FS_NOTIFY
1830 static struct workqueue_struct *fsnotify_wq;
1832 static void latency_fsnotify_workfn(struct work_struct *work)
1834 struct trace_array *tr = container_of(work, struct trace_array,
1836 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1839 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1841 struct trace_array *tr = container_of(iwork, struct trace_array,
1843 queue_work(fsnotify_wq, &tr->fsnotify_work);
1846 static void trace_create_maxlat_file(struct trace_array *tr,
1847 struct dentry *d_tracer)
1849 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1850 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1851 tr->d_max_latency = trace_create_file("tracing_max_latency",
1854 &tracing_max_lat_fops);
1857 __init static int latency_fsnotify_init(void)
1859 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1860 WQ_UNBOUND | WQ_HIGHPRI, 0);
1862 pr_err("Unable to allocate tr_max_lat_wq\n");
1868 late_initcall_sync(latency_fsnotify_init);
1870 void latency_fsnotify(struct trace_array *tr)
1875 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1876 * possible that we are called from __schedule() or do_idle(), which
1877 * could cause a deadlock.
1879 irq_work_queue(&tr->fsnotify_irqwork);
1882 #else /* !LATENCY_FS_NOTIFY */
1884 #define trace_create_maxlat_file(tr, d_tracer) \
1885 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1886 d_tracer, tr, &tracing_max_lat_fops)
1891 * Copy the new maximum trace into the separate maximum-trace
1892 * structure. (this way the maximum trace is permanently saved,
1893 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1896 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1898 struct array_buffer *trace_buf = &tr->array_buffer;
1899 struct array_buffer *max_buf = &tr->max_buffer;
1900 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1901 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1904 max_buf->time_start = data->preempt_timestamp;
1906 max_data->saved_latency = tr->max_latency;
1907 max_data->critical_start = data->critical_start;
1908 max_data->critical_end = data->critical_end;
1910 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1911 max_data->pid = tsk->pid;
1913 * If tsk == current, then use current_uid(), as that does not use
1914 * RCU. The irq tracer can be called out of RCU scope.
1917 max_data->uid = current_uid();
1919 max_data->uid = task_uid(tsk);
1921 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1922 max_data->policy = tsk->policy;
1923 max_data->rt_priority = tsk->rt_priority;
1925 /* record this tasks comm */
1926 tracing_record_cmdline(tsk);
1927 latency_fsnotify(tr);
1931 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1933 * @tsk: the task with the latency
1934 * @cpu: The cpu that initiated the trace.
1935 * @cond_data: User data associated with a conditional snapshot
1937 * Flip the buffers between the @tr and the max_tr and record information
1938 * about which task was the cause of this latency.
1941 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1947 WARN_ON_ONCE(!irqs_disabled());
1949 if (!tr->allocated_snapshot) {
1950 /* Only the nop tracer should hit this when disabling */
1951 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1955 arch_spin_lock(&tr->max_lock);
1957 /* Inherit the recordable setting from array_buffer */
1958 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1959 ring_buffer_record_on(tr->max_buffer.buffer);
1961 ring_buffer_record_off(tr->max_buffer.buffer);
1963 #ifdef CONFIG_TRACER_SNAPSHOT
1964 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1965 arch_spin_unlock(&tr->max_lock);
1969 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1971 __update_max_tr(tr, tsk, cpu);
1973 arch_spin_unlock(&tr->max_lock);
1975 /* Any waiters on the old snapshot buffer need to wake up */
1976 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1980 * update_max_tr_single - only copy one trace over, and reset the rest
1982 * @tsk: task with the latency
1983 * @cpu: the cpu of the buffer to copy.
1985 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1988 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1995 WARN_ON_ONCE(!irqs_disabled());
1996 if (!tr->allocated_snapshot) {
1997 /* Only the nop tracer should hit this when disabling */
1998 WARN_ON_ONCE(tr->current_trace != &nop_trace);
2002 arch_spin_lock(&tr->max_lock);
2004 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2006 if (ret == -EBUSY) {
2008 * We failed to swap the buffer due to a commit taking
2009 * place on this CPU. We fail to record, but we reset
2010 * the max trace buffer (no one writes directly to it)
2011 * and flag that it failed.
2012 * Another reason is that a resize is in progress.
2014 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2015 "Failed to swap buffers due to commit or resize in progress\n");
2018 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2020 __update_max_tr(tr, tsk, cpu);
2021 arch_spin_unlock(&tr->max_lock);
2024 #endif /* CONFIG_TRACER_MAX_TRACE */
2027 struct trace_iterator *iter;
2031 static bool wait_pipe_cond(void *data)
2033 struct pipe_wait *pwait = data;
2034 struct trace_iterator *iter = pwait->iter;
2036 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2039 return iter->closed;
2042 static int wait_on_pipe(struct trace_iterator *iter, int full)
2044 struct pipe_wait pwait;
2047 /* Iterators are static; they should be either filled or empty */
2048 if (trace_buffer_iter(iter, iter->cpu_file))
2051 pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2054 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2055 wait_pipe_cond, &pwait);
2057 #ifdef CONFIG_TRACER_MAX_TRACE
2059 * Make sure this is still the snapshot buffer, as if a snapshot were
2060 * to happen, this would now be the main buffer.
2063 iter->array_buffer = &iter->tr->max_buffer;
2068 #ifdef CONFIG_FTRACE_STARTUP_TEST
2069 static bool selftests_can_run;
2071 struct trace_selftests {
2072 struct list_head list;
2073 struct tracer *type;
2076 static LIST_HEAD(postponed_selftests);
2078 static int save_selftest(struct tracer *type)
2080 struct trace_selftests *selftest;
2082 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2086 selftest->type = type;
2087 list_add(&selftest->list, &postponed_selftests);
2091 static int run_tracer_selftest(struct tracer *type)
2093 struct trace_array *tr = &global_trace;
2094 struct tracer *saved_tracer = tr->current_trace;
2097 if (!type->selftest || tracing_selftest_disabled)
2101 * If a tracer registers early in boot up (before scheduling is
2102 * initialized and such), then do not run its selftests yet.
2103 * Instead, run it a little later in the boot process.
2105 if (!selftests_can_run)
2106 return save_selftest(type);
2108 if (!tracing_is_on()) {
2109 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2115 * Run a selftest on this tracer.
2116 * Here we reset the trace buffer, and set the current
2117 * tracer to be this tracer. The tracer can then run some
2118 * internal tracing to verify that everything is in order.
2119 * If we fail, we do not register this tracer.
2121 tracing_reset_online_cpus(&tr->array_buffer);
2123 tr->current_trace = type;
2125 #ifdef CONFIG_TRACER_MAX_TRACE
2126 if (type->use_max_tr) {
2127 /* If we expanded the buffers, make sure the max is expanded too */
2128 if (tr->ring_buffer_expanded)
2129 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2130 RING_BUFFER_ALL_CPUS);
2131 tr->allocated_snapshot = true;
2135 /* the test is responsible for initializing and enabling */
2136 pr_info("Testing tracer %s: ", type->name);
2137 ret = type->selftest(type, tr);
2138 /* the test is responsible for resetting too */
2139 tr->current_trace = saved_tracer;
2141 printk(KERN_CONT "FAILED!\n");
2142 /* Add the warning after printing 'FAILED' */
2146 /* Only reset on passing, to avoid touching corrupted buffers */
2147 tracing_reset_online_cpus(&tr->array_buffer);
2149 #ifdef CONFIG_TRACER_MAX_TRACE
2150 if (type->use_max_tr) {
2151 tr->allocated_snapshot = false;
2153 /* Shrink the max buffer again */
2154 if (tr->ring_buffer_expanded)
2155 ring_buffer_resize(tr->max_buffer.buffer, 1,
2156 RING_BUFFER_ALL_CPUS);
2160 printk(KERN_CONT "PASSED\n");
2164 static int do_run_tracer_selftest(struct tracer *type)
2169 * Tests can take a long time, especially if they are run one after the
2170 * other, as does happen during bootup when all the tracers are
2171 * registered. This could cause the soft lockup watchdog to trigger.
2175 tracing_selftest_running = true;
2176 ret = run_tracer_selftest(type);
2177 tracing_selftest_running = false;
2182 static __init int init_trace_selftests(void)
2184 struct trace_selftests *p, *n;
2185 struct tracer *t, **last;
2188 selftests_can_run = true;
2190 mutex_lock(&trace_types_lock);
2192 if (list_empty(&postponed_selftests))
2195 pr_info("Running postponed tracer tests:\n");
2197 tracing_selftest_running = true;
2198 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2199 /* This loop can take minutes when sanitizers are enabled, so
2200 * let's make sure we allow RCU processing.
2203 ret = run_tracer_selftest(p->type);
2204 /* If the test fails, then warn and remove from available_tracers */
2206 WARN(1, "tracer: %s failed selftest, disabling\n",
2208 last = &trace_types;
2209 for (t = trace_types; t; t = t->next) {
2220 tracing_selftest_running = false;
2223 mutex_unlock(&trace_types_lock);
2227 core_initcall(init_trace_selftests);
2229 static inline int run_tracer_selftest(struct tracer *type)
2233 static inline int do_run_tracer_selftest(struct tracer *type)
2237 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2239 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2241 static void __init apply_trace_boot_options(void);
2244 * register_tracer - register a tracer with the ftrace system.
2245 * @type: the plugin for the tracer
2247 * Register a new plugin tracer.
2249 int __init register_tracer(struct tracer *type)
2255 pr_info("Tracer must have a name\n");
2259 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2260 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2264 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2265 pr_warn("Can not register tracer %s due to lockdown\n",
2270 mutex_lock(&trace_types_lock);
2272 for (t = trace_types; t; t = t->next) {
2273 if (strcmp(type->name, t->name) == 0) {
2275 pr_info("Tracer %s already registered\n",
2282 if (!type->set_flag)
2283 type->set_flag = &dummy_set_flag;
2285 /* Allocate a dummy tracer_flags */
2286 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2291 type->flags->val = 0;
2292 type->flags->opts = dummy_tracer_opt;
2294 if (!type->flags->opts)
2295 type->flags->opts = dummy_tracer_opt;
2297 /* store the tracer for __set_tracer_option */
2298 type->flags->trace = type;
2300 ret = do_run_tracer_selftest(type);
2304 type->next = trace_types;
2306 add_tracer_options(&global_trace, type);
2309 mutex_unlock(&trace_types_lock);
2311 if (ret || !default_bootup_tracer)
2314 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2317 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2318 /* Do we want this tracer to start on bootup? */
2319 tracing_set_tracer(&global_trace, type->name);
2320 default_bootup_tracer = NULL;
2322 apply_trace_boot_options();
2324 /* Disable other selftests, since this will break them. */
2325 disable_tracing_selftest("running a tracer");
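/*
 * Illustrative sketch (hypothetical built-in tracer, not part of this
 * file): the minimum a plugin needs before handing itself to
 * register_tracer() from its own __init code.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

/* From an __init function: register_tracer(&example_tracer); */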
2331 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2333 struct trace_buffer *buffer = buf->buffer;
2338 ring_buffer_record_disable(buffer);
2340 /* Make sure all commits have finished */
2342 ring_buffer_reset_cpu(buffer, cpu);
2344 ring_buffer_record_enable(buffer);
2347 void tracing_reset_online_cpus(struct array_buffer *buf)
2349 struct trace_buffer *buffer = buf->buffer;
2354 ring_buffer_record_disable(buffer);
2356 /* Make sure all commits have finished */
2359 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2361 ring_buffer_reset_online_cpus(buffer);
2363 ring_buffer_record_enable(buffer);
2366 /* Must have trace_types_lock held */
2367 void tracing_reset_all_online_cpus_unlocked(void)
2369 struct trace_array *tr;
2371 lockdep_assert_held(&trace_types_lock);
2373 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2374 if (!tr->clear_trace)
2376 tr->clear_trace = false;
2377 tracing_reset_online_cpus(&tr->array_buffer);
2378 #ifdef CONFIG_TRACER_MAX_TRACE
2379 tracing_reset_online_cpus(&tr->max_buffer);
2384 void tracing_reset_all_online_cpus(void)
2386 mutex_lock(&trace_types_lock);
2387 tracing_reset_all_online_cpus_unlocked();
2388 mutex_unlock(&trace_types_lock);
2391 int is_tracing_stopped(void)
2393 return global_trace.stop_count;
2396 static void tracing_start_tr(struct trace_array *tr)
2398 struct trace_buffer *buffer;
2399 unsigned long flags;
2401 if (tracing_disabled)
2404 raw_spin_lock_irqsave(&tr->start_lock, flags);
2405 if (--tr->stop_count) {
2406 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2407 /* Someone screwed up their debugging */
2413 /* Prevent the buffers from switching */
2414 arch_spin_lock(&tr->max_lock);
2416 buffer = tr->array_buffer.buffer;
2418 ring_buffer_record_enable(buffer);
2420 #ifdef CONFIG_TRACER_MAX_TRACE
2421 buffer = tr->max_buffer.buffer;
2423 ring_buffer_record_enable(buffer);
2426 arch_spin_unlock(&tr->max_lock);
2429 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2433 * tracing_start - quick start of the tracer
2435 * If tracing is enabled but was stopped by tracing_stop,
2436 * this will start the tracer back up.
2438 void tracing_start(void)
2441 return tracing_start_tr(&global_trace);
2444 static void tracing_stop_tr(struct trace_array *tr)
2446 struct trace_buffer *buffer;
2447 unsigned long flags;
2449 raw_spin_lock_irqsave(&tr->start_lock, flags);
2450 if (tr->stop_count++)
2453 /* Prevent the buffers from switching */
2454 arch_spin_lock(&tr->max_lock);
2456 buffer = tr->array_buffer.buffer;
2458 ring_buffer_record_disable(buffer);
2460 #ifdef CONFIG_TRACER_MAX_TRACE
2461 buffer = tr->max_buffer.buffer;
2463 ring_buffer_record_disable(buffer);
2466 arch_spin_unlock(&tr->max_lock);
2469 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2473 * tracing_stop - quick stop of the tracer
2475 * Light weight way to stop tracing. Use in conjunction with
2478 void tracing_stop(void)
2480 return tracing_stop_tr(&global_trace);
2484 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2485 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2486 * simplifies those functions and keeps them in sync.
2488 enum print_line_t trace_handle_return(struct trace_seq *s)
2490 return trace_seq_has_overflowed(s) ?
2491 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2493 EXPORT_SYMBOL_GPL(trace_handle_return);
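/*
 * Illustrative sketch, not part of the original file: how a print handler
 * typically pairs trace_seq_printf() with trace_handle_return() so that an
 * overflowed trace_seq is reported as TRACE_TYPE_PARTIAL_LINE.
 */
#if 0	/* example only */
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "cpu=%d ts=%llu\n", iter->cpu, iter->ts);

	return trace_handle_return(s);
}
#endif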
2495 static unsigned short migration_disable_value(void)
2497 #if defined(CONFIG_SMP)
2498 return current->migration_disabled;
2504 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2506 unsigned int trace_flags = irqs_status;
2509 pc = preempt_count();
2512 trace_flags |= TRACE_FLAG_NMI;
2513 if (pc & HARDIRQ_MASK)
2514 trace_flags |= TRACE_FLAG_HARDIRQ;
2515 if (in_serving_softirq())
2516 trace_flags |= TRACE_FLAG_SOFTIRQ;
2517 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2518 trace_flags |= TRACE_FLAG_BH_OFF;
2520 if (tif_need_resched())
2521 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2522 if (test_preempt_need_resched())
2523 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2524 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2525 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
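/*
 * Layout of the context word built above, summarized for readability:
 *   bits  0- 3: preempt_count(), clamped to 0xf
 *   bits  4- 7: migrate-disable depth, clamped to 0xf
 *   bits 16+  : TRACE_FLAG_* bits (irq/NMI/softirq state, resched hints)
 */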
2528 struct ring_buffer_event *
2529 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2532 unsigned int trace_ctx)
2534 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2537 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2538 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2539 static int trace_buffered_event_ref;
2542 * trace_buffered_event_enable - enable buffering events
2544 * When events are being filtered, it is quicker to use a temporary
2545 * buffer to write the event data into if there's a likely chance
2546 * that it will not be committed. The discard of the ring buffer
2547 * is not as fast as committing, and is much slower than copying
2550 * When an event is to be filtered, allocate per cpu buffers to
2551 * write the event data into, and if the event is filtered and discarded
2552 * it is simply dropped, otherwise, the entire data is to be committed
2555 void trace_buffered_event_enable(void)
2557 struct ring_buffer_event *event;
2561 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2563 if (trace_buffered_event_ref++)
2566 for_each_tracing_cpu(cpu) {
2567 page = alloc_pages_node(cpu_to_node(cpu),
2568 GFP_KERNEL | __GFP_NORETRY, 0);
2569 /* This is just an optimization and can handle failures */
2571 pr_err("Failed to allocate event buffer\n");
2575 event = page_address(page);
2576 memset(event, 0, sizeof(*event));
2578 per_cpu(trace_buffered_event, cpu) = event;
2581 if (cpu == smp_processor_id() &&
2582 __this_cpu_read(trace_buffered_event) !=
2583 per_cpu(trace_buffered_event, cpu))
2589 static void enable_trace_buffered_event(void *data)
2591 /* Probably not needed, but do it anyway */
2593 this_cpu_dec(trace_buffered_event_cnt);
2596 static void disable_trace_buffered_event(void *data)
2598 this_cpu_inc(trace_buffered_event_cnt);
2602 * trace_buffered_event_disable - disable buffering events
2604 * When a filter is removed, it is faster to not use the buffered
2605 * events, and to commit directly into the ring buffer. Free up
2606 * the temp buffers when there are no more users. This requires
2607 * special synchronization with current events.
2609 void trace_buffered_event_disable(void)
2613 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2615 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2618 if (--trace_buffered_event_ref)
2621 /* For each CPU, set the buffer as used. */
2622 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2625 /* Wait for all current users to finish */
2628 for_each_tracing_cpu(cpu) {
2629 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2630 per_cpu(trace_buffered_event, cpu) = NULL;
2634 * Wait for all CPUs that potentially started checking if they can use
2635 * their event buffer only after the previous synchronize_rcu() call and
2636 * they still read a valid pointer from trace_buffered_event. It must be
2637 * ensured they don't see cleared trace_buffered_event_cnt else they
2638 * could wrongly decide to use the pointed-to buffer which is now freed.
2642 /* For each CPU, relinquish the buffer */
2643 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
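/*
 * Ordering summary, derived from the code above: mark every per-CPU buffer
 * busy so writers fall back to the ring buffer, wait an RCU grace period for
 * current users, free and clear the per-CPU pages, wait again for late
 * readers of the stale pointer, and only then drop the busy count on each
 * CPU.
 */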
2647 static struct trace_buffer *temp_buffer;
2649 struct ring_buffer_event *
2650 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2651 struct trace_event_file *trace_file,
2652 int type, unsigned long len,
2653 unsigned int trace_ctx)
2655 struct ring_buffer_event *entry;
2656 struct trace_array *tr = trace_file->tr;
2659 *current_rb = tr->array_buffer.buffer;
2661 if (!tr->no_filter_buffering_ref &&
2662 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2663 preempt_disable_notrace();
2665 * Filtering is on, so try to use the per cpu buffer first.
2666 * This buffer will simulate a ring_buffer_event,
2667 * where the type_len is zero and the array[0] will
2668 * hold the full length.
2669 * (see include/linux/ring_buffer.h for details on
2670 * how the ring_buffer_event is structured).
2672 * Using a temp buffer during filtering and copying it
2673 * on a matched filter is quicker than writing directly
2674 * into the ring buffer and then discarding it when
2675 * it doesn't match. That is because the discard
2676 * requires several atomic operations to get right.
2677 * Copying on match and doing nothing on a failed match
2678 * is still quicker than no copy on match, but having
2679 * to discard out of the ring buffer on a failed match.
2681 if ((entry = __this_cpu_read(trace_buffered_event))) {
2682 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2684 val = this_cpu_inc_return(trace_buffered_event_cnt);
2687 * Preemption is disabled, but interrupts and NMIs
2688 * can still come in now. If that happens after
2689 * the above increment, then it will have to go
2690 * back to the old method of allocating the event
2691 * on the ring buffer, and if the filter fails, it
2692 * will have to call ring_buffer_discard_commit()
2695 * Need to also check the unlikely case that the
2696 * length is bigger than the temp buffer size.
2697 * If that happens, then the reserve is pretty much
2698 * guaranteed to fail, as the ring buffer currently
2699 * only allows events less than a page. But that may
2700 * change in the future, so let the ring buffer reserve
2701 * handle the failure in that case.
2703 if (val == 1 && likely(len <= max_len)) {
2704 trace_event_setup(entry, type, trace_ctx);
2705 entry->array[0] = len;
2706 /* Return with preemption disabled */
2709 this_cpu_dec(trace_buffered_event_cnt);
2711 /* __trace_buffer_lock_reserve() disables preemption */
2712 preempt_enable_notrace();
2715 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2718 * If tracing is off, but we have triggers enabled,
2719 * we still need to look at the event data. Use the temp_buffer
2720 * to store the trace event for the trigger to use. It's recursion
2721 * safe and will not be recorded anywhere.
2723 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2724 *current_rb = temp_buffer;
2725 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2730 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2732 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2733 static DEFINE_MUTEX(tracepoint_printk_mutex);
2735 static void output_printk(struct trace_event_buffer *fbuffer)
2737 struct trace_event_call *event_call;
2738 struct trace_event_file *file;
2739 struct trace_event *event;
2740 unsigned long flags;
2741 struct trace_iterator *iter = tracepoint_print_iter;
2743 /* We should never get here if iter is NULL */
2744 if (WARN_ON_ONCE(!iter))
2747 event_call = fbuffer->trace_file->event_call;
2748 if (!event_call || !event_call->event.funcs ||
2749 !event_call->event.funcs->trace)
2752 file = fbuffer->trace_file;
2753 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2754 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2755 !filter_match_preds(file->filter, fbuffer->entry)))
2758 event = &fbuffer->trace_file->event_call->event;
2760 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2761 trace_seq_init(&iter->seq);
2762 iter->ent = fbuffer->entry;
2763 event_call->event.funcs->trace(iter, 0, event);
2764 trace_seq_putc(&iter->seq, 0);
2765 printk("%s", iter->seq.buffer);
2767 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2770 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2771 void *buffer, size_t *lenp,
2774 int save_tracepoint_printk;
2777 mutex_lock(&tracepoint_printk_mutex);
2778 save_tracepoint_printk = tracepoint_printk;
2780 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2783 * This will force exiting early, as tracepoint_printk
2784 * is always zero when tracepoint_print_iter is not allocated
2786 if (!tracepoint_print_iter)
2787 tracepoint_printk = 0;
2789 if (save_tracepoint_printk == tracepoint_printk)
2792 if (tracepoint_printk)
2793 static_key_enable(&tracepoint_printk_key.key);
2795 static_key_disable(&tracepoint_printk_key.key);
2798 mutex_unlock(&tracepoint_printk_mutex);
2803 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2805 enum event_trigger_type tt = ETT_NONE;
2806 struct trace_event_file *file = fbuffer->trace_file;
2808 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2809 fbuffer->entry, &tt))
2812 if (static_key_false(&tracepoint_printk_key.key))
2813 output_printk(fbuffer);
2815 if (static_branch_unlikely(&trace_event_exports_enabled))
2816 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2818 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2819 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2823 event_triggers_post_call(file, tt);
2826 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2831 * trace_buffer_unlock_commit_regs()
2832 * trace_event_buffer_commit()
2833 * trace_event_raw_event_xxx()
2835 # define STACK_SKIP 3
2837 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2838 struct trace_buffer *buffer,
2839 struct ring_buffer_event *event,
2840 unsigned int trace_ctx,
2841 struct pt_regs *regs)
2843 __buffer_unlock_commit(buffer, event);
2846 * If regs is not set, then skip the necessary functions.
2847 * Note, we can still get here via blktrace, wakeup tracer
2848 * and mmiotrace, but that's ok if they lose a function or
2849 * two. They are not that meaningful.
2851 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2852 ftrace_trace_userstack(tr, buffer, trace_ctx);
2856 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2859 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2860 struct ring_buffer_event *event)
2862 __buffer_unlock_commit(buffer, event);
2866 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2867 parent_ip, unsigned int trace_ctx)
2869 struct trace_event_call *call = &event_function;
2870 struct trace_buffer *buffer = tr->array_buffer.buffer;
2871 struct ring_buffer_event *event;
2872 struct ftrace_entry *entry;
2874 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2878 entry = ring_buffer_event_data(event);
2880 entry->parent_ip = parent_ip;
2882 if (!call_filter_check_discard(call, entry, buffer, event)) {
2883 if (static_branch_unlikely(&trace_function_exports_enabled))
2884 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2885 __buffer_unlock_commit(buffer, event);
2889 #ifdef CONFIG_STACKTRACE
2891 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2892 #define FTRACE_KSTACK_NESTING 4
2894 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2896 struct ftrace_stack {
2897 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2901 struct ftrace_stacks {
2902 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2905 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2906 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2908 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2909 unsigned int trace_ctx,
2910 int skip, struct pt_regs *regs)
2912 struct trace_event_call *call = &event_kernel_stack;
2913 struct ring_buffer_event *event;
2914 unsigned int size, nr_entries;
2915 struct ftrace_stack *fstack;
2916 struct stack_entry *entry;
2920 * Add one, for this function and the call to stack_trace_save().
2921 * If regs is set, then these functions will not be in the way.
2923 #ifndef CONFIG_UNWINDER_ORC
2928 preempt_disable_notrace();
2930 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2932 /* This should never happen. If it does, yell once and skip */
2933 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2937 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2938 * interrupt will either see the value pre increment or post
2939 * increment. If the interrupt happens pre increment it will have
2940 * restored the counter when it returns. We just need a barrier to
2941 * keep gcc from moving things around.
2945 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2946 size = ARRAY_SIZE(fstack->calls);
2949 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2952 nr_entries = stack_trace_save(fstack->calls, size, skip);
2955 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2956 struct_size(entry, caller, nr_entries),
2960 entry = ring_buffer_event_data(event);
2962 entry->size = nr_entries;
2963 memcpy(&entry->caller, fstack->calls,
2964 flex_array_size(entry, caller, nr_entries));
2966 if (!call_filter_check_discard(call, entry, buffer, event))
2967 __buffer_unlock_commit(buffer, event);
2970 /* Again, don't let gcc optimize things here */
2972 __this_cpu_dec(ftrace_stack_reserve);
2973 preempt_enable_notrace();
2977 static inline void ftrace_trace_stack(struct trace_array *tr,
2978 struct trace_buffer *buffer,
2979 unsigned int trace_ctx,
2980 int skip, struct pt_regs *regs)
2982 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2985 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
2988 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
2991 struct trace_buffer *buffer = tr->array_buffer.buffer;
2993 if (rcu_is_watching()) {
2994 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
2998 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3002 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3003 * but if the above rcu_is_watching() failed, then the NMI
3004 * triggered someplace critical, and ct_irq_enter() should
3005 * not be called from NMI.
3007 if (unlikely(in_nmi()))
3010 ct_irq_enter_irqson();
3011 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3012 ct_irq_exit_irqson();
3016 * trace_dump_stack - record a stack back trace in the trace buffer
3017 * @skip: Number of functions to skip (helper handlers)
3019 void trace_dump_stack(int skip)
3021 if (tracing_disabled || tracing_selftest_running)
3024 #ifndef CONFIG_UNWINDER_ORC
3025 /* Skip 1 to skip this function. */
3028 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3029 tracing_gen_ctx(), skip, NULL);
3031 EXPORT_SYMBOL_GPL(trace_dump_stack);
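/*
 * Illustrative sketch, not part of the original file: recording a backtrace
 * from code under investigation.  A skip of 0 includes the caller itself;
 * larger values drop that many helper frames.
 */
#if 0	/* example only */
static void example_report_bad_state(void)
{
	trace_printk("unexpected state, recording backtrace\n");
	trace_dump_stack(0);
}
#endif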
3033 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3034 static DEFINE_PER_CPU(int, user_stack_count);
3037 ftrace_trace_userstack(struct trace_array *tr,
3038 struct trace_buffer *buffer, unsigned int trace_ctx)
3040 struct trace_event_call *call = &event_user_stack;
3041 struct ring_buffer_event *event;
3042 struct userstack_entry *entry;
3044 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3048 * NMIs cannot handle page faults, even with fixups.
3049 * Saving the user stack can (and often does) fault.
3051 if (unlikely(in_nmi()))
3055 * prevent recursion, since the user stack tracing may
3056 * trigger other kernel events.
3059 if (__this_cpu_read(user_stack_count))
3062 __this_cpu_inc(user_stack_count);
3064 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3065 sizeof(*entry), trace_ctx);
3067 goto out_drop_count;
3068 entry = ring_buffer_event_data(event);
3070 entry->tgid = current->tgid;
3071 memset(&entry->caller, 0, sizeof(entry->caller));
3073 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3074 if (!call_filter_check_discard(call, entry, buffer, event))
3075 __buffer_unlock_commit(buffer, event);
3078 __this_cpu_dec(user_stack_count);
3082 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3083 static void ftrace_trace_userstack(struct trace_array *tr,
3084 struct trace_buffer *buffer,
3085 unsigned int trace_ctx)
3088 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3090 #endif /* CONFIG_STACKTRACE */
3093 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3094 unsigned long long delta)
3096 entry->bottom_delta_ts = delta & U32_MAX;
3097 entry->top_delta_ts = (delta >> 32);
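/*
 * The 64-bit timestamp delta is split above into two 32-bit halves to keep
 * the entry compact; readers recombine it as
 * ((u64)top_delta_ts << 32) | bottom_delta_ts.
 */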
3100 void trace_last_func_repeats(struct trace_array *tr,
3101 struct trace_func_repeats *last_info,
3102 unsigned int trace_ctx)
3104 struct trace_buffer *buffer = tr->array_buffer.buffer;
3105 struct func_repeats_entry *entry;
3106 struct ring_buffer_event *event;
3109 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3110 sizeof(*entry), trace_ctx);
3114 delta = ring_buffer_event_time_stamp(buffer, event) -
3115 last_info->ts_last_call;
3117 entry = ring_buffer_event_data(event);
3118 entry->ip = last_info->ip;
3119 entry->parent_ip = last_info->parent_ip;
3120 entry->count = last_info->count;
3121 func_repeats_set_delta_ts(entry, delta);
3123 __buffer_unlock_commit(buffer, event);
3126 /* created for use with alloc_percpu */
3127 struct trace_buffer_struct {
3129 char buffer[4][TRACE_BUF_SIZE];
3132 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3135 * This allows for lockless recording. If we're nested too deeply, then
3136 * this returns NULL.
3138 static char *get_trace_buf(void)
3140 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3142 if (!trace_percpu_buffer || buffer->nesting >= 4)
3147 /* Interrupts must see nesting incremented before we use the buffer */
3149 return &buffer->buffer[buffer->nesting - 1][0];
3152 static void put_trace_buf(void)
3154 /* Don't let the decrement of nesting leak before this */
3156 this_cpu_dec(trace_percpu_buffer->nesting);
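/*
 * Usage note, derived from the code above: get_trace_buf() and
 * put_trace_buf() must be strictly paired, with preemption disabled by the
 * caller across the whole span, and at most four contexts may nest on one
 * CPU before get_trace_buf() starts returning NULL.
 */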
3159 static int alloc_percpu_trace_buffer(void)
3161 struct trace_buffer_struct __percpu *buffers;
3163 if (trace_percpu_buffer)
3166 buffers = alloc_percpu(struct trace_buffer_struct);
3167 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3170 trace_percpu_buffer = buffers;
3174 static int buffers_allocated;
3176 void trace_printk_init_buffers(void)
3178 if (buffers_allocated)
3181 if (alloc_percpu_trace_buffer())
3184 /* trace_printk() is for debug use only. Don't use it in production. */
3187 pr_warn("**********************************************************\n");
3188 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3190 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3192 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3193 pr_warn("** unsafe for production use. **\n");
3195 pr_warn("** If you see this message and you are not debugging **\n");
3196 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3198 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3199 pr_warn("**********************************************************\n");
3201 /* Expand the buffers to set size */
3202 tracing_update_buffers(&global_trace);
3204 buffers_allocated = 1;
3207 * trace_printk_init_buffers() can be called by modules.
3208 * If that happens, then we need to start cmdline recording
3209 * directly here. If the global_trace.buffer is already
3210 * allocated here, then this was called by module code.
3212 if (global_trace.array_buffer.buffer)
3213 tracing_start_cmdline_record();
3215 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3217 void trace_printk_start_comm(void)
3219 /* Start tracing comms if trace printk is set */
3220 if (!buffers_allocated)
3222 tracing_start_cmdline_record();
3225 static void trace_printk_start_stop_comm(int enabled)
3227 if (!buffers_allocated)
3231 tracing_start_cmdline_record();
3233 tracing_stop_cmdline_record();
3237 * trace_vbprintk - write binary msg to tracing buffer
3238 * @ip: The address of the caller
3239 * @fmt: The string format to write to the buffer
3240 * @args: Arguments for @fmt
3242 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3244 struct trace_event_call *call = &event_bprint;
3245 struct ring_buffer_event *event;
3246 struct trace_buffer *buffer;
3247 struct trace_array *tr = &global_trace;
3248 struct bprint_entry *entry;
3249 unsigned int trace_ctx;
3253 if (unlikely(tracing_selftest_running || tracing_disabled))
3256 /* Don't pollute graph traces with trace_vprintk internals */
3257 pause_graph_tracing();
3259 trace_ctx = tracing_gen_ctx();
3260 preempt_disable_notrace();
3262 tbuffer = get_trace_buf();
3268 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3270 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3273 size = sizeof(*entry) + sizeof(u32) * len;
3274 buffer = tr->array_buffer.buffer;
3275 ring_buffer_nest_start(buffer);
3276 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3280 entry = ring_buffer_event_data(event);
3284 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3285 if (!call_filter_check_discard(call, entry, buffer, event)) {
3286 __buffer_unlock_commit(buffer, event);
3287 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3291 ring_buffer_nest_end(buffer);
3296 preempt_enable_notrace();
3297 unpause_graph_tracing();
3301 EXPORT_SYMBOL_GPL(trace_vbprintk);
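/*
 * Note on the two printk-style formats, derived from the code: the bprint
 * path above stores the format pointer plus the arguments binary-encoded by
 * vbin_printf() (sizeof(*entry) + len * sizeof(u32)) and renders the string
 * only at read time, while __trace_array_vprintk() below formats with
 * vscnprintf() up front and stores the finished string
 * (sizeof(*entry) + len + 1).
 */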
3305 __trace_array_vprintk(struct trace_buffer *buffer,
3306 unsigned long ip, const char *fmt, va_list args)
3308 struct trace_event_call *call = &event_print;
3309 struct ring_buffer_event *event;
3311 struct print_entry *entry;
3312 unsigned int trace_ctx;
3315 if (tracing_disabled)
3318 /* Don't pollute graph traces with trace_vprintk internals */
3319 pause_graph_tracing();
3321 trace_ctx = tracing_gen_ctx();
3322 preempt_disable_notrace();
3325 tbuffer = get_trace_buf();
3331 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3333 size = sizeof(*entry) + len + 1;
3334 ring_buffer_nest_start(buffer);
3335 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3339 entry = ring_buffer_event_data(event);
3342 memcpy(&entry->buf, tbuffer, len + 1);
3343 if (!call_filter_check_discard(call, entry, buffer, event)) {
3344 __buffer_unlock_commit(buffer, event);
3345 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3349 ring_buffer_nest_end(buffer);
3353 preempt_enable_notrace();
3354 unpause_graph_tracing();
3360 int trace_array_vprintk(struct trace_array *tr,
3361 unsigned long ip, const char *fmt, va_list args)
3363 if (tracing_selftest_running && tr == &global_trace)
3366 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3370 * trace_array_printk - Print a message to a specific instance
3371 * @tr: The instance trace_array descriptor
3372 * @ip: The instruction pointer that this is called from.
3373 * @fmt: The format to print (printf format)
3375 * If a subsystem sets up its own instance, they have the right to
3376 * printk strings into their tracing instance buffer using this
3377 * function. Note, this function will not write into the top level
3378 * buffer (use trace_printk() for that), as writing into the top level
3379 * buffer should only have events that can be individually disabled.
3380 * trace_printk() is only used for debugging a kernel, and should not
3381 * be ever incorporated in normal use.
3383 * trace_array_printk() can be used, as it will not add noise to the
3384 * top level tracing buffer.
3386 * Note, trace_array_init_printk() must be called on @tr before this
3390 int trace_array_printk(struct trace_array *tr,
3391 unsigned long ip, const char *fmt, ...)
3399 /* This is only allowed for created instances */
3400 if (tr == &global_trace)
3403 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3407 ret = trace_array_vprintk(tr, ip, fmt, ap);
3411 EXPORT_SYMBOL_GPL(trace_array_printk);
3414 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3415 * @tr: The trace array to initialize the buffers for
3417 * As trace_array_printk() only writes into instances, they are OK to
3418 * have in the kernel (unlike trace_printk()). This needs to be called
3419 * before trace_array_printk() can be used on a trace_array.
3421 int trace_array_init_printk(struct trace_array *tr)
3426 /* This is only allowed for created instances */
3427 if (tr == &global_trace)
3430 return alloc_percpu_trace_buffer();
3432 EXPORT_SYMBOL_GPL(trace_array_init_printk);
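/*
 * Illustrative sketch, not part of the original file: printing into an
 * instance buffer that the caller already owns.  How the trace_array pointer
 * is obtained (e.g. trace_array_get_by_name()) is left out, as that API has
 * changed across kernel versions; the function below is hypothetical.
 */
#if 0	/* example only */
static void example_instance_printk(struct trace_array *tr, int value)
{
	/* Safe to call more than once; it only allocates on first use. */
	if (trace_array_init_printk(tr))
		return;

	trace_array_printk(tr, _THIS_IP_, "value is now %d\n", value);
}
#endif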
3435 int trace_array_printk_buf(struct trace_buffer *buffer,
3436 unsigned long ip, const char *fmt, ...)
3441 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3445 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3451 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3453 return trace_array_vprintk(&global_trace, ip, fmt, args);
3455 EXPORT_SYMBOL_GPL(trace_vprintk);
3457 static void trace_iterator_increment(struct trace_iterator *iter)
3459 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3463 ring_buffer_iter_advance(buf_iter);
3466 static struct trace_entry *
3467 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3468 unsigned long *lost_events)
3470 struct ring_buffer_event *event;
3471 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3474 event = ring_buffer_iter_peek(buf_iter, ts);
3476 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3477 (unsigned long)-1 : 0;
3479 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3484 iter->ent_size = ring_buffer_event_length(event);
3485 return ring_buffer_event_data(event);
3491 static struct trace_entry *
3492 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3493 unsigned long *missing_events, u64 *ent_ts)
3495 struct trace_buffer *buffer = iter->array_buffer->buffer;
3496 struct trace_entry *ent, *next = NULL;
3497 unsigned long lost_events = 0, next_lost = 0;
3498 int cpu_file = iter->cpu_file;
3499 u64 next_ts = 0, ts;
3505 * If we are in a per_cpu trace file, don't bother iterating over
3506 * all CPUs; just peek at that one directly.
3508 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3509 if (ring_buffer_empty_cpu(buffer, cpu_file))
3511 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3513 *ent_cpu = cpu_file;
3518 for_each_tracing_cpu(cpu) {
3520 if (ring_buffer_empty_cpu(buffer, cpu))
3523 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3526 * Pick the entry with the smallest timestamp:
3528 if (ent && (!next || ts < next_ts)) {
3532 next_lost = lost_events;
3533 next_size = iter->ent_size;
3537 iter->ent_size = next_size;
3540 *ent_cpu = next_cpu;
3546 *missing_events = next_lost;
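/*
 * The loop above is effectively a k-way merge: every tracing CPU buffer is
 * peeked without consuming, and the entry with the smallest timestamp wins,
 * which keeps the combined output in time order.
 */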
3551 #define STATIC_FMT_BUF_SIZE 128
3552 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3554 char *trace_iter_expand_format(struct trace_iterator *iter)
3559 * iter->tr is NULL when used with tp_printk, which makes
3560 * this get called where it is not safe to call krealloc().
3562 if (!iter->tr || iter->fmt == static_fmt_buf)
3565 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3568 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3575 /* Returns true if the string is safe to dereference from an event */
3576 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3579 unsigned long addr = (unsigned long)str;
3580 struct trace_event *trace_event;
3581 struct trace_event_call *event;
3583 /* Ignore strings with no length */
3587 /* OK if part of the event data */
3588 if ((addr >= (unsigned long)iter->ent) &&
3589 (addr < (unsigned long)iter->ent + iter->ent_size))
3592 /* OK if part of the temp seq buffer */
3593 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3594 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3597 /* Core rodata can not be freed */
3598 if (is_kernel_rodata(addr))
3601 if (trace_is_tracepoint_string(str))
3605 * Now this could be a module event, referencing core module
3606 * data, which is OK.
3611 trace_event = ftrace_find_event(iter->ent->type);
3615 event = container_of(trace_event, struct trace_event_call, event);
3616 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3619 /* Would rather have rodata, but this will suffice */
3620 if (within_module_core(addr, event->module))
3626 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3628 static int test_can_verify_check(const char *fmt, ...)
3635 * The verifier depends on vsnprintf() modifying the va_list
3636 * passed to it, where it is sent as a reference. Some architectures
3637 * (like x86_32) pass it by value, which means that vsnprintf()
3638 * does not modify the va_list passed to it, and the verifier
3639 * would then need to be able to understand all the values that
3640 * vsnprintf can use, so if it is passed by value, the verifier is disabled.
3644 vsnprintf(buf, 16, "%d", ap);
3645 ret = va_arg(ap, int);
3651 static void test_can_verify(void)
3653 if (!test_can_verify_check("%d %d", 0, 1)) {
3654 pr_info("trace event string verifier disabled\n");
3655 static_branch_inc(&trace_no_verify);
3660 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3661 * @iter: The iterator that holds the seq buffer and the event being printed
3662 * @fmt: The format used to print the event
3663 * @ap: The va_list holding the data to print from @fmt.
3665 * This writes the data into the @iter->seq buffer using the data from
3666 * @fmt and @ap. If the format has a %s, then the source of the string
3667 * is examined to make sure it is safe to print, otherwise it will
3668 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3671 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3674 const char *p = fmt;
3678 if (WARN_ON_ONCE(!fmt))
3681 if (static_branch_unlikely(&trace_no_verify))
3684 /* Don't bother checking when doing a ftrace_dump() */
3685 if (iter->fmt == static_fmt_buf)
3694 /* We only care about %s and variants */
3695 for (i = 0; p[i]; i++) {
3696 if (i + 1 >= iter->fmt_size) {
3698 * If we can't expand the copy buffer,
3701 if (!trace_iter_expand_format(iter))
3705 if (p[i] == '\\' && p[i+1]) {
3710 /* Need to test cases like %08.*s */
3711 for (j = 1; p[i+j]; j++) {
3712 if (isdigit(p[i+j]) ||
3715 if (p[i+j] == '*') {
3727 /* If no %s found then just print normally */
3731 /* Copy up to the %s, and print that */
3732 strncpy(iter->fmt, p, i);
3733 iter->fmt[i] = '\0';
3734 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3737 * If iter->seq is full, the above call no longer guarantees
3738 * that ap is in sync with fmt processing, and further calls
3739 * to va_arg() can return wrong positional arguments.
3741 * Ensure that ap is no longer used in this case.
3743 if (iter->seq.full) {
3749 len = va_arg(ap, int);
3751 /* The ap now points to the string data of the %s */
3752 str = va_arg(ap, const char *);
3755 * If you hit this warning, it is likely that the
3756 * trace event in question used %s on a string that
3757 * was saved at the time of the event, but may not be
3758 * around when the trace is read. Use __string(),
3759 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3760 * instead. See samples/trace_events/trace-events-sample.h
3763 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3764 "fmt: '%s' current_buffer: '%s'",
3765 fmt, seq_buf_str(&iter->seq.seq))) {
3768 /* Try to safely read the string */
3770 if (len + 1 > iter->fmt_size)
3771 len = iter->fmt_size - 1;
3774 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3778 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3782 trace_seq_printf(&iter->seq, "(0x%px)", str);
3784 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3786 str = "[UNSAFE-MEMORY]";
3787 strcpy(iter->fmt, "%s");
3789 strncpy(iter->fmt, p + i, j + 1);
3790 iter->fmt[j+1] = '\0';
3793 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3795 trace_seq_printf(&iter->seq, iter->fmt, str);
3801 trace_seq_vprintf(&iter->seq, p, ap);
3804 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3806 const char *p, *new_fmt;
3809 if (WARN_ON_ONCE(!fmt))
3812 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3816 new_fmt = q = iter->fmt;
3818 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3819 if (!trace_iter_expand_format(iter))
3822 q += iter->fmt - new_fmt;
3823 new_fmt = iter->fmt;
3828 /* Replace %p with %px */
3832 } else if (p[0] == 'p' && !isalnum(p[1])) {
3843 #define STATIC_TEMP_BUF_SIZE 128
3844 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3846 /* Find the next real entry, without updating the iterator itself */
3847 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3848 int *ent_cpu, u64 *ent_ts)
3850 /* __find_next_entry will reset ent_size */
3851 int ent_size = iter->ent_size;
3852 struct trace_entry *entry;
3855 * If called from ftrace_dump(), then the iter->temp buffer
3856 * will be the static_temp_buf and not created from kmalloc.
3857 * If the entry size is greater than the buffer, we can
3858 * not save it. Just return NULL in that case. This is only
3859 * used to add markers when two consecutive events' time
3860 * stamps have a large delta. See trace_print_lat_context()
3862 if (iter->temp == static_temp_buf &&
3863 STATIC_TEMP_BUF_SIZE < ent_size)
3867 * The __find_next_entry() may call peek_next_entry(), which may
3868 * call ring_buffer_peek() that may make the contents of iter->ent
3869 * undefined. Need to copy iter->ent now.
3871 if (iter->ent && iter->ent != iter->temp) {
3872 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3873 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3875 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3880 iter->temp_size = iter->ent_size;
3882 memcpy(iter->temp, iter->ent, iter->ent_size);
3883 iter->ent = iter->temp;
3885 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3886 /* Put back the original ent_size */
3887 iter->ent_size = ent_size;
3892 /* Find the next real entry, and increment the iterator to the next entry */
3893 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3895 iter->ent = __find_next_entry(iter, &iter->cpu,
3896 &iter->lost_events, &iter->ts);
3899 trace_iterator_increment(iter);
3901 return iter->ent ? iter : NULL;
3904 static void trace_consume(struct trace_iterator *iter)
3906 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3907 &iter->lost_events);
3910 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3912 struct trace_iterator *iter = m->private;
3916 WARN_ON_ONCE(iter->leftover);
3920 /* can't go backwards */
3925 ent = trace_find_next_entry_inc(iter);
3929 while (ent && iter->idx < i)
3930 ent = trace_find_next_entry_inc(iter);
3937 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3939 struct ring_buffer_iter *buf_iter;
3940 unsigned long entries = 0;
3943 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3945 buf_iter = trace_buffer_iter(iter, cpu);
3949 ring_buffer_iter_reset(buf_iter);
3952 * With the max latency tracers, it can happen that a reset
3953 * never took place on a CPU. This is evident from the
3954 * timestamp being before the start of the buffer.
3956 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3957 if (ts >= iter->array_buffer->time_start)
3960 ring_buffer_iter_advance(buf_iter);
3963 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3967 * The current tracer is copied to avoid a global locking
3970 static void *s_start(struct seq_file *m, loff_t *pos)
3972 struct trace_iterator *iter = m->private;
3973 struct trace_array *tr = iter->tr;
3974 int cpu_file = iter->cpu_file;
3979 mutex_lock(&trace_types_lock);
3980 if (unlikely(tr->current_trace != iter->trace)) {
3981 /* Close iter->trace before switching to the new current tracer */
3982 if (iter->trace->close)
3983 iter->trace->close(iter);
3984 iter->trace = tr->current_trace;
3985 /* Reopen the new current tracer */
3986 if (iter->trace->open)
3987 iter->trace->open(iter);
3989 mutex_unlock(&trace_types_lock);
3991 #ifdef CONFIG_TRACER_MAX_TRACE
3992 if (iter->snapshot && iter->trace->use_max_tr)
3993 return ERR_PTR(-EBUSY);
3996 if (*pos != iter->pos) {
4001 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4002 for_each_tracing_cpu(cpu)
4003 tracing_iter_reset(iter, cpu);
4005 tracing_iter_reset(iter, cpu_file);
4008 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4013 * If we overflowed the seq_file before, then we want
4014 * to just reuse the trace_seq buffer again.
4020 p = s_next(m, p, &l);
4024 trace_event_read_lock();
4025 trace_access_lock(cpu_file);
4029 static void s_stop(struct seq_file *m, void *p)
4031 struct trace_iterator *iter = m->private;
4033 #ifdef CONFIG_TRACER_MAX_TRACE
4034 if (iter->snapshot && iter->trace->use_max_tr)
4038 trace_access_unlock(iter->cpu_file);
4039 trace_event_read_unlock();
4043 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4044 unsigned long *entries, int cpu)
4046 unsigned long count;
4048 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4050 * If this buffer has skipped entries, then we hold all
4051 * entries for the trace and we need to ignore the
4052 * ones before the time stamp.
4054 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4055 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4056 /* total is the same as the entries */
4060 ring_buffer_overrun_cpu(buf->buffer, cpu);
4065 get_total_entries(struct array_buffer *buf,
4066 unsigned long *total, unsigned long *entries)
4074 for_each_tracing_cpu(cpu) {
4075 get_total_entries_cpu(buf, &t, &e, cpu);
4081 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4083 unsigned long total, entries;
4088 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4093 unsigned long trace_total_entries(struct trace_array *tr)
4095 unsigned long total, entries;
4100 get_total_entries(&tr->array_buffer, &total, &entries);
4105 static void print_lat_help_header(struct seq_file *m)
4107 seq_puts(m, "# _------=> CPU# \n"
4108 "# / _-----=> irqs-off/BH-disabled\n"
4109 "# | / _----=> need-resched \n"
4110 "# || / _---=> hardirq/softirq \n"
4111 "# ||| / _--=> preempt-depth \n"
4112 "# |||| / _-=> migrate-disable \n"
4113 "# ||||| / delay \n"
4114 "# cmd pid |||||| time | caller \n"
4115 "# \\ / |||||| \\ | / \n");
4118 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4120 unsigned long total;
4121 unsigned long entries;
4123 get_total_entries(buf, &total, &entries);
4124 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4125 entries, total, num_online_cpus());
4129 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4132 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4134 print_event_info(buf, m);
4136 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4137 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4140 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4143 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4144 static const char space[] = " ";
4145 int prec = tgid ? 12 : 2;
4147 print_event_info(buf, m);
4149 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4150 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4151 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4152 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4153 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4154 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4155 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4156 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4160 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4162 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4163 struct array_buffer *buf = iter->array_buffer;
4164 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4165 struct tracer *type = iter->trace;
4166 unsigned long entries;
4167 unsigned long total;
4168 const char *name = type->name;
4170 get_total_entries(buf, &total, &entries);
4172 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4173 name, init_utsname()->release);
4174 seq_puts(m, "# -----------------------------------"
4175 "---------------------------------\n");
4176 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4177 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4178 nsecs_to_usecs(data->saved_latency),
4182 preempt_model_none() ? "server" :
4183 preempt_model_voluntary() ? "desktop" :
4184 preempt_model_full() ? "preempt" :
4185 preempt_model_rt() ? "preempt_rt" :
4187 /* These are reserved for later use */
4190 seq_printf(m, " #P:%d)\n", num_online_cpus());
4194 seq_puts(m, "# -----------------\n");
4195 seq_printf(m, "# | task: %.16s-%d "
4196 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4197 data->comm, data->pid,
4198 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4199 data->policy, data->rt_priority);
4200 seq_puts(m, "# -----------------\n");
4202 if (data->critical_start) {
4203 seq_puts(m, "# => started at: ");
4204 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4205 trace_print_seq(m, &iter->seq);
4206 seq_puts(m, "\n# => ended at: ");
4207 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4208 trace_print_seq(m, &iter->seq);
4209 seq_puts(m, "\n#\n");
4215 static void test_cpu_buff_start(struct trace_iterator *iter)
4217 struct trace_seq *s = &iter->seq;
4218 struct trace_array *tr = iter->tr;
4220 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4223 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4226 if (cpumask_available(iter->started) &&
4227 cpumask_test_cpu(iter->cpu, iter->started))
4230 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4233 if (cpumask_available(iter->started))
4234 cpumask_set_cpu(iter->cpu, iter->started);
4236 /* Don't print started cpu buffer for the first entry of the trace */
4238 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4242 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4244 struct trace_array *tr = iter->tr;
4245 struct trace_seq *s = &iter->seq;
4246 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4247 struct trace_entry *entry;
4248 struct trace_event *event;
4252 test_cpu_buff_start(iter);
4254 event = ftrace_find_event(entry->type);
4256 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4257 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4258 trace_print_lat_context(iter);
4260 trace_print_context(iter);
4263 if (trace_seq_has_overflowed(s))
4264 return TRACE_TYPE_PARTIAL_LINE;
4267 if (tr->trace_flags & TRACE_ITER_FIELDS)
4268 return print_event_fields(iter, event);
4269 return event->funcs->trace(iter, sym_flags, event);
4272 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4274 return trace_handle_return(s);
4277 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4279 struct trace_array *tr = iter->tr;
4280 struct trace_seq *s = &iter->seq;
4281 struct trace_entry *entry;
4282 struct trace_event *event;
4286 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4287 trace_seq_printf(s, "%d %d %llu ",
4288 entry->pid, iter->cpu, iter->ts);
4290 if (trace_seq_has_overflowed(s))
4291 return TRACE_TYPE_PARTIAL_LINE;
4293 event = ftrace_find_event(entry->type);
4295 return event->funcs->raw(iter, 0, event);
4297 trace_seq_printf(s, "%d ?\n", entry->type);
4299 return trace_handle_return(s);
4302 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4304 struct trace_array *tr = iter->tr;
4305 struct trace_seq *s = &iter->seq;
4306 unsigned char newline = '\n';
4307 struct trace_entry *entry;
4308 struct trace_event *event;
4312 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4313 SEQ_PUT_HEX_FIELD(s, entry->pid);
4314 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4315 SEQ_PUT_HEX_FIELD(s, iter->ts);
4316 if (trace_seq_has_overflowed(s))
4317 return TRACE_TYPE_PARTIAL_LINE;
4320 event = ftrace_find_event(entry->type);
4322 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4323 if (ret != TRACE_TYPE_HANDLED)
4327 SEQ_PUT_FIELD(s, newline);
4329 return trace_handle_return(s);
4332 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4334 struct trace_array *tr = iter->tr;
4335 struct trace_seq *s = &iter->seq;
4336 struct trace_entry *entry;
4337 struct trace_event *event;
4341 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4342 SEQ_PUT_FIELD(s, entry->pid);
4343 SEQ_PUT_FIELD(s, iter->cpu);
4344 SEQ_PUT_FIELD(s, iter->ts);
4345 if (trace_seq_has_overflowed(s))
4346 return TRACE_TYPE_PARTIAL_LINE;
4349 event = ftrace_find_event(entry->type);
4350 return event ? event->funcs->binary(iter, 0, event) :
4354 int trace_empty(struct trace_iterator *iter)
4356 struct ring_buffer_iter *buf_iter;
4359 /* If we are looking at one CPU buffer, only check that one */
4360 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4361 cpu = iter->cpu_file;
4362 buf_iter = trace_buffer_iter(iter, cpu);
4364 if (!ring_buffer_iter_empty(buf_iter))
4367 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4373 for_each_tracing_cpu(cpu) {
4374 buf_iter = trace_buffer_iter(iter, cpu);
4376 if (!ring_buffer_iter_empty(buf_iter))
4379 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4387 /* Called with trace_event_read_lock() held. */
4388 enum print_line_t print_trace_line(struct trace_iterator *iter)
4390 struct trace_array *tr = iter->tr;
4391 unsigned long trace_flags = tr->trace_flags;
4392 enum print_line_t ret;
4394 if (iter->lost_events) {
4395 if (iter->lost_events == (unsigned long)-1)
4396 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4399 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4400 iter->cpu, iter->lost_events);
4401 if (trace_seq_has_overflowed(&iter->seq))
4402 return TRACE_TYPE_PARTIAL_LINE;
4405 if (iter->trace && iter->trace->print_line) {
4406 ret = iter->trace->print_line(iter);
4407 if (ret != TRACE_TYPE_UNHANDLED)
4411 if (iter->ent->type == TRACE_BPUTS &&
4412 trace_flags & TRACE_ITER_PRINTK &&
4413 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4414 return trace_print_bputs_msg_only(iter);
4416 if (iter->ent->type == TRACE_BPRINT &&
4417 trace_flags & TRACE_ITER_PRINTK &&
4418 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4419 return trace_print_bprintk_msg_only(iter);
4421 if (iter->ent->type == TRACE_PRINT &&
4422 trace_flags & TRACE_ITER_PRINTK &&
4423 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4424 return trace_print_printk_msg_only(iter);
4426 if (trace_flags & TRACE_ITER_BIN)
4427 return print_bin_fmt(iter);
4429 if (trace_flags & TRACE_ITER_HEX)
4430 return print_hex_fmt(iter);
4432 if (trace_flags & TRACE_ITER_RAW)
4433 return print_raw_fmt(iter);
4435 return print_trace_fmt(iter);
4438 void trace_latency_header(struct seq_file *m)
4440 struct trace_iterator *iter = m->private;
4441 struct trace_array *tr = iter->tr;
4443 /* print nothing if the buffers are empty */
4444 if (trace_empty(iter))
4447 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4448 print_trace_header(m, iter);
4450 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4451 print_lat_help_header(m);
4454 void trace_default_header(struct seq_file *m)
4456 struct trace_iterator *iter = m->private;
4457 struct trace_array *tr = iter->tr;
4458 unsigned long trace_flags = tr->trace_flags;
4460 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4463 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4464 /* print nothing if the buffers are empty */
4465 if (trace_empty(iter))
4467 print_trace_header(m, iter);
4468 if (!(trace_flags & TRACE_ITER_VERBOSE))
4469 print_lat_help_header(m);
4471 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4472 if (trace_flags & TRACE_ITER_IRQ_INFO)
4473 print_func_help_header_irq(iter->array_buffer,
4476 print_func_help_header(iter->array_buffer, m,
4482 static void test_ftrace_alive(struct seq_file *m)
4484 if (!ftrace_is_dead())
4486 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4487 "# MAY BE MISSING FUNCTION EVENTS\n");
4490 #ifdef CONFIG_TRACER_MAX_TRACE
4491 static void show_snapshot_main_help(struct seq_file *m)
4493 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4494 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4495 "# Takes a snapshot of the main buffer.\n"
4496 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4497 "# (Doesn't have to be '2' works with any number that\n"
4498 "# is not a '0' or '1')\n");
4501 static void show_snapshot_percpu_help(struct seq_file *m)
4503 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4504 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4505 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4506 "# Takes a snapshot of the main buffer for this cpu.\n");
4508 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4509 "# Must use main snapshot file to allocate.\n");
4511 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4512 "# (Doesn't have to be '2' works with any number that\n"
4513 "# is not a '0' or '1')\n");
4516 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4518 if (iter->tr->allocated_snapshot)
4519 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4521 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4523 seq_puts(m, "# Snapshot commands:\n");
4524 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4525 show_snapshot_main_help(m);
4527 show_snapshot_percpu_help(m);
4530 /* Should never be called */
4531 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4534 static int s_show(struct seq_file *m, void *v)
4536 struct trace_iterator *iter = v;
4539 if (iter->ent == NULL) {
4541 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4543 test_ftrace_alive(m);
4545 if (iter->snapshot && trace_empty(iter))
4546 print_snapshot_help(m, iter);
4547 else if (iter->trace && iter->trace->print_header)
4548 iter->trace->print_header(m);
4550 trace_default_header(m);
4552 } else if (iter->leftover) {
4554 * If we filled the seq_file buffer earlier, we
4555 * want to just show it now.
4557 ret = trace_print_seq(m, &iter->seq);
4559 /* ret should this time be zero, but you never know */
4560 iter->leftover = ret;
4563 ret = print_trace_line(iter);
4564 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4566 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4568 ret = trace_print_seq(m, &iter->seq);
4570 * If we overflow the seq_file buffer, then it will
4571 * ask us for this data again at start up.
4573 * ret is 0 if seq_file write succeeded.
4576 iter->leftover = ret;
4583 * Should be used after trace_array_get(), trace_types_lock
4584 * ensures that i_cdev was already initialized.
4586 static inline int tracing_get_cpu(struct inode *inode)
4588 if (inode->i_cdev) /* See trace_create_cpu_file() */
4589 return (long)inode->i_cdev - 1;
4590 return RING_BUFFER_ALL_CPUS;
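/*
 * Note, derived from the code above: per-CPU trace files stash "cpu + 1" in
 * i_cdev when they are created (see trace_create_cpu_file()), so a zero
 * i_cdev unambiguously means "all CPUs" and decoding is simply i_cdev - 1.
 */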
4593 static const struct seq_operations tracer_seq_ops = {
4601 * Note, as iter itself can be allocated and freed in different
4602 * ways, this function is only used to free its content, and not
4603 * the iterator itself. The only requirement for all the allocations
4604 * is that they zero all fields (kzalloc), as freeing works with
4605 * either allocated content or NULL.
4607 static void free_trace_iter_content(struct trace_iterator *iter)
4609 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4610 if (iter->fmt != static_fmt_buf)
4614 kfree(iter->buffer_iter);
4615 mutex_destroy(&iter->mutex);
4616 free_cpumask_var(iter->started);
4619 static struct trace_iterator *
4620 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4622 struct trace_array *tr = inode->i_private;
4623 struct trace_iterator *iter;
4626 if (tracing_disabled)
4627 return ERR_PTR(-ENODEV);
4629 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4631 return ERR_PTR(-ENOMEM);
4633 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4635 if (!iter->buffer_iter)
4639 * trace_find_next_entry() may need to save off iter->ent.
4640 * It will place it into the iter->temp buffer. As most
4641 * events are less than 128, allocate a buffer of that size.
4642 * If one is greater, then trace_find_next_entry() will
4643 * allocate a new buffer to adjust for the bigger iter->ent.
4644 * It's not critical if it fails to get allocated here.
4646 iter->temp = kmalloc(128, GFP_KERNEL);
4648 iter->temp_size = 128;
4651 * trace_event_printf() may need to modify given format
4652 * string to replace %p with %px so that it shows real address
4653 * instead of hash value. However, that is only for the event
4654 * tracing, other tracer may not need. Defer the allocation
4655 * until it is needed.
4660 mutex_lock(&trace_types_lock);
4661 iter->trace = tr->current_trace;
4663 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4668 #ifdef CONFIG_TRACER_MAX_TRACE
4669 /* Currently only the top directory has a snapshot */
4670 if (tr->current_trace->print_max || snapshot)
4671 iter->array_buffer = &tr->max_buffer;
4674 iter->array_buffer = &tr->array_buffer;
4675 iter->snapshot = snapshot;
4677 iter->cpu_file = tracing_get_cpu(inode);
4678 mutex_init(&iter->mutex);
4680 /* Notify the tracer early; before we stop tracing. */
4681 if (iter->trace->open)
4682 iter->trace->open(iter);
4684 /* Annotate start of buffers if we had overruns */
4685 if (ring_buffer_overruns(iter->array_buffer->buffer))
4686 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4688 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4689 if (trace_clocks[tr->clock_id].in_ns)
4690 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4693 * If pause-on-trace is enabled, then stop the trace while
4694 * dumping, unless this is the "snapshot" file
4696 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4697 tracing_stop_tr(tr);
4699 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4700 for_each_tracing_cpu(cpu) {
4701 iter->buffer_iter[cpu] =
4702 ring_buffer_read_prepare(iter->array_buffer->buffer,
4705 ring_buffer_read_prepare_sync();
4706 for_each_tracing_cpu(cpu) {
4707 ring_buffer_read_start(iter->buffer_iter[cpu]);
4708 tracing_iter_reset(iter, cpu);
4711 cpu = iter->cpu_file;
4712 iter->buffer_iter[cpu] =
4713 ring_buffer_read_prepare(iter->array_buffer->buffer,
4715 ring_buffer_read_prepare_sync();
4716 ring_buffer_read_start(iter->buffer_iter[cpu]);
4717 tracing_iter_reset(iter, cpu);
4720 mutex_unlock(&trace_types_lock);
4725 mutex_unlock(&trace_types_lock);
4726 free_trace_iter_content(iter);
4728 seq_release_private(inode, file);
4729 return ERR_PTR(-ENOMEM);
4732 int tracing_open_generic(struct inode *inode, struct file *filp)
4736 ret = tracing_check_open_get_tr(NULL);
4740 filp->private_data = inode->i_private;
4744 bool tracing_is_disabled(void)
4746 return (tracing_disabled) ? true : false;
4750 * Open and update trace_array ref count.
4751 * Must have the current trace_array passed to it.
4753 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4755 struct trace_array *tr = inode->i_private;
4758 ret = tracing_check_open_get_tr(tr);
4762 filp->private_data = inode->i_private;
4768 * The private pointer of the inode is the trace_event_file.
4769 * Update the tr ref count associated to it.
4771 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4773 struct trace_event_file *file = inode->i_private;
4776 ret = tracing_check_open_get_tr(file->tr);
4780 mutex_lock(&event_mutex);
4782 /* Fail if the file is marked for removal */
4783 if (file->flags & EVENT_FILE_FL_FREED) {
4784 trace_array_put(file->tr);
4787 event_file_get(file);
4790 mutex_unlock(&event_mutex);
4794 filp->private_data = inode->i_private;
4799 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4801 struct trace_event_file *file = inode->i_private;
4803 trace_array_put(file->tr);
4804 event_file_put(file);
4809 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4811 tracing_release_file_tr(inode, filp);
4812 return single_release(inode, filp);
4815 static int tracing_mark_open(struct inode *inode, struct file *filp)
4817 stream_open(inode, filp);
4818 return tracing_open_generic_tr(inode, filp);
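/*
 * Writes to trace_marker are an append-only byte stream with no
 * meaningful file position, hence stream_open() before taking the
 * usual trace_array reference.
 */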
4821 static int tracing_release(struct inode *inode, struct file *file)
4823 struct trace_array *tr = inode->i_private;
4824 struct seq_file *m = file->private_data;
4825 struct trace_iterator *iter;
4828 if (!(file->f_mode & FMODE_READ)) {
4829 trace_array_put(tr);
4833 /* Writes do not use seq_file */
4834 iter = m->private;
4835 mutex_lock(&trace_types_lock);
4837 for_each_tracing_cpu(cpu) {
4838 if (iter->buffer_iter[cpu])
4839 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4842 if (iter->trace && iter->trace->close)
4843 iter->trace->close(iter);
4845 if (!iter->snapshot && tr->stop_count)
4846 /* reenable tracing if it was previously enabled */
4847 tracing_start_tr(tr);
4849 __trace_array_put(tr);
4851 mutex_unlock(&trace_types_lock);
4853 free_trace_iter_content(iter);
4854 seq_release_private(inode, file);
4859 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4861 struct trace_array *tr = inode->i_private;
4863 trace_array_put(tr);
4867 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4869 struct trace_array *tr = inode->i_private;
4871 trace_array_put(tr);
4873 return single_release(inode, file);
4876 static int tracing_open(struct inode *inode, struct file *file)
4878 struct trace_array *tr = inode->i_private;
4879 struct trace_iterator *iter;
4882 ret = tracing_check_open_get_tr(tr);
4886 /* If this file was open for write, then erase contents */
4887 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4888 int cpu = tracing_get_cpu(inode);
4889 struct array_buffer *trace_buf = &tr->array_buffer;
4891 #ifdef CONFIG_TRACER_MAX_TRACE
4892 if (tr->current_trace->print_max)
4893 trace_buf = &tr->max_buffer;
4896 if (cpu == RING_BUFFER_ALL_CPUS)
4897 tracing_reset_online_cpus(trace_buf);
4898 else
4899 tracing_reset_cpu(trace_buf, cpu);
4902 if (file->f_mode & FMODE_READ) {
4903 iter = __tracing_open(inode, file, false);
4905 ret = PTR_ERR(iter);
4906 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4907 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4911 trace_array_put(tr);
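/*
 * Typical usage of the "trace" file (see also the mini-HOWTO below):
 *
 *   # cat trace        - dump the static contents of the buffer
 *   # echo > trace     - open with O_TRUNC, clearing the buffer
 */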
4917 * Some tracers are not suitable for instance buffers.
4918 * A tracer is always available for the global array (toplevel)
4919 * or if it explicitly states that it is.
4922 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4924 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4927 /* Find the next tracer that this trace array may use */
4928 static struct tracer *
4929 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4931 while (t && !trace_ok_for_array(t, tr))
4938 t_next(struct seq_file *m, void *v, loff_t *pos)
4940 struct trace_array *tr = m->private;
4941 struct tracer *t = v;
4946 t = get_tracer_for_array(tr, t->next);
4951 static void *t_start(struct seq_file *m, loff_t *pos)
4953 struct trace_array *tr = m->private;
4957 mutex_lock(&trace_types_lock);
4959 t = get_tracer_for_array(tr, trace_types);
4960 for (; t && l < *pos; t = t_next(m, t, &l))
4966 static void t_stop(struct seq_file *m, void *p)
4968 mutex_unlock(&trace_types_lock);
4971 static int t_show(struct seq_file *m, void *v)
4973 struct tracer *t = v;
4978 seq_puts(m, t->name);
4987 static const struct seq_operations show_traces_seq_ops = {
4994 static int show_traces_open(struct inode *inode, struct file *file)
4996 struct trace_array *tr = inode->i_private;
5000 ret = tracing_check_open_get_tr(tr);
5004 ret = seq_open(file, &show_traces_seq_ops);
5006 trace_array_put(tr);
5010 m = file->private_data;
5016 static int show_traces_release(struct inode *inode, struct file *file)
5018 struct trace_array *tr = inode->i_private;
5020 trace_array_put(tr);
5021 return seq_release(inode, file);
5025 tracing_write_stub(struct file *filp, const char __user *ubuf,
5026 size_t count, loff_t *ppos)
5031 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5035 if (file->f_mode & FMODE_READ)
5036 ret = seq_lseek(file, offset, whence);
5038 file->f_pos = ret = 0;
5043 static const struct file_operations tracing_fops = {
5044 .open = tracing_open,
5046 .read_iter = seq_read_iter,
5047 .splice_read = copy_splice_read,
5048 .write = tracing_write_stub,
5049 .llseek = tracing_lseek,
5050 .release = tracing_release,
5053 static const struct file_operations show_traces_fops = {
5054 .open = show_traces_open,
5056 .llseek = seq_lseek,
5057 .release = show_traces_release,
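/*
 * This seq_file walk lists the registered tracers, using
 * get_tracer_for_array() above to skip tracers that are not usable for
 * the given trace_array (e.g. most tracers inside an instance). It is
 * what the available_tracers listing in the mini-HOWTO below is built on.
 */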
5061 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5062 size_t count, loff_t *ppos)
5064 struct trace_array *tr = file_inode(filp)->i_private;
5068 len = snprintf(NULL, 0, "%*pb\n",
5069 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5070 mask_str = kmalloc(len, GFP_KERNEL);
5074 len = snprintf(mask_str, len, "%*pb\n",
5075 cpumask_pr_args(tr->tracing_cpumask));
5080 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5088 int tracing_set_cpumask(struct trace_array *tr,
5089 cpumask_var_t tracing_cpumask_new)
5096 local_irq_disable();
5097 arch_spin_lock(&tr->max_lock);
5098 for_each_tracing_cpu(cpu) {
5100 * Increase/decrease the disabled counter if we are
5101 * about to flip a bit in the cpumask:
5103 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5104 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5105 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5106 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5107 #ifdef CONFIG_TRACER_MAX_TRACE
5108 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5111 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5112 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5113 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5114 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5115 #ifdef CONFIG_TRACER_MAX_TRACE
5116 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5120 arch_spin_unlock(&tr->max_lock);
5123 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
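/*
 * tracing_cpumask takes the standard hex cpumask format parsed by
 * cpumask_parse_user(), e.g. to restrict tracing to CPUs 0 and 1:
 *
 *   # echo 3 > tracing_cpumask
 */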
5129 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5130 size_t count, loff_t *ppos)
5132 struct trace_array *tr = file_inode(filp)->i_private;
5133 cpumask_var_t tracing_cpumask_new;
5136 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5139 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5143 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5147 free_cpumask_var(tracing_cpumask_new);
5152 free_cpumask_var(tracing_cpumask_new);
5157 static const struct file_operations tracing_cpumask_fops = {
5158 .open = tracing_open_generic_tr,
5159 .read = tracing_cpumask_read,
5160 .write = tracing_cpumask_write,
5161 .release = tracing_release_generic_tr,
5162 .llseek = generic_file_llseek,
5165 static int tracing_trace_options_show(struct seq_file *m, void *v)
5167 struct tracer_opt *trace_opts;
5168 struct trace_array *tr = m->private;
5172 mutex_lock(&trace_types_lock);
5173 tracer_flags = tr->current_trace->flags->val;
5174 trace_opts = tr->current_trace->flags->opts;
5176 for (i = 0; trace_options[i]; i++) {
5177 if (tr->trace_flags & (1 << i))
5178 seq_printf(m, "%s\n", trace_options[i]);
5180 seq_printf(m, "no%s\n", trace_options[i]);
5183 for (i = 0; trace_opts[i].name; i++) {
5184 if (tracer_flags & trace_opts[i].bit)
5185 seq_printf(m, "%s\n", trace_opts[i].name);
5187 seq_printf(m, "no%s\n", trace_opts[i].name);
5189 mutex_unlock(&trace_types_lock);
5194 static int __set_tracer_option(struct trace_array *tr,
5195 struct tracer_flags *tracer_flags,
5196 struct tracer_opt *opts, int neg)
5198 struct tracer *trace = tracer_flags->trace;
5201 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5206 tracer_flags->val &= ~opts->bit;
5208 tracer_flags->val |= opts->bit;
5212 /* Try to assign a tracer specific option */
5213 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5215 struct tracer *trace = tr->current_trace;
5216 struct tracer_flags *tracer_flags = trace->flags;
5217 struct tracer_opt *opts = NULL;
5220 for (i = 0; tracer_flags->opts[i].name; i++) {
5221 opts = &tracer_flags->opts[i];
5223 if (strcmp(cmp, opts->name) == 0)
5224 return __set_tracer_option(tr, trace->flags, opts, neg);
5230 /* Some tracers require overwrite to stay enabled */
5231 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5233 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5239 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5241 if ((mask == TRACE_ITER_RECORD_TGID) ||
5242 (mask == TRACE_ITER_RECORD_CMD))
5243 lockdep_assert_held(&event_mutex);
5245 /* do nothing if flag is already set */
5246 if (!!(tr->trace_flags & mask) == !!enabled)
5249 /* Give the tracer a chance to approve the change */
5250 if (tr->current_trace->flag_changed)
5251 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5255 tr->trace_flags |= mask;
5257 tr->trace_flags &= ~mask;
5259 if (mask == TRACE_ITER_RECORD_CMD)
5260 trace_event_enable_cmd_record(enabled);
5262 if (mask == TRACE_ITER_RECORD_TGID) {
5264 if (trace_alloc_tgid_map() < 0) {
5265 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5269 trace_event_enable_tgid_record(enabled);
5272 if (mask == TRACE_ITER_EVENT_FORK)
5273 trace_event_follow_fork(tr, enabled);
5275 if (mask == TRACE_ITER_FUNC_FORK)
5276 ftrace_pid_follow_fork(tr, enabled);
5278 if (mask == TRACE_ITER_OVERWRITE) {
5279 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5280 #ifdef CONFIG_TRACER_MAX_TRACE
5281 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5285 if (mask == TRACE_ITER_PRINTK) {
5286 trace_printk_start_stop_comm(enabled);
5287 trace_printk_control(enabled);
5293 int trace_set_options(struct trace_array *tr, char *option)
5298 size_t orig_len = strlen(option);
5301 cmp = strstrip(option);
5303 len = str_has_prefix(cmp, "no");
5309 mutex_lock(&event_mutex);
5310 mutex_lock(&trace_types_lock);
5312 ret = match_string(trace_options, -1, cmp);
5313 /* If no option could be set, test the specific tracer options */
5315 ret = set_tracer_option(tr, cmp, neg);
5317 ret = set_tracer_flag(tr, 1 << ret, !neg);
5319 mutex_unlock(&trace_types_lock);
5320 mutex_unlock(&event_mutex);
5323 * If the first trailing whitespace is replaced with '\0' by strstrip,
5324 * turn it back into a space.
5326 if (orig_len > strlen(option))
5327 option[strlen(option)] = ' ';
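/*
 * trace_set_options() handles both the generic trace_options flags and
 * tracer specific options; prefixing the name with "no" clears it.
 * For example, assuming the overwrite flag is exposed under the name
 * "overwrite":
 *
 *   # echo nooverwrite > trace_options
 */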
5332 static void __init apply_trace_boot_options(void)
5334 char *buf = trace_boot_options_buf;
5338 option = strsep(&buf, ",");
5344 trace_set_options(&global_trace, option);
5346 /* Put back the comma to allow this to be called again */
5353 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5354 size_t cnt, loff_t *ppos)
5356 struct seq_file *m = filp->private_data;
5357 struct trace_array *tr = m->private;
5361 if (cnt >= sizeof(buf))
5364 if (copy_from_user(buf, ubuf, cnt))
5369 ret = trace_set_options(tr, buf);
5378 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5380 struct trace_array *tr = inode->i_private;
5383 ret = tracing_check_open_get_tr(tr);
5387 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5389 trace_array_put(tr);
5394 static const struct file_operations tracing_iter_fops = {
5395 .open = tracing_trace_options_open,
5397 .llseek = seq_lseek,
5398 .release = tracing_single_release_tr,
5399 .write = tracing_trace_options_write,
5402 static const char readme_msg[] =
5403 "tracing mini-HOWTO:\n\n"
5404 "# echo 0 > tracing_on : quick way to disable tracing\n"
5405 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5406 " Important files:\n"
5407 " trace\t\t\t- The static contents of the buffer\n"
5408 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5409 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5410 " current_tracer\t- function and latency tracers\n"
5411 " available_tracers\t- list of configured tracers for current_tracer\n"
5412 " error_log\t- error log for failed commands (that support it)\n"
5413 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5414 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5415 " trace_clock\t\t- change the clock used to order events\n"
5416 " local: Per cpu clock but may not be synced across CPUs\n"
5417 " global: Synced across CPUs but slows tracing down.\n"
5418 " counter: Not a clock, but just an increment\n"
5419 " uptime: Jiffy counter from time of boot\n"
5420 " perf: Same clock that perf events use\n"
5421 #ifdef CONFIG_X86_64
5422 " x86-tsc: TSC cycle counter\n"
5424 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5425 " delta: Delta difference against a buffer-wide timestamp\n"
5426 " absolute: Absolute (standalone) timestamp\n"
5427 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5428 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5429 " tracing_cpumask\t- Limit which CPUs to trace\n"
5430 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5431 "\t\t\t Remove sub-buffer with rmdir\n"
5432 " trace_options\t\t- Set format or modify how tracing happens\n"
5433 "\t\t\t Disable an option by prefixing 'no' to the\n"
5434 "\t\t\t option name\n"
5435 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5436 #ifdef CONFIG_DYNAMIC_FTRACE
5437 "\n available_filter_functions - list of functions that can be filtered on\n"
5438 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5439 "\t\t\t functions\n"
5440 "\t accepts: func_full_name or glob-matching-pattern\n"
5441 "\t modules: Can select a group via module\n"
5442 "\t Format: :mod:<module-name>\n"
5443 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5444 "\t triggers: a command to perform when function is hit\n"
5445 "\t Format: <function>:<trigger>[:count]\n"
5446 "\t trigger: traceon, traceoff\n"
5447 "\t\t enable_event:<system>:<event>\n"
5448 "\t\t disable_event:<system>:<event>\n"
5449 #ifdef CONFIG_STACKTRACE
5452 #ifdef CONFIG_TRACER_SNAPSHOT
5457 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5458 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5459 "\t The first one will disable tracing every time do_fault is hit\n"
5460 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5461 "\t The first time do trap is hit and it disables tracing, the\n"
5462 "\t counter will decrement to 2. If tracing is already disabled,\n"
5463 "\t the counter will not decrement. It only decrements when the\n"
5464 "\t trigger did work\n"
5465 "\t To remove trigger without count:\n"
5466 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5467 "\t To remove trigger with a count:\n"
5468 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5469 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5470 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5471 "\t modules: Can select a group via module command :mod:\n"
5472 "\t Does not accept triggers\n"
5473 #endif /* CONFIG_DYNAMIC_FTRACE */
5474 #ifdef CONFIG_FUNCTION_TRACER
5475 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5477 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5480 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5481 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5482 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5483 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5485 #ifdef CONFIG_TRACER_SNAPSHOT
5486 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5487 "\t\t\t snapshot buffer. Read the contents for more\n"
5488 "\t\t\t information\n"
5490 #ifdef CONFIG_STACK_TRACER
5491 " stack_trace\t\t- Shows the max stack trace when active\n"
5492 " stack_max_size\t- Shows current max stack size that was traced\n"
5493 "\t\t\t Write into this file to reset the max size (trigger a\n"
5494 "\t\t\t new trace)\n"
5495 #ifdef CONFIG_DYNAMIC_FTRACE
5496 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5499 #endif /* CONFIG_STACK_TRACER */
5500 #ifdef CONFIG_DYNAMIC_EVENTS
5501 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5502 "\t\t\t Write into this file to define/undefine new trace events.\n"
5504 #ifdef CONFIG_KPROBE_EVENTS
5505 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5506 "\t\t\t Write into this file to define/undefine new trace events.\n"
5508 #ifdef CONFIG_UPROBE_EVENTS
5509 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5510 "\t\t\t Write into this file to define/undefine new trace events.\n"
5512 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5513 defined(CONFIG_FPROBE_EVENTS)
5514 "\t accepts: event-definitions (one definition per line)\n"
5515 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5516 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5517 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5519 #ifdef CONFIG_FPROBE_EVENTS
5520 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5521 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5523 #ifdef CONFIG_HIST_TRIGGERS
5524 "\t s:[synthetic/]<event> <field> [<field>]\n"
5526 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5527 "\t -:[<group>/][<event>]\n"
5528 #ifdef CONFIG_KPROBE_EVENTS
5529 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5530 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5532 #ifdef CONFIG_UPROBE_EVENTS
5533 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5535 "\t args: <name>=fetcharg[:type]\n"
5536 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5537 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5538 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5539 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5540 "\t <argname>[->field[->field|.field...]],\n"
5543 "\t $stack<index>, $stack, $retval, $comm,\n"
5545 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5546 "\t kernel return probes support: $retval, $arg<N>, $comm\n"
5547 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5548 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5549 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5550 #ifdef CONFIG_HIST_TRIGGERS
5551 "\t field: <stype> <name>;\n"
5552 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5553 "\t [unsigned] char/int/long\n"
5555 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5556 "\t of the <attached-group>/<attached-event>.\n"
5558 " events/\t\t- Directory containing all trace event subsystems:\n"
5559 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5560 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5561 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5563 " filter\t\t- If set, only events passing filter are traced\n"
5564 " events/<system>/<event>/\t- Directory containing control files for\n"
5566 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5567 " filter\t\t- If set, only events passing filter are traced\n"
5568 " trigger\t\t- If set, a command to perform when event is hit\n"
5569 "\t Format: <trigger>[:count][if <filter>]\n"
5570 "\t trigger: traceon, traceoff\n"
5571 "\t enable_event:<system>:<event>\n"
5572 "\t disable_event:<system>:<event>\n"
5573 #ifdef CONFIG_HIST_TRIGGERS
5574 "\t enable_hist:<system>:<event>\n"
5575 "\t disable_hist:<system>:<event>\n"
5577 #ifdef CONFIG_STACKTRACE
5580 #ifdef CONFIG_TRACER_SNAPSHOT
5583 #ifdef CONFIG_HIST_TRIGGERS
5584 "\t\t hist (see below)\n"
5586 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5587 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5588 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5589 "\t events/block/block_unplug/trigger\n"
5590 "\t The first disables tracing every time block_unplug is hit.\n"
5591 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5592 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5593 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5594 "\t Like function triggers, the counter is only decremented if it\n"
5595 "\t enabled or disabled tracing.\n"
5596 "\t To remove a trigger without a count:\n"
5597 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5598 "\t To remove a trigger with a count:\n"
5599 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5600 "\t Filters can be ignored when removing a trigger.\n"
5601 #ifdef CONFIG_HIST_TRIGGERS
5602 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5603 "\t Format: hist:keys=<field1[,field2,...]>\n"
5604 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5605 "\t [:values=<field1[,field2,...]>]\n"
5606 "\t [:sort=<field1[,field2,...]>]\n"
5607 "\t [:size=#entries]\n"
5608 "\t [:pause][:continue][:clear]\n"
5609 "\t [:name=histname1]\n"
5610 "\t [:nohitcount]\n"
5611 "\t [:<handler>.<action>]\n"
5612 "\t [if <filter>]\n\n"
5613 "\t Note, special fields can be used as well:\n"
5614 "\t common_timestamp - to record current timestamp\n"
5615 "\t common_cpu - to record the CPU the event happened on\n"
5617 "\t A hist trigger variable can be:\n"
5618 "\t - a reference to a field e.g. x=current_timestamp,\n"
5619 "\t - a reference to another variable e.g. y=$x,\n"
5620 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5621 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5623 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5624 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5625 "\t variable reference, field or numeric literal.\n"
5627 "\t When a matching event is hit, an entry is added to a hash\n"
5628 "\t table using the key(s) and value(s) named, and the value of a\n"
5629 "\t sum called 'hitcount' is incremented. Keys and values\n"
5630 "\t correspond to fields in the event's format description. Keys\n"
5631 "\t can be any field, or the special string 'common_stacktrace'.\n"
5632 "\t Compound keys consisting of up to two fields can be specified\n"
5633 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5634 "\t fields. Sort keys consisting of up to two fields can be\n"
5635 "\t specified using the 'sort' keyword. The sort direction can\n"
5636 "\t be modified by appending '.descending' or '.ascending' to a\n"
5637 "\t sort field. The 'size' parameter can be used to specify more\n"
5638 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5639 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5640 "\t its histogram data will be shared with other triggers of the\n"
5641 "\t same name, and trigger hits will update this common data.\n\n"
5642 "\t Reading the 'hist' file for the event will dump the hash\n"
5643 "\t table in its entirety to stdout. If there are multiple hist\n"
5644 "\t triggers attached to an event, there will be a table for each\n"
5645 "\t trigger in the output. The table displayed for a named\n"
5646 "\t trigger will be the same as any other instance having the\n"
5647 "\t same name. The default format used to display a given field\n"
5648 "\t can be modified by appending any of the following modifiers\n"
5649 "\t to the field name, as applicable:\n\n"
5650 "\t .hex display a number as a hex value\n"
5651 "\t .sym display an address as a symbol\n"
5652 "\t .sym-offset display an address as a symbol and offset\n"
5653 "\t .execname display a common_pid as a program name\n"
5654 "\t .syscall display a syscall id as a syscall name\n"
5655 "\t .log2 display log2 value rather than raw number\n"
5656 "\t .buckets=size display values in groups of size rather than raw number\n"
5657 "\t .usecs display a common_timestamp in microseconds\n"
5658 "\t .percent display a number of percentage value\n"
5659 "\t .graph display a bar-graph of a value\n\n"
5660 "\t The 'pause' parameter can be used to pause an existing hist\n"
5661 "\t trigger or to start a hist trigger but not log any events\n"
5662 "\t until told to do so. 'continue' can be used to start or\n"
5663 "\t restart a paused hist trigger.\n\n"
5664 "\t The 'clear' parameter will clear the contents of a running\n"
5665 "\t hist trigger and leave its current paused/active state\n"
5667 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5668 "\t raw hitcount in the histogram.\n\n"
5669 "\t The enable_hist and disable_hist triggers can be used to\n"
5670 "\t have one event conditionally start and stop another event's\n"
5671 "\t already-attached hist trigger. The syntax is analogous to\n"
5672 "\t the enable_event and disable_event triggers.\n\n"
5673 "\t Hist trigger handlers and actions are executed whenever a\n"
5674 "\t a histogram entry is added or updated. They take the form:\n\n"
5675 "\t <handler>.<action>\n\n"
5676 "\t The available handlers are:\n\n"
5677 "\t onmatch(matching.event) - invoke on addition or update\n"
5678 "\t onmax(var) - invoke if var exceeds current max\n"
5679 "\t onchange(var) - invoke action if var changes\n\n"
5680 "\t The available actions are:\n\n"
5681 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5682 "\t save(field,...) - save current event fields\n"
5683 #ifdef CONFIG_TRACER_SNAPSHOT
5684 "\t snapshot() - snapshot the trace buffer\n\n"
5686 #ifdef CONFIG_SYNTH_EVENTS
5687 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5688 "\t Write into this file to define/undefine new synthetic events.\n"
5689 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5695 tracing_readme_read(struct file *filp, char __user *ubuf,
5696 size_t cnt, loff_t *ppos)
5698 return simple_read_from_buffer(ubuf, cnt, ppos,
5699 readme_msg, strlen(readme_msg));
5702 static const struct file_operations tracing_readme_fops = {
5703 .open = tracing_open_generic,
5704 .read = tracing_readme_read,
5705 .llseek = generic_file_llseek,
5708 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5709 static union trace_eval_map_item *
5710 update_eval_map(union trace_eval_map_item *ptr)
5712 if (!ptr->map.eval_string) {
5713 if (ptr->tail.next) {
5714 ptr = ptr->tail.next;
5715 /* Set ptr to the next real item (skip head) */
5723 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5725 union trace_eval_map_item *ptr = v;
5728 * Paranoid! If ptr points to end, we don't want to increment past it.
5729 * This really should never happen.
5732 ptr = update_eval_map(ptr);
5733 if (WARN_ON_ONCE(!ptr))
5737 ptr = update_eval_map(ptr);
5742 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5744 union trace_eval_map_item *v;
5747 mutex_lock(&trace_eval_mutex);
5749 v = trace_eval_maps;
5753 while (v && l < *pos) {
5754 v = eval_map_next(m, v, &l);
5760 static void eval_map_stop(struct seq_file *m, void *v)
5762 mutex_unlock(&trace_eval_mutex);
5765 static int eval_map_show(struct seq_file *m, void *v)
5767 union trace_eval_map_item *ptr = v;
5769 seq_printf(m, "%s %ld (%s)\n",
5770 ptr->map.eval_string, ptr->map.eval_value,
5776 static const struct seq_operations tracing_eval_map_seq_ops = {
5777 .start = eval_map_start,
5778 .next = eval_map_next,
5779 .stop = eval_map_stop,
5780 .show = eval_map_show,
5783 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5787 ret = tracing_check_open_get_tr(NULL);
5791 return seq_open(filp, &tracing_eval_map_seq_ops);
5794 static const struct file_operations tracing_eval_map_fops = {
5795 .open = tracing_eval_map_open,
5797 .llseek = seq_lseek,
5798 .release = seq_release,
5801 static inline union trace_eval_map_item *
5802 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5804 /* Return tail of array given the head */
5805 return ptr + ptr->head.length + 1;
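/*
 * Layout of one chunk as allocated in trace_insert_eval_map_file()
 * below:
 *
 *   [ head | map[0] ... map[len - 1] | tail ]
 *
 * head holds the module and length, tail.next links to the next chunk,
 * which is why the tail sits at ptr + head.length + 1.
 */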
5809 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5812 struct trace_eval_map **stop;
5813 struct trace_eval_map **map;
5814 union trace_eval_map_item *map_array;
5815 union trace_eval_map_item *ptr;
5820 * The trace_eval_maps contains the map plus a head and tail item,
5821 * where the head holds the module and length of array, and the
5822 * tail holds a pointer to the next list.
5824 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5826 pr_warn("Unable to allocate trace eval mapping\n");
5830 mutex_lock(&trace_eval_mutex);
5832 if (!trace_eval_maps)
5833 trace_eval_maps = map_array;
5835 ptr = trace_eval_maps;
5837 ptr = trace_eval_jmp_to_tail(ptr);
5838 if (!ptr->tail.next)
5840 ptr = ptr->tail.next;
5843 ptr->tail.next = map_array;
5845 map_array->head.mod = mod;
5846 map_array->head.length = len;
5849 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5850 map_array->map = **map;
5853 memset(map_array, 0, sizeof(*map_array));
5855 mutex_unlock(&trace_eval_mutex);
5858 static void trace_create_eval_file(struct dentry *d_tracer)
5860 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5861 NULL, &tracing_eval_map_fops);
5864 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5865 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5866 static inline void trace_insert_eval_map_file(struct module *mod,
5867 struct trace_eval_map **start, int len) { }
5868 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5870 static void trace_insert_eval_map(struct module *mod,
5871 struct trace_eval_map **start, int len)
5873 struct trace_eval_map **map;
5880 trace_event_eval_update(map, len);
5882 trace_insert_eval_map_file(mod, start, len);
5886 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5887 size_t cnt, loff_t *ppos)
5889 struct trace_array *tr = filp->private_data;
5890 char buf[MAX_TRACER_SIZE+2];
5893 mutex_lock(&trace_types_lock);
5894 r = sprintf(buf, "%s\n", tr->current_trace->name);
5895 mutex_unlock(&trace_types_lock);
5897 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5900 int tracer_init(struct tracer *t, struct trace_array *tr)
5902 tracing_reset_online_cpus(&tr->array_buffer);
5906 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5910 for_each_tracing_cpu(cpu)
5911 per_cpu_ptr(buf->data, cpu)->entries = val;
5914 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5916 if (cpu == RING_BUFFER_ALL_CPUS) {
5917 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5919 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5923 #ifdef CONFIG_TRACER_MAX_TRACE
5924 /* resize @tr's buffer to the size of @size_tr's entries */
5925 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5926 struct array_buffer *size_buf, int cpu_id)
5930 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5931 for_each_tracing_cpu(cpu) {
5932 ret = ring_buffer_resize(trace_buf->buffer,
5933 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5936 per_cpu_ptr(trace_buf->data, cpu)->entries =
5937 per_cpu_ptr(size_buf->data, cpu)->entries;
5940 ret = ring_buffer_resize(trace_buf->buffer,
5941 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5943 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5944 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5949 #endif /* CONFIG_TRACER_MAX_TRACE */
5951 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5952 unsigned long size, int cpu)
5957 * If kernel or user changes the size of the ring buffer
5958 * we use the size that was given, and we can forget about
5959 * expanding it later.
5961 trace_set_ring_buffer_expanded(tr);
5963 /* May be called before buffers are initialized */
5964 if (!tr->array_buffer.buffer)
5967 /* Do not allow tracing while resizing ring buffer */
5968 tracing_stop_tr(tr);
5970 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5974 #ifdef CONFIG_TRACER_MAX_TRACE
5975 if (!tr->allocated_snapshot)
5976 goto out;
5978 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5980 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5981 &tr->array_buffer, cpu);
5984 * AARGH! We are left with different
5985 * size max buffer!!!!
5986 * The max buffer is our "snapshot" buffer.
5987 * When a tracer needs a snapshot (one of the
5988 * latency tracers), it swaps the max buffer
5989 * with the saved snap shot. We succeeded to
5990 * update the size of the main buffer, but failed to
5991 * update the size of the max buffer. But when we tried
5992 * to reset the main buffer to the original size, we
5993 * failed there too. This is very unlikely to
5994 * happen, but if it does, warn and kill all
5998 tracing_disabled = 1;
6003 update_buffer_entries(&tr->max_buffer, cpu);
6005 out:
6006 #endif /* CONFIG_TRACER_MAX_TRACE */
6008 update_buffer_entries(&tr->array_buffer, cpu);
6010 tracing_start_tr(tr);
6014 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6015 unsigned long size, int cpu_id)
6019 mutex_lock(&trace_types_lock);
6021 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6022 /* make sure, this cpu is enabled in the mask */
6023 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6029 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6034 mutex_unlock(&trace_types_lock);
6041 * tracing_update_buffers - used by tracing facility to expand ring buffers
6042 * @tr: The tracing instance
6044 * To save memory when tracing is never used on a system that has it
6045 * configured in, the ring buffers are set to a minimum size. But once
6046 * a user starts to use the tracing facility, they need to grow
6047 * to their default size.
6049 * This function is to be called when a tracer is about to be used.
6051 int tracing_update_buffers(struct trace_array *tr)
6055 mutex_lock(&trace_types_lock);
6056 if (!tr->ring_buffer_expanded)
6057 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6058 RING_BUFFER_ALL_CPUS);
6059 mutex_unlock(&trace_types_lock);
6064 struct trace_option_dentry;
6067 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6070 * Used to clear out the tracer before deletion of an instance.
6071 * Must have trace_types_lock held.
6073 static void tracing_set_nop(struct trace_array *tr)
6075 if (tr->current_trace == &nop_trace)
6078 tr->current_trace->enabled--;
6080 if (tr->current_trace->reset)
6081 tr->current_trace->reset(tr);
6083 tr->current_trace = &nop_trace;
6086 static bool tracer_options_updated;
6088 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6090 /* Only enable if the directory has been created already. */
6094 /* Only create trace option files after update_tracer_options finish */
6095 if (!tracer_options_updated)
6098 create_trace_option_files(tr, t);
6101 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6104 #ifdef CONFIG_TRACER_MAX_TRACE
6109 mutex_lock(&trace_types_lock);
6111 if (!tr->ring_buffer_expanded) {
6112 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6113 RING_BUFFER_ALL_CPUS);
6119 for (t = trace_types; t; t = t->next) {
6120 if (strcmp(t->name, buf) == 0)
6127 if (t == tr->current_trace)
6130 #ifdef CONFIG_TRACER_SNAPSHOT
6131 if (t->use_max_tr) {
6132 local_irq_disable();
6133 arch_spin_lock(&tr->max_lock);
6134 if (tr->cond_snapshot)
6136 arch_spin_unlock(&tr->max_lock);
6142 /* Some tracers won't work on kernel command line */
6143 if (system_state < SYSTEM_RUNNING && t->noboot) {
6144 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6149 /* Some tracers are only allowed for the top level buffer */
6150 if (!trace_ok_for_array(t, tr)) {
6155 /* If trace pipe files are being read, we can't change the tracer */
6156 if (tr->trace_ref) {
6161 trace_branch_disable();
6163 tr->current_trace->enabled--;
6165 if (tr->current_trace->reset)
6166 tr->current_trace->reset(tr);
6168 #ifdef CONFIG_TRACER_MAX_TRACE
6169 had_max_tr = tr->current_trace->use_max_tr;
6171 /* Current trace needs to be nop_trace before synchronize_rcu */
6172 tr->current_trace = &nop_trace;
6174 if (had_max_tr && !t->use_max_tr) {
6176 * We need to make sure that the update_max_tr sees that
6177 * current_trace changed to nop_trace to keep it from
6178 * swapping the buffers after we resize it.
6179 * The update_max_tr is called with interrupts disabled
6180 * so a synchronize_rcu() is sufficient.
6184 tracing_disarm_snapshot(tr);
6187 if (!had_max_tr && t->use_max_tr) {
6188 ret = tracing_arm_snapshot_locked(tr);
6193 tr->current_trace = &nop_trace;
6197 ret = tracer_init(t, tr);
6199 #ifdef CONFIG_TRACER_MAX_TRACE
6201 tracing_disarm_snapshot(tr);
6207 tr->current_trace = t;
6208 tr->current_trace->enabled++;
6209 trace_branch_enable(tr);
6211 mutex_unlock(&trace_types_lock);
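/*
 * tracing_set_tracer() is where a write to the current_tracer file
 * (see the mini-HOWTO above) ends up, via tracing_set_trace_write()
 * below. For example:
 *
 *   # echo function_graph > current_tracer
 */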
6217 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6218 size_t cnt, loff_t *ppos)
6220 struct trace_array *tr = filp->private_data;
6221 char buf[MAX_TRACER_SIZE+1];
6228 if (cnt > MAX_TRACER_SIZE)
6229 cnt = MAX_TRACER_SIZE;
6231 if (copy_from_user(buf, ubuf, cnt))
6238 err = tracing_set_tracer(tr, name);
6248 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6249 size_t cnt, loff_t *ppos)
6254 r = snprintf(buf, sizeof(buf), "%ld\n",
6255 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6256 if (r > sizeof(buf))
6258 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6262 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6263 size_t cnt, loff_t *ppos)
6268 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6278 tracing_thresh_read(struct file *filp, char __user *ubuf,
6279 size_t cnt, loff_t *ppos)
6281 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6285 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6286 size_t cnt, loff_t *ppos)
6288 struct trace_array *tr = filp->private_data;
6291 mutex_lock(&trace_types_lock);
6292 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6296 if (tr->current_trace->update_thresh) {
6297 ret = tr->current_trace->update_thresh(tr);
6304 mutex_unlock(&trace_types_lock);
6309 #ifdef CONFIG_TRACER_MAX_TRACE
6312 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6313 size_t cnt, loff_t *ppos)
6315 struct trace_array *tr = filp->private_data;
6317 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6321 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6322 size_t cnt, loff_t *ppos)
6324 struct trace_array *tr = filp->private_data;
6326 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6331 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6333 if (cpu == RING_BUFFER_ALL_CPUS) {
6334 if (cpumask_empty(tr->pipe_cpumask)) {
6335 cpumask_setall(tr->pipe_cpumask);
6338 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6339 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6345 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6347 if (cpu == RING_BUFFER_ALL_CPUS) {
6348 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6349 cpumask_clear(tr->pipe_cpumask);
6351 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6352 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
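/*
 * open_pipe_on_cpu()/close_pipe_on_cpu() keep trace_pipe readers
 * exclusive: the top level trace_pipe claims every bit of
 * tr->pipe_cpumask, a per-CPU reader claims only its own bit, so the
 * two cannot be mixed and each CPU has at most one reader at a time.
 */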
6356 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6358 struct trace_array *tr = inode->i_private;
6359 struct trace_iterator *iter;
6363 ret = tracing_check_open_get_tr(tr);
6367 mutex_lock(&trace_types_lock);
6368 cpu = tracing_get_cpu(inode);
6369 ret = open_pipe_on_cpu(tr, cpu);
6371 goto fail_pipe_on_cpu;
6373 /* create a buffer to store the information to pass to userspace */
6374 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6377 goto fail_alloc_iter;
6380 trace_seq_init(&iter->seq);
6381 iter->trace = tr->current_trace;
6383 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6388 /* trace pipe does not show start of buffer */
6389 cpumask_setall(iter->started);
6391 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6392 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6394 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6395 if (trace_clocks[tr->clock_id].in_ns)
6396 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6399 iter->array_buffer = &tr->array_buffer;
6400 iter->cpu_file = cpu;
6401 mutex_init(&iter->mutex);
6402 filp->private_data = iter;
6404 if (iter->trace->pipe_open)
6405 iter->trace->pipe_open(iter);
6407 nonseekable_open(inode, filp);
6411 mutex_unlock(&trace_types_lock);
6417 close_pipe_on_cpu(tr, cpu);
6419 __trace_array_put(tr);
6420 mutex_unlock(&trace_types_lock);
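/*
 * Unlike "trace", a read of "trace_pipe" consumes the events it
 * returns (see the mini-HOWTO above), so every open gets its own
 * iterator here and readers block in tracing_read_pipe() until data
 * is available.
 */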
6424 static int tracing_release_pipe(struct inode *inode, struct file *file)
6426 struct trace_iterator *iter = file->private_data;
6427 struct trace_array *tr = inode->i_private;
6429 mutex_lock(&trace_types_lock);
6433 if (iter->trace->pipe_close)
6434 iter->trace->pipe_close(iter);
6435 close_pipe_on_cpu(tr, iter->cpu_file);
6436 mutex_unlock(&trace_types_lock);
6438 free_trace_iter_content(iter);
6441 trace_array_put(tr);
6447 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6449 struct trace_array *tr = iter->tr;
6451 /* Iterators are static, they should be filled or empty */
6452 if (trace_buffer_iter(iter, iter->cpu_file))
6453 return EPOLLIN | EPOLLRDNORM;
6455 if (tr->trace_flags & TRACE_ITER_BLOCK)
6457 * Always select as readable when in blocking mode
6459 return EPOLLIN | EPOLLRDNORM;
6461 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6462 filp, poll_table, iter->tr->buffer_percent);
6466 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6468 struct trace_iterator *iter = filp->private_data;
6470 return trace_poll(iter, filp, poll_table);
6473 /* Must be called with iter->mutex held. */
6474 static int tracing_wait_pipe(struct file *filp)
6476 struct trace_iterator *iter = filp->private_data;
6479 while (trace_empty(iter)) {
6481 if ((filp->f_flags & O_NONBLOCK)) {
6486 * We block until we read something and tracing is disabled.
6487 * We still block if tracing is disabled, but we have never
6488 * read anything. This allows a user to cat this file, and
6489 * then enable tracing. But after we have read something,
6490 * we give an EOF when tracing is again disabled.
6492 * iter->pos will be 0 if we haven't read anything.
6494 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6497 mutex_unlock(&iter->mutex);
6499 ret = wait_on_pipe(iter, 0);
6501 mutex_lock(&iter->mutex);
6514 tracing_read_pipe(struct file *filp, char __user *ubuf,
6515 size_t cnt, loff_t *ppos)
6517 struct trace_iterator *iter = filp->private_data;
6521 * Avoid more than one consumer on a single file descriptor
6522 * This is just a matter of trace coherency; the ring buffer itself is protected.
6525 mutex_lock(&iter->mutex);
6527 /* return any leftover data */
6528 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6532 trace_seq_init(&iter->seq);
6534 if (iter->trace->read) {
6535 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6541 sret = tracing_wait_pipe(filp);
6545 /* stop when tracing is finished */
6546 if (trace_empty(iter)) {
6551 if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6552 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6554 /* reset all but tr, trace, and overruns */
6555 trace_iterator_reset(iter);
6556 cpumask_clear(iter->started);
6557 trace_seq_init(&iter->seq);
6559 trace_event_read_lock();
6560 trace_access_lock(iter->cpu_file);
6561 while (trace_find_next_entry_inc(iter) != NULL) {
6562 enum print_line_t ret;
6563 int save_len = iter->seq.seq.len;
6565 ret = print_trace_line(iter);
6566 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6568 * If one print_trace_line() fills entire trace_seq in one shot,
6569 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6570 * In this case, we need to consume it, otherwise the loop will peek
6571 * this event again next time, resulting in an infinite loop.
6573 if (save_len == 0) {
6575 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6576 trace_consume(iter);
6580 /* In other cases, don't print partial lines */
6581 iter->seq.seq.len = save_len;
6584 if (ret != TRACE_TYPE_NO_CONSUME)
6585 trace_consume(iter);
6587 if (trace_seq_used(&iter->seq) >= cnt)
6591 * Setting the full flag means we reached the trace_seq buffer
6592 * size and we should have left via the partial output condition above.
6593 * One of the trace_seq_* functions is not used properly.
6595 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6598 trace_access_unlock(iter->cpu_file);
6599 trace_event_read_unlock();
6601 /* Now copy what we have to the user */
6602 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6603 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6604 trace_seq_init(&iter->seq);
6607 * If there was nothing to send to user, in spite of consuming trace
6608 * entries, go back to wait for more entries.
6614 mutex_unlock(&iter->mutex);
6619 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6622 __free_page(spd->pages[idx]);
6626 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6632 /* Seq buffer is page-sized, exactly what we need. */
6634 save_len = iter->seq.seq.len;
6635 ret = print_trace_line(iter);
6637 if (trace_seq_has_overflowed(&iter->seq)) {
6638 iter->seq.seq.len = save_len;
6643 * This should not be hit, because it should only
6644 * be set if the iter->seq overflowed. But check it
6645 * anyway to be safe.
6647 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6648 iter->seq.seq.len = save_len;
6652 count = trace_seq_used(&iter->seq) - save_len;
6655 iter->seq.seq.len = save_len;
6659 if (ret != TRACE_TYPE_NO_CONSUME)
6660 trace_consume(iter);
6662 if (!trace_find_next_entry_inc(iter)) {
6672 static ssize_t tracing_splice_read_pipe(struct file *filp,
6674 struct pipe_inode_info *pipe,
6678 struct page *pages_def[PIPE_DEF_BUFFERS];
6679 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6680 struct trace_iterator *iter = filp->private_data;
6681 struct splice_pipe_desc spd = {
6683 .partial = partial_def,
6684 .nr_pages = 0, /* This gets updated below. */
6685 .nr_pages_max = PIPE_DEF_BUFFERS,
6686 .ops = &default_pipe_buf_ops,
6687 .spd_release = tracing_spd_release_pipe,
6693 if (splice_grow_spd(pipe, &spd))
6696 mutex_lock(&iter->mutex);
6698 if (iter->trace->splice_read) {
6699 ret = iter->trace->splice_read(iter, filp,
6700 ppos, pipe, len, flags);
6705 ret = tracing_wait_pipe(filp);
6709 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6714 trace_event_read_lock();
6715 trace_access_lock(iter->cpu_file);
6717 /* Fill as many pages as possible. */
6718 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6719 spd.pages[i] = alloc_page(GFP_KERNEL);
6723 rem = tracing_fill_pipe_page(rem, iter);
6725 /* Copy the data into the page, so we can start over. */
6726 ret = trace_seq_to_buffer(&iter->seq,
6727 page_address(spd.pages[i]),
6728 trace_seq_used(&iter->seq));
6730 __free_page(spd.pages[i]);
6733 spd.partial[i].offset = 0;
6734 spd.partial[i].len = trace_seq_used(&iter->seq);
6736 trace_seq_init(&iter->seq);
6739 trace_access_unlock(iter->cpu_file);
6740 trace_event_read_unlock();
6741 mutex_unlock(&iter->mutex);
6746 ret = splice_to_pipe(pipe, &spd);
6750 splice_shrink_spd(&spd);
6754 mutex_unlock(&iter->mutex);
6759 tracing_entries_read(struct file *filp, char __user *ubuf,
6760 size_t cnt, loff_t *ppos)
6762 struct inode *inode = file_inode(filp);
6763 struct trace_array *tr = inode->i_private;
6764 int cpu = tracing_get_cpu(inode);
6769 mutex_lock(&trace_types_lock);
6771 if (cpu == RING_BUFFER_ALL_CPUS) {
6772 int cpu, buf_size_same;
6777 /* check if all cpu sizes are same */
6778 for_each_tracing_cpu(cpu) {
6779 /* fill in the size from first enabled cpu */
6781 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6782 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6788 if (buf_size_same) {
6789 if (!tr->ring_buffer_expanded)
6790 r = sprintf(buf, "%lu (expanded: %lu)\n",
6792 trace_buf_size >> 10);
6794 r = sprintf(buf, "%lu\n", size >> 10);
6796 r = sprintf(buf, "X\n");
6798 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6800 mutex_unlock(&trace_types_lock);
6802 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6807 tracing_entries_write(struct file *filp, const char __user *ubuf,
6808 size_t cnt, loff_t *ppos)
6810 struct inode *inode = file_inode(filp);
6811 struct trace_array *tr = inode->i_private;
6815 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6819 /* must have at least 1 entry */
6823 /* value is in KB */
6824 val <<= 10;
6825 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
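/*
 * Example: grow each per-CPU buffer to 16 MB through the top level
 * buffer_size_kb file; a per-CPU instance of this file, distinguished
 * here by tracing_get_cpu(), resizes only that CPU's buffer:
 *
 *   # echo 16384 > buffer_size_kb
 */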
6835 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6836 size_t cnt, loff_t *ppos)
6838 struct trace_array *tr = filp->private_data;
6841 unsigned long size = 0, expanded_size = 0;
6843 mutex_lock(&trace_types_lock);
6844 for_each_tracing_cpu(cpu) {
6845 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6846 if (!tr->ring_buffer_expanded)
6847 expanded_size += trace_buf_size >> 10;
6849 if (tr->ring_buffer_expanded)
6850 r = sprintf(buf, "%lu\n", size);
6852 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6853 mutex_unlock(&trace_types_lock);
6855 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6859 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6860 size_t cnt, loff_t *ppos)
6863 * There is no need to read what the user has written, this function
6864 * is just to make sure that there is no error when "echo" is used
6873 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6875 struct trace_array *tr = inode->i_private;
6877 /* disable tracing ? */
6878 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6879 tracer_tracing_off(tr);
6880 /* resize the ring buffer to 0 */
6881 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6883 trace_array_put(tr);
6888 #define TRACE_MARKER_MAX_SIZE 4096
6891 tracing_mark_write(struct file *filp, const char __user *ubuf,
6892 size_t cnt, loff_t *fpos)
6894 struct trace_array *tr = filp->private_data;
6895 struct ring_buffer_event *event;
6896 enum event_trigger_type tt = ETT_NONE;
6897 struct trace_buffer *buffer;
6898 struct print_entry *entry;
6904 /* Used in tracing_mark_raw_write() as well */
6905 #define FAULTED_STR "<faulted>"
6906 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6908 if (tracing_disabled)
6911 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6914 if ((ssize_t)cnt < 0)
6917 if (cnt > TRACE_MARKER_MAX_SIZE)
6918 cnt = TRACE_MARKER_MAX_SIZE;
6920 meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */
6922 size = cnt + meta_size;
6924 /* If less than "<faulted>", then make sure we can still add that */
6925 if (cnt < FAULTED_SIZE)
6926 size += FAULTED_SIZE - cnt;
6928 buffer = tr->array_buffer.buffer;
6929 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6931 if (unlikely(!event)) {
6933 * If the size was greater than what was allowed, then
6934 * make it smaller and try again.
6936 if (size > ring_buffer_max_event_size(buffer)) {
6937 /* a count smaller than FAULTED_SIZE should never be bigger than the max event size */
6938 if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6940 cnt = ring_buffer_max_event_size(buffer) - meta_size;
6941 /* The above should only happen once */
6942 if (WARN_ON_ONCE(cnt + meta_size == size))
6947 /* Ring buffer disabled, return as if not open for write */
6951 entry = ring_buffer_event_data(event);
6952 entry->ip = _THIS_IP_;
6954 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6956 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6962 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6963 /* do not add \n before testing triggers, but add \0 */
6964 entry->buf[cnt] = '\0';
6965 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6968 if (entry->buf[cnt - 1] != '\n') {
6969 entry->buf[cnt] = '\n';
6970 entry->buf[cnt + 1] = '\0';
6972 entry->buf[cnt] = '\0';
6974 if (static_branch_unlikely(&trace_marker_exports_enabled))
6975 ftrace_exports(event, TRACE_EXPORT_MARKER);
6976 __buffer_unlock_commit(buffer, event);
6979 event_triggers_post_call(tr->trace_marker_file, tt);
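/*
 * Example of writing a marker from user space; it shows up as a print
 * entry in the trace output:
 *
 *   # echo "hello world" > trace_marker
 */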
6985 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6986 size_t cnt, loff_t *fpos)
6988 struct trace_array *tr = filp->private_data;
6989 struct ring_buffer_event *event;
6990 struct trace_buffer *buffer;
6991 struct raw_data_entry *entry;
6996 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6998 if (tracing_disabled)
7001 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7004 /* The marker must at least have a tag id */
7005 if (cnt < sizeof(unsigned int))
7008 size = sizeof(*entry) + cnt;
7009 if (cnt < FAULT_SIZE_ID)
7010 size += FAULT_SIZE_ID - cnt;
7012 buffer = tr->array_buffer.buffer;
7014 if (size > ring_buffer_max_event_size(buffer))
7017 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7020 /* Ring buffer disabled, return as if not open for write */
7023 entry = ring_buffer_event_data(event);
7025 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7028 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7033 __buffer_unlock_commit(buffer, event);
7038 static int tracing_clock_show(struct seq_file *m, void *v)
7040 struct trace_array *tr = m->private;
7043 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7045 "%s%s%s%s", i ? " " : "",
7046 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7047 i == tr->clock_id ? "]" : "");
7053 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7057 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7058 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7061 if (i == ARRAY_SIZE(trace_clocks))
7064 mutex_lock(&trace_types_lock);
7068 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7071 * New clock may not be consistent with the previous clock.
7072 * Reset the buffer so that it doesn't have incomparable timestamps.
7074 tracing_reset_online_cpus(&tr->array_buffer);
7076 #ifdef CONFIG_TRACER_MAX_TRACE
7077 if (tr->max_buffer.buffer)
7078 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7079 tracing_reset_online_cpus(&tr->max_buffer);
7082 mutex_unlock(&trace_types_lock);
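/*
 * Example of switching the trace clock (the available clocks are
 * listed in the mini-HOWTO above); both buffers are reset because old
 * timestamps would not be comparable with the new clock:
 *
 *   # echo global > trace_clock
 */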
7087 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7088 size_t cnt, loff_t *fpos)
7090 struct seq_file *m = filp->private_data;
7091 struct trace_array *tr = m->private;
7093 const char *clockstr;
7096 if (cnt >= sizeof(buf))
7099 if (copy_from_user(buf, ubuf, cnt))
7104 clockstr = strstrip(buf);
7106 ret = tracing_set_clock(tr, clockstr);
7115 static int tracing_clock_open(struct inode *inode, struct file *file)
7117 struct trace_array *tr = inode->i_private;
7120 ret = tracing_check_open_get_tr(tr);
7124 ret = single_open(file, tracing_clock_show, inode->i_private);
7126 trace_array_put(tr);
7131 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7133 struct trace_array *tr = m->private;
7135 mutex_lock(&trace_types_lock);
7137 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7138 seq_puts(m, "delta [absolute]\n");
7140 seq_puts(m, "[delta] absolute\n");
7142 mutex_unlock(&trace_types_lock);
7147 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7149 struct trace_array *tr = inode->i_private;
7152 ret = tracing_check_open_get_tr(tr);
7156 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7158 trace_array_put(tr);
7163 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7165 if (rbe == this_cpu_read(trace_buffered_event))
7166 return ring_buffer_time_stamp(buffer);
7168 return ring_buffer_event_time_stamp(buffer, rbe);
7172 * Set or disable using the per CPU trace_buffered_event when possible.
7174 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7178 mutex_lock(&trace_types_lock);
7180 if (set && tr->no_filter_buffering_ref++)
7184 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7189 --tr->no_filter_buffering_ref;
7192 mutex_unlock(&trace_types_lock);
7197 struct ftrace_buffer_info {
7198 struct trace_iterator iter;
7200 unsigned int spare_cpu;
7201 unsigned int spare_size;
7205 #ifdef CONFIG_TRACER_SNAPSHOT
7206 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7208 struct trace_array *tr = inode->i_private;
7209 struct trace_iterator *iter;
7213 ret = tracing_check_open_get_tr(tr);
7217 if (file->f_mode & FMODE_READ) {
7218 iter = __tracing_open(inode, file, true);
7220 ret = PTR_ERR(iter);
7222 /* Writes still need the seq_file to hold the private data */
7224 m = kzalloc(sizeof(*m), GFP_KERNEL);
7227 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7235 iter->array_buffer = &tr->max_buffer;
7236 iter->cpu_file = tracing_get_cpu(inode);
7238 file->private_data = m;
7242 trace_array_put(tr);
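/*
 * Runs on the target CPU (via smp_call_function_single()) to swap that
 * CPU's trace buffer with the max/snapshot buffer.
 */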
7247 static void tracing_swap_cpu_buffer(void *tr)
7249 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7253 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7256 struct seq_file *m = filp->private_data;
7257 struct trace_iterator *iter = m->private;
7258 struct trace_array *tr = iter->tr;
7262 ret = tracing_update_buffers(tr);
7266 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7270 mutex_lock(&trace_types_lock);
7272 if (tr->current_trace->use_max_tr) {
7277 local_irq_disable();
7278 arch_spin_lock(&tr->max_lock);
7279 if (tr->cond_snapshot)
7281 arch_spin_unlock(&tr->max_lock);
7288 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7292 if (tr->allocated_snapshot)
7296 /* Only allow per-cpu swap if the ring buffer supports it */
7297 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7298 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7303 if (tr->allocated_snapshot)
7304 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7305 &tr->array_buffer, iter->cpu_file);
7307 ret = tracing_arm_snapshot_locked(tr);
7311 /* Now, we're going to swap */
7312 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7313 local_irq_disable();
7314 update_max_tr(tr, current, smp_processor_id(), NULL);
7317 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7320 tracing_disarm_snapshot(tr);
7323 if (tr->allocated_snapshot) {
7324 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7325 tracing_reset_online_cpus(&tr->max_buffer);
7327 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7337 mutex_unlock(&trace_types_lock);
7341 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7343 struct seq_file *m = file->private_data;
7346 ret = tracing_release(inode, file);
7348 if (file->f_mode & FMODE_READ)
7351 /* If write only, the seq_file is just a stub */
7359 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7360 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7361 size_t count, loff_t *ppos);
7362 static int tracing_buffers_release(struct inode *inode, struct file *file);
7363 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7364 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7366 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7368 struct ftrace_buffer_info *info;
7371 /* The following checks for tracefs lockdown */
7372 ret = tracing_buffers_open(inode, filp);
7376 info = filp->private_data;
7378 if (info->iter.trace->use_max_tr) {
7379 tracing_buffers_release(inode, filp);
7383 info->iter.snapshot = true;
7384 info->iter.array_buffer = &info->iter.tr->max_buffer;
7389 #endif /* CONFIG_TRACER_SNAPSHOT */
7392 static const struct file_operations tracing_thresh_fops = {
7393 .open = tracing_open_generic,
7394 .read = tracing_thresh_read,
7395 .write = tracing_thresh_write,
7396 .llseek = generic_file_llseek,
7399 #ifdef CONFIG_TRACER_MAX_TRACE
7400 static const struct file_operations tracing_max_lat_fops = {
7401 .open = tracing_open_generic_tr,
7402 .read = tracing_max_lat_read,
7403 .write = tracing_max_lat_write,
7404 .llseek = generic_file_llseek,
7405 .release = tracing_release_generic_tr,
7409 static const struct file_operations set_tracer_fops = {
7410 .open = tracing_open_generic_tr,
7411 .read = tracing_set_trace_read,
7412 .write = tracing_set_trace_write,
7413 .llseek = generic_file_llseek,
7414 .release = tracing_release_generic_tr,
7417 static const struct file_operations tracing_pipe_fops = {
7418 .open = tracing_open_pipe,
7419 .poll = tracing_poll_pipe,
7420 .read = tracing_read_pipe,
7421 .splice_read = tracing_splice_read_pipe,
7422 .release = tracing_release_pipe,
7423 .llseek = no_llseek,
7426 static const struct file_operations tracing_entries_fops = {
7427 .open = tracing_open_generic_tr,
7428 .read = tracing_entries_read,
7429 .write = tracing_entries_write,
7430 .llseek = generic_file_llseek,
7431 .release = tracing_release_generic_tr,
7434 static const struct file_operations tracing_total_entries_fops = {
7435 .open = tracing_open_generic_tr,
7436 .read = tracing_total_entries_read,
7437 .llseek = generic_file_llseek,
7438 .release = tracing_release_generic_tr,
7441 static const struct file_operations tracing_free_buffer_fops = {
7442 .open = tracing_open_generic_tr,
7443 .write = tracing_free_buffer_write,
7444 .release = tracing_free_buffer_release,
7447 static const struct file_operations tracing_mark_fops = {
7448 .open = tracing_mark_open,
7449 .write = tracing_mark_write,
7450 .release = tracing_release_generic_tr,
7453 static const struct file_operations tracing_mark_raw_fops = {
7454 .open = tracing_mark_open,
7455 .write = tracing_mark_raw_write,
7456 .release = tracing_release_generic_tr,
7459 static const struct file_operations trace_clock_fops = {
7460 .open = tracing_clock_open,
7462 .llseek = seq_lseek,
7463 .release = tracing_single_release_tr,
7464 .write = tracing_clock_write,
7467 static const struct file_operations trace_time_stamp_mode_fops = {
7468 .open = tracing_time_stamp_mode_open,
7470 .llseek = seq_lseek,
7471 .release = tracing_single_release_tr,
7474 #ifdef CONFIG_TRACER_SNAPSHOT
7475 static const struct file_operations snapshot_fops = {
7476 .open = tracing_snapshot_open,
7478 .write = tracing_snapshot_write,
7479 .llseek = tracing_lseek,
7480 .release = tracing_snapshot_release,
7483 static const struct file_operations snapshot_raw_fops = {
7484 .open = snapshot_raw_open,
7485 .read = tracing_buffers_read,
7486 .release = tracing_buffers_release,
7487 .splice_read = tracing_buffers_splice_read,
7488 .llseek = no_llseek,
7491 #endif /* CONFIG_TRACER_SNAPSHOT */
7494 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7495 * @filp: The active open file structure
7496 * @ubuf: The userspace provided buffer containing the value to write
7497 * @cnt: The maximum number of bytes to write
7498 * @ppos: The current "file" position
7500 * This function implements the write interface for a struct trace_min_max_param.
7501 * The filp->private_data must point to a trace_min_max_param structure that
7502 * defines where to write the value, the min and the max acceptable values,
7503 * and a lock to protect the write.
7506 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7508 struct trace_min_max_param *param = filp->private_data;
7515 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7520 mutex_lock(param->lock);
7522 if (param->min && val < *param->min)
7525 if (param->max && val > *param->max)
7532 mutex_unlock(param->lock);
7541 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7542 * @filp: The active open file structure
7543 * @ubuf: The userspace provided buffer to read value into
7544 * @cnt: The maximum number of bytes to read
7545 * @ppos: The current "file" position
7547 * This function implements the read interface for a struct trace_min_max_param.
7548 * The filp->private_data must point to a trace_min_max_param struct with valid
7552 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7554 struct trace_min_max_param *param = filp->private_data;
7555 char buf[U64_STR_SIZE];
7564 if (cnt > sizeof(buf))
7567 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7569 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7572 const struct file_operations trace_min_max_fops = {
7573 .open = tracing_open_generic,
7574 .read = trace_min_max_read,
7575 .write = trace_min_max_write,
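/*
 * A minimal usage sketch (hypothetical names, not taken from this file):
 * expose a clamped u64 knob by pointing the file's private_data at a
 * struct trace_min_max_param.
 *
 *	static u64 example_val, example_min = 1, example_max = 100;
 *	static DEFINE_MUTEX(example_lock);
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	trace_create_file("example_knob", TRACE_MODE_WRITE, parent,
 *			  &example_param, &trace_min_max_fops);
 */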
7578 #define TRACING_LOG_ERRS_MAX 8
7579 #define TRACING_LOG_LOC_MAX 128
7581 #define CMD_PREFIX " Command: "
7584 const char **errs; /* ptr to loc-specific array of err strings */
7585 u8 type; /* index into errs -> specific err string */
7586 u16 pos; /* caret position */
7590 struct tracing_log_err {
7591 struct list_head list;
7592 struct err_info info;
7593 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7594 char *cmd; /* what caused err */
7597 static DEFINE_MUTEX(tracing_err_log_lock);
7599 static struct tracing_log_err *alloc_tracing_log_err(int len)
7601 struct tracing_log_err *err;
7603 err = kzalloc(sizeof(*err), GFP_KERNEL);
7605 return ERR_PTR(-ENOMEM);
7607 err->cmd = kzalloc(len, GFP_KERNEL);
7610 return ERR_PTR(-ENOMEM);
7616 static void free_tracing_log_err(struct tracing_log_err *err)
7622 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7625 struct tracing_log_err *err;
7628 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7629 err = alloc_tracing_log_err(len);
7630 if (PTR_ERR(err) != -ENOMEM)
7631 tr->n_err_log_entries++;
7635 cmd = kzalloc(len, GFP_KERNEL);
7637 return ERR_PTR(-ENOMEM);
7638 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7641 list_del(&err->list);
7647 * err_pos - find the position of a string within a command for error careting
7648 * @cmd: The tracing command that caused the error
7649 * @str: The string to position the caret at within @cmd
7651 * Finds the position of the first occurrence of @str within @cmd. The
7652 * return value can be passed to tracing_log_err() for caret placement
7655 * Returns the index within @cmd of the first occurrence of @str or 0
7656 * if @str was not found.
7658 unsigned int err_pos(char *cmd, const char *str)
7662 if (WARN_ON(!strlen(cmd)))
7665 found = strstr(cmd, str);
7673 * tracing_log_err - write an error to the tracing error log
7674 * @tr: The associated trace array for the error (NULL for top level array)
7675 * @loc: A string describing where the error occurred
7676 * @cmd: The tracing command that caused the error
7677 * @errs: The array of loc-specific static error strings
7678 * @type: The index into errs[], which produces the specific static err string
7679 * @pos: The position the caret should be placed in the cmd
7681 * Writes an error into tracing/error_log of the form:
7683 * <loc>: error: <text>, followed by "   Command: <cmd>" and a '^' caret at @pos.
7687 * tracing/error_log is a small log file containing the last
7688 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7689 * unless there has been a tracing error, and the error log can be
7690 * cleared and have its memory freed by writing the empty string in
7691 * truncation mode to it, i.e. echo > tracing/error_log.
7693 * NOTE: the @errs array along with the @type param are used to
7694 * produce a static error string - this string is not copied and saved
7695 * when the error is logged - only a pointer to it is saved. See
7696 * existing callers for examples of how static strings are typically
7697 * defined for use with tracing_log_err().
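/*
 * A sketch of the typical caller-side pattern (illustrative names only,
 * not defined in this file):
 *
 *	#define MY_ERRORS						\
 *		C(BAD_ARG,	"Invalid argument"),			\
 *		C(NO_MATCH,	"No matching token found"),
 *
 *	#undef C
 *	#define C(a, b)		MY_ERR_##a
 *	enum { MY_ERRORS };
 *
 *	#undef C
 *	#define C(a, b)		b
 *	static const char *my_err_text[] = { MY_ERRORS };
 *
 *	tracing_log_err(tr, "my_loc", cmd, my_err_text,
 *			MY_ERR_BAD_ARG, err_pos(cmd, "bad_token"));
 */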
7699 void tracing_log_err(struct trace_array *tr,
7700 const char *loc, const char *cmd,
7701 const char **errs, u8 type, u16 pos)
7703 struct tracing_log_err *err;
7709 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7711 mutex_lock(&tracing_err_log_lock);
7712 err = get_tracing_log_err(tr, len);
7713 if (PTR_ERR(err) == -ENOMEM) {
7714 mutex_unlock(&tracing_err_log_lock);
7718 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7719 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7721 err->info.errs = errs;
7722 err->info.type = type;
7723 err->info.pos = pos;
7724 err->info.ts = local_clock();
7726 list_add_tail(&err->list, &tr->err_log);
7727 mutex_unlock(&tracing_err_log_lock);
7730 static void clear_tracing_err_log(struct trace_array *tr)
7732 struct tracing_log_err *err, *next;
7734 mutex_lock(&tracing_err_log_lock);
7735 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7736 list_del(&err->list);
7737 free_tracing_log_err(err);
7740 tr->n_err_log_entries = 0;
7741 mutex_unlock(&tracing_err_log_lock);
7744 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7746 struct trace_array *tr = m->private;
7748 mutex_lock(&tracing_err_log_lock);
7750 return seq_list_start(&tr->err_log, *pos);
7753 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7755 struct trace_array *tr = m->private;
7757 return seq_list_next(v, &tr->err_log, pos);
7760 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7762 mutex_unlock(&tracing_err_log_lock);
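/*
 * Emit the caret line shown under the offending command: pad past the
 * " Command: " prefix and out to @pos before printing the '^' marker.
 */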
7765 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7769 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7771 for (i = 0; i < pos; i++)
7776 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7778 struct tracing_log_err *err = v;
7781 const char *err_text = err->info.errs[err->info.type];
7782 u64 sec = err->info.ts;
7785 nsec = do_div(sec, NSEC_PER_SEC);
7786 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7787 err->loc, err_text);
7788 seq_printf(m, "%s", err->cmd);
7789 tracing_err_log_show_pos(m, err->info.pos);
7795 static const struct seq_operations tracing_err_log_seq_ops = {
7796 .start = tracing_err_log_seq_start,
7797 .next = tracing_err_log_seq_next,
7798 .stop = tracing_err_log_seq_stop,
7799 .show = tracing_err_log_seq_show
7802 static int tracing_err_log_open(struct inode *inode, struct file *file)
7804 struct trace_array *tr = inode->i_private;
7807 ret = tracing_check_open_get_tr(tr);
7811 /* If this file was opened for write, then erase contents */
7812 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7813 clear_tracing_err_log(tr);
7815 if (file->f_mode & FMODE_READ) {
7816 ret = seq_open(file, &tracing_err_log_seq_ops);
7818 struct seq_file *m = file->private_data;
7821 trace_array_put(tr);
7827 static ssize_t tracing_err_log_write(struct file *file,
7828 const char __user *buffer,
7829 size_t count, loff_t *ppos)
7834 static int tracing_err_log_release(struct inode *inode, struct file *file)
7836 struct trace_array *tr = inode->i_private;
7838 trace_array_put(tr);
7840 if (file->f_mode & FMODE_READ)
7841 seq_release(inode, file);
7846 static const struct file_operations tracing_err_log_fops = {
7847 .open = tracing_err_log_open,
7848 .write = tracing_err_log_write,
7850 .llseek = tracing_lseek,
7851 .release = tracing_err_log_release,
7854 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7856 struct trace_array *tr = inode->i_private;
7857 struct ftrace_buffer_info *info;
7860 ret = tracing_check_open_get_tr(tr);
7864 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7866 trace_array_put(tr);
7870 mutex_lock(&trace_types_lock);
7873 info->iter.cpu_file = tracing_get_cpu(inode);
7874 info->iter.trace = tr->current_trace;
7875 info->iter.array_buffer = &tr->array_buffer;
7877 /* Force reading ring buffer for first read */
7878 info->read = (unsigned int)-1;
7880 filp->private_data = info;
7884 mutex_unlock(&trace_types_lock);
7886 ret = nonseekable_open(inode, filp);
7888 trace_array_put(tr);
7894 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7896 struct ftrace_buffer_info *info = filp->private_data;
7897 struct trace_iterator *iter = &info->iter;
7899 return trace_poll(iter, filp, poll_table);
7903 tracing_buffers_read(struct file *filp, char __user *ubuf,
7904 size_t count, loff_t *ppos)
7906 struct ftrace_buffer_info *info = filp->private_data;
7907 struct trace_iterator *iter = &info->iter;
7916 #ifdef CONFIG_TRACER_MAX_TRACE
7917 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7921 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7923 /* Make sure the spare matches the current sub buffer size */
7925 if (page_size != info->spare_size) {
7926 ring_buffer_free_read_page(iter->array_buffer->buffer,
7927 info->spare_cpu, info->spare);
7933 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7935 if (IS_ERR(info->spare)) {
7936 ret = PTR_ERR(info->spare);
7939 info->spare_cpu = iter->cpu_file;
7940 info->spare_size = page_size;
7946 /* Do we have previous read data to read? */
7947 if (info->read < page_size)
7951 trace_access_lock(iter->cpu_file);
7952 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7956 trace_access_unlock(iter->cpu_file);
7959 if (trace_empty(iter)) {
7960 if ((filp->f_flags & O_NONBLOCK))
7963 ret = wait_on_pipe(iter, 0);
7974 size = page_size - info->read;
7977 trace_data = ring_buffer_read_page_data(info->spare);
7978 ret = copy_to_user(ubuf, trace_data + info->read, size);
7990 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
7992 struct ftrace_buffer_info *info = file->private_data;
7993 struct trace_iterator *iter = &info->iter;
7995 iter->closed = true;
7996 /* Make sure the waiters see the new wait_index */
7997 (void)atomic_fetch_inc_release(&iter->wait_index);
7999 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8004 static int tracing_buffers_release(struct inode *inode, struct file *file)
8006 struct ftrace_buffer_info *info = file->private_data;
8007 struct trace_iterator *iter = &info->iter;
8009 mutex_lock(&trace_types_lock);
8011 iter->tr->trace_ref--;
8013 __trace_array_put(iter->tr);
8016 ring_buffer_free_read_page(iter->array_buffer->buffer,
8017 info->spare_cpu, info->spare);
8020 mutex_unlock(&trace_types_lock);
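/*
 * buffer_ref is a reference-counted handle on a ring-buffer read page
 * that gets handed to the pipe by splice; the page is returned to the
 * ring buffer when the last reference is dropped (buffer_ref_release()).
 */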
8026 struct trace_buffer *buffer;
8029 refcount_t refcount;
8032 static void buffer_ref_release(struct buffer_ref *ref)
8034 if (!refcount_dec_and_test(&ref->refcount))
8036 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8040 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8041 struct pipe_buffer *buf)
8043 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8045 buffer_ref_release(ref);
8049 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8050 struct pipe_buffer *buf)
8052 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8054 if (refcount_read(&ref->refcount) > INT_MAX/2)
8057 refcount_inc(&ref->refcount);
8061 /* Pipe buffer operations for a buffer. */
8062 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8063 .release = buffer_pipe_buf_release,
8064 .get = buffer_pipe_buf_get,
8068 * Callback from splice_to_pipe(), if we need to release some pages
8069 * at the end of the spd in case we errored out while filling the pipe.
8071 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8073 struct buffer_ref *ref =
8074 (struct buffer_ref *)spd->partial[i].private;
8076 buffer_ref_release(ref);
8077 spd->partial[i].private = 0;
8081 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8082 struct pipe_inode_info *pipe, size_t len,
8085 struct ftrace_buffer_info *info = file->private_data;
8086 struct trace_iterator *iter = &info->iter;
8087 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8088 struct page *pages_def[PIPE_DEF_BUFFERS];
8089 struct splice_pipe_desc spd = {
8091 .partial = partial_def,
8092 .nr_pages_max = PIPE_DEF_BUFFERS,
8093 .ops = &buffer_pipe_buf_ops,
8094 .spd_release = buffer_spd_release,
8096 struct buffer_ref *ref;
8102 #ifdef CONFIG_TRACER_MAX_TRACE
8103 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8107 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8108 if (*ppos & (page_size - 1))
8111 if (len & (page_size - 1)) {
8112 if (len < page_size)
8114 len &= (~(page_size - 1));
8117 if (splice_grow_spd(pipe, &spd))
8121 trace_access_lock(iter->cpu_file);
8122 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8124 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8128 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8134 refcount_set(&ref->refcount, 1);
8135 ref->buffer = iter->array_buffer->buffer;
8136 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8137 if (IS_ERR(ref->page)) {
8138 ret = PTR_ERR(ref->page);
8143 ref->cpu = iter->cpu_file;
8145 r = ring_buffer_read_page(ref->buffer, ref->page,
8146 len, iter->cpu_file, 1);
8148 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8154 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8156 spd.pages[i] = page;
8157 spd.partial[i].len = page_size;
8158 spd.partial[i].offset = 0;
8159 spd.partial[i].private = (unsigned long)ref;
8163 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8166 trace_access_unlock(iter->cpu_file);
8169 /* did we read anything? */
8170 if (!spd.nr_pages) {
8179 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8182 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8186 /* No need to wait after waking up when tracing is off */
8187 if (!tracer_tracing_is_on(iter->tr))
8190 /* Iterate one more time to collect any new data then exit */
8196 ret = splice_to_pipe(pipe, &spd);
8198 splice_shrink_spd(&spd);
8203 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8205 struct ftrace_buffer_info *info = file->private_data;
8206 struct trace_iterator *iter = &info->iter;
8209 if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8210 if (!(file->f_flags & O_NONBLOCK)) {
8211 err = ring_buffer_wait(iter->array_buffer->buffer,
8213 iter->tr->buffer_percent,
8219 return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8226 * An ioctl call with cmd 0 to the ring buffer file will wake up all waiters on it.
8229 mutex_lock(&trace_types_lock);
8231 /* Make sure the waiters see the new wait_index */
8232 (void)atomic_fetch_inc_release(&iter->wait_index);
8234 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8236 mutex_unlock(&trace_types_lock);
8240 #ifdef CONFIG_TRACER_MAX_TRACE
8241 static int get_snapshot_map(struct trace_array *tr)
8246 * Called with mmap_lock held. lockdep would be unhappy if we took
8247 * trace_types_lock here. Instead use the specific
8248 * snapshot_trigger_lock.
8250 spin_lock(&tr->snapshot_trigger_lock);
8252 if (tr->snapshot || tr->mapped == UINT_MAX)
8257 spin_unlock(&tr->snapshot_trigger_lock);
8259 /* Wait for update_max_tr() to observe iter->tr->mapped */
8260 if (tr->mapped == 1)
8266 static void put_snapshot_map(struct trace_array *tr)
8268 spin_lock(&tr->snapshot_trigger_lock);
8269 if (!WARN_ON(!tr->mapped))
8271 spin_unlock(&tr->snapshot_trigger_lock);
8274 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8275 static inline void put_snapshot_map(struct trace_array *tr) { }
8278 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8280 struct ftrace_buffer_info *info = vma->vm_file->private_data;
8281 struct trace_iterator *iter = &info->iter;
8283 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8284 put_snapshot_map(iter->tr);
8287 static const struct vm_operations_struct tracing_buffers_vmops = {
8288 .close = tracing_buffers_mmap_close,
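/*
 * mmap() handler for the per-CPU trace_pipe_raw file: pin the snapshot
 * state so buffers cannot be swapped while mapped, then map the CPU's
 * ring buffer into the caller's address space.
 */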
8291 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8293 struct ftrace_buffer_info *info = filp->private_data;
8294 struct trace_iterator *iter = &info->iter;
8297 ret = get_snapshot_map(iter->tr);
8301 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8303 put_snapshot_map(iter->tr);
8305 vma->vm_ops = &tracing_buffers_vmops;
8310 static const struct file_operations tracing_buffers_fops = {
8311 .open = tracing_buffers_open,
8312 .read = tracing_buffers_read,
8313 .poll = tracing_buffers_poll,
8314 .release = tracing_buffers_release,
8315 .flush = tracing_buffers_flush,
8316 .splice_read = tracing_buffers_splice_read,
8317 .unlocked_ioctl = tracing_buffers_ioctl,
8318 .llseek = no_llseek,
8319 .mmap = tracing_buffers_mmap,
8323 tracing_stats_read(struct file *filp, char __user *ubuf,
8324 size_t count, loff_t *ppos)
8326 struct inode *inode = file_inode(filp);
8327 struct trace_array *tr = inode->i_private;
8328 struct array_buffer *trace_buf = &tr->array_buffer;
8329 int cpu = tracing_get_cpu(inode);
8330 struct trace_seq *s;
8332 unsigned long long t;
8333 unsigned long usec_rem;
8335 s = kmalloc(sizeof(*s), GFP_KERNEL);
8341 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8342 trace_seq_printf(s, "entries: %ld\n", cnt);
8344 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8345 trace_seq_printf(s, "overrun: %ld\n", cnt);
8347 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8348 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8350 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8351 trace_seq_printf(s, "bytes: %ld\n", cnt);
8353 if (trace_clocks[tr->clock_id].in_ns) {
8354 /* local or global for trace_clock */
8355 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8356 usec_rem = do_div(t, USEC_PER_SEC);
8357 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8360 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8361 usec_rem = do_div(t, USEC_PER_SEC);
8362 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8364 /* counter or tsc mode for trace_clock */
8365 trace_seq_printf(s, "oldest event ts: %llu\n",
8366 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8368 trace_seq_printf(s, "now ts: %llu\n",
8369 ring_buffer_time_stamp(trace_buf->buffer));
8372 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8373 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8375 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8376 trace_seq_printf(s, "read events: %ld\n", cnt);
8378 count = simple_read_from_buffer(ubuf, count, ppos,
8379 s->buffer, trace_seq_used(s));
8386 static const struct file_operations tracing_stats_fops = {
8387 .open = tracing_open_generic_tr,
8388 .read = tracing_stats_read,
8389 .llseek = generic_file_llseek,
8390 .release = tracing_release_generic_tr,
8393 #ifdef CONFIG_DYNAMIC_FTRACE
8396 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8397 size_t cnt, loff_t *ppos)
8403 /* 256 should be plenty to hold the amount needed */
8404 buf = kmalloc(256, GFP_KERNEL);
8408 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8409 ftrace_update_tot_cnt,
8410 ftrace_number_of_pages,
8411 ftrace_number_of_groups);
8413 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8418 static const struct file_operations tracing_dyn_info_fops = {
8419 .open = tracing_open_generic,
8420 .read = tracing_read_dyn_info,
8421 .llseek = generic_file_llseek,
8423 #endif /* CONFIG_DYNAMIC_FTRACE */
8425 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8427 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8428 struct trace_array *tr, struct ftrace_probe_ops *ops,
8431 tracing_snapshot_instance(tr);
8435 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8436 struct trace_array *tr, struct ftrace_probe_ops *ops,
8439 struct ftrace_func_mapper *mapper = data;
8443 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8453 tracing_snapshot_instance(tr);
8457 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8458 struct ftrace_probe_ops *ops, void *data)
8460 struct ftrace_func_mapper *mapper = data;
8463 seq_printf(m, "%ps:", (void *)ip);
8465 seq_puts(m, "snapshot");
8468 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8471 seq_printf(m, ":count=%ld\n", *count);
8473 seq_puts(m, ":unlimited\n");
8479 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8480 unsigned long ip, void *init_data, void **data)
8482 struct ftrace_func_mapper *mapper = *data;
8485 mapper = allocate_ftrace_func_mapper();
8491 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8495 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8496 unsigned long ip, void *data)
8498 struct ftrace_func_mapper *mapper = data;
8503 free_ftrace_func_mapper(mapper, NULL);
8507 ftrace_func_mapper_remove_ip(mapper, ip);
8510 static struct ftrace_probe_ops snapshot_probe_ops = {
8511 .func = ftrace_snapshot,
8512 .print = ftrace_snapshot_print,
8515 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8516 .func = ftrace_count_snapshot,
8517 .print = ftrace_snapshot_print,
8518 .init = ftrace_snapshot_init,
8519 .free = ftrace_snapshot_free,
8523 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8524 char *glob, char *cmd, char *param, int enable)
8526 struct ftrace_probe_ops *ops;
8527 void *count = (void *)-1;
8534 /* hash funcs only work with set_ftrace_filter */
8538 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8540 if (glob[0] == '!') {
8541 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8543 tracing_disarm_snapshot(tr);
8551 number = strsep(¶m, ":");
8553 if (!strlen(number))
8557 * We use the callback data field (which is a pointer) as our counter.
8560 ret = kstrtoul(number, 0, (unsigned long *)&count);
8565 ret = tracing_arm_snapshot(tr);
8569 ret = register_ftrace_function_probe(glob, tr, ops, count);
8571 tracing_disarm_snapshot(tr);
8573 return ret < 0 ? ret : 0;
8576 static struct ftrace_func_command ftrace_snapshot_cmd = {
8578 .func = ftrace_trace_snapshot_callback,
8581 static __init int register_snapshot_cmd(void)
8583 return register_ftrace_command(&ftrace_snapshot_cmd);
8586 static inline __init int register_snapshot_cmd(void) { return 0; }
8587 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8589 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8591 if (WARN_ON(!tr->dir))
8592 return ERR_PTR(-ENODEV);
8594 /* Top directory uses NULL as the parent */
8595 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8598 /* All sub buffers have a descriptor */
8602 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8604 struct dentry *d_tracer;
8607 return tr->percpu_dir;
8609 d_tracer = tracing_get_dentry(tr);
8610 if (IS_ERR(d_tracer))
8613 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8615 MEM_FAIL(!tr->percpu_dir,
8616 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8618 return tr->percpu_dir;
8621 static struct dentry *
8622 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8623 void *data, long cpu, const struct file_operations *fops)
8625 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8627 if (ret) /* See tracing_get_cpu() */
8628 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8633 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8635 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8636 struct dentry *d_cpu;
8637 char cpu_dir[30]; /* 30 characters should be more than enough */
8642 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8643 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8645 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8649 /* per cpu trace_pipe */
8650 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8651 tr, cpu, &tracing_pipe_fops);
8654 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8655 tr, cpu, &tracing_fops);
8657 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8658 tr, cpu, &tracing_buffers_fops);
8660 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8661 tr, cpu, &tracing_stats_fops);
8663 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8664 tr, cpu, &tracing_entries_fops);
8666 #ifdef CONFIG_TRACER_SNAPSHOT
8667 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8668 tr, cpu, &snapshot_fops);
8670 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8671 tr, cpu, &snapshot_raw_fops);
8675 #ifdef CONFIG_FTRACE_SELFTEST
8676 /* Let selftest have access to static functions in this file */
8677 #include "trace_selftest.c"
8681 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8684 struct trace_option_dentry *topt = filp->private_data;
8687 if (topt->flags->val & topt->opt->bit)
8692 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8696 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8699 struct trace_option_dentry *topt = filp->private_data;
8703 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8707 if (val != 0 && val != 1)
8710 if (!!(topt->flags->val & topt->opt->bit) != val) {
8711 mutex_lock(&trace_types_lock);
8712 ret = __set_tracer_option(topt->tr, topt->flags,
8714 mutex_unlock(&trace_types_lock);
8724 static int tracing_open_options(struct inode *inode, struct file *filp)
8726 struct trace_option_dentry *topt = inode->i_private;
8729 ret = tracing_check_open_get_tr(topt->tr);
8733 filp->private_data = inode->i_private;
8737 static int tracing_release_options(struct inode *inode, struct file *file)
8739 struct trace_option_dentry *topt = file->private_data;
8741 trace_array_put(topt->tr);
8745 static const struct file_operations trace_options_fops = {
8746 .open = tracing_open_options,
8747 .read = trace_options_read,
8748 .write = trace_options_write,
8749 .llseek = generic_file_llseek,
8750 .release = tracing_release_options,
8754 * In order to pass in both the trace_array descriptor as well as the index
8755 * to the flag that the trace option file represents, the trace_array
8756 * has a character array of trace_flags_index[], which holds the index
8757 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8758 * The address of this character array is passed to the flag option file
8759 * read/write callbacks.
8761 * In order to extract both the index and the trace_array descriptor,
8762 * get_tr_index() uses the following algorithm:
 *
 *	idx = *ptr;
 *
8766 * As the pointer itself contains the address of the index (remember
 * index[0] == 0), dereferencing it gives the index.
8769 * Then to get the trace_array descriptor, by subtracting that index
8770 * from the ptr, we get to the start of the index itself.
8772 * ptr - idx == &index[0]
8774 * Then a simple container_of() from that pointer gets us to the
8775 * trace_array descriptor.
8777 static void get_tr_index(void *data, struct trace_array **ptr,
8778 unsigned int *pindex)
8780 *pindex = *(unsigned char *)data;
8782 *ptr = container_of(data - *pindex, struct trace_array,
8787 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8790 void *tr_index = filp->private_data;
8791 struct trace_array *tr;
8795 get_tr_index(tr_index, &tr, &index);
8797 if (tr->trace_flags & (1 << index))
8802 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8806 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8809 void *tr_index = filp->private_data;
8810 struct trace_array *tr;
8815 get_tr_index(tr_index, &tr, &index);
8817 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8821 if (val != 0 && val != 1)
8824 mutex_lock(&event_mutex);
8825 mutex_lock(&trace_types_lock);
8826 ret = set_tracer_flag(tr, 1 << index, val);
8827 mutex_unlock(&trace_types_lock);
8828 mutex_unlock(&event_mutex);
8838 static const struct file_operations trace_options_core_fops = {
8839 .open = tracing_open_generic,
8840 .read = trace_options_core_read,
8841 .write = trace_options_core_write,
8842 .llseek = generic_file_llseek,
8845 struct dentry *trace_create_file(const char *name,
8847 struct dentry *parent,
8849 const struct file_operations *fops)
8853 ret = tracefs_create_file(name, mode, parent, data, fops);
8855 pr_warn("Could not create tracefs '%s' entry\n", name);
8861 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8863 struct dentry *d_tracer;
8868 d_tracer = tracing_get_dentry(tr);
8869 if (IS_ERR(d_tracer))
8872 tr->options = tracefs_create_dir("options", d_tracer);
8874 pr_warn("Could not create tracefs directory 'options'\n");
8882 create_trace_option_file(struct trace_array *tr,
8883 struct trace_option_dentry *topt,
8884 struct tracer_flags *flags,
8885 struct tracer_opt *opt)
8887 struct dentry *t_options;
8889 t_options = trace_options_init_dentry(tr);
8893 topt->flags = flags;
8897 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8898 t_options, topt, &trace_options_fops);
8903 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8905 struct trace_option_dentry *topts;
8906 struct trace_options *tr_topts;
8907 struct tracer_flags *flags;
8908 struct tracer_opt *opts;
8915 flags = tracer->flags;
8917 if (!flags || !flags->opts)
8921 * If this is an instance, only create flags for tracers
8922 * the instance may have.
8924 if (!trace_ok_for_array(tracer, tr))
8927 for (i = 0; i < tr->nr_topts; i++) {
8928 /* Make sure there are no duplicate flags. */
8929 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8935 for (cnt = 0; opts[cnt].name; cnt++)
8938 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8942 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8949 tr->topts = tr_topts;
8950 tr->topts[tr->nr_topts].tracer = tracer;
8951 tr->topts[tr->nr_topts].topts = topts;
8954 for (cnt = 0; opts[cnt].name; cnt++) {
8955 create_trace_option_file(tr, &topts[cnt], flags,
8957 MEM_FAIL(topts[cnt].entry == NULL,
8958 "Failed to create trace option: %s",
8963 static struct dentry *
8964 create_trace_option_core_file(struct trace_array *tr,
8965 const char *option, long index)
8967 struct dentry *t_options;
8969 t_options = trace_options_init_dentry(tr);
8973 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8974 (void *)&tr->trace_flags_index[index],
8975 &trace_options_core_fops);
8978 static void create_trace_options_dir(struct trace_array *tr)
8980 struct dentry *t_options;
8981 bool top_level = tr == &global_trace;
8984 t_options = trace_options_init_dentry(tr);
8988 for (i = 0; trace_options[i]; i++) {
8990 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8991 create_trace_option_core_file(tr, trace_options[i], i);
8996 rb_simple_read(struct file *filp, char __user *ubuf,
8997 size_t cnt, loff_t *ppos)
8999 struct trace_array *tr = filp->private_data;
9003 r = tracer_tracing_is_on(tr);
9004 r = sprintf(buf, "%d\n", r);
9006 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9010 rb_simple_write(struct file *filp, const char __user *ubuf,
9011 size_t cnt, loff_t *ppos)
9013 struct trace_array *tr = filp->private_data;
9014 struct trace_buffer *buffer = tr->array_buffer.buffer;
9018 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9023 mutex_lock(&trace_types_lock);
9024 if (!!val == tracer_tracing_is_on(tr)) {
9025 val = 0; /* do nothing */
9027 tracer_tracing_on(tr);
9028 if (tr->current_trace->start)
9029 tr->current_trace->start(tr);
9031 tracer_tracing_off(tr);
9032 if (tr->current_trace->stop)
9033 tr->current_trace->stop(tr);
9034 /* Wake up any waiters */
9035 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9037 mutex_unlock(&trace_types_lock);
9045 static const struct file_operations rb_simple_fops = {
9046 .open = tracing_open_generic_tr,
9047 .read = rb_simple_read,
9048 .write = rb_simple_write,
9049 .release = tracing_release_generic_tr,
9050 .llseek = default_llseek,
9054 buffer_percent_read(struct file *filp, char __user *ubuf,
9055 size_t cnt, loff_t *ppos)
9057 struct trace_array *tr = filp->private_data;
9061 r = tr->buffer_percent;
9062 r = sprintf(buf, "%d\n", r);
9064 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9068 buffer_percent_write(struct file *filp, const char __user *ubuf,
9069 size_t cnt, loff_t *ppos)
9071 struct trace_array *tr = filp->private_data;
9075 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9082 tr->buffer_percent = val;
9089 static const struct file_operations buffer_percent_fops = {
9090 .open = tracing_open_generic_tr,
9091 .read = buffer_percent_read,
9092 .write = buffer_percent_write,
9093 .release = tracing_release_generic_tr,
9094 .llseek = default_llseek,
9098 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9100 struct trace_array *tr = filp->private_data;
9106 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9107 size = (PAGE_SIZE << order) / 1024;
9109 r = sprintf(buf, "%zd\n", size);
9111 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9115 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9116 size_t cnt, loff_t *ppos)
9118 struct trace_array *tr = filp->private_data;
9125 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9129 val *= 1024; /* value passed in is in KB */
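/*
 * Convert the requested size (now in bytes) to a sub-buffer order:
 * fls(pages - 1) is the smallest order for which (1 << order) system
 * pages cover the request.
 */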
9131 pages = DIV_ROUND_UP(val, PAGE_SIZE);
9132 order = fls(pages - 1);
9134 /* limit between 1 and 128 system pages */
9135 if (order < 0 || order > 7)
9138 /* Do not allow tracing while changing the order of the ring buffer */
9139 tracing_stop_tr(tr);
9141 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9142 if (old_order == order)
9145 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9149 #ifdef CONFIG_TRACER_MAX_TRACE
9151 if (!tr->allocated_snapshot)
9154 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9156 /* Put back the old order */
9157 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9158 if (WARN_ON_ONCE(cnt)) {
9160 * AARGH! We are left with different orders!
9161 * The max buffer is our "snapshot" buffer.
9162 * When a tracer needs a snapshot (one of the
9163 * latency tracers), it swaps the max buffer
9164 * with the saved snapshot. We succeeded in
9165 * updating the order of the main buffer, but failed to
9166 * update the order of the max buffer. But when we tried
9167 * to reset the main buffer to the original size, we
9168 * failed there too. This is very unlikely to
9169 * happen, but if it does, warn and kill all tracing.
9172 tracing_disabled = 1;
9182 tracing_start_tr(tr);
9186 static const struct file_operations buffer_subbuf_size_fops = {
9187 .open = tracing_open_generic_tr,
9188 .read = buffer_subbuf_size_read,
9189 .write = buffer_subbuf_size_write,
9190 .release = tracing_release_generic_tr,
9191 .llseek = default_llseek,
9194 static struct dentry *trace_instance_dir;
9197 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9200 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9202 enum ring_buffer_flags rb_flags;
9204 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9208 buf->buffer = ring_buffer_alloc(size, rb_flags);
9212 buf->data = alloc_percpu(struct trace_array_cpu);
9214 ring_buffer_free(buf->buffer);
9219 /* Allocate the first page for all buffers */
9220 set_buffer_entries(&tr->array_buffer,
9221 ring_buffer_size(tr->array_buffer.buffer, 0));
9226 static void free_trace_buffer(struct array_buffer *buf)
9229 ring_buffer_free(buf->buffer);
9231 free_percpu(buf->data);
9236 static int allocate_trace_buffers(struct trace_array *tr, int size)
9240 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9244 #ifdef CONFIG_TRACER_MAX_TRACE
9245 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9246 allocate_snapshot ? size : 1);
9247 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9248 free_trace_buffer(&tr->array_buffer);
9251 tr->allocated_snapshot = allocate_snapshot;
9253 allocate_snapshot = false;
9259 static void free_trace_buffers(struct trace_array *tr)
9264 free_trace_buffer(&tr->array_buffer);
9266 #ifdef CONFIG_TRACER_MAX_TRACE
9267 free_trace_buffer(&tr->max_buffer);
9271 static void init_trace_flags_index(struct trace_array *tr)
9275 /* Used by the trace options files */
9276 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9277 tr->trace_flags_index[i] = i;
9280 static void __update_tracer_options(struct trace_array *tr)
9284 for (t = trace_types; t; t = t->next)
9285 add_tracer_options(tr, t);
9288 static void update_tracer_options(struct trace_array *tr)
9290 mutex_lock(&trace_types_lock);
9291 tracer_options_updated = true;
9292 __update_tracer_options(tr);
9293 mutex_unlock(&trace_types_lock);
9296 /* Must have trace_types_lock held */
9297 struct trace_array *trace_array_find(const char *instance)
9299 struct trace_array *tr, *found = NULL;
9301 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9302 if (tr->name && strcmp(tr->name, instance) == 0) {
9311 struct trace_array *trace_array_find_get(const char *instance)
9313 struct trace_array *tr;
9315 mutex_lock(&trace_types_lock);
9316 tr = trace_array_find(instance);
9319 mutex_unlock(&trace_types_lock);
9324 static int trace_array_create_dir(struct trace_array *tr)
9328 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9332 ret = event_trace_add_tracer(tr->dir, tr);
9334 tracefs_remove(tr->dir);
9338 init_tracer_tracefs(tr, tr->dir);
9339 __update_tracer_options(tr);
9344 static struct trace_array *
9345 trace_array_create_systems(const char *name, const char *systems)
9347 struct trace_array *tr;
9351 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9353 return ERR_PTR(ret);
9355 tr->name = kstrdup(name, GFP_KERNEL);
9359 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9362 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9366 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9367 if (!tr->system_names)
9371 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9373 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9375 raw_spin_lock_init(&tr->start_lock);
9377 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9378 #ifdef CONFIG_TRACER_MAX_TRACE
9379 spin_lock_init(&tr->snapshot_trigger_lock);
9381 tr->current_trace = &nop_trace;
9383 INIT_LIST_HEAD(&tr->systems);
9384 INIT_LIST_HEAD(&tr->events);
9385 INIT_LIST_HEAD(&tr->hist_vars);
9386 INIT_LIST_HEAD(&tr->err_log);
9388 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9391 /* The ring buffer is expanded by default */
9392 trace_set_ring_buffer_expanded(tr);
9394 if (ftrace_allocate_ftrace_ops(tr) < 0)
9397 ftrace_init_trace_array(tr);
9399 init_trace_flags_index(tr);
9401 if (trace_instance_dir) {
9402 ret = trace_array_create_dir(tr);
9406 __trace_early_add_events(tr);
9408 list_add(&tr->list, &ftrace_trace_arrays);
9415 ftrace_free_ftrace_ops(tr);
9416 free_trace_buffers(tr);
9417 free_cpumask_var(tr->pipe_cpumask);
9418 free_cpumask_var(tr->tracing_cpumask);
9419 kfree_const(tr->system_names);
9423 return ERR_PTR(ret);
9426 static struct trace_array *trace_array_create(const char *name)
9428 return trace_array_create_systems(name, NULL);
9431 static int instance_mkdir(const char *name)
9433 struct trace_array *tr;
9436 mutex_lock(&event_mutex);
9437 mutex_lock(&trace_types_lock);
9440 if (trace_array_find(name))
9443 tr = trace_array_create(name);
9445 ret = PTR_ERR_OR_ZERO(tr);
9448 mutex_unlock(&trace_types_lock);
9449 mutex_unlock(&event_mutex);
9454 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9455 * @name: The name of the trace array to be looked up/created.
9456 * @systems: A list of systems to create event directories for (NULL for all)
9458 * Returns pointer to trace array with given name.
9459 * NULL, if it cannot be created.
9461 * NOTE: This function increments the reference counter associated with the
9462 * trace array returned. This makes sure it cannot be freed while in use.
9463 * Use trace_array_put() once the trace array is no longer needed.
9464 * If the trace_array is to be freed, trace_array_destroy() needs to
9465 * be called after the trace_array_put(), or simply let user space delete
9466 * it from the tracefs instances directory. But until the
9467 * trace_array_put() is called, user space can not delete it.
9470 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9472 struct trace_array *tr;
9474 mutex_lock(&event_mutex);
9475 mutex_lock(&trace_types_lock);
9477 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9478 if (tr->name && strcmp(tr->name, name) == 0)
9482 tr = trace_array_create_systems(name, systems);
9490 mutex_unlock(&trace_types_lock);
9491 mutex_unlock(&event_mutex);
9494 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
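/*
 * A minimal usage sketch (hypothetical module code, not part of this file):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	(only if the instance should go away)
 */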
9496 static int __remove_instance(struct trace_array *tr)
9500 /* Reference counter for a newly created trace array = 1. */
9501 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9504 list_del(&tr->list);
9506 /* Disable all the flags that were enabled coming in */
9507 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9508 if ((1 << i) & ZEROED_TRACE_FLAGS)
9509 set_tracer_flag(tr, 1 << i, 0);
9512 tracing_set_nop(tr);
9513 clear_ftrace_function_probes(tr);
9514 event_trace_del_tracer(tr);
9515 ftrace_clear_pids(tr);
9516 ftrace_destroy_function_files(tr);
9517 tracefs_remove(tr->dir);
9518 free_percpu(tr->last_func_repeats);
9519 free_trace_buffers(tr);
9520 clear_tracing_err_log(tr);
9522 for (i = 0; i < tr->nr_topts; i++) {
9523 kfree(tr->topts[i].topts);
9527 free_cpumask_var(tr->pipe_cpumask);
9528 free_cpumask_var(tr->tracing_cpumask);
9529 kfree_const(tr->system_names);
9536 int trace_array_destroy(struct trace_array *this_tr)
9538 struct trace_array *tr;
9544 mutex_lock(&event_mutex);
9545 mutex_lock(&trace_types_lock);
9549 /* Making sure trace array exists before destroying it. */
9550 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9551 if (tr == this_tr) {
9552 ret = __remove_instance(tr);
9557 mutex_unlock(&trace_types_lock);
9558 mutex_unlock(&event_mutex);
9562 EXPORT_SYMBOL_GPL(trace_array_destroy);
9564 static int instance_rmdir(const char *name)
9566 struct trace_array *tr;
9569 mutex_lock(&event_mutex);
9570 mutex_lock(&trace_types_lock);
9573 tr = trace_array_find(name);
9575 ret = __remove_instance(tr);
9577 mutex_unlock(&trace_types_lock);
9578 mutex_unlock(&event_mutex);
9583 static __init void create_trace_instances(struct dentry *d_tracer)
9585 struct trace_array *tr;
9587 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9590 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9593 mutex_lock(&event_mutex);
9594 mutex_lock(&trace_types_lock);
9596 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9599 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9600 "Failed to create instance directory\n"))
9604 mutex_unlock(&trace_types_lock);
9605 mutex_unlock(&event_mutex);
9609 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9613 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9614 tr, &show_traces_fops);
9616 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9617 tr, &set_tracer_fops);
9619 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9620 tr, &tracing_cpumask_fops);
9622 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9623 tr, &tracing_iter_fops);
9625 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9628 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9629 tr, &tracing_pipe_fops);
9631 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9632 tr, &tracing_entries_fops);
9634 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9635 tr, &tracing_total_entries_fops);
9637 trace_create_file("free_buffer", 0200, d_tracer,
9638 tr, &tracing_free_buffer_fops);
9640 trace_create_file("trace_marker", 0220, d_tracer,
9641 tr, &tracing_mark_fops);
9643 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9645 trace_create_file("trace_marker_raw", 0220, d_tracer,
9646 tr, &tracing_mark_raw_fops);
9648 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9651 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9652 tr, &rb_simple_fops);
9654 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9655 &trace_time_stamp_mode_fops);
9657 tr->buffer_percent = 50;
9659 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9660 tr, &buffer_percent_fops);
9662 trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9663 tr, &buffer_subbuf_size_fops);
9665 create_trace_options_dir(tr);
9667 #ifdef CONFIG_TRACER_MAX_TRACE
9668 trace_create_maxlat_file(tr, d_tracer);
9671 if (ftrace_create_function_files(tr, d_tracer))
9672 MEM_FAIL(1, "Could not allocate function filter files");
9674 #ifdef CONFIG_TRACER_SNAPSHOT
9675 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9676 tr, &snapshot_fops);
9679 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9680 tr, &tracing_err_log_fops);
9682 for_each_tracing_cpu(cpu)
9683 tracing_init_tracefs_percpu(tr, cpu);
9685 ftrace_init_tracefs(tr, d_tracer);
9688 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9690 struct vfsmount *mnt;
9691 struct file_system_type *type;
9694 * To maintain backward compatibility for tools that mount
9695 * debugfs to get to the tracing facility, tracefs is automatically
9696 * mounted to the debugfs/tracing directory.
9698 type = get_fs_type("tracefs");
9701 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9702 put_filesystem(type);
9711 * tracing_init_dentry - initialize top level trace array
9713 * This is called when creating files or directories in the tracing
9714 * directory. It is called via fs_initcall() by any of the boot up code
9715 * and returns 0 once the top level tracing directory has been set up.
9717 int tracing_init_dentry(void)
9719 struct trace_array *tr = &global_trace;
9721 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9722 pr_warn("Tracing disabled due to lockdown\n");
9726 /* The top level trace array uses NULL as parent */
9730 if (WARN_ON(!tracefs_initialized()))
9734 * As there may still be users that expect the tracing
9735 * files to exist in debugfs/tracing, we must automount
9736 * the tracefs file system there, so older tools still
9737 * work with the newer kernel.
9739 tr->dir = debugfs_create_automount("tracing", NULL,
9740 trace_automount, NULL);
9745 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9746 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9748 static struct workqueue_struct *eval_map_wq __initdata;
9749 static struct work_struct eval_map_work __initdata;
9750 static struct work_struct tracerfs_init_work __initdata;
9752 static void __init eval_map_work_func(struct work_struct *work)
9756 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9757 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9760 static int __init trace_eval_init(void)
9762 INIT_WORK(&eval_map_work, eval_map_work_func);
9764 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9766 pr_err("Unable to allocate eval_map_wq\n");
9768 eval_map_work_func(&eval_map_work);
9772 queue_work(eval_map_wq, &eval_map_work);
9776 subsys_initcall(trace_eval_init);
9778 static int __init trace_eval_sync(void)
9780 /* Make sure the eval map updates are finished */
9782 destroy_workqueue(eval_map_wq);
9786 late_initcall_sync(trace_eval_sync);
9789 #ifdef CONFIG_MODULES
9790 static void trace_module_add_evals(struct module *mod)
9792 if (!mod->num_trace_evals)
9796 * Modules with bad taint do not have events created; do
9797 * not bother with enums either.
9799 if (trace_module_has_bad_taint(mod))
9802 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9805 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9806 static void trace_module_remove_evals(struct module *mod)
9808 union trace_eval_map_item *map;
9809 union trace_eval_map_item **last = &trace_eval_maps;
9811 if (!mod->num_trace_evals)
9814 mutex_lock(&trace_eval_mutex);
9816 map = trace_eval_maps;
9819 if (map->head.mod == mod)
9821 map = trace_eval_jmp_to_tail(map);
9822 last = &map->tail.next;
9823 map = map->tail.next;
9828 *last = trace_eval_jmp_to_tail(map)->tail.next;
9831 mutex_unlock(&trace_eval_mutex);
9834 static inline void trace_module_remove_evals(struct module *mod) { }
9835 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9837 static int trace_module_notify(struct notifier_block *self,
9838 unsigned long val, void *data)
9840 struct module *mod = data;
9843 case MODULE_STATE_COMING:
9844 trace_module_add_evals(mod);
9846 case MODULE_STATE_GOING:
9847 trace_module_remove_evals(mod);
9854 static struct notifier_block trace_module_nb = {
9855 .notifier_call = trace_module_notify,
9858 #endif /* CONFIG_MODULES */
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{

	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);
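
/*
 * A minimal sketch of the resulting layout, assuming tracefs is mounted at
 * /sys/kernel/tracing (paths follow from the trace_create_file() calls
 * above; this is not an exhaustive listing):
 *
 *   /sys/kernel/tracing/tracing_thresh
 *   /sys/kernel/tracing/README
 *   /sys/kernel/tracing/saved_cmdlines
 *   /sys/kernel/tracing/saved_cmdlines_size
 *   /sys/kernel/tracing/saved_tgids
 *   /sys/kernel/tracing/dyn_ftrace_total_info    (CONFIG_DYNAMIC_FTRACE)
 *   /sys/kernel/tracing/instances/               (create_trace_instances())
 */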
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops_enabled())
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(DUMP_PARAM);

	return NOTIFY_DONE;
}
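
/*
 * Note that DUMP_PARAM makes ftrace_dump() defer to the ftrace_dump_on_oops
 * string (parsed by ftrace_dump_by_param() further below), so a single
 * oops or panic can dump the global buffer and/or selected instances.
 */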
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be zero-terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
{
	iter->tr = tr;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &tr->array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	trace_init_iter(iter, &global_trace);
}
static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracer_tracing_off(tr);

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_iter(&iter, tr);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	if (dump_mode == DUMP_ORIG)
		iter.cpu_file = raw_smp_processor_id();
	else
		iter.cpu_file = RING_BUFFER_ALL_CPUS;

	if (tr == &global_trace)
		printk(KERN_TRACE "Dumping ftrace buffer:\n");
	else
		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	local_irq_restore(flags);
}
static void ftrace_dump_by_param(void)
{
	bool first_param = true;
	char dump_param[MAX_TRACER_SIZE];
	char *buf, *token, *inst_name;
	struct trace_array *tr;

	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
	buf = dump_param;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (first_param) {
			first_param = false;
			if (!strcmp("0", token))
				continue;
			else if (!strcmp("1", token)) {
				ftrace_dump_one(&global_trace, DUMP_ALL);
				continue;
			}
			else if (!strcmp("2", token) ||
				 !strcmp("orig_cpu", token)) {
				ftrace_dump_one(&global_trace, DUMP_ORIG);
				continue;
			}
		}

		inst_name = strsep(&token, "=");
		tr = trace_array_find(inst_name);
		if (!tr) {
			printk(KERN_TRACE "Instance %s not found\n", inst_name);
			continue;
		}

		if (token && (!strcmp("2", token) ||
			      !strcmp("orig_cpu", token)))
			ftrace_dump_one(tr, DUMP_ORIG);
		else
			ftrace_dump_one(tr, DUMP_ALL);
	}
}
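
/*
 * A minimal usage sketch, following the parsing above (the instance names
 * are hypothetical):
 *
 *   ftrace_dump_on_oops=1              dump the global buffer, all CPUs
 *   ftrace_dump_on_oops=orig_cpu       dump the global buffer, oops CPU only
 *   ftrace_dump_on_oops=1,foo,bar=orig_cpu
 *                                      also dump instance "foo" (all CPUs)
 *                                      and instance "bar" (oops CPU only)
 */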
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	static atomic_t dump_running;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	switch (oops_dump_mode) {
	case DUMP_ALL:
		ftrace_dump_one(&global_trace, DUMP_ALL);
		break;
	case DUMP_ORIG:
		ftrace_dump_one(&global_trace, DUMP_ORIG);
		break;
	case DUMP_PARAM:
		ftrace_dump_by_param();
		break;
	case DUMP_NONE:
		break;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		ftrace_dump_one(&global_trace, DUMP_ALL);
	}

	atomic_dec(&dump_running);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
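
/*
 * A minimal sketch of how other kernel code can use the export above; the
 * calling site is hypothetical and the block is not compiled. It simply
 * dumps all buffers once when some fatal condition is detected.
 */
#if 0
static void example_fatal_condition(void)
{
	pr_emerg("example: fatal condition hit, dumping trace buffers\n");
	ftrace_dump(DUMP_ALL);
}
#endif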
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
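
/*
 * A minimal sketch of how a tracefs write handler is expected to use
 * trace_parse_run_command(): each newline-terminated, '#'-comment-stripped
 * line is handed to the callback. The callback and handler names are
 * hypothetical and the block is not compiled.
 */
#if 0
static int example_create_cmd(const char *raw_command)
{
	/* A real callback would parse and register the command here. */
	pr_debug("example: got command '%s'\n", raw_command);
	return 0;
}

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create_cmd);
}
#endif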
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) == NULL;
	kfree(test);
	return !ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
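
/*
 * A brief sketch of the matching done above: boot_snapshot_info is a
 * tab-terminated list of instance names built up at boot, so
 * tr_needs_alloc_snapshot(name) returns true when "name" appears either at
 * the start of the list (followed by a tab) or embedded as "\t<name>\t".
 */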
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok, NULL);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
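
/*
 * A minimal sketch of the boot_instance_info format consumed above, with
 * hypothetical instance names: each tab-separated entry is
 * "<instance>[,<event>...]", e.g. built from kernel command line options
 * such as:
 *
 *   trace_instance=foo,sched:sched_switch,sched:sched_wakeup
 *   trace_instance=bar
 *
 * which creates instance "foo" with two events enabled early, and
 * instance "bar" with no events.
 */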
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (global_trace.ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
	spin_lock_init(&global_trace.snapshot_trigger_lock);
#endif
	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	trace_free_saved_cmdlines_buffer();
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);