1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
8 * Originally taken from the RT patch by:
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
57 #ifdef CONFIG_FTRACE_STARTUP_TEST
59 * We need to change this state when a selftest is running.
60 * A selftest will look into the ring buffer to count the
61 * entries inserted during the selftest, although some concurrent
62 * insertions into the ring buffer, such as trace_printk(), could occur
63 * at the same time, giving false positive or negative results.
65 static bool __read_mostly tracing_selftest_running;
68 * If boot-time tracing (including tracers/events set up via the kernel
69 * cmdline) is running, we do not want to run the selftests.
71 bool __read_mostly tracing_selftest_disabled;
73 void __init disable_tracing_selftest(const char *reason)
75 if (!tracing_selftest_disabled) {
76 tracing_selftest_disabled = true;
77 pr_info("Ftrace startup test is disabled due to %s\n", reason);
81 #define tracing_selftest_running 0
82 #define tracing_selftest_disabled 0
85 /* Pipe tracepoints to printk */
86 static struct trace_iterator *tracepoint_print_iter;
87 int tracepoint_printk;
88 static bool tracepoint_printk_stop_on_boot __initdata;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
103 * To prevent the comm cache from being overwritten when no
104 * tracing is active, only save the comm when a trace event
107 DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 * Kill all tracing for good (never come back).
111 * It is initialized to 1 but will turn to zero if the initialization
112 * of the tracer is successful. But that is the only place that sets
115 static int tracing_disabled = 1;
117 cpumask_var_t __read_mostly tracing_buffer_mask;
120 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123 * is set, then ftrace_dump is called. This will output the contents
124 * of the ftrace buffers to the console. This is very useful for
125 * capturing traces that lead to crashes and outputting them to a
128 * It is off by default, but you can enable it either by specifying
129 * "ftrace_dump_on_oops" on the kernel command line, or by setting
130 * /proc/sys/kernel/ftrace_dump_on_oops
131 * Set 1 if you want to dump buffers of all CPUs
132 * Set 2 if you want to dump the buffer of the CPU that triggered oops
135 enum ftrace_dump_mode ftrace_dump_on_oops;
137 /* When set, tracing will stop when a WARN*() is hit */
138 int __disable_trace_on_warning;
140 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
141 /* Map of enums to their values, for "eval_map" file */
142 struct trace_eval_map_head {
144 unsigned long length;
147 union trace_eval_map_item;
149 struct trace_eval_map_tail {
151 * "end" is first and points to NULL as it must be different
152 * from "mod" or "eval_string"
154 union trace_eval_map_item *next;
155 const char *end; /* points to NULL */
158 static DEFINE_MUTEX(trace_eval_mutex);
161 * The trace_eval_maps are saved in an array with two extra elements,
162 * one at the beginning, and one at the end. The beginning item contains
163 * the count of the saved maps (head.length), and the module they
164 * belong to if not built in (head.mod). The ending item contains a
165 * pointer to the next array of saved eval_map items.
167 union trace_eval_map_item {
168 struct trace_eval_map map;
169 struct trace_eval_map_head head;
170 struct trace_eval_map_tail tail;
173 static union trace_eval_map_item *trace_eval_maps;
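/*
 * Illustrative layout sketch (added here for clarity, not from the original
 * source): for a block of N saved maps, the array described above looks
 * roughly like
 *
 *	[0]	head	(head.mod, head.length = N)
 *	[1..N]	map	(the saved trace_eval_map entries)
 *	[N+1]	tail	(tail.next -> next saved block, or NULL)
 */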
174 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
178 struct trace_buffer *buffer,
179 unsigned int trace_ctx);
181 #define MAX_TRACER_SIZE 100
182 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183 static char *default_bootup_tracer;
185 static bool allocate_snapshot;
186 static bool snapshot_at_boot;
188 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
189 static int boot_instance_index;
191 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_snapshot_index;
194 static int __init set_cmdline_ftrace(char *str)
196 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
197 default_bootup_tracer = bootup_tracer_buf;
198 /* We are using ftrace early, expand it */
199 trace_set_ring_buffer_expanded(NULL);
202 __setup("ftrace=", set_cmdline_ftrace);
204 static int __init set_ftrace_dump_on_oops(char *str)
206 if (*str++ != '=' || !*str || !strcmp("1", str)) {
207 ftrace_dump_on_oops = DUMP_ALL;
211 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
212 ftrace_dump_on_oops = DUMP_ORIG;
218 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
220 static int __init stop_trace_on_warning(char *str)
222 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
223 __disable_trace_on_warning = 1;
226 __setup("traceoff_on_warning", stop_trace_on_warning);
228 static int __init boot_alloc_snapshot(char *str)
230 char *slot = boot_snapshot_info + boot_snapshot_index;
231 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
236 if (strlen(str) >= left)
239 ret = snprintf(slot, left, "%s\t", str);
240 boot_snapshot_index += ret;
242 allocate_snapshot = true;
243 /* We also need the main ring buffer expanded */
244 trace_set_ring_buffer_expanded(NULL);
248 __setup("alloc_snapshot", boot_alloc_snapshot);
251 static int __init boot_snapshot(char *str)
253 snapshot_at_boot = true;
254 boot_alloc_snapshot(str);
257 __setup("ftrace_boot_snapshot", boot_snapshot);
260 static int __init boot_instance(char *str)
262 char *slot = boot_instance_info + boot_instance_index;
263 int left = sizeof(boot_instance_info) - boot_instance_index;
266 if (strlen(str) >= left)
269 ret = snprintf(slot, left, "%s\t", str);
270 boot_instance_index += ret;
274 __setup("trace_instance=", boot_instance);
277 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
279 static int __init set_trace_boot_options(char *str)
281 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
284 __setup("trace_options=", set_trace_boot_options);
286 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
287 static char *trace_boot_clock __initdata;
289 static int __init set_trace_boot_clock(char *str)
291 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
292 trace_boot_clock = trace_boot_clock_buf;
295 __setup("trace_clock=", set_trace_boot_clock);
297 static int __init set_tracepoint_printk(char *str)
299 /* Ignore the "tp_printk_stop_on_boot" param */
303 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
304 tracepoint_printk = 1;
307 __setup("tp_printk", set_tracepoint_printk);
309 static int __init set_tracepoint_printk_stop(char *str)
311 tracepoint_printk_stop_on_boot = true;
314 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
316 unsigned long long ns2usecs(u64 nsec)
324 trace_process_export(struct trace_export *export,
325 struct ring_buffer_event *event, int flag)
327 struct trace_entry *entry;
328 unsigned int size = 0;
330 if (export->flags & flag) {
331 entry = ring_buffer_event_data(event);
332 size = ring_buffer_event_length(event);
333 export->write(export, entry, size);
337 static DEFINE_MUTEX(ftrace_export_lock);
339 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
341 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
342 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
343 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
345 static inline void ftrace_exports_enable(struct trace_export *export)
347 if (export->flags & TRACE_EXPORT_FUNCTION)
348 static_branch_inc(&trace_function_exports_enabled);
350 if (export->flags & TRACE_EXPORT_EVENT)
351 static_branch_inc(&trace_event_exports_enabled);
353 if (export->flags & TRACE_EXPORT_MARKER)
354 static_branch_inc(&trace_marker_exports_enabled);
357 static inline void ftrace_exports_disable(struct trace_export *export)
359 if (export->flags & TRACE_EXPORT_FUNCTION)
360 static_branch_dec(&trace_function_exports_enabled);
362 if (export->flags & TRACE_EXPORT_EVENT)
363 static_branch_dec(&trace_event_exports_enabled);
365 if (export->flags & TRACE_EXPORT_MARKER)
366 static_branch_dec(&trace_marker_exports_enabled);
369 static void ftrace_exports(struct ring_buffer_event *event, int flag)
371 struct trace_export *export;
373 preempt_disable_notrace();
375 export = rcu_dereference_raw_check(ftrace_exports_list);
377 trace_process_export(export, event, flag);
378 export = rcu_dereference_raw_check(export->next);
381 preempt_enable_notrace();
385 add_trace_export(struct trace_export **list, struct trace_export *export)
387 rcu_assign_pointer(export->next, *list);
389 * We are adding the export to the list, but another
390 * CPU might be walking that list. We need to make sure
391 * the export->next pointer is valid before another CPU sees
392 * the export pointer added to the list.
394 rcu_assign_pointer(*list, export);
398 rm_trace_export(struct trace_export **list, struct trace_export *export)
400 struct trace_export **p;
402 for (p = list; *p != NULL; p = &(*p)->next)
409 rcu_assign_pointer(*p, (*p)->next);
415 add_ftrace_export(struct trace_export **list, struct trace_export *export)
417 ftrace_exports_enable(export);
419 add_trace_export(list, export);
423 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
427 ret = rm_trace_export(list, export);
428 ftrace_exports_disable(export);
433 int register_ftrace_export(struct trace_export *export)
435 if (WARN_ON_ONCE(!export->write))
438 mutex_lock(&ftrace_export_lock);
440 add_ftrace_export(&ftrace_exports_list, export);
442 mutex_unlock(&ftrace_export_lock);
446 EXPORT_SYMBOL_GPL(register_ftrace_export);
448 int unregister_ftrace_export(struct trace_export *export)
452 mutex_lock(&ftrace_export_lock);
454 ret = rm_ftrace_export(&ftrace_exports_list, export);
456 mutex_unlock(&ftrace_export_lock);
460 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
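/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): a module exporting events out of band would provide a write
 * callback and a flag mask, roughly:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		(push the raw trace entry to some out-of-band channel)
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */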
462 /* trace_flags holds trace_options default values */
463 #define TRACE_DEFAULT_FLAGS \
464 (FUNCTION_DEFAULT_FLAGS | \
465 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
466 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
467 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
468 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
471 /* trace_options that are only supported by global_trace */
472 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
473 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
475 /* trace_flags that are default zero for instances */
476 #define ZEROED_TRACE_FLAGS \
477 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
480 * The global_trace is the descriptor that holds the top-level tracing
481 * buffers for the live tracing.
483 static struct trace_array global_trace = {
484 .trace_flags = TRACE_DEFAULT_FLAGS,
487 void trace_set_ring_buffer_expanded(struct trace_array *tr)
491 tr->ring_buffer_expanded = true;
494 LIST_HEAD(ftrace_trace_arrays);
496 int trace_array_get(struct trace_array *this_tr)
498 struct trace_array *tr;
501 mutex_lock(&trace_types_lock);
502 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
509 mutex_unlock(&trace_types_lock);
514 static void __trace_array_put(struct trace_array *this_tr)
516 WARN_ON(!this_tr->ref);
521 * trace_array_put - Decrement the reference counter for this trace array.
522 * @this_tr : pointer to the trace array
524 * NOTE: Use this when we no longer need the trace array returned by
525 * trace_array_get_by_name(). This ensures the trace array can be later
529 void trace_array_put(struct trace_array *this_tr)
534 mutex_lock(&trace_types_lock);
535 __trace_array_put(this_tr);
536 mutex_unlock(&trace_types_lock);
538 EXPORT_SYMBOL_GPL(trace_array_put);
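/*
 * Illustrative pairing (a sketch, not code from this file; arguments are
 * elided because they are version dependent): a caller that looked up an
 * instance with trace_array_get_by_name() balances the reference like so:
 *
 *	tr = trace_array_get_by_name(...);
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 */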
540 int tracing_check_open_get_tr(struct trace_array *tr)
544 ret = security_locked_down(LOCKDOWN_TRACEFS);
548 if (tracing_disabled)
551 if (tr && trace_array_get(tr) < 0)
557 int call_filter_check_discard(struct trace_event_call *call, void *rec,
558 struct trace_buffer *buffer,
559 struct ring_buffer_event *event)
561 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
562 !filter_match_preds(call->filter, rec)) {
563 __trace_event_discard_commit(buffer, event);
571 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
572 * @filtered_pids: The list of pids to check
573 * @search_pid: The PID to find in @filtered_pids
575 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
578 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
580 return trace_pid_list_is_set(filtered_pids, search_pid);
584 * trace_ignore_this_task - should a task be ignored for tracing
585 * @filtered_pids: The list of pids to check
586 * @filtered_no_pids: The list of pids not to be traced
587 * @task: The task that should be ignored if not filtered
589 * Checks if @task should be traced or not from @filtered_pids.
590 * Returns true if @task should *NOT* be traced.
591 * Returns false if @task should be traced.
594 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
595 struct trace_pid_list *filtered_no_pids,
596 struct task_struct *task)
599 * If filtered_no_pids is not empty, and the task's pid is listed
600 * in filtered_no_pids, then return true.
601 * Otherwise, if filtered_pids is empty, that means we can
602 * trace all tasks. If it has content, then only trace pids
603 * within filtered_pids.
606 return (filtered_pids &&
607 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
609 trace_find_filtered_pid(filtered_no_pids, task->pid));
613 * trace_filter_add_remove_task - Add or remove a task from a pid_list
614 * @pid_list: The list to modify
615 * @self: The current task for fork or NULL for exit
616 * @task: The task to add or remove
618 * If adding a task, if @self is defined, the task is only added if @self
619 * is also included in @pid_list. This happens on fork and tasks should
620 * only be added when the parent is listed. If @self is NULL, then the
621 * @task pid will be removed from the list, which would happen on exit
624 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
625 struct task_struct *self,
626 struct task_struct *task)
631 /* For forks, we only add if the forking task is listed */
633 if (!trace_find_filtered_pid(pid_list, self->pid))
637 /* "self" is set for forks, and NULL for exits */
639 trace_pid_list_set(pid_list, task->pid);
641 trace_pid_list_clear(pid_list, task->pid);
645 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
646 * @pid_list: The pid list to show
647 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
648 * @pos: The position of the file
650 * This is used by the seq_file "next" operation to iterate the pids
651 * listed in a trace_pid_list structure.
653 * Returns the pid+1 as we want to display pid of zero, but NULL would
654 * stop the iteration.
656 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
658 long pid = (unsigned long)v;
663 /* pid already is +1 of the actual previous bit */
664 if (trace_pid_list_next(pid_list, pid, &next) < 0)
669 /* Return pid + 1 to allow zero to be represented */
670 return (void *)(pid + 1);
674 * trace_pid_start - Used for seq_file to start reading pid lists
675 * @pid_list: The pid list to show
676 * @pos: The position of the file
678 * This is used by seq_file "start" operation to start the iteration
681 * Returns the pid+1 as we want to display pid of zero, but NULL would
682 * stop the iteration.
684 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
690 if (trace_pid_list_first(pid_list, &first) < 0)
695 /* Return pid + 1 so that zero can be the exit value */
696 for (pid++; pid && l < *pos;
697 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
703 * trace_pid_show - show the current pid in seq_file processing
704 * @m: The seq_file structure to write into
705 * @v: A void pointer of the pid (+1) value to display
707 * Can be directly used by seq_file operations to display the current
710 int trace_pid_show(struct seq_file *m, void *v)
712 unsigned long pid = (unsigned long)v - 1;
714 seq_printf(m, "%lu\n", pid);
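/*
 * Illustrative sketch (assumed caller, not code from this file): the pid
 * helpers above are meant to be wired into a seq_file, with p_stop and the
 * pid list (my_pid_list here) supplied by the caller, roughly:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */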
718 /* 128 should be much more than enough */
719 #define PID_BUF_SIZE 127
721 int trace_pid_write(struct trace_pid_list *filtered_pids,
722 struct trace_pid_list **new_pid_list,
723 const char __user *ubuf, size_t cnt)
725 struct trace_pid_list *pid_list;
726 struct trace_parser parser;
734 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
738 * Always recreate a new array. The write is an all or nothing
739 * operation. Always create a new array when adding new pids by
740 * the user. If the operation fails, then the current list is
743 pid_list = trace_pid_list_alloc();
745 trace_parser_put(&parser);
750 /* copy the current bits to the new max */
751 ret = trace_pid_list_first(filtered_pids, &pid);
753 trace_pid_list_set(pid_list, pid);
754 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
764 ret = trace_get_user(&parser, ubuf, cnt, &pos);
772 if (!trace_parser_loaded(&parser))
776 if (kstrtoul(parser.buffer, 0, &val))
781 if (trace_pid_list_set(pid_list, pid) < 0) {
787 trace_parser_clear(&parser);
790 trace_parser_put(&parser);
793 trace_pid_list_free(pid_list);
798 /* Cleared the list of pids */
799 trace_pid_list_free(pid_list);
803 *new_pid_list = pid_list;
808 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
812 /* Early boot up does not have a buffer yet */
814 return trace_clock_local();
816 ts = ring_buffer_time_stamp(buf->buffer);
817 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
822 u64 ftrace_now(int cpu)
824 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
828 * tracing_is_enabled - Show if global_trace has been enabled
830 * Shows if the global trace has been enabled or not. It uses the
831 * mirror flag "buffer_disabled" to be used in fast paths such as for
832 * the irqsoff tracer. But it may be inaccurate due to races. If you
833 * need to know the accurate state, use tracing_is_on() which is a little
834 * slower, but accurate.
836 int tracing_is_enabled(void)
839 * For quick access (irqsoff uses this in fast path), just
840 * return the mirror variable of the state of the ring buffer.
841 * It's a little racy, but we don't really care.
844 return !global_trace.buffer_disabled;
848 * trace_buf_size is the size in bytes that is allocated
849 * for a buffer. Note, the number of bytes is always rounded
852 * This number is purposely set to a low number of 16384.
853 * If the dump on oops happens, it will be much appreciated
854 * to not have to wait for all that output. Anyway, this can be
855 * configured at both boot time and run time.
857 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
859 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
861 /* trace_types holds a link list of available tracers. */
862 static struct tracer *trace_types __read_mostly;
865 * trace_types_lock is used to protect the trace_types list.
867 DEFINE_MUTEX(trace_types_lock);
870 * serialize access to the ring buffer
872 * The ring buffer serializes readers, but that is only low-level protection.
873 * The validity of the events (returned by ring_buffer_peek() etc.)
874 * is not protected by the ring buffer.
876 * The content of events may become garbage if we allow other processes to
877 * consume these events concurrently:
878 * A) the page of the consumed events may become a normal page
879 * (not a reader page) in the ring buffer, and this page will be rewritten
880 * by the events producer.
881 * B) The page of the consumed events may become a page for splice_read,
882 * and this page will be returned to the system.
884 * These primitives allow multi-process access to different per-CPU ring buffers
887 * These primitives don't distinguish read-only from read-consume access.
888 * Multiple read-only accesses are also serialized.
892 static DECLARE_RWSEM(all_cpu_access_lock);
893 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
895 static inline void trace_access_lock(int cpu)
897 if (cpu == RING_BUFFER_ALL_CPUS) {
898 /* gain it for accessing the whole ring buffer. */
899 down_write(&all_cpu_access_lock);
901 /* gain it for accessing a cpu ring buffer. */
903 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
904 down_read(&all_cpu_access_lock);
906 /* Secondly block other access to this @cpu ring buffer. */
907 mutex_lock(&per_cpu(cpu_access_lock, cpu));
911 static inline void trace_access_unlock(int cpu)
913 if (cpu == RING_BUFFER_ALL_CPUS) {
914 up_write(&all_cpu_access_lock);
916 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
917 up_read(&all_cpu_access_lock);
921 static inline void trace_access_lock_init(void)
925 for_each_possible_cpu(cpu)
926 mutex_init(&per_cpu(cpu_access_lock, cpu));
931 static DEFINE_MUTEX(access_lock);
933 static inline void trace_access_lock(int cpu)
936 mutex_lock(&access_lock);
939 static inline void trace_access_unlock(int cpu)
942 mutex_unlock(&access_lock);
945 static inline void trace_access_lock_init(void)
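/*
 * Illustrative usage sketch (an assumption about the readers in this file):
 * a consumer of a per-cpu buffer brackets its access like this,
 *
 *	trace_access_lock(cpu);
 *	(consume events from that cpu's buffer)
 *	trace_access_unlock(cpu);
 *
 * or passes RING_BUFFER_ALL_CPUS to serialize against every per-cpu reader
 * at once.
 */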
951 #ifdef CONFIG_STACKTRACE
952 static void __ftrace_trace_stack(struct trace_buffer *buffer,
953 unsigned int trace_ctx,
954 int skip, struct pt_regs *regs);
955 static inline void ftrace_trace_stack(struct trace_array *tr,
956 struct trace_buffer *buffer,
957 unsigned int trace_ctx,
958 int skip, struct pt_regs *regs);
961 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
962 unsigned int trace_ctx,
963 int skip, struct pt_regs *regs)
966 static inline void ftrace_trace_stack(struct trace_array *tr,
967 struct trace_buffer *buffer,
968 unsigned int trace_ctx,
969 int skip, struct pt_regs *regs)
975 static __always_inline void
976 trace_event_setup(struct ring_buffer_event *event,
977 int type, unsigned int trace_ctx)
979 struct trace_entry *ent = ring_buffer_event_data(event);
981 tracing_generic_entry_update(ent, type, trace_ctx);
984 static __always_inline struct ring_buffer_event *
985 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
988 unsigned int trace_ctx)
990 struct ring_buffer_event *event;
992 event = ring_buffer_lock_reserve(buffer, len);
994 trace_event_setup(event, type, trace_ctx);
999 void tracer_tracing_on(struct trace_array *tr)
1001 if (tr->array_buffer.buffer)
1002 ring_buffer_record_on(tr->array_buffer.buffer);
1004 * This flag is looked at when buffers haven't been allocated
1005 * yet, or by some tracers (like irqsoff), that just want to
1006 * know if the ring buffer has been disabled, but it can handle
1007 * races where it gets disabled but we still do a record.
1008 * As the check is in the fast path of the tracers, it is more
1009 * important to be fast than accurate.
1011 tr->buffer_disabled = 0;
1012 /* Make the flag seen by readers */
1017 * tracing_on - enable tracing buffers
1019 * This function enables tracing buffers that may have been
1020 * disabled with tracing_off.
1022 void tracing_on(void)
1024 tracer_tracing_on(&global_trace);
1026 EXPORT_SYMBOL_GPL(tracing_on);
1029 static __always_inline void
1030 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1032 __this_cpu_write(trace_taskinfo_save, true);
1034 /* If this is the temp buffer, we need to commit fully */
1035 if (this_cpu_read(trace_buffered_event) == event) {
1036 /* Length is in event->array[0] */
1037 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1038 /* Release the temp buffer */
1039 this_cpu_dec(trace_buffered_event_cnt);
1040 /* ring_buffer_unlock_commit() enables preemption */
1041 preempt_enable_notrace();
1043 ring_buffer_unlock_commit(buffer);
1046 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1047 const char *str, int size)
1049 struct ring_buffer_event *event;
1050 struct trace_buffer *buffer;
1051 struct print_entry *entry;
1052 unsigned int trace_ctx;
1055 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1058 if (unlikely(tracing_selftest_running && tr == &global_trace))
1061 if (unlikely(tracing_disabled))
1064 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1066 trace_ctx = tracing_gen_ctx();
1067 buffer = tr->array_buffer.buffer;
1068 ring_buffer_nest_start(buffer);
1069 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1076 entry = ring_buffer_event_data(event);
1079 memcpy(&entry->buf, str, size);
1081 /* Add a newline if necessary */
1082 if (entry->buf[size - 1] != '\n') {
1083 entry->buf[size] = '\n';
1084 entry->buf[size + 1] = '\0';
1086 entry->buf[size] = '\0';
1088 __buffer_unlock_commit(buffer, event);
1089 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1091 ring_buffer_nest_end(buffer);
1094 EXPORT_SYMBOL_GPL(__trace_array_puts);
1097 * __trace_puts - write a constant string into the trace buffer.
1098 * @ip: The address of the caller
1099 * @str: The constant string to write
1100 * @size: The size of the string.
1102 int __trace_puts(unsigned long ip, const char *str, int size)
1104 return __trace_array_puts(&global_trace, ip, str, size);
1106 EXPORT_SYMBOL_GPL(__trace_puts);
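/*
 * Illustrative note (based on the trace_puts() macro, not code from this
 * file): callers normally do not use __trace_puts() directly but write
 *
 *	trace_puts("reached the slow path\n");
 *
 * which resolves to __trace_bputs() for compile-time constant strings and
 * falls back to __trace_puts() otherwise.
 */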
1109 * __trace_bputs - write the pointer to a constant string into trace buffer
1110 * @ip: The address of the caller
1111 * @str: The constant string to write to the buffer
1113 int __trace_bputs(unsigned long ip, const char *str)
1115 struct ring_buffer_event *event;
1116 struct trace_buffer *buffer;
1117 struct bputs_entry *entry;
1118 unsigned int trace_ctx;
1119 int size = sizeof(struct bputs_entry);
1122 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1125 if (unlikely(tracing_selftest_running || tracing_disabled))
1128 trace_ctx = tracing_gen_ctx();
1129 buffer = global_trace.array_buffer.buffer;
1131 ring_buffer_nest_start(buffer);
1132 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1137 entry = ring_buffer_event_data(event);
1141 __buffer_unlock_commit(buffer, event);
1142 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1146 ring_buffer_nest_end(buffer);
1149 EXPORT_SYMBOL_GPL(__trace_bputs);
1151 #ifdef CONFIG_TRACER_SNAPSHOT
1152 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1155 struct tracer *tracer = tr->current_trace;
1156 unsigned long flags;
1159 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1160 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1164 if (!tr->allocated_snapshot) {
1165 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1166 trace_array_puts(tr, "*** stopping trace here! ***\n");
1167 tracer_tracing_off(tr);
1171 /* Note, snapshot can not be used when the tracer uses it */
1172 if (tracer->use_max_tr) {
1173 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1174 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1178 local_irq_save(flags);
1179 update_max_tr(tr, current, smp_processor_id(), cond_data);
1180 local_irq_restore(flags);
1183 void tracing_snapshot_instance(struct trace_array *tr)
1185 tracing_snapshot_instance_cond(tr, NULL);
1189 * tracing_snapshot - take a snapshot of the current buffer.
1191 * This causes a swap between the snapshot buffer and the current live
1192 * tracing buffer. You can use this to take snapshots of the live
1193 * trace when some condition is triggered, but continue to trace.
1195 * Note, make sure to allocate the snapshot either with
1196 * tracing_snapshot_alloc(), or by doing it manually
1197 * with: echo 1 > /sys/kernel/tracing/snapshot
1199 * If the snapshot buffer is not allocated, it will stop tracing.
1200 * Basically making a permanent snapshot.
1202 void tracing_snapshot(void)
1204 struct trace_array *tr = &global_trace;
1206 tracing_snapshot_instance(tr);
1208 EXPORT_SYMBOL_GPL(tracing_snapshot);
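/*
 * Illustrative usage sketch (an assumption, not code from this file): an
 * in-kernel user allocates the snapshot buffer once from a sleepable
 * context and then triggers snapshots wherever the interesting condition
 * is detected:
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (something_went_wrong)
 *			tracing_snapshot();
 *	}
 */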
1211 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1212 * @tr: The tracing instance to snapshot
1213 * @cond_data: The data to be tested conditionally, and possibly saved
1215 * This is the same as tracing_snapshot() except that the snapshot is
1216 * conditional - the snapshot will only happen if the
1217 * cond_snapshot.update() implementation receiving the cond_data
1218 * returns true, which means that the trace array's cond_snapshot
1219 * update() operation used the cond_data to determine whether the
1220 * snapshot should be taken, and if it was, presumably saved it along
1221 * with the snapshot.
1223 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1225 tracing_snapshot_instance_cond(tr, cond_data);
1227 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1230 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1231 * @tr: The tracing instance
1233 * When the user enables a conditional snapshot using
1234 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1235 * with the snapshot. This accessor is used to retrieve it.
1237 * Should not be called from cond_snapshot.update(), since it takes
1238 * the tr->max_lock lock, which the code calling
1239 * cond_snapshot.update() has already taken.
1241 * Returns the cond_data associated with the trace array's snapshot.
1243 void *tracing_cond_snapshot_data(struct trace_array *tr)
1245 void *cond_data = NULL;
1247 local_irq_disable();
1248 arch_spin_lock(&tr->max_lock);
1250 if (tr->cond_snapshot)
1251 cond_data = tr->cond_snapshot->cond_data;
1253 arch_spin_unlock(&tr->max_lock);
1258 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1260 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1261 struct array_buffer *size_buf, int cpu_id);
1262 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1264 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1269 if (!tr->allocated_snapshot) {
1271 /* Make the snapshot buffer have the same order as main buffer */
1272 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1273 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1277 /* allocate spare buffer */
1278 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1279 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1283 tr->allocated_snapshot = true;
1289 static void free_snapshot(struct trace_array *tr)
1292 * We don't free the ring buffer; instead, we resize it because
1293 * the max_tr ring buffer has some state (e.g. ring->clock) and
1294 * we want to preserve it.
1296 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1297 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1298 set_buffer_entries(&tr->max_buffer, 1);
1299 tracing_reset_online_cpus(&tr->max_buffer);
1300 tr->allocated_snapshot = false;
1304 * tracing_alloc_snapshot - allocate snapshot buffer.
1306 * This only allocates the snapshot buffer if it isn't already
1307 * allocated - it doesn't also take a snapshot.
1309 * This is meant to be used in cases where the snapshot buffer needs
1310 * to be set up for events that can't sleep but need to be able to
1311 * trigger a snapshot.
1313 int tracing_alloc_snapshot(void)
1315 struct trace_array *tr = &global_trace;
1318 ret = tracing_alloc_snapshot_instance(tr);
1323 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1326 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1328 * This is similar to tracing_snapshot(), but it will allocate the
1329 * snapshot buffer if it isn't already allocated. Use this only
1330 * where it is safe to sleep, as the allocation may sleep.
1332 * This causes a swap between the snapshot buffer and the current live
1333 * tracing buffer. You can use this to take snapshots of the live
1334 * trace when some condition is triggered, but continue to trace.
1336 void tracing_snapshot_alloc(void)
1340 ret = tracing_alloc_snapshot();
1346 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1349 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1350 * @tr: The tracing instance
1351 * @cond_data: User data to associate with the snapshot
1352 * @update: Implementation of the cond_snapshot update function
1354 * Check whether the conditional snapshot for the given instance has
1355 * already been enabled, or if the current tracer is already using a
1356 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1357 * save the cond_data and update function inside.
1359 * Returns 0 if successful, error otherwise.
1361 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1362 cond_update_fn_t update)
1364 struct cond_snapshot *cond_snapshot;
1367 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1371 cond_snapshot->cond_data = cond_data;
1372 cond_snapshot->update = update;
1374 mutex_lock(&trace_types_lock);
1376 ret = tracing_alloc_snapshot_instance(tr);
1380 if (tr->current_trace->use_max_tr) {
1386 * The cond_snapshot can only change to NULL without the
1387 * trace_types_lock. We don't care if we race with it going
1388 * to NULL, but we want to make sure that it's not set to
1389 * something other than NULL when we get here, which we can
1390 * do safely with only holding the trace_types_lock and not
1391 * having to take the max_lock.
1393 if (tr->cond_snapshot) {
1398 local_irq_disable();
1399 arch_spin_lock(&tr->max_lock);
1400 tr->cond_snapshot = cond_snapshot;
1401 arch_spin_unlock(&tr->max_lock);
1404 mutex_unlock(&trace_types_lock);
1409 mutex_unlock(&trace_types_lock);
1410 kfree(cond_snapshot);
1413 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
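/*
 * Illustrative sketch (an assumption about a caller; my_state and my_update
 * are hypothetical): the update callback matches cond_update_fn_t, and the
 * enable/trigger/disable calls pair up like this:
 *
 *	static struct my_state my_state;
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *state = cond_data;
 *
 *		return state->hit_threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */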
1416 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1417 * @tr: The tracing instance
1419 * Check whether the conditional snapshot for the given instance is
1420 * enabled; if so, free the cond_snapshot associated with it,
1421 * otherwise return -EINVAL.
1423 * Returns 0 if successful, error otherwise.
1425 int tracing_snapshot_cond_disable(struct trace_array *tr)
1429 local_irq_disable();
1430 arch_spin_lock(&tr->max_lock);
1432 if (!tr->cond_snapshot)
1435 kfree(tr->cond_snapshot);
1436 tr->cond_snapshot = NULL;
1439 arch_spin_unlock(&tr->max_lock);
1444 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1446 void tracing_snapshot(void)
1448 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1450 EXPORT_SYMBOL_GPL(tracing_snapshot);
1451 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1453 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1455 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1456 int tracing_alloc_snapshot(void)
1458 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1461 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1462 void tracing_snapshot_alloc(void)
1467 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1468 void *tracing_cond_snapshot_data(struct trace_array *tr)
1472 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1473 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1477 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1478 int tracing_snapshot_cond_disable(struct trace_array *tr)
1482 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1483 #define free_snapshot(tr) do { } while (0)
1484 #endif /* CONFIG_TRACER_SNAPSHOT */
1486 void tracer_tracing_off(struct trace_array *tr)
1488 if (tr->array_buffer.buffer)
1489 ring_buffer_record_off(tr->array_buffer.buffer);
1491 * This flag is looked at when buffers haven't been allocated
1492 * yet, or by some tracers (like irqsoff), that just want to
1493 * know if the ring buffer has been disabled, but it can handle
1494 * races where it gets disabled but we still do a record.
1495 * As the check is in the fast path of the tracers, it is more
1496 * important to be fast than accurate.
1498 tr->buffer_disabled = 1;
1499 /* Make the flag seen by readers */
1504 * tracing_off - turn off tracing buffers
1506 * This function stops the tracing buffers from recording data.
1507 * It does not disable any overhead the tracers themselves may
1508 * be causing. This function simply causes all recording to
1509 * the ring buffers to fail.
1511 void tracing_off(void)
1513 tracer_tracing_off(&global_trace);
1515 EXPORT_SYMBOL_GPL(tracing_off);
1517 void disable_trace_on_warning(void)
1519 if (__disable_trace_on_warning) {
1520 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1521 "Disabling tracing due to warning\n");
1527 * tracer_tracing_is_on - show the real state of the ring buffer
1528 * @tr : the trace array to check if its ring buffer is enabled
1530 * Shows the real state of the ring buffer, whether it is enabled or not.
1532 bool tracer_tracing_is_on(struct trace_array *tr)
1534 if (tr->array_buffer.buffer)
1535 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1536 return !tr->buffer_disabled;
1540 * tracing_is_on - show state of ring buffers enabled
1542 int tracing_is_on(void)
1544 return tracer_tracing_is_on(&global_trace);
1546 EXPORT_SYMBOL_GPL(tracing_is_on);
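/*
 * Illustrative sketch (assumed debugging pattern, not code from this file):
 * a common use is to let the buffers run and freeze them the moment a
 * suspect condition is seen, preserving the events leading up to it:
 *
 *	if (suspicious_condition) {
 *		trace_printk("condition hit, freezing trace\n");
 *		tracing_off();
 *	}
 */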
1548 static int __init set_buf_size(char *str)
1550 unsigned long buf_size;
1554 buf_size = memparse(str, &str);
1556 * nr_entries can not be zero and the startup
1557 * tests require some buffer space. Therefore
1558 * ensure we have at least 4096 bytes of buffer.
1560 trace_buf_size = max(4096UL, buf_size);
1563 __setup("trace_buf_size=", set_buf_size);
1565 static int __init set_tracing_thresh(char *str)
1567 unsigned long threshold;
1572 ret = kstrtoul(str, 0, &threshold);
1575 tracing_thresh = threshold * 1000;
1578 __setup("tracing_thresh=", set_tracing_thresh);
1580 unsigned long nsecs_to_usecs(unsigned long nsecs)
1582 return nsecs / 1000;
1586 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1587 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1588 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1589 * of strings in the order that the evals (enum) were defined.
1594 /* These must match the bit positions in trace_iterator_flags */
1595 static const char *trace_options[] = {
1603 int in_ns; /* is this clock in nanoseconds? */
1604 } trace_clocks[] = {
1605 { trace_clock_local, "local", 1 },
1606 { trace_clock_global, "global", 1 },
1607 { trace_clock_counter, "counter", 0 },
1608 { trace_clock_jiffies, "uptime", 0 },
1609 { trace_clock, "perf", 1 },
1610 { ktime_get_mono_fast_ns, "mono", 1 },
1611 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1612 { ktime_get_boot_fast_ns, "boot", 1 },
1613 { ktime_get_tai_fast_ns, "tai", 1 },
1617 bool trace_clock_in_ns(struct trace_array *tr)
1619 if (trace_clocks[tr->clock_id].in_ns)
1626 * trace_parser_get_init - gets the buffer for trace parser
1628 int trace_parser_get_init(struct trace_parser *parser, int size)
1630 memset(parser, 0, sizeof(*parser));
1632 parser->buffer = kmalloc(size, GFP_KERNEL);
1633 if (!parser->buffer)
1636 parser->size = size;
1641 * trace_parser_put - frees the buffer for trace parser
1643 void trace_parser_put(struct trace_parser *parser)
1645 kfree(parser->buffer);
1646 parser->buffer = NULL;
1650 * trace_get_user - reads the user input string separated by space
1651 * (matched by isspace(ch))
1653 * For each string found, the 'struct trace_parser' is updated,
1654 * and the function returns.
1656 * Returns number of bytes read.
1658 * See kernel/trace/trace.h for 'struct trace_parser' details.
1660 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1661 size_t cnt, loff_t *ppos)
1668 trace_parser_clear(parser);
1670 ret = get_user(ch, ubuf++);
1678 * The parser is not finished with the last write,
1679 * continue reading the user input without skipping spaces.
1681 if (!parser->cont) {
1682 /* skip white space */
1683 while (cnt && isspace(ch)) {
1684 ret = get_user(ch, ubuf++);
1693 /* only spaces were written */
1694 if (isspace(ch) || !ch) {
1701 /* read the non-space input */
1702 while (cnt && !isspace(ch) && ch) {
1703 if (parser->idx < parser->size - 1)
1704 parser->buffer[parser->idx++] = ch;
1709 ret = get_user(ch, ubuf++);
1716 /* We either got finished input or we have to wait for another call. */
1717 if (isspace(ch) || !ch) {
1718 parser->buffer[parser->idx] = 0;
1719 parser->cont = false;
1720 } else if (parser->idx < parser->size - 1) {
1721 parser->cont = true;
1722 parser->buffer[parser->idx++] = ch;
1723 /* Make sure the parsed string always terminates with '\0'. */
1724 parser->buffer[parser->idx] = 0;
1737 /* TODO add a seq_buf_to_buffer() */
1738 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1742 if (trace_seq_used(s) <= s->readpos)
1745 len = trace_seq_used(s) - s->readpos;
1748 memcpy(buf, s->buffer + s->readpos, cnt);
1754 unsigned long __read_mostly tracing_thresh;
1756 #ifdef CONFIG_TRACER_MAX_TRACE
1757 static const struct file_operations tracing_max_lat_fops;
1759 #ifdef LATENCY_FS_NOTIFY
1761 static struct workqueue_struct *fsnotify_wq;
1763 static void latency_fsnotify_workfn(struct work_struct *work)
1765 struct trace_array *tr = container_of(work, struct trace_array,
1767 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1770 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1772 struct trace_array *tr = container_of(iwork, struct trace_array,
1774 queue_work(fsnotify_wq, &tr->fsnotify_work);
1777 static void trace_create_maxlat_file(struct trace_array *tr,
1778 struct dentry *d_tracer)
1780 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1781 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1782 tr->d_max_latency = trace_create_file("tracing_max_latency",
1785 &tracing_max_lat_fops);
1788 __init static int latency_fsnotify_init(void)
1790 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1791 WQ_UNBOUND | WQ_HIGHPRI, 0);
1793 pr_err("Unable to allocate tr_max_lat_wq\n");
1799 late_initcall_sync(latency_fsnotify_init);
1801 void latency_fsnotify(struct trace_array *tr)
1806 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1807 * possible that we are called from __schedule() or do_idle(), which
1808 * could cause a deadlock.
1810 irq_work_queue(&tr->fsnotify_irqwork);
1813 #else /* !LATENCY_FS_NOTIFY */
1815 #define trace_create_maxlat_file(tr, d_tracer) \
1816 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1817 d_tracer, tr, &tracing_max_lat_fops)
1822 * Copy the new maximum trace into the separate maximum-trace
1823 * structure. (this way the maximum trace is permanently saved,
1824 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1827 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1829 struct array_buffer *trace_buf = &tr->array_buffer;
1830 struct array_buffer *max_buf = &tr->max_buffer;
1831 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1832 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1835 max_buf->time_start = data->preempt_timestamp;
1837 max_data->saved_latency = tr->max_latency;
1838 max_data->critical_start = data->critical_start;
1839 max_data->critical_end = data->critical_end;
1841 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1842 max_data->pid = tsk->pid;
1844 * If tsk == current, then use current_uid(), as that does not use
1845 * RCU. The irq tracer can be called out of RCU scope.
1848 max_data->uid = current_uid();
1850 max_data->uid = task_uid(tsk);
1852 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1853 max_data->policy = tsk->policy;
1854 max_data->rt_priority = tsk->rt_priority;
1856 /* record this task's comm */
1857 tracing_record_cmdline(tsk);
1858 latency_fsnotify(tr);
1862 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1864 * @tsk: the task with the latency
1865 * @cpu: The cpu that initiated the trace.
1866 * @cond_data: User data associated with a conditional snapshot
1868 * Flip the buffers between the @tr and the max_tr and record information
1869 * about which task was the cause of this latency.
1872 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1878 WARN_ON_ONCE(!irqs_disabled());
1880 if (!tr->allocated_snapshot) {
1881 /* Only the nop tracer should hit this when disabling */
1882 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1886 arch_spin_lock(&tr->max_lock);
1888 /* Inherit the recordable setting from array_buffer */
1889 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1890 ring_buffer_record_on(tr->max_buffer.buffer);
1892 ring_buffer_record_off(tr->max_buffer.buffer);
1894 #ifdef CONFIG_TRACER_SNAPSHOT
1895 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1896 arch_spin_unlock(&tr->max_lock);
1900 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1902 __update_max_tr(tr, tsk, cpu);
1904 arch_spin_unlock(&tr->max_lock);
1906 /* Any waiters on the old snapshot buffer need to wake up */
1907 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1911 * update_max_tr_single - only copy one trace over, and reset the rest
1913 * @tsk: task with the latency
1914 * @cpu: the cpu of the buffer to copy.
1916 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1919 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1926 WARN_ON_ONCE(!irqs_disabled());
1927 if (!tr->allocated_snapshot) {
1928 /* Only the nop tracer should hit this when disabling */
1929 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1933 arch_spin_lock(&tr->max_lock);
1935 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1937 if (ret == -EBUSY) {
1939 * We failed to swap the buffer due to a commit taking
1940 * place on this CPU. We fail to record, but we reset
1941 * the max trace buffer (no one writes directly to it)
1942 * and flag that it failed.
1943 * Another reason is that a resize is in progress.
1945 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1946 "Failed to swap buffers due to commit or resize in progress\n");
1949 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1951 __update_max_tr(tr, tsk, cpu);
1952 arch_spin_unlock(&tr->max_lock);
1955 #endif /* CONFIG_TRACER_MAX_TRACE */
1958 struct trace_iterator *iter;
1962 static bool wait_pipe_cond(void *data)
1964 struct pipe_wait *pwait = data;
1965 struct trace_iterator *iter = pwait->iter;
1967 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
1970 return iter->closed;
1973 static int wait_on_pipe(struct trace_iterator *iter, int full)
1975 struct pipe_wait pwait;
1978 /* Iterators are static, they should be filled or empty */
1979 if (trace_buffer_iter(iter, iter->cpu_file))
1982 pwait.wait_index = atomic_read_acquire(&iter->wait_index);
1985 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
1986 wait_pipe_cond, &pwait);
1988 #ifdef CONFIG_TRACER_MAX_TRACE
1990 * Make sure this is still the snapshot buffer, as if a snapshot were
1991 * to happen, this would now be the main buffer.
1994 iter->array_buffer = &iter->tr->max_buffer;
1999 #ifdef CONFIG_FTRACE_STARTUP_TEST
2000 static bool selftests_can_run;
2002 struct trace_selftests {
2003 struct list_head list;
2004 struct tracer *type;
2007 static LIST_HEAD(postponed_selftests);
2009 static int save_selftest(struct tracer *type)
2011 struct trace_selftests *selftest;
2013 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2017 selftest->type = type;
2018 list_add(&selftest->list, &postponed_selftests);
2022 static int run_tracer_selftest(struct tracer *type)
2024 struct trace_array *tr = &global_trace;
2025 struct tracer *saved_tracer = tr->current_trace;
2028 if (!type->selftest || tracing_selftest_disabled)
2032 * If a tracer registers early in boot up (before scheduling is
2033 * initialized and such), then do not run its selftest yet.
2034 * Instead, run it a little later in the boot process.
2036 if (!selftests_can_run)
2037 return save_selftest(type);
2039 if (!tracing_is_on()) {
2040 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2046 * Run a selftest on this tracer.
2047 * Here we reset the trace buffer, and set the current
2048 * tracer to be this tracer. The tracer can then run some
2049 * internal tracing to verify that everything is in order.
2050 * If we fail, we do not register this tracer.
2052 tracing_reset_online_cpus(&tr->array_buffer);
2054 tr->current_trace = type;
2056 #ifdef CONFIG_TRACER_MAX_TRACE
2057 if (type->use_max_tr) {
2058 /* If we expanded the buffers, make sure the max is expanded too */
2059 if (tr->ring_buffer_expanded)
2060 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2061 RING_BUFFER_ALL_CPUS);
2062 tr->allocated_snapshot = true;
2066 /* the test is responsible for initializing and enabling */
2067 pr_info("Testing tracer %s: ", type->name);
2068 ret = type->selftest(type, tr);
2069 /* the test is responsible for resetting too */
2070 tr->current_trace = saved_tracer;
2072 printk(KERN_CONT "FAILED!\n");
2073 /* Add the warning after printing 'FAILED' */
2077 /* Only reset on passing, to avoid touching corrupted buffers */
2078 tracing_reset_online_cpus(&tr->array_buffer);
2080 #ifdef CONFIG_TRACER_MAX_TRACE
2081 if (type->use_max_tr) {
2082 tr->allocated_snapshot = false;
2084 /* Shrink the max buffer again */
2085 if (tr->ring_buffer_expanded)
2086 ring_buffer_resize(tr->max_buffer.buffer, 1,
2087 RING_BUFFER_ALL_CPUS);
2091 printk(KERN_CONT "PASSED\n");
2095 static int do_run_tracer_selftest(struct tracer *type)
2100 * Tests can take a long time, especially if they are run one after the
2101 * other, as does happen during bootup when all the tracers are
2102 * registered. This could cause the soft lockup watchdog to trigger.
2106 tracing_selftest_running = true;
2107 ret = run_tracer_selftest(type);
2108 tracing_selftest_running = false;
2113 static __init int init_trace_selftests(void)
2115 struct trace_selftests *p, *n;
2116 struct tracer *t, **last;
2119 selftests_can_run = true;
2121 mutex_lock(&trace_types_lock);
2123 if (list_empty(&postponed_selftests))
2126 pr_info("Running postponed tracer tests:\n");
2128 tracing_selftest_running = true;
2129 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2130 /* This loop can take minutes when sanitizers are enabled, so
2131 * let's make sure we allow RCU processing.
2134 ret = run_tracer_selftest(p->type);
2135 /* If the test fails, then warn and remove from available_tracers */
2137 WARN(1, "tracer: %s failed selftest, disabling\n",
2139 last = &trace_types;
2140 for (t = trace_types; t; t = t->next) {
2151 tracing_selftest_running = false;
2154 mutex_unlock(&trace_types_lock);
2158 core_initcall(init_trace_selftests);
2160 static inline int run_tracer_selftest(struct tracer *type)
2164 static inline int do_run_tracer_selftest(struct tracer *type)
2168 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2170 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2172 static void __init apply_trace_boot_options(void);
2175 * register_tracer - register a tracer with the ftrace system.
2176 * @type: the plugin for the tracer
2178 * Register a new plugin tracer.
2180 int __init register_tracer(struct tracer *type)
2186 pr_info("Tracer must have a name\n");
2190 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2191 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2195 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2196 pr_warn("Can not register tracer %s due to lockdown\n",
2201 mutex_lock(&trace_types_lock);
2203 for (t = trace_types; t; t = t->next) {
2204 if (strcmp(type->name, t->name) == 0) {
2206 pr_info("Tracer %s already registered\n",
2213 if (!type->set_flag)
2214 type->set_flag = &dummy_set_flag;
2216 /* allocate a dummy tracer_flags */
2217 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2222 type->flags->val = 0;
2223 type->flags->opts = dummy_tracer_opt;
2225 if (!type->flags->opts)
2226 type->flags->opts = dummy_tracer_opt;
2228 /* store the tracer for __set_tracer_option */
2229 type->flags->trace = type;
2231 ret = do_run_tracer_selftest(type);
2235 type->next = trace_types;
2237 add_tracer_options(&global_trace, type);
2240 mutex_unlock(&trace_types_lock);
2242 if (ret || !default_bootup_tracer)
2245 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2248 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2249 /* Do we want this tracer to start on bootup? */
2250 tracing_set_tracer(&global_trace, type->name);
2251 default_bootup_tracer = NULL;
2253 apply_trace_boot_options();
2255 /* Disable other selftests, since this will break them. */
2256 disable_tracing_selftest("running a tracer");
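/*
 * Illustrative sketch (hypothetical tracer, not code from this file): a
 * minimal plugin registers itself from an initcall, roughly:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */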
2262 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2264 struct trace_buffer *buffer = buf->buffer;
2269 ring_buffer_record_disable(buffer);
2271 /* Make sure all commits have finished */
2273 ring_buffer_reset_cpu(buffer, cpu);
2275 ring_buffer_record_enable(buffer);
2278 void tracing_reset_online_cpus(struct array_buffer *buf)
2280 struct trace_buffer *buffer = buf->buffer;
2285 ring_buffer_record_disable(buffer);
2287 /* Make sure all commits have finished */
2290 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2292 ring_buffer_reset_online_cpus(buffer);
2294 ring_buffer_record_enable(buffer);
2297 /* Must have trace_types_lock held */
2298 void tracing_reset_all_online_cpus_unlocked(void)
2300 struct trace_array *tr;
2302 lockdep_assert_held(&trace_types_lock);
2304 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2305 if (!tr->clear_trace)
2307 tr->clear_trace = false;
2308 tracing_reset_online_cpus(&tr->array_buffer);
2309 #ifdef CONFIG_TRACER_MAX_TRACE
2310 tracing_reset_online_cpus(&tr->max_buffer);
2315 void tracing_reset_all_online_cpus(void)
2317 mutex_lock(&trace_types_lock);
2318 tracing_reset_all_online_cpus_unlocked();
2319 mutex_unlock(&trace_types_lock);
2322 int is_tracing_stopped(void)
2324 return global_trace.stop_count;
2327 static void tracing_start_tr(struct trace_array *tr)
2329 struct trace_buffer *buffer;
2330 unsigned long flags;
2332 if (tracing_disabled)
2335 raw_spin_lock_irqsave(&tr->start_lock, flags);
2336 if (--tr->stop_count) {
2337 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2338 /* Someone screwed up their debugging */
2344 /* Prevent the buffers from switching */
2345 arch_spin_lock(&tr->max_lock);
2347 buffer = tr->array_buffer.buffer;
2349 ring_buffer_record_enable(buffer);
2351 #ifdef CONFIG_TRACER_MAX_TRACE
2352 buffer = tr->max_buffer.buffer;
2354 ring_buffer_record_enable(buffer);
2357 arch_spin_unlock(&tr->max_lock);
2360 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2364 * tracing_start - quick start of the tracer
2366 * If tracing is enabled but was stopped by tracing_stop,
2367 * this will start the tracer back up.
2369 void tracing_start(void)
2372 return tracing_start_tr(&global_trace);
2375 static void tracing_stop_tr(struct trace_array *tr)
2377 struct trace_buffer *buffer;
2378 unsigned long flags;
2380 raw_spin_lock_irqsave(&tr->start_lock, flags);
2381 if (tr->stop_count++)
2384 /* Prevent the buffers from switching */
2385 arch_spin_lock(&tr->max_lock);
2387 buffer = tr->array_buffer.buffer;
2389 ring_buffer_record_disable(buffer);
2391 #ifdef CONFIG_TRACER_MAX_TRACE
2392 buffer = tr->max_buffer.buffer;
2394 ring_buffer_record_disable(buffer);
2397 arch_spin_unlock(&tr->max_lock);
2400 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2404 * tracing_stop - quick stop of the tracer
2406 * Lightweight way to stop tracing. Use in conjunction with
2409 void tracing_stop(void)
2411 return tracing_stop_tr(&global_trace);
2415 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2416 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2417 * simplifies those functions and keeps them in sync.
2419 enum print_line_t trace_handle_return(struct trace_seq *s)
2421 return trace_seq_has_overflowed(s) ?
2422 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2424 EXPORT_SYMBOL_GPL(trace_handle_return);
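/*
 * Typical caller shape (a sketch; "my_event_trace" is hypothetical): an
 * output callback fills iter->seq and lets trace_handle_return() map a
 * seq overflow to TRACE_TYPE_PARTIAL_LINE:
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */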
2426 static unsigned short migration_disable_value(void)
2428 #if defined(CONFIG_SMP)
2429 return current->migration_disabled;
2435 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2437 unsigned int trace_flags = irqs_status;
2440 pc = preempt_count();
2443 trace_flags |= TRACE_FLAG_NMI;
2444 if (pc & HARDIRQ_MASK)
2445 trace_flags |= TRACE_FLAG_HARDIRQ;
2446 if (in_serving_softirq())
2447 trace_flags |= TRACE_FLAG_SOFTIRQ;
2448 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2449 trace_flags |= TRACE_FLAG_BH_OFF;
2451 if (tif_need_resched())
2452 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2453 if (test_preempt_need_resched())
2454 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2455 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2456 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
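/*
 * Layout of the value built above, as derived from the code: bits 0-3
 * hold the preempt count (clamped to 15), bits 4-7 the migrate-disable
 * depth (also clamped), and bits 16 and up the TRACE_FLAG_* bits from
 * "trace_flags << 16".
 */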
2459 struct ring_buffer_event *
2460 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2463 unsigned int trace_ctx)
2465 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2468 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2469 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2470 static int trace_buffered_event_ref;
2473 * trace_buffered_event_enable - enable buffering events
2475 * When events are being filtered, it is quicker to use a temporary
2476 * buffer to write the event data into if there's a likely chance
2477 * that it will not be committed. The discard of the ring buffer
2478 is not as fast as committing, and is much slower than copying to a temp buffer.
2481 * When an event is to be filtered, allocate per-CPU buffers to
2482 * write the event data into; if the event is filtered and discarded
2483 * it is simply dropped, otherwise the entire data is committed in one shot.
2486 void trace_buffered_event_enable(void)
2488 struct ring_buffer_event *event;
2492 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2494 if (trace_buffered_event_ref++)
2497 for_each_tracing_cpu(cpu) {
2498 page = alloc_pages_node(cpu_to_node(cpu),
2499 GFP_KERNEL | __GFP_NORETRY, 0);
2500 /* This is just an optimization and can handle failures */
2502 pr_err("Failed to allocate event buffer\n");
2506 event = page_address(page);
2507 memset(event, 0, sizeof(*event));
2509 per_cpu(trace_buffered_event, cpu) = event;
2512 if (cpu == smp_processor_id() &&
2513 __this_cpu_read(trace_buffered_event) !=
2514 per_cpu(trace_buffered_event, cpu))
2520 static void enable_trace_buffered_event(void *data)
2522 /* Probably not needed, but do it anyway */
2524 this_cpu_dec(trace_buffered_event_cnt);
2527 static void disable_trace_buffered_event(void *data)
2529 this_cpu_inc(trace_buffered_event_cnt);
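/*
 * Pairing sketch (illustrative; the callers live in the event filtering
 * code): enable/disable are reference counted and both assert that
 * event_mutex is held, so the expected pattern is:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	// ... install or update per-event filters ...
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */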
2533 * trace_buffered_event_disable - disable buffering events
2535 * When a filter is removed, it is faster to not use the buffered
2536 * events, and to commit directly into the ring buffer. Free up
2537 * the temp buffers when there are no more users. This requires
2538 * special synchronization with current events.
2540 void trace_buffered_event_disable(void)
2544 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2546 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2549 if (--trace_buffered_event_ref)
2552 /* For each CPU, set the buffer as used. */
2553 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2556 /* Wait for all current users to finish */
2559 for_each_tracing_cpu(cpu) {
2560 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2561 per_cpu(trace_buffered_event, cpu) = NULL;
2565 * Wait for all CPUs that potentially started checking if they can use
2566 * their event buffer only after the previous synchronize_rcu() call and
2567 * they still read a valid pointer from trace_buffered_event. It must be
2568 * ensured they don't see cleared trace_buffered_event_cnt else they
2569 * could wrongly decide to use the pointed-to buffer which is now freed.
2573 /* For each CPU, relinquish the buffer */
2574 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2578 static struct trace_buffer *temp_buffer;
2580 struct ring_buffer_event *
2581 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2582 struct trace_event_file *trace_file,
2583 int type, unsigned long len,
2584 unsigned int trace_ctx)
2586 struct ring_buffer_event *entry;
2587 struct trace_array *tr = trace_file->tr;
2590 *current_rb = tr->array_buffer.buffer;
2592 if (!tr->no_filter_buffering_ref &&
2593 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2594 preempt_disable_notrace();
2596 * Filtering is on, so try to use the per cpu buffer first.
2597 * This buffer will simulate a ring_buffer_event,
2598 * where the type_len is zero and the array[0] will
2599 * hold the full length.
2600 * (see include/linux/ring_buffer.h for details on
2601 * how the ring_buffer_event is structured).
2603 * Using a temp buffer during filtering and copying it
2604 * on a matched filter is quicker than writing directly
2605 * into the ring buffer and then discarding it when
2606 * it doesn't match. That is because the discard
2607 * requires several atomic operations to get right.
2608 * Copying on a match and doing nothing on a failed match
2609 * is still quicker than writing directly and then having
2610 * to discard out of the ring buffer on a failed match.
2612 if ((entry = __this_cpu_read(trace_buffered_event))) {
2613 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2615 val = this_cpu_inc_return(trace_buffered_event_cnt);
2618 * Preemption is disabled, but interrupts and NMIs
2619 * can still come in now. If that happens after
2620 * the above increment, then it will have to go
2621 * back to the old method of allocating the event
2622 * on the ring buffer, and if the filter fails, it
2623 * will have to call ring_buffer_discard_commit()
2626 * Need to also check the unlikely case that the
2627 * length is bigger than the temp buffer size.
2628 * If that happens, then the reserve is pretty much
2629 * guaranteed to fail, as the ring buffer currently
2630 * only allows events less than a page. But that may
2631 * change in the future, so let the ring buffer reserve
2632 * handle the failure in that case.
2634 if (val == 1 && likely(len <= max_len)) {
2635 trace_event_setup(entry, type, trace_ctx);
2636 entry->array[0] = len;
2637 /* Return with preemption disabled */
2640 this_cpu_dec(trace_buffered_event_cnt);
2642 /* __trace_buffer_lock_reserve() disables preemption */
2643 preempt_enable_notrace();
2646 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2649 * If tracing is off, but we have triggers enabled
2650 * we still need to look at the event data. Use the temp_buffer
2651 * to store the trace event for the trigger to use. It's recursion
2652 * safe and will not be recorded anywhere.
2654 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2655 *current_rb = temp_buffer;
2656 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2661 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
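/*
 * Rough caller shape (an illustrative sketch, not the generated
 * TRACE_EVENT code verbatim): reserve an event, fill in its data, then
 * commit it through trace_event_buffer_commit() below, which also
 * handles triggers, exports and the tp_printk path:
 *
 *	struct trace_buffer *buffer;
 *	struct ring_buffer_event *event;
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
 *						sizeof(*entry), trace_ctx);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		// ... fill in entry fields, then commit ...
 *	}
 */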
2663 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2664 static DEFINE_MUTEX(tracepoint_printk_mutex);
2666 static void output_printk(struct trace_event_buffer *fbuffer)
2668 struct trace_event_call *event_call;
2669 struct trace_event_file *file;
2670 struct trace_event *event;
2671 unsigned long flags;
2672 struct trace_iterator *iter = tracepoint_print_iter;
2674 /* We should never get here if iter is NULL */
2675 if (WARN_ON_ONCE(!iter))
2678 event_call = fbuffer->trace_file->event_call;
2679 if (!event_call || !event_call->event.funcs ||
2680 !event_call->event.funcs->trace)
2683 file = fbuffer->trace_file;
2684 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2685 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2686 !filter_match_preds(file->filter, fbuffer->entry)))
2689 event = &fbuffer->trace_file->event_call->event;
2691 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2692 trace_seq_init(&iter->seq);
2693 iter->ent = fbuffer->entry;
2694 event_call->event.funcs->trace(iter, 0, event);
2695 trace_seq_putc(&iter->seq, 0);
2696 printk("%s", iter->seq.buffer);
2698 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2701 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2702 void *buffer, size_t *lenp,
2705 int save_tracepoint_printk;
2708 mutex_lock(&tracepoint_printk_mutex);
2709 save_tracepoint_printk = tracepoint_printk;
2711 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2714 * This will force exiting early, as tracepoint_printk
2715 * is always zero when tracepoint_print_iter is not allocated
2717 if (!tracepoint_print_iter)
2718 tracepoint_printk = 0;
2720 if (save_tracepoint_printk == tracepoint_printk)
2723 if (tracepoint_printk)
2724 static_key_enable(&tracepoint_printk_key.key);
2726 static_key_disable(&tracepoint_printk_key.key);
2729 mutex_unlock(&tracepoint_printk_mutex);
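/*
 * Note (context, not code from this file): the handler above backs the
 * kernel.tracepoint_printk sysctl, so the static key can be flipped at
 * run time, e.g. "echo 1 > /proc/sys/kernel/tracepoint_printk". It only
 * has an effect when tracepoint_print_iter was allocated (the tp_printk
 * boot option); otherwise tracepoint_printk is forced back to zero above.
 */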
2734 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2736 enum event_trigger_type tt = ETT_NONE;
2737 struct trace_event_file *file = fbuffer->trace_file;
2739 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2740 fbuffer->entry, &tt))
2743 if (static_key_false(&tracepoint_printk_key.key))
2744 output_printk(fbuffer);
2746 if (static_branch_unlikely(&trace_event_exports_enabled))
2747 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2749 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2750 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2754 event_triggers_post_call(file, tt);
2757 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2762 * trace_buffer_unlock_commit_regs()
2763 * trace_event_buffer_commit()
2764 * trace_event_raw_event_xxx()
2766 # define STACK_SKIP 3
2768 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2769 struct trace_buffer *buffer,
2770 struct ring_buffer_event *event,
2771 unsigned int trace_ctx,
2772 struct pt_regs *regs)
2774 __buffer_unlock_commit(buffer, event);
2777 * If regs is not set, then skip the necessary functions.
2778 * Note, we can still get here via blktrace, wakeup tracer
2779 * and mmiotrace, but that's ok if they lose a function or
2780 * two. They are not that meaningful.
2782 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2783 ftrace_trace_userstack(tr, buffer, trace_ctx);
2787 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2790 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2791 struct ring_buffer_event *event)
2793 __buffer_unlock_commit(buffer, event);
2797 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2798 parent_ip, unsigned int trace_ctx)
2800 struct trace_event_call *call = &event_function;
2801 struct trace_buffer *buffer = tr->array_buffer.buffer;
2802 struct ring_buffer_event *event;
2803 struct ftrace_entry *entry;
2805 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2809 entry = ring_buffer_event_data(event);
2811 entry->parent_ip = parent_ip;
2813 if (!call_filter_check_discard(call, entry, buffer, event)) {
2814 if (static_branch_unlikely(&trace_function_exports_enabled))
2815 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2816 __buffer_unlock_commit(buffer, event);
2820 #ifdef CONFIG_STACKTRACE
2822 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2823 #define FTRACE_KSTACK_NESTING 4
2825 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2827 struct ftrace_stack {
2828 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2832 struct ftrace_stacks {
2833 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2836 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2837 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
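/*
 * Size sketch derived from the definitions above: with 4K pages each
 * nesting level gets PAGE_SIZE / 4 = 1024 entries, so on a 64-bit kernel
 * one ftrace_stack is 8K and the per-cpu ftrace_stacks array is 32K.
 */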
2839 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2840 unsigned int trace_ctx,
2841 int skip, struct pt_regs *regs)
2843 struct trace_event_call *call = &event_kernel_stack;
2844 struct ring_buffer_event *event;
2845 unsigned int size, nr_entries;
2846 struct ftrace_stack *fstack;
2847 struct stack_entry *entry;
2851 * Add one, for this function and the call to stack_trace_save().
2852 * If regs is set, then these functions will not be in the way.
2854 #ifndef CONFIG_UNWINDER_ORC
2859 preempt_disable_notrace();
2861 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2863 /* This should never happen. If it does, yell once and skip */
2864 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2868 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2869 * interrupt will either see the value pre increment or post
2870 * increment. If the interrupt happens pre increment it will have
2871 * restored the counter when it returns. We just need a barrier to
2872 * keep gcc from moving things around.
2876 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2877 size = ARRAY_SIZE(fstack->calls);
2880 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2883 nr_entries = stack_trace_save(fstack->calls, size, skip);
2886 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2887 struct_size(entry, caller, nr_entries),
2891 entry = ring_buffer_event_data(event);
2893 entry->size = nr_entries;
2894 memcpy(&entry->caller, fstack->calls,
2895 flex_array_size(entry, caller, nr_entries));
2897 if (!call_filter_check_discard(call, entry, buffer, event))
2898 __buffer_unlock_commit(buffer, event);
2901 /* Again, don't let gcc optimize things here */
2903 __this_cpu_dec(ftrace_stack_reserve);
2904 preempt_enable_notrace();
2908 static inline void ftrace_trace_stack(struct trace_array *tr,
2909 struct trace_buffer *buffer,
2910 unsigned int trace_ctx,
2911 int skip, struct pt_regs *regs)
2913 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2916 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
2919 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
2922 struct trace_buffer *buffer = tr->array_buffer.buffer;
2924 if (rcu_is_watching()) {
2925 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
2929 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
2933 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
2934 * but if the above rcu_is_watching() failed, then the NMI
2935 * triggered someplace critical, and ct_irq_enter() should
2936 * not be called from NMI.
2938 if (unlikely(in_nmi()))
2941 ct_irq_enter_irqson();
2942 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
2943 ct_irq_exit_irqson();
2947 * trace_dump_stack - record a stack back trace in the trace buffer
2948 * @skip: Number of functions to skip (helper handlers)
2950 void trace_dump_stack(int skip)
2952 if (tracing_disabled || tracing_selftest_running)
2955 #ifndef CONFIG_UNWINDER_ORC
2956 /* Skip 1 to skip this function. */
2959 __ftrace_trace_stack(global_trace.array_buffer.buffer,
2960 tracing_gen_ctx(), skip, NULL);
2962 EXPORT_SYMBOL_GPL(trace_dump_stack);
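/*
 * Usage sketch: a driver chasing a rare code path can drop a kernel
 * backtrace into the trace buffer instead of the console log:
 *
 *	if (unexpected_condition)	// hypothetical condition
 *		trace_dump_stack(0);
 */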
2964 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2965 static DEFINE_PER_CPU(int, user_stack_count);
2968 ftrace_trace_userstack(struct trace_array *tr,
2969 struct trace_buffer *buffer, unsigned int trace_ctx)
2971 struct trace_event_call *call = &event_user_stack;
2972 struct ring_buffer_event *event;
2973 struct userstack_entry *entry;
2975 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
2979 * NMIs cannot handle page faults, even with fixups.
2980 * Saving the user stack can (and often does) fault.
2982 if (unlikely(in_nmi()))
2986 * prevent recursion, since the user stack tracing may
2987 * trigger other kernel events.
2990 if (__this_cpu_read(user_stack_count))
2993 __this_cpu_inc(user_stack_count);
2995 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2996 sizeof(*entry), trace_ctx);
2998 goto out_drop_count;
2999 entry = ring_buffer_event_data(event);
3001 entry->tgid = current->tgid;
3002 memset(&entry->caller, 0, sizeof(entry->caller));
3004 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3005 if (!call_filter_check_discard(call, entry, buffer, event))
3006 __buffer_unlock_commit(buffer, event);
3009 __this_cpu_dec(user_stack_count);
3013 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3014 static void ftrace_trace_userstack(struct trace_array *tr,
3015 struct trace_buffer *buffer,
3016 unsigned int trace_ctx)
3019 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3021 #endif /* CONFIG_STACKTRACE */
3024 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3025 unsigned long long delta)
3027 entry->bottom_delta_ts = delta & U32_MAX;
3028 entry->top_delta_ts = (delta >> 32);
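/*
 * The 64-bit delta is split so the entry stays compact; readers
 * recombine it as ((u64)top_delta_ts << 32) | bottom_delta_ts.
 */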
3031 void trace_last_func_repeats(struct trace_array *tr,
3032 struct trace_func_repeats *last_info,
3033 unsigned int trace_ctx)
3035 struct trace_buffer *buffer = tr->array_buffer.buffer;
3036 struct func_repeats_entry *entry;
3037 struct ring_buffer_event *event;
3040 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3041 sizeof(*entry), trace_ctx);
3045 delta = ring_buffer_event_time_stamp(buffer, event) -
3046 last_info->ts_last_call;
3048 entry = ring_buffer_event_data(event);
3049 entry->ip = last_info->ip;
3050 entry->parent_ip = last_info->parent_ip;
3051 entry->count = last_info->count;
3052 func_repeats_set_delta_ts(entry, delta);
3054 __buffer_unlock_commit(buffer, event);
3057 /* created for use with alloc_percpu */
3058 struct trace_buffer_struct {
3060 char buffer[4][TRACE_BUF_SIZE];
3063 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3066 * This allows for lockless recording. If we're nested too deeply, then
3067 * this returns NULL.
3069 static char *get_trace_buf(void)
3071 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3073 if (!trace_percpu_buffer || buffer->nesting >= 4)
3078 /* Interrupts must see nesting incremented before we use the buffer */
3080 return &buffer->buffer[buffer->nesting - 1][0];
3083 static void put_trace_buf(void)
3085 /* Don't let the decrement of nesting leak before this */
3087 this_cpu_dec(trace_percpu_buffer->nesting);
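/*
 * Pairing sketch (this is how trace_vbprintk() and
 * __trace_array_vprintk() below use it): take a per-cpu nesting slot,
 * format into it, then release it:
 *
 *	buf = get_trace_buf();
 *	if (buf) {
 *		len = vscnprintf(buf, TRACE_BUF_SIZE, fmt, args);
 *		// ... copy into a ring buffer event ...
 *		put_trace_buf();
 *	}
 */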
3090 static int alloc_percpu_trace_buffer(void)
3092 struct trace_buffer_struct __percpu *buffers;
3094 if (trace_percpu_buffer)
3097 buffers = alloc_percpu(struct trace_buffer_struct);
3098 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3101 trace_percpu_buffer = buffers;
3105 static int buffers_allocated;
3107 void trace_printk_init_buffers(void)
3109 if (buffers_allocated)
3112 if (alloc_percpu_trace_buffer())
3115 /* trace_printk() is for debug use only. Don't use it in production. */
3118 pr_warn("**********************************************************\n");
3119 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3121 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3123 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3124 pr_warn("** unsafe for production use. **\n");
3126 pr_warn("** If you see this message and you are not debugging **\n");
3127 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3129 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3130 pr_warn("**********************************************************\n");
3132 /* Expand the buffers to set size */
3133 tracing_update_buffers(&global_trace);
3135 buffers_allocated = 1;
3138 * trace_printk_init_buffers() can be called by modules.
3139 * If that happens, then we need to start cmdline recording
3140 * directly here. If the global_trace.buffer is already
3141 * allocated here, then this was called by module code.
3143 if (global_trace.array_buffer.buffer)
3144 tracing_start_cmdline_record();
3146 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3148 void trace_printk_start_comm(void)
3150 /* Start tracing comms if trace printk is set */
3151 if (!buffers_allocated)
3153 tracing_start_cmdline_record();
3156 static void trace_printk_start_stop_comm(int enabled)
3158 if (!buffers_allocated)
3162 tracing_start_cmdline_record();
3164 tracing_stop_cmdline_record();
3168 * trace_vbprintk - write binary msg to tracing buffer
3169 * @ip: The address of the caller
3170 * @fmt: The string format to write to the buffer
3171 * @args: Arguments for @fmt
3173 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3175 struct trace_event_call *call = &event_bprint;
3176 struct ring_buffer_event *event;
3177 struct trace_buffer *buffer;
3178 struct trace_array *tr = &global_trace;
3179 struct bprint_entry *entry;
3180 unsigned int trace_ctx;
3184 if (unlikely(tracing_selftest_running || tracing_disabled))
3187 /* Don't pollute graph traces with trace_vprintk internals */
3188 pause_graph_tracing();
3190 trace_ctx = tracing_gen_ctx();
3191 preempt_disable_notrace();
3193 tbuffer = get_trace_buf();
3199 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3201 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3204 size = sizeof(*entry) + sizeof(u32) * len;
3205 buffer = tr->array_buffer.buffer;
3206 ring_buffer_nest_start(buffer);
3207 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3211 entry = ring_buffer_event_data(event);
3215 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3216 if (!call_filter_check_discard(call, entry, buffer, event)) {
3217 __buffer_unlock_commit(buffer, event);
3218 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3222 ring_buffer_nest_end(buffer);
3227 preempt_enable_notrace();
3228 unpause_graph_tracing();
3232 EXPORT_SYMBOL_GPL(trace_vbprintk);
3236 __trace_array_vprintk(struct trace_buffer *buffer,
3237 unsigned long ip, const char *fmt, va_list args)
3239 struct trace_event_call *call = &event_print;
3240 struct ring_buffer_event *event;
3242 struct print_entry *entry;
3243 unsigned int trace_ctx;
3246 if (tracing_disabled)
3249 /* Don't pollute graph traces with trace_vprintk internals */
3250 pause_graph_tracing();
3252 trace_ctx = tracing_gen_ctx();
3253 preempt_disable_notrace();
3256 tbuffer = get_trace_buf();
3262 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3264 size = sizeof(*entry) + len + 1;
3265 ring_buffer_nest_start(buffer);
3266 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3270 entry = ring_buffer_event_data(event);
3273 memcpy(&entry->buf, tbuffer, len + 1);
3274 if (!call_filter_check_discard(call, entry, buffer, event)) {
3275 __buffer_unlock_commit(buffer, event);
3276 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3280 ring_buffer_nest_end(buffer);
3284 preempt_enable_notrace();
3285 unpause_graph_tracing();
3291 int trace_array_vprintk(struct trace_array *tr,
3292 unsigned long ip, const char *fmt, va_list args)
3294 if (tracing_selftest_running && tr == &global_trace)
3297 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3301 * trace_array_printk - Print a message to a specific instance
3302 * @tr: The instance trace_array descriptor
3303 * @ip: The instruction pointer that this is called from.
3304 * @fmt: The format to print (printf format)
3306 * If a subsystem sets up its own instance, it has the right to
3307 * printk strings into their tracing instance buffer using this
3308 * function. Note, this function will not write into the top level
3309 * buffer (use trace_printk() for that), as writing into the top level
3310 * buffer should only have events that can be individually disabled.
3311 * trace_printk() is only used for debugging a kernel, and should never
3312 * be incorporated in normal use.
3314 * trace_array_printk() can be used, as it will not add noise to the
3315 * top level tracing buffer.
3317 * Note, trace_array_init_printk() must be called on @tr before this
3321 int trace_array_printk(struct trace_array *tr,
3322 unsigned long ip, const char *fmt, ...)
3330 /* This is only allowed for created instances */
3331 if (tr == &global_trace)
3334 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3338 ret = trace_array_vprintk(tr, ip, fmt, ap);
3342 EXPORT_SYMBOL_GPL(trace_array_printk);
3345 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3346 * @tr: The trace array to initialize the buffers for
3348 * As trace_array_printk() only writes into instances, such calls are OK to
3349 * have in the kernel (unlike trace_printk()). This needs to be called
3350 * before trace_array_printk() can be used on a trace_array.
3352 int trace_array_init_printk(struct trace_array *tr)
3357 /* This is only allowed for created instances */
3358 if (tr == &global_trace)
3361 return alloc_percpu_trace_buffer();
3363 EXPORT_SYMBOL_GPL(trace_array_init_printk);
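/*
 * Usage sketch for an instance owner ("my_tr" is a hypothetical
 * trace_array created by the caller, not something defined here):
 *
 *	if (!trace_array_init_printk(my_tr))
 *		trace_array_printk(my_tr, _THIS_IP_,
 *				   "widget reset, count=%d\n", count);
 */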
3366 int trace_array_printk_buf(struct trace_buffer *buffer,
3367 unsigned long ip, const char *fmt, ...)
3372 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3376 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3382 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3384 return trace_array_vprintk(&global_trace, ip, fmt, args);
3386 EXPORT_SYMBOL_GPL(trace_vprintk);
3388 static void trace_iterator_increment(struct trace_iterator *iter)
3390 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3394 ring_buffer_iter_advance(buf_iter);
3397 static struct trace_entry *
3398 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3399 unsigned long *lost_events)
3401 struct ring_buffer_event *event;
3402 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3405 event = ring_buffer_iter_peek(buf_iter, ts);
3407 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3408 (unsigned long)-1 : 0;
3410 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3415 iter->ent_size = ring_buffer_event_length(event);
3416 return ring_buffer_event_data(event);
3422 static struct trace_entry *
3423 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3424 unsigned long *missing_events, u64 *ent_ts)
3426 struct trace_buffer *buffer = iter->array_buffer->buffer;
3427 struct trace_entry *ent, *next = NULL;
3428 unsigned long lost_events = 0, next_lost = 0;
3429 int cpu_file = iter->cpu_file;
3430 u64 next_ts = 0, ts;
3436 * If we are in a per_cpu trace file, don't bother iterating over
3437 * all CPUs; peek at that one CPU directly.
3439 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3440 if (ring_buffer_empty_cpu(buffer, cpu_file))
3442 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3444 *ent_cpu = cpu_file;
3449 for_each_tracing_cpu(cpu) {
3451 if (ring_buffer_empty_cpu(buffer, cpu))
3454 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3457 * Pick the entry with the smallest timestamp:
3459 if (ent && (!next || ts < next_ts)) {
3463 next_lost = lost_events;
3464 next_size = iter->ent_size;
3468 iter->ent_size = next_size;
3471 *ent_cpu = next_cpu;
3477 *missing_events = next_lost;
3482 #define STATIC_FMT_BUF_SIZE 128
3483 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3485 char *trace_iter_expand_format(struct trace_iterator *iter)
3490 * iter->tr is NULL when used with tp_printk, which makes
3491 * this get called where it is not safe to call krealloc().
3493 if (!iter->tr || iter->fmt == static_fmt_buf)
3496 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3499 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3506 /* Returns true if the string is safe to dereference from an event */
3507 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3510 unsigned long addr = (unsigned long)str;
3511 struct trace_event *trace_event;
3512 struct trace_event_call *event;
3514 /* Ignore strings with no length */
3518 /* OK if part of the event data */
3519 if ((addr >= (unsigned long)iter->ent) &&
3520 (addr < (unsigned long)iter->ent + iter->ent_size))
3523 /* OK if part of the temp seq buffer */
3524 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3525 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3528 /* Core rodata can not be freed */
3529 if (is_kernel_rodata(addr))
3532 if (trace_is_tracepoint_string(str))
3536 * Now this could be a module event, referencing core module
3537 * data, which is OK.
3542 trace_event = ftrace_find_event(iter->ent->type);
3546 event = container_of(trace_event, struct trace_event_call, event);
3547 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3550 /* Would rather have rodata, but this will suffice */
3551 if (within_module_core(addr, event->module))
3557 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3559 static int test_can_verify_check(const char *fmt, ...)
3566 * The verifier depends on vsnprintf() modifying the va_list
3567 * passed to it, where it is sent as a reference. Some architectures
3568 * (like x86_32) pass it by value, which means that vsnprintf()
3569 * does not modify the va_list passed to it, and the verifier
3570 * would then need to be able to understand all the values that
3571 * vsnprintf can use. If it is passed by value, then the verifier is disabled.
3575 vsnprintf(buf, 16, "%d", ap);
3576 ret = va_arg(ap, int);
3582 static void test_can_verify(void)
3584 if (!test_can_verify_check("%d %d", 0, 1)) {
3585 pr_info("trace event string verifier disabled\n");
3586 static_branch_inc(&trace_no_verify);
3591 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3592 * @iter: The iterator that holds the seq buffer and the event being printed
3593 * @fmt: The format used to print the event
3594 * @ap: The va_list holding the data to print from @fmt.
3596 * This writes the data into the @iter->seq buffer using the data from
3597 * @fmt and @ap. If the format has a %s, then the source of the string
3598 * is examined to make sure it is safe to print, otherwise it will
3599 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3602 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3605 const char *p = fmt;
3609 if (WARN_ON_ONCE(!fmt))
3612 if (static_branch_unlikely(&trace_no_verify))
3615 /* Don't bother checking when doing a ftrace_dump() */
3616 if (iter->fmt == static_fmt_buf)
3625 /* We only care about %s and variants */
3626 for (i = 0; p[i]; i++) {
3627 if (i + 1 >= iter->fmt_size) {
3629 * If we can't expand the copy buffer,
3632 if (!trace_iter_expand_format(iter))
3636 if (p[i] == '\\' && p[i+1]) {
3641 /* Need to test cases like %08.*s */
3642 for (j = 1; p[i+j]; j++) {
3643 if (isdigit(p[i+j]) ||
3646 if (p[i+j] == '*') {
3658 /* If no %s found then just print normally */
3662 /* Copy up to the %s, and print that */
3663 strncpy(iter->fmt, p, i);
3664 iter->fmt[i] = '\0';
3665 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3668 * If iter->seq is full, the above call no longer guarantees
3669 * that ap is in sync with fmt processing, and further calls
3670 * to va_arg() can return wrong positional arguments.
3672 * Ensure that ap is no longer used in this case.
3674 if (iter->seq.full) {
3680 len = va_arg(ap, int);
3682 /* The ap now points to the string data of the %s */
3683 str = va_arg(ap, const char *);
3686 * If you hit this warning, it is likely that the
3687 * trace event in question used %s on a string that
3688 * was saved at the time of the event, but may not be
3689 * around when the trace is read. Use __string(),
3690 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3691 * instead. See samples/trace_events/trace-events-sample.h
3694 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3695 "fmt: '%s' current_buffer: '%s'",
3696 fmt, seq_buf_str(&iter->seq.seq))) {
3699 /* Try to safely read the string */
3701 if (len + 1 > iter->fmt_size)
3702 len = iter->fmt_size - 1;
3705 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3709 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3713 trace_seq_printf(&iter->seq, "(0x%px)", str);
3715 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3717 str = "[UNSAFE-MEMORY]";
3718 strcpy(iter->fmt, "%s");
3720 strncpy(iter->fmt, p + i, j + 1);
3721 iter->fmt[j+1] = '\0';
3724 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3726 trace_seq_printf(&iter->seq, iter->fmt, str);
3732 trace_seq_vprintf(&iter->seq, p, ap);
3735 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3737 const char *p, *new_fmt;
3740 if (WARN_ON_ONCE(!fmt))
3743 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3747 new_fmt = q = iter->fmt;
3749 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3750 if (!trace_iter_expand_format(iter))
3753 q += iter->fmt - new_fmt;
3754 new_fmt = iter->fmt;
3759 /* Replace %p with %px */
3763 } else if (p[0] == 'p' && !isalnum(p[1])) {
3774 #define STATIC_TEMP_BUF_SIZE 128
3775 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3777 /* Find the next real entry, without updating the iterator itself */
3778 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3779 int *ent_cpu, u64 *ent_ts)
3781 /* __find_next_entry will reset ent_size */
3782 int ent_size = iter->ent_size;
3783 struct trace_entry *entry;
3786 * If called from ftrace_dump(), then the iter->temp buffer
3787 * will be the static_temp_buf and not created from kmalloc.
3788 * If the entry size is greater than the buffer, we can
3789 * not save it. Just return NULL in that case. This is only
3790 * used to add markers when two consecutive events' time
3791 * stamps have a large delta. See trace_print_lat_context()
3793 if (iter->temp == static_temp_buf &&
3794 STATIC_TEMP_BUF_SIZE < ent_size)
3798 * The __find_next_entry() may call peek_next_entry(), which may
3799 * call ring_buffer_peek() that may make the contents of iter->ent
3800 * undefined. Need to copy iter->ent now.
3802 if (iter->ent && iter->ent != iter->temp) {
3803 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3804 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3806 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3811 iter->temp_size = iter->ent_size;
3813 memcpy(iter->temp, iter->ent, iter->ent_size);
3814 iter->ent = iter->temp;
3816 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3817 /* Put back the original ent_size */
3818 iter->ent_size = ent_size;
3823 /* Find the next real entry, and increment the iterator to the next entry */
3824 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3826 iter->ent = __find_next_entry(iter, &iter->cpu,
3827 &iter->lost_events, &iter->ts);
3830 trace_iterator_increment(iter);
3832 return iter->ent ? iter : NULL;
3835 static void trace_consume(struct trace_iterator *iter)
3837 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3838 &iter->lost_events);
3841 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3843 struct trace_iterator *iter = m->private;
3847 WARN_ON_ONCE(iter->leftover);
3851 /* can't go backwards */
3856 ent = trace_find_next_entry_inc(iter);
3860 while (ent && iter->idx < i)
3861 ent = trace_find_next_entry_inc(iter);
3868 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3870 struct ring_buffer_iter *buf_iter;
3871 unsigned long entries = 0;
3874 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3876 buf_iter = trace_buffer_iter(iter, cpu);
3880 ring_buffer_iter_reset(buf_iter);
3883 * With the max latency tracers, a reset may never have taken
3884 * place on a CPU. This shows up as a timestamp that is earlier
3885 * than the start of the buffer.
3887 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3888 if (ts >= iter->array_buffer->time_start)
3891 ring_buffer_iter_advance(buf_iter);
3894 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3898 * The current tracer is copied to avoid a global lock all around.
3901 static void *s_start(struct seq_file *m, loff_t *pos)
3903 struct trace_iterator *iter = m->private;
3904 struct trace_array *tr = iter->tr;
3905 int cpu_file = iter->cpu_file;
3910 mutex_lock(&trace_types_lock);
3911 if (unlikely(tr->current_trace != iter->trace)) {
3912 /* Close iter->trace before switching to the new current tracer */
3913 if (iter->trace->close)
3914 iter->trace->close(iter);
3915 iter->trace = tr->current_trace;
3916 /* Reopen the new current tracer */
3917 if (iter->trace->open)
3918 iter->trace->open(iter);
3920 mutex_unlock(&trace_types_lock);
3922 #ifdef CONFIG_TRACER_MAX_TRACE
3923 if (iter->snapshot && iter->trace->use_max_tr)
3924 return ERR_PTR(-EBUSY);
3927 if (*pos != iter->pos) {
3932 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3933 for_each_tracing_cpu(cpu)
3934 tracing_iter_reset(iter, cpu);
3936 tracing_iter_reset(iter, cpu_file);
3939 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3944 * If we overflowed the seq_file before, then we want
3945 * to just reuse the trace_seq buffer again.
3951 p = s_next(m, p, &l);
3955 trace_event_read_lock();
3956 trace_access_lock(cpu_file);
3960 static void s_stop(struct seq_file *m, void *p)
3962 struct trace_iterator *iter = m->private;
3964 #ifdef CONFIG_TRACER_MAX_TRACE
3965 if (iter->snapshot && iter->trace->use_max_tr)
3969 trace_access_unlock(iter->cpu_file);
3970 trace_event_read_unlock();
3974 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3975 unsigned long *entries, int cpu)
3977 unsigned long count;
3979 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3981 * If this buffer has skipped entries, then we hold all
3982 * entries for the trace and we need to ignore the
3983 * ones before the time stamp.
3985 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3986 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3987 /* total is the same as the entries */
3991 ring_buffer_overrun_cpu(buf->buffer, cpu);
3996 get_total_entries(struct array_buffer *buf,
3997 unsigned long *total, unsigned long *entries)
4005 for_each_tracing_cpu(cpu) {
4006 get_total_entries_cpu(buf, &t, &e, cpu);
4012 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4014 unsigned long total, entries;
4019 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4024 unsigned long trace_total_entries(struct trace_array *tr)
4026 unsigned long total, entries;
4031 get_total_entries(&tr->array_buffer, &total, &entries);
4036 static void print_lat_help_header(struct seq_file *m)
4038 seq_puts(m, "# _------=> CPU# \n"
4039 "# / _-----=> irqs-off/BH-disabled\n"
4040 "# | / _----=> need-resched \n"
4041 "# || / _---=> hardirq/softirq \n"
4042 "# ||| / _--=> preempt-depth \n"
4043 "# |||| / _-=> migrate-disable \n"
4044 "# ||||| / delay \n"
4045 "# cmd pid |||||| time | caller \n"
4046 "# \\ / |||||| \\ | / \n");
4049 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4051 unsigned long total;
4052 unsigned long entries;
4054 get_total_entries(buf, &total, &entries);
4055 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4056 entries, total, num_online_cpus());
4060 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4063 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4065 print_event_info(buf, m);
4067 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4068 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4071 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4074 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4075 static const char space[] = " ";
4076 int prec = tgid ? 12 : 2;
4078 print_event_info(buf, m);
4080 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4081 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4082 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4083 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4084 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4085 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4086 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4087 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4091 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4093 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4094 struct array_buffer *buf = iter->array_buffer;
4095 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4096 struct tracer *type = iter->trace;
4097 unsigned long entries;
4098 unsigned long total;
4099 const char *name = type->name;
4101 get_total_entries(buf, &total, &entries);
4103 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4105 seq_puts(m, "# -----------------------------------"
4106 "---------------------------------\n");
4107 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4108 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4109 nsecs_to_usecs(data->saved_latency),
4113 preempt_model_none() ? "server" :
4114 preempt_model_voluntary() ? "desktop" :
4115 preempt_model_full() ? "preempt" :
4116 preempt_model_rt() ? "preempt_rt" :
4118 /* These are reserved for later use */
4121 seq_printf(m, " #P:%d)\n", num_online_cpus());
4125 seq_puts(m, "# -----------------\n");
4126 seq_printf(m, "# | task: %.16s-%d "
4127 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4128 data->comm, data->pid,
4129 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4130 data->policy, data->rt_priority);
4131 seq_puts(m, "# -----------------\n");
4133 if (data->critical_start) {
4134 seq_puts(m, "# => started at: ");
4135 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4136 trace_print_seq(m, &iter->seq);
4137 seq_puts(m, "\n# => ended at: ");
4138 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4139 trace_print_seq(m, &iter->seq);
4140 seq_puts(m, "\n#\n");
4146 static void test_cpu_buff_start(struct trace_iterator *iter)
4148 struct trace_seq *s = &iter->seq;
4149 struct trace_array *tr = iter->tr;
4151 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4154 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4157 if (cpumask_available(iter->started) &&
4158 cpumask_test_cpu(iter->cpu, iter->started))
4161 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4164 if (cpumask_available(iter->started))
4165 cpumask_set_cpu(iter->cpu, iter->started);
4167 /* Don't print started cpu buffer for the first entry of the trace */
4169 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4173 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4175 struct trace_array *tr = iter->tr;
4176 struct trace_seq *s = &iter->seq;
4177 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4178 struct trace_entry *entry;
4179 struct trace_event *event;
4183 test_cpu_buff_start(iter);
4185 event = ftrace_find_event(entry->type);
4187 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4188 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4189 trace_print_lat_context(iter);
4191 trace_print_context(iter);
4194 if (trace_seq_has_overflowed(s))
4195 return TRACE_TYPE_PARTIAL_LINE;
4198 if (tr->trace_flags & TRACE_ITER_FIELDS)
4199 return print_event_fields(iter, event);
4200 return event->funcs->trace(iter, sym_flags, event);
4203 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4205 return trace_handle_return(s);
4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4210 struct trace_array *tr = iter->tr;
4211 struct trace_seq *s = &iter->seq;
4212 struct trace_entry *entry;
4213 struct trace_event *event;
4217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4218 trace_seq_printf(s, "%d %d %llu ",
4219 entry->pid, iter->cpu, iter->ts);
4221 if (trace_seq_has_overflowed(s))
4222 return TRACE_TYPE_PARTIAL_LINE;
4224 event = ftrace_find_event(entry->type);
4226 return event->funcs->raw(iter, 0, event);
4228 trace_seq_printf(s, "%d ?\n", entry->type);
4230 return trace_handle_return(s);
4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4235 struct trace_array *tr = iter->tr;
4236 struct trace_seq *s = &iter->seq;
4237 unsigned char newline = '\n';
4238 struct trace_entry *entry;
4239 struct trace_event *event;
4243 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4244 SEQ_PUT_HEX_FIELD(s, entry->pid);
4245 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4246 SEQ_PUT_HEX_FIELD(s, iter->ts);
4247 if (trace_seq_has_overflowed(s))
4248 return TRACE_TYPE_PARTIAL_LINE;
4251 event = ftrace_find_event(entry->type);
4253 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4254 if (ret != TRACE_TYPE_HANDLED)
4258 SEQ_PUT_FIELD(s, newline);
4260 return trace_handle_return(s);
4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4265 struct trace_array *tr = iter->tr;
4266 struct trace_seq *s = &iter->seq;
4267 struct trace_entry *entry;
4268 struct trace_event *event;
4272 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4273 SEQ_PUT_FIELD(s, entry->pid);
4274 SEQ_PUT_FIELD(s, iter->cpu);
4275 SEQ_PUT_FIELD(s, iter->ts);
4276 if (trace_seq_has_overflowed(s))
4277 return TRACE_TYPE_PARTIAL_LINE;
4280 event = ftrace_find_event(entry->type);
4281 return event ? event->funcs->binary(iter, 0, event) :
4285 int trace_empty(struct trace_iterator *iter)
4287 struct ring_buffer_iter *buf_iter;
4290 /* If we are looking at one CPU buffer, only check that one */
4291 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4292 cpu = iter->cpu_file;
4293 buf_iter = trace_buffer_iter(iter, cpu);
4295 if (!ring_buffer_iter_empty(buf_iter))
4298 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4304 for_each_tracing_cpu(cpu) {
4305 buf_iter = trace_buffer_iter(iter, cpu);
4307 if (!ring_buffer_iter_empty(buf_iter))
4310 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4318 /* Called with trace_event_read_lock() held. */
4319 enum print_line_t print_trace_line(struct trace_iterator *iter)
4321 struct trace_array *tr = iter->tr;
4322 unsigned long trace_flags = tr->trace_flags;
4323 enum print_line_t ret;
4325 if (iter->lost_events) {
4326 if (iter->lost_events == (unsigned long)-1)
4327 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4330 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4331 iter->cpu, iter->lost_events);
4332 if (trace_seq_has_overflowed(&iter->seq))
4333 return TRACE_TYPE_PARTIAL_LINE;
4336 if (iter->trace && iter->trace->print_line) {
4337 ret = iter->trace->print_line(iter);
4338 if (ret != TRACE_TYPE_UNHANDLED)
4342 if (iter->ent->type == TRACE_BPUTS &&
4343 trace_flags & TRACE_ITER_PRINTK &&
4344 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4345 return trace_print_bputs_msg_only(iter);
4347 if (iter->ent->type == TRACE_BPRINT &&
4348 trace_flags & TRACE_ITER_PRINTK &&
4349 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4350 return trace_print_bprintk_msg_only(iter);
4352 if (iter->ent->type == TRACE_PRINT &&
4353 trace_flags & TRACE_ITER_PRINTK &&
4354 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4355 return trace_print_printk_msg_only(iter);
4357 if (trace_flags & TRACE_ITER_BIN)
4358 return print_bin_fmt(iter);
4360 if (trace_flags & TRACE_ITER_HEX)
4361 return print_hex_fmt(iter);
4363 if (trace_flags & TRACE_ITER_RAW)
4364 return print_raw_fmt(iter);
4366 return print_trace_fmt(iter);
4369 void trace_latency_header(struct seq_file *m)
4371 struct trace_iterator *iter = m->private;
4372 struct trace_array *tr = iter->tr;
4374 /* print nothing if the buffers are empty */
4375 if (trace_empty(iter))
4378 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4379 print_trace_header(m, iter);
4381 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4382 print_lat_help_header(m);
4385 void trace_default_header(struct seq_file *m)
4387 struct trace_iterator *iter = m->private;
4388 struct trace_array *tr = iter->tr;
4389 unsigned long trace_flags = tr->trace_flags;
4391 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4394 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4395 /* print nothing if the buffers are empty */
4396 if (trace_empty(iter))
4398 print_trace_header(m, iter);
4399 if (!(trace_flags & TRACE_ITER_VERBOSE))
4400 print_lat_help_header(m);
4402 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4403 if (trace_flags & TRACE_ITER_IRQ_INFO)
4404 print_func_help_header_irq(iter->array_buffer,
4407 print_func_help_header(iter->array_buffer, m,
4413 static void test_ftrace_alive(struct seq_file *m)
4415 if (!ftrace_is_dead())
4417 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4418 "# MAY BE MISSING FUNCTION EVENTS\n");
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 static void show_snapshot_main_help(struct seq_file *m)
4424 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4425 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4426 "# Takes a snapshot of the main buffer.\n"
4427 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4428 "# (Doesn't have to be '2'; works with any number that\n"
4429 "# is not a '0' or '1')\n");
4432 static void show_snapshot_percpu_help(struct seq_file *m)
4434 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4436 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4437 "# Takes a snapshot of the main buffer for this cpu.\n");
4439 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4440 "# Must use main snapshot file to allocate.\n");
4442 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4443 "# (Doesn't have to be '2'; works with any number that\n"
4444 "# is not a '0' or '1')\n");
4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4449 if (iter->tr->allocated_snapshot)
4450 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4452 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4454 seq_puts(m, "# Snapshot commands:\n");
4455 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4456 show_snapshot_main_help(m);
4458 show_snapshot_percpu_help(m);
4461 /* Should never be called */
4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4465 static int s_show(struct seq_file *m, void *v)
4467 struct trace_iterator *iter = v;
4470 if (iter->ent == NULL) {
4472 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4474 test_ftrace_alive(m);
4476 if (iter->snapshot && trace_empty(iter))
4477 print_snapshot_help(m, iter);
4478 else if (iter->trace && iter->trace->print_header)
4479 iter->trace->print_header(m);
4481 trace_default_header(m);
4483 } else if (iter->leftover) {
4485 * If we filled the seq_file buffer earlier, we
4486 * want to just show it now.
4488 ret = trace_print_seq(m, &iter->seq);
4490 /* ret should this time be zero, but you never know */
4491 iter->leftover = ret;
4494 ret = print_trace_line(iter);
4495 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4497 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4499 ret = trace_print_seq(m, &iter->seq);
4501 * If we overflow the seq_file buffer, then it will
4502 * ask us for this data again at start up.
4504 * ret is 0 if seq_file write succeeded.
4507 iter->leftover = ret;
4514 * Should be used after trace_array_get(), trace_types_lock
4515 * ensures that i_cdev was already initialized.
4517 static inline int tracing_get_cpu(struct inode *inode)
4519 if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 return (long)inode->i_cdev - 1;
4521 return RING_BUFFER_ALL_CPUS;
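/*
 * Decoding note: trace_create_cpu_file() stores cpu + 1 in i_cdev, so a
 * NULL i_cdev means "all CPUs" rather than CPU 0.
 */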
4524 static const struct seq_operations tracer_seq_ops = {
4532 * Note, as iter itself can be allocated and freed in different
4533 * ways, this function is only used to free its content, and not
4534 the iterator itself. The only requirement for all the allocations
4535 is that they must zero all fields (kzalloc), as freeing works with
4536 either allocated content or NULL.
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4540 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4541 if (iter->fmt != static_fmt_buf)
4545 kfree(iter->buffer_iter);
4546 mutex_destroy(&iter->mutex);
4547 free_cpumask_var(iter->started);
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4553 struct trace_array *tr = inode->i_private;
4554 struct trace_iterator *iter;
4557 if (tracing_disabled)
4558 return ERR_PTR(-ENODEV);
4560 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4562 return ERR_PTR(-ENOMEM);
4564 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4566 if (!iter->buffer_iter)
4570 * trace_find_next_entry() may need to save off iter->ent.
4571 * It will place it into the iter->temp buffer. As most
4572 * events are less than 128 bytes, allocate a buffer of that size.
4573 * If one is greater, then trace_find_next_entry() will
4574 * allocate a new buffer to adjust for the bigger iter->ent.
4575 * It's not critical if it fails to get allocated here.
4577 iter->temp = kmalloc(128, GFP_KERNEL);
4579 iter->temp_size = 128;
4582 * trace_event_printf() may need to modify the given format
4583 * string to replace %p with %px so that it shows the real address
4584 * instead of a hash value. However, that is only needed for event
4585 * tracing; other tracers may not need it. Defer the allocation
4586 * until it is needed.
4591 mutex_lock(&trace_types_lock);
4592 iter->trace = tr->current_trace;
4594 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600 /* Currently only the top directory has a snapshot */
4601 if (tr->current_trace->print_max || snapshot)
4602 iter->array_buffer = &tr->max_buffer;
4605 iter->array_buffer = &tr->array_buffer;
4606 iter->snapshot = snapshot;
4608 iter->cpu_file = tracing_get_cpu(inode);
4609 mutex_init(&iter->mutex);
4611 /* Notify the tracer early; before we stop tracing. */
4612 if (iter->trace->open)
4613 iter->trace->open(iter);
4615 /* Annotate start of buffers if we had overruns */
4616 if (ring_buffer_overruns(iter->array_buffer->buffer))
4617 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4619 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4620 if (trace_clocks[tr->clock_id].in_ns)
4621 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4624 * If pause-on-trace is enabled, then stop the trace while
4625 * dumping, unless this is the "snapshot" file
4627 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4628 tracing_stop_tr(tr);
4630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4631 for_each_tracing_cpu(cpu) {
4632 iter->buffer_iter[cpu] =
4633 ring_buffer_read_prepare(iter->array_buffer->buffer,
4636 ring_buffer_read_prepare_sync();
4637 for_each_tracing_cpu(cpu) {
4638 ring_buffer_read_start(iter->buffer_iter[cpu]);
4639 tracing_iter_reset(iter, cpu);
4642 cpu = iter->cpu_file;
4643 iter->buffer_iter[cpu] =
4644 ring_buffer_read_prepare(iter->array_buffer->buffer,
4646 ring_buffer_read_prepare_sync();
4647 ring_buffer_read_start(iter->buffer_iter[cpu]);
4648 tracing_iter_reset(iter, cpu);
4651 mutex_unlock(&trace_types_lock);
4656 mutex_unlock(&trace_types_lock);
4657 free_trace_iter_content(iter);
4659 seq_release_private(inode, file);
4660 return ERR_PTR(-ENOMEM);
4663 int tracing_open_generic(struct inode *inode, struct file *filp)
4667 ret = tracing_check_open_get_tr(NULL);
4671 filp->private_data = inode->i_private;
4675 bool tracing_is_disabled(void)
4677 return (tracing_disabled) ? true : false;
4681 * Open and update trace_array ref count.
4682 * Must have the current trace_array passed to it.
4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4686 struct trace_array *tr = inode->i_private;
4689 ret = tracing_check_open_get_tr(tr);
4693 filp->private_data = inode->i_private;
4699 * The private pointer of the inode is the trace_event_file.
4700 * Update the tr ref count associated to it.
4702 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4704 struct trace_event_file *file = inode->i_private;
4707 ret = tracing_check_open_get_tr(file->tr);
4711 mutex_lock(&event_mutex);
4713 /* Fail if the file is marked for removal */
4714 if (file->flags & EVENT_FILE_FL_FREED) {
4715 trace_array_put(file->tr);
4718 event_file_get(file);
4721 mutex_unlock(&event_mutex);
4725 filp->private_data = inode->i_private;
4730 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4732 struct trace_event_file *file = inode->i_private;
4734 trace_array_put(file->tr);
4735 event_file_put(file);
4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4742 tracing_release_file_tr(inode, filp);
4743 return single_release(inode, filp);
4746 static int tracing_mark_open(struct inode *inode, struct file *filp)
4748 stream_open(inode, filp);
4749 return tracing_open_generic_tr(inode, filp);
4752 static int tracing_release(struct inode *inode, struct file *file)
4754 struct trace_array *tr = inode->i_private;
4755 struct seq_file *m = file->private_data;
4756 struct trace_iterator *iter;
4759 if (!(file->f_mode & FMODE_READ)) {
4760 trace_array_put(tr);
4764 /* Writes do not use seq_file */
4766 mutex_lock(&trace_types_lock);
4768 for_each_tracing_cpu(cpu) {
4769 if (iter->buffer_iter[cpu])
4770 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4773 if (iter->trace && iter->trace->close)
4774 iter->trace->close(iter);
4776 if (!iter->snapshot && tr->stop_count)
4777 /* reenable tracing if it was previously enabled */
4778 tracing_start_tr(tr);
4780 __trace_array_put(tr);
4782 mutex_unlock(&trace_types_lock);
4784 free_trace_iter_content(iter);
4785 seq_release_private(inode, file);
4790 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4792 struct trace_array *tr = inode->i_private;
4794 trace_array_put(tr);
4798 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4800 struct trace_array *tr = inode->i_private;
4802 trace_array_put(tr);
4804 return single_release(inode, file);
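/*
 * Open handler for the "trace" file. An open for write with O_TRUNC
 * clears the buffer (just the selected CPU for per-CPU files, all CPUs
 * otherwise); an open for read sets up the seq_file iterator through
 * __tracing_open().
 */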
4807 static int tracing_open(struct inode *inode, struct file *file)
4809 struct trace_array *tr = inode->i_private;
4810 struct trace_iterator *iter;
4813 ret = tracing_check_open_get_tr(tr);
4817 /* If this file was open for write, then erase contents */
4818 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4819 int cpu = tracing_get_cpu(inode);
4820 struct array_buffer *trace_buf = &tr->array_buffer;
4822 #ifdef CONFIG_TRACER_MAX_TRACE
4823 if (tr->current_trace->print_max)
4824 trace_buf = &tr->max_buffer;
4827 if (cpu == RING_BUFFER_ALL_CPUS)
4828 tracing_reset_online_cpus(trace_buf);
4830 tracing_reset_cpu(trace_buf, cpu);
4833 if (file->f_mode & FMODE_READ) {
4834 iter = __tracing_open(inode, file, false);
4836 ret = PTR_ERR(iter);
4837 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4838 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4842 trace_array_put(tr);
4848 * Some tracers are not suitable for instance buffers.
4849 * A tracer is always available for the global array (toplevel)
4850 * or if it explicitly states that it is.
4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4855 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4858 /* Find the next tracer that this trace array may use */
4859 static struct tracer *
4860 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4862 while (t && !trace_ok_for_array(t, tr))
4869 t_next(struct seq_file *m, void *v, loff_t *pos)
4871 struct trace_array *tr = m->private;
4872 struct tracer *t = v;
4877 t = get_tracer_for_array(tr, t->next);
4882 static void *t_start(struct seq_file *m, loff_t *pos)
4884 struct trace_array *tr = m->private;
4888 mutex_lock(&trace_types_lock);
4890 t = get_tracer_for_array(tr, trace_types);
4891 for (; t && l < *pos; t = t_next(m, t, &l))
4897 static void t_stop(struct seq_file *m, void *p)
4899 mutex_unlock(&trace_types_lock);
4902 static int t_show(struct seq_file *m, void *v)
4904 struct tracer *t = v;
4909 seq_puts(m, t->name);
4918 static const struct seq_operations show_traces_seq_ops = {
4925 static int show_traces_open(struct inode *inode, struct file *file)
4927 struct trace_array *tr = inode->i_private;
4931 ret = tracing_check_open_get_tr(tr);
4935 ret = seq_open(file, &show_traces_seq_ops);
4937 trace_array_put(tr);
4941 m = file->private_data;
4947 static int show_traces_release(struct inode *inode, struct file *file)
4949 struct trace_array *tr = inode->i_private;
4951 trace_array_put(tr);
4952 return seq_release(inode, file);
4956 tracing_write_stub(struct file *filp, const char __user *ubuf,
4957 size_t count, loff_t *ppos)
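/*
 * Seeking only makes sense when the file was opened for read (and is
 * therefore backed by a seq_file); for write-only opens the position
 * is simply reset to zero.
 */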
4962 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4966 if (file->f_mode & FMODE_READ)
4967 ret = seq_lseek(file, offset, whence);
4969 file->f_pos = ret = 0;
4974 static const struct file_operations tracing_fops = {
4975 .open = tracing_open,
4977 .read_iter = seq_read_iter,
4978 .splice_read = copy_splice_read,
4979 .write = tracing_write_stub,
4980 .llseek = tracing_lseek,
4981 .release = tracing_release,
4984 static const struct file_operations show_traces_fops = {
4985 .open = show_traces_open,
4987 .llseek = seq_lseek,
4988 .release = show_traces_release,
4992 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4993 size_t count, loff_t *ppos)
4995 struct trace_array *tr = file_inode(filp)->i_private;
4999 len = snprintf(NULL, 0, "%*pb\n",
5000 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5001 mask_str = kmalloc(len, GFP_KERNEL);
5005 len = snprintf(mask_str, len, "%*pb\n",
5006 cpumask_pr_args(tr->tracing_cpumask));
5011 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
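/*
 * Update which CPUs are traced for @tr. A CPU being cleared from the
 * mask has recording disabled on its ring buffers (and its "disabled"
 * counter incremented); a CPU being newly set gets the reverse.
 * The walk is done under tr->max_lock with interrupts disabled.
 */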
5019 int tracing_set_cpumask(struct trace_array *tr,
5020 cpumask_var_t tracing_cpumask_new)
5027 local_irq_disable();
5028 arch_spin_lock(&tr->max_lock);
5029 for_each_tracing_cpu(cpu) {
5031 * Increase/decrease the disabled counter if we are
5032 * about to flip a bit in the cpumask:
5034 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5035 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5036 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5037 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5038 #ifdef CONFIG_TRACER_MAX_TRACE
5039 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5042 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5043 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5044 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5045 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5046 #ifdef CONFIG_TRACER_MAX_TRACE
5047 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5051 arch_spin_unlock(&tr->max_lock);
5054 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5060 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5061 size_t count, loff_t *ppos)
5063 struct trace_array *tr = file_inode(filp)->i_private;
5064 cpumask_var_t tracing_cpumask_new;
5067 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5070 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5074 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5078 free_cpumask_var(tracing_cpumask_new);
5083 free_cpumask_var(tracing_cpumask_new);
5088 static const struct file_operations tracing_cpumask_fops = {
5089 .open = tracing_open_generic_tr,
5090 .read = tracing_cpumask_read,
5091 .write = tracing_cpumask_write,
5092 .release = tracing_release_generic_tr,
5093 .llseek = generic_file_llseek,
5096 static int tracing_trace_options_show(struct seq_file *m, void *v)
5098 struct tracer_opt *trace_opts;
5099 struct trace_array *tr = m->private;
5103 mutex_lock(&trace_types_lock);
5104 tracer_flags = tr->current_trace->flags->val;
5105 trace_opts = tr->current_trace->flags->opts;
5107 for (i = 0; trace_options[i]; i++) {
5108 if (tr->trace_flags & (1 << i))
5109 seq_printf(m, "%s\n", trace_options[i]);
5111 seq_printf(m, "no%s\n", trace_options[i]);
5114 for (i = 0; trace_opts[i].name; i++) {
5115 if (tracer_flags & trace_opts[i].bit)
5116 seq_printf(m, "%s\n", trace_opts[i].name);
5118 seq_printf(m, "no%s\n", trace_opts[i].name);
5120 mutex_unlock(&trace_types_lock);
5125 static int __set_tracer_option(struct trace_array *tr,
5126 struct tracer_flags *tracer_flags,
5127 struct tracer_opt *opts, int neg)
5129 struct tracer *trace = tracer_flags->trace;
5132 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5137 tracer_flags->val &= ~opts->bit;
5139 tracer_flags->val |= opts->bit;
5143 /* Try to assign a tracer specific option */
5144 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5146 struct tracer *trace = tr->current_trace;
5147 struct tracer_flags *tracer_flags = trace->flags;
5148 struct tracer_opt *opts = NULL;
5151 for (i = 0; tracer_flags->opts[i].name; i++) {
5152 opts = &tracer_flags->opts[i];
5154 if (strcmp(cmp, opts->name) == 0)
5155 return __set_tracer_option(tr, trace->flags, opts, neg);
5161 /* Some tracers require overwrite to stay enabled */
5162 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5164 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
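/*
 * Set or clear a single TRACE_ITER_* flag on @tr. The current tracer
 * may veto the change through its ->flag_changed() callback, and flags
 * with side effects (RECORD_CMD, RECORD_TGID, EVENT_FORK, FUNC_FORK,
 * OVERWRITE, PRINTK) have those side effects applied here as well.
 */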
5170 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5172 if ((mask == TRACE_ITER_RECORD_TGID) ||
5173 (mask == TRACE_ITER_RECORD_CMD))
5174 lockdep_assert_held(&event_mutex);
5176 /* do nothing if flag is already set */
5177 if (!!(tr->trace_flags & mask) == !!enabled)
5180 /* Give the tracer a chance to approve the change */
5181 if (tr->current_trace->flag_changed)
5182 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5186 tr->trace_flags |= mask;
5188 tr->trace_flags &= ~mask;
5190 if (mask == TRACE_ITER_RECORD_CMD)
5191 trace_event_enable_cmd_record(enabled);
5193 if (mask == TRACE_ITER_RECORD_TGID) {
5195 if (trace_alloc_tgid_map() < 0) {
5196 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5200 trace_event_enable_tgid_record(enabled);
5203 if (mask == TRACE_ITER_EVENT_FORK)
5204 trace_event_follow_fork(tr, enabled);
5206 if (mask == TRACE_ITER_FUNC_FORK)
5207 ftrace_pid_follow_fork(tr, enabled);
5209 if (mask == TRACE_ITER_OVERWRITE) {
5210 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5211 #ifdef CONFIG_TRACER_MAX_TRACE
5212 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5216 if (mask == TRACE_ITER_PRINTK) {
5217 trace_printk_start_stop_comm(enabled);
5218 trace_printk_control(enabled);
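/*
 * Parse and apply a single option token: a leading "no" clears the
 * flag, otherwise it is set. The global trace_options are matched
 * first; if none match, the current tracer's private options are tried.
 */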
5224 int trace_set_options(struct trace_array *tr, char *option)
5229 size_t orig_len = strlen(option);
5232 cmp = strstrip(option);
5234 len = str_has_prefix(cmp, "no");
5240 mutex_lock(&event_mutex);
5241 mutex_lock(&trace_types_lock);
5243 ret = match_string(trace_options, -1, cmp);
5244 /* If no option could be set, test the specific tracer options */
5246 ret = set_tracer_option(tr, cmp, neg);
5248 ret = set_tracer_flag(tr, 1 << ret, !neg);
5250 mutex_unlock(&trace_types_lock);
5251 mutex_unlock(&event_mutex);
5254 * If the first trailing whitespace is replaced with '\0' by strstrip,
5255 * turn it back into a space.
5257 if (orig_len > strlen(option))
5258 option[strlen(option)] = ' ';
5263 static void __init apply_trace_boot_options(void)
5265 char *buf = trace_boot_options_buf;
5269 option = strsep(&buf, ",");
5275 trace_set_options(&global_trace, option);
5277 /* Put back the comma to allow this to be called again */
5284 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5285 size_t cnt, loff_t *ppos)
5287 struct seq_file *m = filp->private_data;
5288 struct trace_array *tr = m->private;
5292 if (cnt >= sizeof(buf))
5295 if (copy_from_user(buf, ubuf, cnt))
5300 ret = trace_set_options(tr, buf);
5309 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5311 struct trace_array *tr = inode->i_private;
5314 ret = tracing_check_open_get_tr(tr);
5318 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5320 trace_array_put(tr);
5325 static const struct file_operations tracing_iter_fops = {
5326 .open = tracing_trace_options_open,
5328 .llseek = seq_lseek,
5329 .release = tracing_single_release_tr,
5330 .write = tracing_trace_options_write,
5333 static const char readme_msg[] =
5334 "tracing mini-HOWTO:\n\n"
5335 "# echo 0 > tracing_on : quick way to disable tracing\n"
5336 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5337 " Important files:\n"
5338 " trace\t\t\t- The static contents of the buffer\n"
5339 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5340 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5341 " current_tracer\t- function and latency tracers\n"
5342 " available_tracers\t- list of configured tracers for current_tracer\n"
5343 " error_log\t- error log for failed commands (that support it)\n"
5344 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5345 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5346 " trace_clock\t\t- change the clock used to order events\n"
5347 " local: Per cpu clock but may not be synced across CPUs\n"
5348 " global: Synced across CPUs but slows tracing down.\n"
5349 " counter: Not a clock, but just an increment\n"
5350 " uptime: Jiffy counter from time of boot\n"
5351 " perf: Same clock that perf events use\n"
5352 #ifdef CONFIG_X86_64
5353 " x86-tsc: TSC cycle counter\n"
5355 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5356 " delta: Delta difference against a buffer-wide timestamp\n"
5357 " absolute: Absolute (standalone) timestamp\n"
5358 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5359 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5360 " tracing_cpumask\t- Limit which CPUs to trace\n"
5361 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5362 "\t\t\t Remove sub-buffer with rmdir\n"
5363 " trace_options\t\t- Set format or modify how tracing happens\n"
5364 "\t\t\t Disable an option by prefixing 'no' to the\n"
5365 "\t\t\t option name\n"
5366 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5367 #ifdef CONFIG_DYNAMIC_FTRACE
5368 "\n available_filter_functions - list of functions that can be filtered on\n"
5369 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5370 "\t\t\t functions\n"
5371 "\t accepts: func_full_name or glob-matching-pattern\n"
5372 "\t modules: Can select a group via module\n"
5373 "\t Format: :mod:<module-name>\n"
5374 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5375 "\t triggers: a command to perform when function is hit\n"
5376 "\t Format: <function>:<trigger>[:count]\n"
5377 "\t trigger: traceon, traceoff\n"
5378 "\t\t enable_event:<system>:<event>\n"
5379 "\t\t disable_event:<system>:<event>\n"
5380 #ifdef CONFIG_STACKTRACE
5383 #ifdef CONFIG_TRACER_SNAPSHOT
5388 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5389 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5390 "\t The first one will disable tracing every time do_fault is hit\n"
5391 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5392 "\t The first time do trap is hit and it disables tracing, the\n"
5393 "\t counter will decrement to 2. If tracing is already disabled,\n"
5394 "\t the counter will not decrement. It only decrements when the\n"
5395 "\t trigger did work\n"
5396 "\t To remove trigger without count:\n"
5397 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5398 "\t To remove trigger with a count:\n"
5399 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5400 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5401 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5402 "\t modules: Can select a group via module command :mod:\n"
5403 "\t Does not accept triggers\n"
5404 #endif /* CONFIG_DYNAMIC_FTRACE */
5405 #ifdef CONFIG_FUNCTION_TRACER
5406 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5408 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5411 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5412 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5413 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5414 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5416 #ifdef CONFIG_TRACER_SNAPSHOT
5417 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5418 "\t\t\t snapshot buffer. Read the contents for more\n"
5419 "\t\t\t information\n"
5421 #ifdef CONFIG_STACK_TRACER
5422 " stack_trace\t\t- Shows the max stack trace when active\n"
5423 " stack_max_size\t- Shows current max stack size that was traced\n"
5424 "\t\t\t Write into this file to reset the max size (trigger a\n"
5425 "\t\t\t new trace)\n"
5426 #ifdef CONFIG_DYNAMIC_FTRACE
5427 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5430 #endif /* CONFIG_STACK_TRACER */
5431 #ifdef CONFIG_DYNAMIC_EVENTS
5432 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5433 "\t\t\t Write into this file to define/undefine new trace events.\n"
5435 #ifdef CONFIG_KPROBE_EVENTS
5436 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5437 "\t\t\t Write into this file to define/undefine new trace events.\n"
5439 #ifdef CONFIG_UPROBE_EVENTS
5440 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5441 "\t\t\t Write into this file to define/undefine new trace events.\n"
5443 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5444 defined(CONFIG_FPROBE_EVENTS)
5445 "\t accepts: event-definitions (one definition per line)\n"
5446 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5447 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5448 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5450 #ifdef CONFIG_FPROBE_EVENTS
5451 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5452 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5454 #ifdef CONFIG_HIST_TRIGGERS
5455 "\t s:[synthetic/]<event> <field> [<field>]\n"
5457 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5458 "\t -:[<group>/][<event>]\n"
5459 #ifdef CONFIG_KPROBE_EVENTS
5460 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5461 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5463 #ifdef CONFIG_UPROBE_EVENTS
5464 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5466 "\t args: <name>=fetcharg[:type]\n"
5467 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5468 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5469 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5470 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5471 "\t <argname>[->field[->field|.field...]],\n"
5474 "\t $stack<index>, $stack, $retval, $comm,\n"
5476 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5477 "\t kernel return probes support: $retval, $arg<N>, $comm\n"
5478 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5479 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5480 "\t symstr, <type>\\[<array-size>\\]\n"
5481 #ifdef CONFIG_HIST_TRIGGERS
5482 "\t field: <stype> <name>;\n"
5483 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5484 "\t [unsigned] char/int/long\n"
5486 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5487 "\t of the <attached-group>/<attached-event>.\n"
5489 " events/\t\t- Directory containing all trace event subsystems:\n"
5490 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5491 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5492 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5494 " filter\t\t- If set, only events passing filter are traced\n"
5495 " events/<system>/<event>/\t- Directory containing control files for\n"
5497 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5498 " filter\t\t- If set, only events passing filter are traced\n"
5499 " trigger\t\t- If set, a command to perform when event is hit\n"
5500 "\t Format: <trigger>[:count][if <filter>]\n"
5501 "\t trigger: traceon, traceoff\n"
5502 "\t enable_event:<system>:<event>\n"
5503 "\t disable_event:<system>:<event>\n"
5504 #ifdef CONFIG_HIST_TRIGGERS
5505 "\t enable_hist:<system>:<event>\n"
5506 "\t disable_hist:<system>:<event>\n"
5508 #ifdef CONFIG_STACKTRACE
5511 #ifdef CONFIG_TRACER_SNAPSHOT
5514 #ifdef CONFIG_HIST_TRIGGERS
5515 "\t\t hist (see below)\n"
5517 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5518 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5519 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5520 "\t events/block/block_unplug/trigger\n"
5521 "\t The first disables tracing every time block_unplug is hit.\n"
5522 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5523 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5524 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5525 "\t Like function triggers, the counter is only decremented if it\n"
5526 "\t enabled or disabled tracing.\n"
5527 "\t To remove a trigger without a count:\n"
5528 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5529 "\t To remove a trigger with a count:\n"
5530 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5531 "\t Filters can be ignored when removing a trigger.\n"
5532 #ifdef CONFIG_HIST_TRIGGERS
5533 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5534 "\t Format: hist:keys=<field1[,field2,...]>\n"
5535 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5536 "\t [:values=<field1[,field2,...]>]\n"
5537 "\t [:sort=<field1[,field2,...]>]\n"
5538 "\t [:size=#entries]\n"
5539 "\t [:pause][:continue][:clear]\n"
5540 "\t [:name=histname1]\n"
5541 "\t [:nohitcount]\n"
5542 "\t [:<handler>.<action>]\n"
5543 "\t [if <filter>]\n\n"
5544 "\t Note, special fields can be used as well:\n"
5545 "\t common_timestamp - to record current timestamp\n"
5546 "\t common_cpu - to record the CPU the event happened on\n"
5548 "\t A hist trigger variable can be:\n"
5549 "\t - a reference to a field e.g. x=current_timestamp,\n"
5550 "\t - a reference to another variable e.g. y=$x,\n"
5551 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5552 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5554 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5555 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5556 "\t variable reference, field or numeric literal.\n"
5558 "\t When a matching event is hit, an entry is added to a hash\n"
5559 "\t table using the key(s) and value(s) named, and the value of a\n"
5560 "\t sum called 'hitcount' is incremented. Keys and values\n"
5561 "\t correspond to fields in the event's format description. Keys\n"
5562 "\t can be any field, or the special string 'common_stacktrace'.\n"
5563 "\t Compound keys consisting of up to two fields can be specified\n"
5564 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5565 "\t fields. Sort keys consisting of up to two fields can be\n"
5566 "\t specified using the 'sort' keyword. The sort direction can\n"
5567 "\t be modified by appending '.descending' or '.ascending' to a\n"
5568 "\t sort field. The 'size' parameter can be used to specify more\n"
5569 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5570 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5571 "\t its histogram data will be shared with other triggers of the\n"
5572 "\t same name, and trigger hits will update this common data.\n\n"
5573 "\t Reading the 'hist' file for the event will dump the hash\n"
5574 "\t table in its entirety to stdout. If there are multiple hist\n"
5575 "\t triggers attached to an event, there will be a table for each\n"
5576 "\t trigger in the output. The table displayed for a named\n"
5577 "\t trigger will be the same as any other instance having the\n"
5578 "\t same name. The default format used to display a given field\n"
5579 "\t can be modified by appending any of the following modifiers\n"
5580 "\t to the field name, as applicable:\n\n"
5581 "\t .hex display a number as a hex value\n"
5582 "\t .sym display an address as a symbol\n"
5583 "\t .sym-offset display an address as a symbol and offset\n"
5584 "\t .execname display a common_pid as a program name\n"
5585 "\t .syscall display a syscall id as a syscall name\n"
5586 "\t .log2 display log2 value rather than raw number\n"
5587 "\t .buckets=size display values in groups of size rather than raw number\n"
5588 "\t .usecs display a common_timestamp in microseconds\n"
5589 "\t .percent display a number of percentage value\n"
5590 "\t .graph display a bar-graph of a value\n\n"
5591 "\t The 'pause' parameter can be used to pause an existing hist\n"
5592 "\t trigger or to start a hist trigger but not log any events\n"
5593 "\t until told to do so. 'continue' can be used to start or\n"
5594 "\t restart a paused hist trigger.\n\n"
5595 "\t The 'clear' parameter will clear the contents of a running\n"
5596 "\t hist trigger and leave its current paused/active state\n"
5598 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5599 "\t raw hitcount in the histogram.\n\n"
5600 "\t The enable_hist and disable_hist triggers can be used to\n"
5601 "\t have one event conditionally start and stop another event's\n"
5602 "\t already-attached hist trigger. The syntax is analogous to\n"
5603 "\t the enable_event and disable_event triggers.\n\n"
5604 "\t Hist trigger handlers and actions are executed whenever a\n"
5605 "\t a histogram entry is added or updated. They take the form:\n\n"
5606 "\t <handler>.<action>\n\n"
5607 "\t The available handlers are:\n\n"
5608 "\t onmatch(matching.event) - invoke on addition or update\n"
5609 "\t onmax(var) - invoke if var exceeds current max\n"
5610 "\t onchange(var) - invoke action if var changes\n\n"
5611 "\t The available actions are:\n\n"
5612 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5613 "\t save(field,...) - save current event fields\n"
5614 #ifdef CONFIG_TRACER_SNAPSHOT
5615 "\t snapshot() - snapshot the trace buffer\n\n"
5617 #ifdef CONFIG_SYNTH_EVENTS
5618 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5619 "\t Write into this file to define/undefine new synthetic events.\n"
5620 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5626 tracing_readme_read(struct file *filp, char __user *ubuf,
5627 size_t cnt, loff_t *ppos)
5629 return simple_read_from_buffer(ubuf, cnt, ppos,
5630 readme_msg, strlen(readme_msg));
5633 static const struct file_operations tracing_readme_fops = {
5634 .open = tracing_open_generic,
5635 .read = tracing_readme_read,
5636 .llseek = generic_file_llseek,
5639 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5640 static union trace_eval_map_item *
5641 update_eval_map(union trace_eval_map_item *ptr)
5643 if (!ptr->map.eval_string) {
5644 if (ptr->tail.next) {
5645 ptr = ptr->tail.next;
5646 /* Set ptr to the next real item (skip head) */
5654 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5656 union trace_eval_map_item *ptr = v;
5659 * Paranoid! If ptr points to end, we don't want to increment past it.
5660 * This really should never happen.
5663 ptr = update_eval_map(ptr);
5664 if (WARN_ON_ONCE(!ptr))
5668 ptr = update_eval_map(ptr);
5673 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5675 union trace_eval_map_item *v;
5678 mutex_lock(&trace_eval_mutex);
5680 v = trace_eval_maps;
5684 while (v && l < *pos) {
5685 v = eval_map_next(m, v, &l);
5691 static void eval_map_stop(struct seq_file *m, void *v)
5693 mutex_unlock(&trace_eval_mutex);
5696 static int eval_map_show(struct seq_file *m, void *v)
5698 union trace_eval_map_item *ptr = v;
5700 seq_printf(m, "%s %ld (%s)\n",
5701 ptr->map.eval_string, ptr->map.eval_value,
5707 static const struct seq_operations tracing_eval_map_seq_ops = {
5708 .start = eval_map_start,
5709 .next = eval_map_next,
5710 .stop = eval_map_stop,
5711 .show = eval_map_show,
5714 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5718 ret = tracing_check_open_get_tr(NULL);
5722 return seq_open(filp, &tracing_eval_map_seq_ops);
5725 static const struct file_operations tracing_eval_map_fops = {
5726 .open = tracing_eval_map_open,
5728 .llseek = seq_lseek,
5729 .release = seq_release,
5732 static inline union trace_eval_map_item *
5733 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5735 /* Return tail of array given the head */
5736 return ptr + ptr->head.length + 1;
5740 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5743 struct trace_eval_map **stop;
5744 struct trace_eval_map **map;
5745 union trace_eval_map_item *map_array;
5746 union trace_eval_map_item *ptr;
5751 * The trace_eval_maps contains the map plus a head and tail item,
5752 * where the head holds the module and length of array, and the
5753 * tail holds a pointer to the next list.
5755 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5757 pr_warn("Unable to allocate trace eval mapping\n");
5761 mutex_lock(&trace_eval_mutex);
5763 if (!trace_eval_maps)
5764 trace_eval_maps = map_array;
5766 ptr = trace_eval_maps;
5768 ptr = trace_eval_jmp_to_tail(ptr);
5769 if (!ptr->tail.next)
5771 ptr = ptr->tail.next;
5774 ptr->tail.next = map_array;
5776 map_array->head.mod = mod;
5777 map_array->head.length = len;
5780 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5781 map_array->map = **map;
5784 memset(map_array, 0, sizeof(*map_array));
5786 mutex_unlock(&trace_eval_mutex);
5789 static void trace_create_eval_file(struct dentry *d_tracer)
5791 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5792 NULL, &tracing_eval_map_fops);
5795 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5796 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5797 static inline void trace_insert_eval_map_file(struct module *mod,
5798 struct trace_eval_map **start, int len) { }
5799 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5801 static void trace_insert_eval_map(struct module *mod,
5802 struct trace_eval_map **start, int len)
5804 struct trace_eval_map **map;
5811 trace_event_eval_update(map, len);
5813 trace_insert_eval_map_file(mod, start, len);
5817 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5818 size_t cnt, loff_t *ppos)
5820 struct trace_array *tr = filp->private_data;
5821 char buf[MAX_TRACER_SIZE+2];
5824 mutex_lock(&trace_types_lock);
5825 r = sprintf(buf, "%s\n", tr->current_trace->name);
5826 mutex_unlock(&trace_types_lock);
5828 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
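/*
 * Initialize tracer @t on @tr: the online-CPU buffers are reset first
 * so the new tracer starts from an empty trace.
 */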
5831 int tracer_init(struct tracer *t, struct trace_array *tr)
5833 tracing_reset_online_cpus(&tr->array_buffer);
5837 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5841 for_each_tracing_cpu(cpu)
5842 per_cpu_ptr(buf->data, cpu)->entries = val;
5845 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5847 if (cpu == RING_BUFFER_ALL_CPUS) {
5848 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5850 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5854 #ifdef CONFIG_TRACER_MAX_TRACE
5855 /* resize @trace_buf to the size of @size_buf's entries */
5856 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5857 struct array_buffer *size_buf, int cpu_id)
5861 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5862 for_each_tracing_cpu(cpu) {
5863 ret = ring_buffer_resize(trace_buf->buffer,
5864 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5867 per_cpu_ptr(trace_buf->data, cpu)->entries =
5868 per_cpu_ptr(size_buf->data, cpu)->entries;
5871 ret = ring_buffer_resize(trace_buf->buffer,
5872 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5874 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5875 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5880 #endif /* CONFIG_TRACER_MAX_TRACE */
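/*
 * Resize the ring buffer of @tr for one CPU or for all of them.
 * Tracing is stopped while the resize takes place. Under
 * CONFIG_TRACER_MAX_TRACE an allocated snapshot buffer is resized to
 * match; if that second resize fails, the main buffer is put back to
 * its previous size.
 */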
5882 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5883 unsigned long size, int cpu)
5888 * If the kernel or the user changes the size of the ring buffer,
5889 * we use the size that was given, and we can forget about
5890 * expanding it later.
5892 trace_set_ring_buffer_expanded(tr);
5894 /* May be called before buffers are initialized */
5895 if (!tr->array_buffer.buffer)
5898 /* Do not allow tracing while resizing ring buffer */
5899 tracing_stop_tr(tr);
5901 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5905 #ifdef CONFIG_TRACER_MAX_TRACE
5906 if (!tr->allocated_snapshot)
5909 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5911 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5912 &tr->array_buffer, cpu);
5915 * AARGH! We are left with different
5916 * size max buffer!!!!
5917 * The max buffer is our "snapshot" buffer.
5918 * When a tracer needs a snapshot (one of the
5919 * latency tracers), it swaps the max buffer
5920 * with the saved snapshot. We succeeded in
5921 * updating the size of the main buffer, but failed to
5922 * update the size of the max buffer. But when we tried
5923 * to reset the main buffer to the original size, we
5924 * failed there too. This is very unlikely to
5925 * happen, but if it does, warn and kill all
5929 tracing_disabled = 1;
5934 update_buffer_entries(&tr->max_buffer, cpu);
5937 #endif /* CONFIG_TRACER_MAX_TRACE */
5939 update_buffer_entries(&tr->array_buffer, cpu);
5941 tracing_start_tr(tr);
5945 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5946 unsigned long size, int cpu_id)
5950 mutex_lock(&trace_types_lock);
5952 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5953 /* make sure this cpu is enabled in the mask */
5954 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5960 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5965 mutex_unlock(&trace_types_lock);
5972 * tracing_update_buffers - used by tracing facility to expand ring buffers
5973 * @tr: The tracing instance
5975 * To save memory when tracing is never used on a system that has it
5976 * configured in, the ring buffers are set to a minimum size. Once
5977 * a user starts to use the tracing facility, they need to grow
5978 * to their default size.
5980 * This function is to be called when a tracer is about to be used.
5982 int tracing_update_buffers(struct trace_array *tr)
5986 mutex_lock(&trace_types_lock);
5987 if (!tr->ring_buffer_expanded)
5988 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5989 RING_BUFFER_ALL_CPUS);
5990 mutex_unlock(&trace_types_lock);
5995 struct trace_option_dentry;
5998 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6001 * Used to clear out the tracer before deletion of an instance.
6002 * Must have trace_types_lock held.
6004 static void tracing_set_nop(struct trace_array *tr)
6006 if (tr->current_trace == &nop_trace)
6009 tr->current_trace->enabled--;
6011 if (tr->current_trace->reset)
6012 tr->current_trace->reset(tr);
6014 tr->current_trace = &nop_trace;
6017 static bool tracer_options_updated;
6019 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6021 /* Only enable if the directory has been created already. */
6025 /* Only create trace option files after update_tracer_options finish */
6026 if (!tracer_options_updated)
6029 create_trace_option_files(tr, t);
6032 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6035 #ifdef CONFIG_TRACER_MAX_TRACE
6040 mutex_lock(&trace_types_lock);
6042 if (!tr->ring_buffer_expanded) {
6043 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6044 RING_BUFFER_ALL_CPUS);
6050 for (t = trace_types; t; t = t->next) {
6051 if (strcmp(t->name, buf) == 0)
6058 if (t == tr->current_trace)
6061 #ifdef CONFIG_TRACER_SNAPSHOT
6062 if (t->use_max_tr) {
6063 local_irq_disable();
6064 arch_spin_lock(&tr->max_lock);
6065 if (tr->cond_snapshot)
6067 arch_spin_unlock(&tr->max_lock);
6073 /* Some tracers won't work on kernel command line */
6074 if (system_state < SYSTEM_RUNNING && t->noboot) {
6075 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6080 /* Some tracers are only allowed for the top level buffer */
6081 if (!trace_ok_for_array(t, tr)) {
6086 /* If trace pipe files are being read, we can't change the tracer */
6087 if (tr->trace_ref) {
6092 trace_branch_disable();
6094 tr->current_trace->enabled--;
6096 if (tr->current_trace->reset)
6097 tr->current_trace->reset(tr);
6099 #ifdef CONFIG_TRACER_MAX_TRACE
6100 had_max_tr = tr->current_trace->use_max_tr;
6102 /* Current trace needs to be nop_trace before synchronize_rcu */
6103 tr->current_trace = &nop_trace;
6105 if (had_max_tr && !t->use_max_tr) {
6107 * We need to make sure that the update_max_tr sees that
6108 * current_trace changed to nop_trace to keep it from
6109 * swapping the buffers after we resize it.
6110 * update_max_tr() is called with interrupts disabled,
6111 * so a synchronize_rcu() is sufficient.
6117 if (t->use_max_tr && !tr->allocated_snapshot) {
6118 ret = tracing_alloc_snapshot_instance(tr);
6123 tr->current_trace = &nop_trace;
6127 ret = tracer_init(t, tr);
6132 tr->current_trace = t;
6133 tr->current_trace->enabled++;
6134 trace_branch_enable(tr);
6136 mutex_unlock(&trace_types_lock);
6142 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6143 size_t cnt, loff_t *ppos)
6145 struct trace_array *tr = filp->private_data;
6146 char buf[MAX_TRACER_SIZE+1];
6153 if (cnt > MAX_TRACER_SIZE)
6154 cnt = MAX_TRACER_SIZE;
6156 if (copy_from_user(buf, ubuf, cnt))
6163 err = tracing_set_tracer(tr, name);
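/*
 * Read/write helpers for values that are kept in nanoseconds but
 * presented to user space in microseconds (tracing_thresh and, under
 * CONFIG_TRACER_MAX_TRACE, the per-array max_latency). A stored value
 * of -1 is shown as "-1" rather than being converted.
 */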
6173 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6174 size_t cnt, loff_t *ppos)
6179 r = snprintf(buf, sizeof(buf), "%ld\n",
6180 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6181 if (r > sizeof(buf))
6183 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6187 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6188 size_t cnt, loff_t *ppos)
6193 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6203 tracing_thresh_read(struct file *filp, char __user *ubuf,
6204 size_t cnt, loff_t *ppos)
6206 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6210 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6211 size_t cnt, loff_t *ppos)
6213 struct trace_array *tr = filp->private_data;
6216 mutex_lock(&trace_types_lock);
6217 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6221 if (tr->current_trace->update_thresh) {
6222 ret = tr->current_trace->update_thresh(tr);
6229 mutex_unlock(&trace_types_lock);
6234 #ifdef CONFIG_TRACER_MAX_TRACE
6237 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6238 size_t cnt, loff_t *ppos)
6240 struct trace_array *tr = filp->private_data;
6242 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6246 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6247 size_t cnt, loff_t *ppos)
6249 struct trace_array *tr = filp->private_data;
6251 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
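/*
 * Only one trace_pipe reader is allowed per CPU, or a single reader
 * for all CPUs. tr->pipe_cpumask tracks which CPUs are claimed: an
 * "all CPUs" open requires the mask to be empty and then fills it,
 * while a per-CPU open sets just that CPU's bit.
 */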
6256 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6258 if (cpu == RING_BUFFER_ALL_CPUS) {
6259 if (cpumask_empty(tr->pipe_cpumask)) {
6260 cpumask_setall(tr->pipe_cpumask);
6263 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6264 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6270 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6272 if (cpu == RING_BUFFER_ALL_CPUS) {
6273 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6274 cpumask_clear(tr->pipe_cpumask);
6276 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6277 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
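/*
 * Open handler for trace_pipe. Claims the requested CPU (or all CPUs)
 * via open_pipe_on_cpu(), allocates a consuming iterator for it, and
 * gives the current tracer a chance to hook the open through its
 * ->pipe_open() callback.
 */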
6281 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6283 struct trace_array *tr = inode->i_private;
6284 struct trace_iterator *iter;
6288 ret = tracing_check_open_get_tr(tr);
6292 mutex_lock(&trace_types_lock);
6293 cpu = tracing_get_cpu(inode);
6294 ret = open_pipe_on_cpu(tr, cpu);
6296 goto fail_pipe_on_cpu;
6298 /* create a buffer to store the information to pass to userspace */
6299 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6302 goto fail_alloc_iter;
6305 trace_seq_init(&iter->seq);
6306 iter->trace = tr->current_trace;
6308 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6313 /* trace pipe does not show start of buffer */
6314 cpumask_setall(iter->started);
6316 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6317 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6319 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6320 if (trace_clocks[tr->clock_id].in_ns)
6321 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6324 iter->array_buffer = &tr->array_buffer;
6325 iter->cpu_file = cpu;
6326 mutex_init(&iter->mutex);
6327 filp->private_data = iter;
6329 if (iter->trace->pipe_open)
6330 iter->trace->pipe_open(iter);
6332 nonseekable_open(inode, filp);
6336 mutex_unlock(&trace_types_lock);
6342 close_pipe_on_cpu(tr, cpu);
6344 __trace_array_put(tr);
6345 mutex_unlock(&trace_types_lock);
6349 static int tracing_release_pipe(struct inode *inode, struct file *file)
6351 struct trace_iterator *iter = file->private_data;
6352 struct trace_array *tr = inode->i_private;
6354 mutex_lock(&trace_types_lock);
6358 if (iter->trace->pipe_close)
6359 iter->trace->pipe_close(iter);
6360 close_pipe_on_cpu(tr, iter->cpu_file);
6361 mutex_unlock(&trace_types_lock);
6363 free_trace_iter_content(iter);
6366 trace_array_put(tr);
6372 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6374 struct trace_array *tr = iter->tr;
6376 /* Iterators are static, they should be filled or empty */
6377 if (trace_buffer_iter(iter, iter->cpu_file))
6378 return EPOLLIN | EPOLLRDNORM;
6380 if (tr->trace_flags & TRACE_ITER_BLOCK)
6382 * Always select as readable when in blocking mode
6384 return EPOLLIN | EPOLLRDNORM;
6386 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6387 filp, poll_table, iter->tr->buffer_percent);
6391 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6393 struct trace_iterator *iter = filp->private_data;
6395 return trace_poll(iter, filp, poll_table);
6398 /* Must be called with iter->mutex held. */
6399 static int tracing_wait_pipe(struct file *filp)
6401 struct trace_iterator *iter = filp->private_data;
6404 while (trace_empty(iter)) {
6406 if ((filp->f_flags & O_NONBLOCK)) {
6411 * We block until we read something and tracing is disabled.
6412 * We still block if tracing is disabled, but we have never
6413 * read anything. This allows a user to cat this file, and
6414 * then enable tracing. But after we have read something,
6415 * we give an EOF when tracing is again disabled.
6417 * iter->pos will be 0 if we haven't read anything.
6419 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6422 mutex_unlock(&iter->mutex);
6424 ret = wait_on_pipe(iter, 0);
6426 mutex_lock(&iter->mutex);
6439 tracing_read_pipe(struct file *filp, char __user *ubuf,
6440 size_t cnt, loff_t *ppos)
6442 struct trace_iterator *iter = filp->private_data;
6446 * Avoid more than one consumer on a single file descriptor.
6447 * This is just a matter of trace coherency; the ring buffer itself
6450 mutex_lock(&iter->mutex);
6452 /* return any leftover data */
6453 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6457 trace_seq_init(&iter->seq);
6459 if (iter->trace->read) {
6460 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6466 sret = tracing_wait_pipe(filp);
6470 /* stop when tracing is finished */
6471 if (trace_empty(iter)) {
6476 if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6477 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6479 /* reset all but tr, trace, and overruns */
6480 trace_iterator_reset(iter);
6481 cpumask_clear(iter->started);
6482 trace_seq_init(&iter->seq);
6484 trace_event_read_lock();
6485 trace_access_lock(iter->cpu_file);
6486 while (trace_find_next_entry_inc(iter) != NULL) {
6487 enum print_line_t ret;
6488 int save_len = iter->seq.seq.len;
6490 ret = print_trace_line(iter);
6491 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6493 * If one print_trace_line() fills the entire trace_seq in one shot,
6494 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6495 * In this case, we need to consume it, otherwise the loop will peek
6496 * at this event next time, resulting in an infinite loop.
6498 if (save_len == 0) {
6500 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6501 trace_consume(iter);
6505 /* In other cases, don't print partial lines */
6506 iter->seq.seq.len = save_len;
6509 if (ret != TRACE_TYPE_NO_CONSUME)
6510 trace_consume(iter);
6512 if (trace_seq_used(&iter->seq) >= cnt)
6516 * Setting the full flag means we reached the trace_seq buffer
6517 * size and we should have left via the partial output condition above.
6518 * One of the trace_seq_* functions is not being used properly.
6520 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6523 trace_access_unlock(iter->cpu_file);
6524 trace_event_read_unlock();
6526 /* Now copy what we have to the user */
6527 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6528 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6529 trace_seq_init(&iter->seq);
6532 * If there was nothing to send to user, in spite of consuming trace
6533 * entries, go back to wait for more entries.
6539 mutex_unlock(&iter->mutex);
6544 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6547 __free_page(spd->pages[idx]);
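/*
 * Format trace entries into iter->seq until roughly one page (the
 * trace_seq size) is filled or @rem bytes have been produced. Returns
 * the number of requested bytes still remaining.
 */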
6551 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6557 /* Seq buffer is page-sized, exactly what we need. */
6559 save_len = iter->seq.seq.len;
6560 ret = print_trace_line(iter);
6562 if (trace_seq_has_overflowed(&iter->seq)) {
6563 iter->seq.seq.len = save_len;
6568 * This should not be hit, because it should only
6569 * be set if the iter->seq overflowed. But check it
6570 * anyway to be safe.
6572 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6573 iter->seq.seq.len = save_len;
6577 count = trace_seq_used(&iter->seq) - save_len;
6580 iter->seq.seq.len = save_len;
6584 if (ret != TRACE_TYPE_NO_CONSUME)
6585 trace_consume(iter);
6587 if (!trace_find_next_entry_inc(iter)) {
6597 static ssize_t tracing_splice_read_pipe(struct file *filp,
6599 struct pipe_inode_info *pipe,
6603 struct page *pages_def[PIPE_DEF_BUFFERS];
6604 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6605 struct trace_iterator *iter = filp->private_data;
6606 struct splice_pipe_desc spd = {
6608 .partial = partial_def,
6609 .nr_pages = 0, /* This gets updated below. */
6610 .nr_pages_max = PIPE_DEF_BUFFERS,
6611 .ops = &default_pipe_buf_ops,
6612 .spd_release = tracing_spd_release_pipe,
6618 if (splice_grow_spd(pipe, &spd))
6621 mutex_lock(&iter->mutex);
6623 if (iter->trace->splice_read) {
6624 ret = iter->trace->splice_read(iter, filp,
6625 ppos, pipe, len, flags);
6630 ret = tracing_wait_pipe(filp);
6634 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6639 trace_event_read_lock();
6640 trace_access_lock(iter->cpu_file);
6642 /* Fill as many pages as possible. */
6643 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6644 spd.pages[i] = alloc_page(GFP_KERNEL);
6648 rem = tracing_fill_pipe_page(rem, iter);
6650 /* Copy the data into the page, so we can start over. */
6651 ret = trace_seq_to_buffer(&iter->seq,
6652 page_address(spd.pages[i]),
6653 trace_seq_used(&iter->seq));
6655 __free_page(spd.pages[i]);
6658 spd.partial[i].offset = 0;
6659 spd.partial[i].len = trace_seq_used(&iter->seq);
6661 trace_seq_init(&iter->seq);
6664 trace_access_unlock(iter->cpu_file);
6665 trace_event_read_unlock();
6666 mutex_unlock(&iter->mutex);
6671 ret = splice_to_pipe(pipe, &spd);
6675 splice_shrink_spd(&spd);
6679 mutex_unlock(&iter->mutex);
6684 tracing_entries_read(struct file *filp, char __user *ubuf,
6685 size_t cnt, loff_t *ppos)
6687 struct inode *inode = file_inode(filp);
6688 struct trace_array *tr = inode->i_private;
6689 int cpu = tracing_get_cpu(inode);
6694 mutex_lock(&trace_types_lock);
6696 if (cpu == RING_BUFFER_ALL_CPUS) {
6697 int cpu, buf_size_same;
6702 /* check if all cpu sizes are same */
6703 for_each_tracing_cpu(cpu) {
6704 /* fill in the size from first enabled cpu */
6706 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6707 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6713 if (buf_size_same) {
6714 if (!tr->ring_buffer_expanded)
6715 r = sprintf(buf, "%lu (expanded: %lu)\n",
6717 trace_buf_size >> 10);
6719 r = sprintf(buf, "%lu\n", size >> 10);
6721 r = sprintf(buf, "X\n");
6723 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6725 mutex_unlock(&trace_types_lock);
6727 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6732 tracing_entries_write(struct file *filp, const char __user *ubuf,
6733 size_t cnt, loff_t *ppos)
6735 struct inode *inode = file_inode(filp);
6736 struct trace_array *tr = inode->i_private;
6740 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6744 /* must have at least 1 entry */
6748 /* value is in KB */
6750 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6760 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6761 size_t cnt, loff_t *ppos)
6763 struct trace_array *tr = filp->private_data;
6766 unsigned long size = 0, expanded_size = 0;
6768 mutex_lock(&trace_types_lock);
6769 for_each_tracing_cpu(cpu) {
6770 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6771 if (!tr->ring_buffer_expanded)
6772 expanded_size += trace_buf_size >> 10;
6774 if (tr->ring_buffer_expanded)
6775 r = sprintf(buf, "%lu\n", size);
6777 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6778 mutex_unlock(&trace_types_lock);
6780 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6784 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6785 size_t cnt, loff_t *ppos)
6788 * There is no need to read what the user has written; this function
6789 * is just here to make sure that there is no error when "echo" is used
6798 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6800 struct trace_array *tr = inode->i_private;
6802 /* disable tracing ? */
6803 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6804 tracer_tracing_off(tr);
6805 /* resize the ring buffer to 0 */
6806 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6808 trace_array_put(tr);
6813 #define TRACE_MARKER_MAX_SIZE 4096
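/*
 * Write handler for trace_marker. The user buffer is copied straight
 * into a TRACE_PRINT event (capped at TRACE_MARKER_MAX_SIZE, with
 * "<faulted>" substituted if the copy faults), a trailing newline is
 * added when missing, and any triggers attached to the trace_marker
 * event file are invoked.
 *
 * Illustrative user-space usage only (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from user space", 21);
 */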
6816 tracing_mark_write(struct file *filp, const char __user *ubuf,
6817 size_t cnt, loff_t *fpos)
6819 struct trace_array *tr = filp->private_data;
6820 struct ring_buffer_event *event;
6821 enum event_trigger_type tt = ETT_NONE;
6822 struct trace_buffer *buffer;
6823 struct print_entry *entry;
6829 /* Used in tracing_mark_raw_write() as well */
6830 #define FAULTED_STR "<faulted>"
6831 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6833 if (tracing_disabled)
6836 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6839 if ((ssize_t)cnt < 0)
6842 if (cnt > TRACE_MARKER_MAX_SIZE)
6843 cnt = TRACE_MARKER_MAX_SIZE;
6845 meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */
6847 size = cnt + meta_size;
6849 /* If less than "<faulted>", then make sure we can still add that */
6850 if (cnt < FAULTED_SIZE)
6851 size += FAULTED_SIZE - cnt;
6853 buffer = tr->array_buffer.buffer;
6854 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6856 if (unlikely(!event)) {
6858 * If the size was greater than what was allowed, then
6859 * make it smaller and try again.
6861 if (size > ring_buffer_max_event_size(buffer)) {
6862 /* cnt < FAULTED_SIZE should never result in a size bigger than the max */
6863 if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6865 cnt = ring_buffer_max_event_size(buffer) - meta_size;
6866 /* The above should only happen once */
6867 if (WARN_ON_ONCE(cnt + meta_size == size))
6872 /* Ring buffer disabled, return as if not open for write */
6876 entry = ring_buffer_event_data(event);
6877 entry->ip = _THIS_IP_;
6879 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6881 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6887 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6888 /* do not add \n before testing triggers, but add \0 */
6889 entry->buf[cnt] = '\0';
6890 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6893 if (entry->buf[cnt - 1] != '\n') {
6894 entry->buf[cnt] = '\n';
6895 entry->buf[cnt + 1] = '\0';
6897 entry->buf[cnt] = '\0';
6899 if (static_branch_unlikely(&trace_marker_exports_enabled))
6900 ftrace_exports(event, TRACE_EXPORT_MARKER);
6901 __buffer_unlock_commit(buffer, event);
6904 event_triggers_post_call(tr->trace_marker_file, tt);
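/*
 * Write handler for trace_marker_raw. Like tracing_mark_write(), but
 * the payload is recorded verbatim as a TRACE_RAW_DATA event and must
 * start with at least a tag id.
 */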
6910 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6911 size_t cnt, loff_t *fpos)
6913 struct trace_array *tr = filp->private_data;
6914 struct ring_buffer_event *event;
6915 struct trace_buffer *buffer;
6916 struct raw_data_entry *entry;
6921 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6923 if (tracing_disabled)
6926 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6929 /* The marker must at least have a tag id */
6930 if (cnt < sizeof(unsigned int))
6933 size = sizeof(*entry) + cnt;
6934 if (cnt < FAULT_SIZE_ID)
6935 size += FAULT_SIZE_ID - cnt;
6937 buffer = tr->array_buffer.buffer;
6939 if (size > ring_buffer_max_event_size(buffer))
6942 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6945 /* Ring buffer disabled, return as if not open for write */
6948 entry = ring_buffer_event_data(event);
6950 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6953 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6958 __buffer_unlock_commit(buffer, event);
6963 static int tracing_clock_show(struct seq_file *m, void *v)
6965 struct trace_array *tr = m->private;
6968 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6970 "%s%s%s%s", i ? " " : "",
6971 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6972 i == tr->clock_id ? "]" : "");
6978 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6982 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6983 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6986 if (i == ARRAY_SIZE(trace_clocks))
6989 mutex_lock(&trace_types_lock);
6993 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6996 * New clock may not be consistent with the previous clock.
6997 * Reset the buffer so that it doesn't have incomparable timestamps.
6999 tracing_reset_online_cpus(&tr->array_buffer);
7001 #ifdef CONFIG_TRACER_MAX_TRACE
7002 if (tr->max_buffer.buffer)
7003 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7004 tracing_reset_online_cpus(&tr->max_buffer);
7007 mutex_unlock(&trace_types_lock);
7012 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7013 size_t cnt, loff_t *fpos)
7015 struct seq_file *m = filp->private_data;
7016 struct trace_array *tr = m->private;
7018 const char *clockstr;
7021 if (cnt >= sizeof(buf))
7024 if (copy_from_user(buf, ubuf, cnt))
7029 clockstr = strstrip(buf);
7031 ret = tracing_set_clock(tr, clockstr);
7040 static int tracing_clock_open(struct inode *inode, struct file *file)
7042 struct trace_array *tr = inode->i_private;
7045 ret = tracing_check_open_get_tr(tr);
7049 ret = single_open(file, tracing_clock_show, inode->i_private);
7051 trace_array_put(tr);
7056 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7058 struct trace_array *tr = m->private;
7060 mutex_lock(&trace_types_lock);
7062 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7063 seq_puts(m, "delta [absolute]\n");
7065 seq_puts(m, "[delta] absolute\n");
7067 mutex_unlock(&trace_types_lock);
7072 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7074 struct trace_array *tr = inode->i_private;
7077 ret = tracing_check_open_get_tr(tr);
7081 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7083 trace_array_put(tr);
7088 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7090 if (rbe == this_cpu_read(trace_buffered_event))
7091 return ring_buffer_time_stamp(buffer);
7093 return ring_buffer_event_time_stamp(buffer, rbe);
7097 * Set or disable using the per CPU trace_buffered_event when possible.
7099 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7103 mutex_lock(&trace_types_lock);
7105 if (set && tr->no_filter_buffering_ref++)
7109 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7114 --tr->no_filter_buffering_ref;
7117 mutex_unlock(&trace_types_lock);
7122 struct ftrace_buffer_info {
7123 struct trace_iterator iter;
7125 unsigned int spare_cpu;
7126 unsigned int spare_size;
7130 #ifdef CONFIG_TRACER_SNAPSHOT
7131 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7133 struct trace_array *tr = inode->i_private;
7134 struct trace_iterator *iter;
7138 ret = tracing_check_open_get_tr(tr);
7142 if (file->f_mode & FMODE_READ) {
7143 iter = __tracing_open(inode, file, true);
7145 ret = PTR_ERR(iter);
7147 /* Writes still need the seq_file to hold the private data */
7149 m = kzalloc(sizeof(*m), GFP_KERNEL);
7152 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7160 iter->array_buffer = &tr->max_buffer;
7161 iter->cpu_file = tracing_get_cpu(inode);
7163 file->private_data = m;
7167 trace_array_put(tr);
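/* Runs on the target CPU to swap only that CPU's snapshot buffer. */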
7172 static void tracing_swap_cpu_buffer(void *tr)
7174 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7178 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7181 struct seq_file *m = filp->private_data;
7182 struct trace_iterator *iter = m->private;
7183 struct trace_array *tr = iter->tr;
7187 ret = tracing_update_buffers(tr);
7191 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7195 mutex_lock(&trace_types_lock);
7197 if (tr->current_trace->use_max_tr) {
7202 local_irq_disable();
7203 arch_spin_lock(&tr->max_lock);
7204 if (tr->cond_snapshot)
7206 arch_spin_unlock(&tr->max_lock);
7213 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7217 if (tr->allocated_snapshot)
7221 /* Only allow per-cpu swap if the ring buffer supports it */
7222 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7223 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7228 if (tr->allocated_snapshot)
7229 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7230 &tr->array_buffer, iter->cpu_file);
7232 ret = tracing_alloc_snapshot_instance(tr);
7235 /* Now, we're going to swap */
7236 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7237 local_irq_disable();
7238 update_max_tr(tr, current, smp_processor_id(), NULL);
7241 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7246 if (tr->allocated_snapshot) {
7247 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7248 tracing_reset_online_cpus(&tr->max_buffer);
7250 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7260 mutex_unlock(&trace_types_lock);
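/*
 * Sketch of the snapshot file's write semantics (see also
 * Documentation/trace/ftrace.rst):
 *
 *	echo 0 > snapshot	- free the snapshot buffer if it is allocated
 *	echo 1 > snapshot	- allocate the buffer if needed and take a
 *				  snapshot of the main buffer
 *	echo 2 > snapshot	- (or any other value) clear the snapshot
 *				  contents without freeing the buffer
 */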
7264 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7266 struct seq_file *m = file->private_data;
7269 ret = tracing_release(inode, file);
7271 if (file->f_mode & FMODE_READ)
7274 /* If write only, the seq_file is just a stub */
7282 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7283 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7284 size_t count, loff_t *ppos);
7285 static int tracing_buffers_release(struct inode *inode, struct file *file);
7286 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7287 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7289 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7291 struct ftrace_buffer_info *info;
7294 /* The following checks for tracefs lockdown */
7295 ret = tracing_buffers_open(inode, filp);
7299 info = filp->private_data;
7301 if (info->iter.trace->use_max_tr) {
7302 tracing_buffers_release(inode, filp);
7306 info->iter.snapshot = true;
7307 info->iter.array_buffer = &info->iter.tr->max_buffer;
7312 #endif /* CONFIG_TRACER_SNAPSHOT */
7315 static const struct file_operations tracing_thresh_fops = {
7316 .open = tracing_open_generic,
7317 .read = tracing_thresh_read,
7318 .write = tracing_thresh_write,
7319 .llseek = generic_file_llseek,
7322 #ifdef CONFIG_TRACER_MAX_TRACE
7323 static const struct file_operations tracing_max_lat_fops = {
7324 .open = tracing_open_generic_tr,
7325 .read = tracing_max_lat_read,
7326 .write = tracing_max_lat_write,
7327 .llseek = generic_file_llseek,
7328 .release = tracing_release_generic_tr,
7332 static const struct file_operations set_tracer_fops = {
7333 .open = tracing_open_generic_tr,
7334 .read = tracing_set_trace_read,
7335 .write = tracing_set_trace_write,
7336 .llseek = generic_file_llseek,
7337 .release = tracing_release_generic_tr,
7340 static const struct file_operations tracing_pipe_fops = {
7341 .open = tracing_open_pipe,
7342 .poll = tracing_poll_pipe,
7343 .read = tracing_read_pipe,
7344 .splice_read = tracing_splice_read_pipe,
7345 .release = tracing_release_pipe,
7346 .llseek = no_llseek,
7349 static const struct file_operations tracing_entries_fops = {
7350 .open = tracing_open_generic_tr,
7351 .read = tracing_entries_read,
7352 .write = tracing_entries_write,
7353 .llseek = generic_file_llseek,
7354 .release = tracing_release_generic_tr,
7357 static const struct file_operations tracing_total_entries_fops = {
7358 .open = tracing_open_generic_tr,
7359 .read = tracing_total_entries_read,
7360 .llseek = generic_file_llseek,
7361 .release = tracing_release_generic_tr,
7364 static const struct file_operations tracing_free_buffer_fops = {
7365 .open = tracing_open_generic_tr,
7366 .write = tracing_free_buffer_write,
7367 .release = tracing_free_buffer_release,
7370 static const struct file_operations tracing_mark_fops = {
7371 .open = tracing_mark_open,
7372 .write = tracing_mark_write,
7373 .release = tracing_release_generic_tr,
7376 static const struct file_operations tracing_mark_raw_fops = {
7377 .open = tracing_mark_open,
7378 .write = tracing_mark_raw_write,
7379 .release = tracing_release_generic_tr,
7382 static const struct file_operations trace_clock_fops = {
7383 .open = tracing_clock_open,
7385 .llseek = seq_lseek,
7386 .release = tracing_single_release_tr,
7387 .write = tracing_clock_write,
7390 static const struct file_operations trace_time_stamp_mode_fops = {
7391 .open = tracing_time_stamp_mode_open,
7393 .llseek = seq_lseek,
7394 .release = tracing_single_release_tr,
7397 #ifdef CONFIG_TRACER_SNAPSHOT
7398 static const struct file_operations snapshot_fops = {
7399 .open = tracing_snapshot_open,
7401 .write = tracing_snapshot_write,
7402 .llseek = tracing_lseek,
7403 .release = tracing_snapshot_release,
7406 static const struct file_operations snapshot_raw_fops = {
7407 .open = snapshot_raw_open,
7408 .read = tracing_buffers_read,
7409 .release = tracing_buffers_release,
7410 .splice_read = tracing_buffers_splice_read,
7411 .llseek = no_llseek,
7414 #endif /* CONFIG_TRACER_SNAPSHOT */
7417 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7418 * @filp: The active open file structure
7419 * @ubuf: The userspace provided buffer to read the value from
7420 * @cnt: The maximum number of bytes to read
7421 * @ppos: The current "file" position
7423 * This function implements the write interface for a struct trace_min_max_param.
7424 * The filp->private_data must point to a trace_min_max_param structure that
7425 * defines where to write the value, the min and the max acceptable values,
7426 * and a lock to protect the write.
7429 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7431 struct trace_min_max_param *param = filp->private_data;
7438 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7443 mutex_lock(param->lock);
7445 if (param->min && val < *param->min)
7448 if (param->max && val > *param->max)
7455 mutex_unlock(param->lock);
7464 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7465 * @filp: The active open file structure
7466 * @ubuf: The userspace provided buffer to read value into
7467 * @cnt: The maximum number of bytes to read
7468 * @ppos: The current "file" position
7470 * This function implements the read interface for a struct trace_min_max_param.
7471 * The filp->private_data must point to a trace_min_max_param struct with valid data.
7475 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7477 struct trace_min_max_param *param = filp->private_data;
7478 char buf[U64_STR_SIZE];
7487 if (cnt > sizeof(buf))
7490 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7492 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7495 const struct file_operations trace_min_max_fops = {
7496 .open = tracing_open_generic,
7497 .read = trace_min_max_read,
7498 .write = trace_min_max_write,
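/*
 * Minimal sketch of how a caller might expose a clamped u64 knob with
 * trace_min_max_fops.  The names (example_lock, example_val, example_knob)
 * are hypothetical and only illustrate the expected layout of
 * struct trace_min_max_param:
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static u64 example_val;
 *	static u64 example_min = 1, example_max = 1000;
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	trace_create_file("example_knob", TRACE_MODE_WRITE, parent,
 *			  &example_param, &trace_min_max_fops);
 */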
7501 #define TRACING_LOG_ERRS_MAX 8
7502 #define TRACING_LOG_LOC_MAX 128
7504 #define CMD_PREFIX " Command: "
7507 const char **errs; /* ptr to loc-specific array of err strings */
7508 u8 type; /* index into errs -> specific err string */
7509 u16 pos; /* caret position */
7513 struct tracing_log_err {
7514 struct list_head list;
7515 struct err_info info;
7516 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7517 char *cmd; /* what caused err */
7520 static DEFINE_MUTEX(tracing_err_log_lock);
7522 static struct tracing_log_err *alloc_tracing_log_err(int len)
7524 struct tracing_log_err *err;
7526 err = kzalloc(sizeof(*err), GFP_KERNEL);
7528 return ERR_PTR(-ENOMEM);
7530 err->cmd = kzalloc(len, GFP_KERNEL);
7533 return ERR_PTR(-ENOMEM);
7539 static void free_tracing_log_err(struct tracing_log_err *err)
7545 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7548 struct tracing_log_err *err;
7551 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7552 err = alloc_tracing_log_err(len);
7553 if (PTR_ERR(err) != -ENOMEM)
7554 tr->n_err_log_entries++;
7558 cmd = kzalloc(len, GFP_KERNEL);
7560 return ERR_PTR(-ENOMEM);
7561 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7564 list_del(&err->list);
7570 * err_pos - find the position of a string within a command for error careting
7571 * @cmd: The tracing command that caused the error
7572 * @str: The string to position the caret at within @cmd
7574 * Finds the position of the first occurrence of @str within @cmd. The
7575 * return value can be passed to tracing_log_err() for caret placement
7578 * Returns the index within @cmd of the first occurrence of @str or 0
7579 * if @str was not found.
7581 unsigned int err_pos(char *cmd, const char *str)
7585 if (WARN_ON(!strlen(cmd)))
7588 found = strstr(cmd, str);
7596 * tracing_log_err - write an error to the tracing error log
7597 * @tr: The associated trace array for the error (NULL for top level array)
7598 * @loc: A string describing where the error occurred
7599 * @cmd: The tracing command that caused the error
7600 * @errs: The array of loc-specific static error strings
7601 * @type: The index into errs[], which produces the specific static err string
7602 * @pos: The position the caret should be placed in the cmd
7604 * Writes an error into tracing/error_log of the form:
7606 * <loc>: error: <text>
7610 * tracing/error_log is a small log file containing the last
7611 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7612 * unless there has been a tracing error, and the error log can be
7613 * cleared and have its memory freed by writing the empty string in
7614 * truncation mode to it i.e. echo > tracing/error_log.
7616 * NOTE: the @errs array along with the @type param are used to
7617 * produce a static error string - this string is not copied and saved
7618 * when the error is logged - only a pointer to it is saved. See
7619 * existing callers for examples of how static strings are typically
7620 * defined for use with tracing_log_err().
7622 void tracing_log_err(struct trace_array *tr,
7623 const char *loc, const char *cmd,
7624 const char **errs, u8 type, u16 pos)
7626 struct tracing_log_err *err;
7632 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7634 mutex_lock(&tracing_err_log_lock);
7635 err = get_tracing_log_err(tr, len);
7636 if (PTR_ERR(err) == -ENOMEM) {
7637 mutex_unlock(&tracing_err_log_lock);
7641 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7642 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7644 err->info.errs = errs;
7645 err->info.type = type;
7646 err->info.pos = pos;
7647 err->info.ts = local_clock();
7649 list_add_tail(&err->list, &tr->err_log);
7650 mutex_unlock(&tracing_err_log_lock);
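/*
 * Illustrative caller pattern (the command name, strings and caret
 * position are hypothetical; see the hist trigger code for real tables):
 *
 *	static const char *cmd_errs[] = {
 *		"Field not found",
 *		"Too many arguments",
 *	};
 *
 *	tracing_log_err(tr, "mycmd", cmd, cmd_errs, 1,
 *			err_pos(cmd, "badarg"));
 *
 * would append an entry to tracing/error_log along the lines of:
 *
 *	[   52.250096] mycmd: error: Too many arguments
 *	  Command: <the failing command line>
 *	                    ^
 */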
7653 static void clear_tracing_err_log(struct trace_array *tr)
7655 struct tracing_log_err *err, *next;
7657 mutex_lock(&tracing_err_log_lock);
7658 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7659 list_del(&err->list);
7660 free_tracing_log_err(err);
7663 tr->n_err_log_entries = 0;
7664 mutex_unlock(&tracing_err_log_lock);
7667 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7669 struct trace_array *tr = m->private;
7671 mutex_lock(&tracing_err_log_lock);
7673 return seq_list_start(&tr->err_log, *pos);
7676 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7678 struct trace_array *tr = m->private;
7680 return seq_list_next(v, &tr->err_log, pos);
7683 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7685 mutex_unlock(&tracing_err_log_lock);
7688 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7692 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7694 for (i = 0; i < pos; i++)
7699 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7701 struct tracing_log_err *err = v;
7704 const char *err_text = err->info.errs[err->info.type];
7705 u64 sec = err->info.ts;
7708 nsec = do_div(sec, NSEC_PER_SEC);
7709 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7710 err->loc, err_text);
7711 seq_printf(m, "%s", err->cmd);
7712 tracing_err_log_show_pos(m, err->info.pos);
7718 static const struct seq_operations tracing_err_log_seq_ops = {
7719 .start = tracing_err_log_seq_start,
7720 .next = tracing_err_log_seq_next,
7721 .stop = tracing_err_log_seq_stop,
7722 .show = tracing_err_log_seq_show
7725 static int tracing_err_log_open(struct inode *inode, struct file *file)
7727 struct trace_array *tr = inode->i_private;
7730 ret = tracing_check_open_get_tr(tr);
7734 /* If this file was opened for write, then erase contents */
7735 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7736 clear_tracing_err_log(tr);
7738 if (file->f_mode & FMODE_READ) {
7739 ret = seq_open(file, &tracing_err_log_seq_ops);
7741 struct seq_file *m = file->private_data;
7744 trace_array_put(tr);
7750 static ssize_t tracing_err_log_write(struct file *file,
7751 const char __user *buffer,
7752 size_t count, loff_t *ppos)
7757 static int tracing_err_log_release(struct inode *inode, struct file *file)
7759 struct trace_array *tr = inode->i_private;
7761 trace_array_put(tr);
7763 if (file->f_mode & FMODE_READ)
7764 seq_release(inode, file);
7769 static const struct file_operations tracing_err_log_fops = {
7770 .open = tracing_err_log_open,
7771 .write = tracing_err_log_write,
7773 .llseek = tracing_lseek,
7774 .release = tracing_err_log_release,
7777 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7779 struct trace_array *tr = inode->i_private;
7780 struct ftrace_buffer_info *info;
7783 ret = tracing_check_open_get_tr(tr);
7787 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7789 trace_array_put(tr);
7793 mutex_lock(&trace_types_lock);
7796 info->iter.cpu_file = tracing_get_cpu(inode);
7797 info->iter.trace = tr->current_trace;
7798 info->iter.array_buffer = &tr->array_buffer;
7800 /* Force reading ring buffer for first read */
7801 info->read = (unsigned int)-1;
7803 filp->private_data = info;
7807 mutex_unlock(&trace_types_lock);
7809 ret = nonseekable_open(inode, filp);
7811 trace_array_put(tr);
7817 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7819 struct ftrace_buffer_info *info = filp->private_data;
7820 struct trace_iterator *iter = &info->iter;
7822 return trace_poll(iter, filp, poll_table);
7826 tracing_buffers_read(struct file *filp, char __user *ubuf,
7827 size_t count, loff_t *ppos)
7829 struct ftrace_buffer_info *info = filp->private_data;
7830 struct trace_iterator *iter = &info->iter;
7839 #ifdef CONFIG_TRACER_MAX_TRACE
7840 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7844 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7846 /* Make sure the spare matches the current sub buffer size */
7848 if (page_size != info->spare_size) {
7849 ring_buffer_free_read_page(iter->array_buffer->buffer,
7850 info->spare_cpu, info->spare);
7856 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7858 if (IS_ERR(info->spare)) {
7859 ret = PTR_ERR(info->spare);
7862 info->spare_cpu = iter->cpu_file;
7863 info->spare_size = page_size;
7869 /* Do we have previous read data to read? */
7870 if (info->read < page_size)
7874 trace_access_lock(iter->cpu_file);
7875 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7879 trace_access_unlock(iter->cpu_file);
7882 if (trace_empty(iter)) {
7883 if ((filp->f_flags & O_NONBLOCK))
7886 ret = wait_on_pipe(iter, 0);
7897 size = page_size - info->read;
7900 trace_data = ring_buffer_read_page_data(info->spare);
7901 ret = copy_to_user(ubuf, trace_data + info->read, size);
7913 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
7915 struct ftrace_buffer_info *info = file->private_data;
7916 struct trace_iterator *iter = &info->iter;
7918 iter->closed = true;
7919 /* Make sure the waiters see the new wait_index */
7920 (void)atomic_fetch_inc_release(&iter->wait_index);
7922 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
7927 static int tracing_buffers_release(struct inode *inode, struct file *file)
7929 struct ftrace_buffer_info *info = file->private_data;
7930 struct trace_iterator *iter = &info->iter;
7932 mutex_lock(&trace_types_lock);
7934 iter->tr->trace_ref--;
7936 __trace_array_put(iter->tr);
7939 ring_buffer_free_read_page(iter->array_buffer->buffer,
7940 info->spare_cpu, info->spare);
7943 mutex_unlock(&trace_types_lock);
7949 struct trace_buffer *buffer;
7952 refcount_t refcount;
7955 static void buffer_ref_release(struct buffer_ref *ref)
7957 if (!refcount_dec_and_test(&ref->refcount))
7959 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7963 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7964 struct pipe_buffer *buf)
7966 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7968 buffer_ref_release(ref);
7972 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7973 struct pipe_buffer *buf)
7975 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7977 if (refcount_read(&ref->refcount) > INT_MAX/2)
7980 refcount_inc(&ref->refcount);
7984 /* Pipe buffer operations for a buffer. */
7985 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7986 .release = buffer_pipe_buf_release,
7987 .get = buffer_pipe_buf_get,
7991 * Callback from splice_to_pipe(), if we need to release some pages
7992 * at the end of the spd in case we errored out in filling the pipe.
7994 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7996 struct buffer_ref *ref =
7997 (struct buffer_ref *)spd->partial[i].private;
7999 buffer_ref_release(ref);
8000 spd->partial[i].private = 0;
8004 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8005 struct pipe_inode_info *pipe, size_t len,
8008 struct ftrace_buffer_info *info = file->private_data;
8009 struct trace_iterator *iter = &info->iter;
8010 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8011 struct page *pages_def[PIPE_DEF_BUFFERS];
8012 struct splice_pipe_desc spd = {
8014 .partial = partial_def,
8015 .nr_pages_max = PIPE_DEF_BUFFERS,
8016 .ops = &buffer_pipe_buf_ops,
8017 .spd_release = buffer_spd_release,
8019 struct buffer_ref *ref;
8025 #ifdef CONFIG_TRACER_MAX_TRACE
8026 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8030 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8031 if (*ppos & (page_size - 1))
8034 if (len & (page_size - 1)) {
8035 if (len < page_size)
8037 len &= (~(page_size - 1));
8040 if (splice_grow_spd(pipe, &spd))
8044 trace_access_lock(iter->cpu_file);
8045 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8047 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8051 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8057 refcount_set(&ref->refcount, 1);
8058 ref->buffer = iter->array_buffer->buffer;
8059 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8060 if (IS_ERR(ref->page)) {
8061 ret = PTR_ERR(ref->page);
8066 ref->cpu = iter->cpu_file;
8068 r = ring_buffer_read_page(ref->buffer, ref->page,
8069 len, iter->cpu_file, 1);
8071 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8077 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8079 spd.pages[i] = page;
8080 spd.partial[i].len = page_size;
8081 spd.partial[i].offset = 0;
8082 spd.partial[i].private = (unsigned long)ref;
8086 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8089 trace_access_unlock(iter->cpu_file);
8092 /* did we read anything? */
8093 if (!spd.nr_pages) {
8102 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8105 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8109 /* No need to wait after waking up when tracing is off */
8110 if (!tracer_tracing_is_on(iter->tr))
8113 /* Iterate one more time to collect any new data then exit */
8119 ret = splice_to_pipe(pipe, &spd);
8121 splice_shrink_spd(&spd);
8126 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8127 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8129 struct ftrace_buffer_info *info = file->private_data;
8130 struct trace_iterator *iter = &info->iter;
8133 return -ENOIOCTLCMD;
8135 mutex_lock(&trace_types_lock);
8137 /* Make sure the waiters see the new wait_index */
8138 (void)atomic_fetch_inc_release(&iter->wait_index);
8140 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8142 mutex_unlock(&trace_types_lock);
8146 static const struct file_operations tracing_buffers_fops = {
8147 .open = tracing_buffers_open,
8148 .read = tracing_buffers_read,
8149 .poll = tracing_buffers_poll,
8150 .release = tracing_buffers_release,
8151 .flush = tracing_buffers_flush,
8152 .splice_read = tracing_buffers_splice_read,
8153 .unlocked_ioctl = tracing_buffers_ioctl,
8154 .llseek = no_llseek,
8158 tracing_stats_read(struct file *filp, char __user *ubuf,
8159 size_t count, loff_t *ppos)
8161 struct inode *inode = file_inode(filp);
8162 struct trace_array *tr = inode->i_private;
8163 struct array_buffer *trace_buf = &tr->array_buffer;
8164 int cpu = tracing_get_cpu(inode);
8165 struct trace_seq *s;
8167 unsigned long long t;
8168 unsigned long usec_rem;
8170 s = kmalloc(sizeof(*s), GFP_KERNEL);
8176 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8177 trace_seq_printf(s, "entries: %ld\n", cnt);
8179 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8180 trace_seq_printf(s, "overrun: %ld\n", cnt);
8182 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8183 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8185 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8186 trace_seq_printf(s, "bytes: %ld\n", cnt);
8188 if (trace_clocks[tr->clock_id].in_ns) {
8189 /* local or global for trace_clock */
8190 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8191 usec_rem = do_div(t, USEC_PER_SEC);
8192 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8195 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8196 usec_rem = do_div(t, USEC_PER_SEC);
8197 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8199 /* counter or tsc mode for trace_clock */
8200 trace_seq_printf(s, "oldest event ts: %llu\n",
8201 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8203 trace_seq_printf(s, "now ts: %llu\n",
8204 ring_buffer_time_stamp(trace_buf->buffer));
8207 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8208 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8210 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8211 trace_seq_printf(s, "read events: %ld\n", cnt);
8213 count = simple_read_from_buffer(ubuf, count, ppos,
8214 s->buffer, trace_seq_used(s));
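/*
 * Reading per_cpu/cpuN/stats therefore produces output along these lines
 * (the values are invented, and the two "ts" lines appear in this form
 * only for clocks that count in nanoseconds):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:    52.008545
 *	now ts:    52.251720
 *	dropped events: 0
 *	read events: 129
 */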
8221 static const struct file_operations tracing_stats_fops = {
8222 .open = tracing_open_generic_tr,
8223 .read = tracing_stats_read,
8224 .llseek = generic_file_llseek,
8225 .release = tracing_release_generic_tr,
8228 #ifdef CONFIG_DYNAMIC_FTRACE
8231 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8232 size_t cnt, loff_t *ppos)
8238 /* 256 should be plenty to hold the amount needed */
8239 buf = kmalloc(256, GFP_KERNEL);
8243 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8244 ftrace_update_tot_cnt,
8245 ftrace_number_of_pages,
8246 ftrace_number_of_groups);
8248 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8253 static const struct file_operations tracing_dyn_info_fops = {
8254 .open = tracing_open_generic,
8255 .read = tracing_read_dyn_info,
8256 .llseek = generic_file_llseek,
8258 #endif /* CONFIG_DYNAMIC_FTRACE */
8260 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8262 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8263 struct trace_array *tr, struct ftrace_probe_ops *ops,
8266 tracing_snapshot_instance(tr);
8270 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8271 struct trace_array *tr, struct ftrace_probe_ops *ops,
8274 struct ftrace_func_mapper *mapper = data;
8278 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8288 tracing_snapshot_instance(tr);
8292 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8293 struct ftrace_probe_ops *ops, void *data)
8295 struct ftrace_func_mapper *mapper = data;
8298 seq_printf(m, "%ps:", (void *)ip);
8300 seq_puts(m, "snapshot");
8303 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8306 seq_printf(m, ":count=%ld\n", *count);
8308 seq_puts(m, ":unlimited\n");
8314 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8315 unsigned long ip, void *init_data, void **data)
8317 struct ftrace_func_mapper *mapper = *data;
8320 mapper = allocate_ftrace_func_mapper();
8326 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8330 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8331 unsigned long ip, void *data)
8333 struct ftrace_func_mapper *mapper = data;
8338 free_ftrace_func_mapper(mapper, NULL);
8342 ftrace_func_mapper_remove_ip(mapper, ip);
8345 static struct ftrace_probe_ops snapshot_probe_ops = {
8346 .func = ftrace_snapshot,
8347 .print = ftrace_snapshot_print,
8350 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8351 .func = ftrace_count_snapshot,
8352 .print = ftrace_snapshot_print,
8353 .init = ftrace_snapshot_init,
8354 .free = ftrace_snapshot_free,
8358 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8359 char *glob, char *cmd, char *param, int enable)
8361 struct ftrace_probe_ops *ops;
8362 void *count = (void *)-1;
8369 /* hash funcs only work with set_ftrace_filter */
8373 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8376 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8381 number = strsep(&param, ":");
8383 if (!strlen(number))
8387 * We use the callback data field (which is a pointer)
8390 ret = kstrtoul(number, 0, (unsigned long *)&count);
8395 ret = tracing_alloc_snapshot_instance(tr);
8399 ret = register_ftrace_function_probe(glob, tr, ops, count);
8402 return ret < 0 ? ret : 0;
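/*
 * This callback implements the "snapshot" command of set_ftrace_filter.
 * Illustrative usage (do_exit is just an example function):
 *
 *	echo 'do_exit:snapshot' > set_ftrace_filter	- snapshot on every hit
 *	echo 'do_exit:snapshot:3' > set_ftrace_filter	- only the first 3 hits
 *	echo '!do_exit:snapshot' > set_ftrace_filter	- remove the probe
 */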
8405 static struct ftrace_func_command ftrace_snapshot_cmd = {
8407 .func = ftrace_trace_snapshot_callback,
8410 static __init int register_snapshot_cmd(void)
8412 return register_ftrace_command(&ftrace_snapshot_cmd);
8415 static inline __init int register_snapshot_cmd(void) { return 0; }
8416 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8418 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8420 if (WARN_ON(!tr->dir))
8421 return ERR_PTR(-ENODEV);
8423 /* Top directory uses NULL as the parent */
8424 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8427 /* All sub buffers have a descriptor */
8431 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8433 struct dentry *d_tracer;
8436 return tr->percpu_dir;
8438 d_tracer = tracing_get_dentry(tr);
8439 if (IS_ERR(d_tracer))
8442 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8444 MEM_FAIL(!tr->percpu_dir,
8445 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8447 return tr->percpu_dir;
8450 static struct dentry *
8451 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8452 void *data, long cpu, const struct file_operations *fops)
8454 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8456 if (ret) /* See tracing_get_cpu() */
8457 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8462 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8464 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8465 struct dentry *d_cpu;
8466 char cpu_dir[30]; /* 30 characters should be more than enough */
8471 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8472 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8474 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8478 /* per cpu trace_pipe */
8479 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8480 tr, cpu, &tracing_pipe_fops);
8483 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8484 tr, cpu, &tracing_fops);
8486 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8487 tr, cpu, &tracing_buffers_fops);
8489 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8490 tr, cpu, &tracing_stats_fops);
8492 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8493 tr, cpu, &tracing_entries_fops);
8495 #ifdef CONFIG_TRACER_SNAPSHOT
8496 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8497 tr, cpu, &snapshot_fops);
8499 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8500 tr, cpu, &snapshot_raw_fops);
8504 #ifdef CONFIG_FTRACE_SELFTEST
8505 /* Let selftest have access to static functions in this file */
8506 #include "trace_selftest.c"
8510 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8513 struct trace_option_dentry *topt = filp->private_data;
8516 if (topt->flags->val & topt->opt->bit)
8521 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8525 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8528 struct trace_option_dentry *topt = filp->private_data;
8532 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8536 if (val != 0 && val != 1)
8539 if (!!(topt->flags->val & topt->opt->bit) != val) {
8540 mutex_lock(&trace_types_lock);
8541 ret = __set_tracer_option(topt->tr, topt->flags,
8543 mutex_unlock(&trace_types_lock);
8553 static int tracing_open_options(struct inode *inode, struct file *filp)
8555 struct trace_option_dentry *topt = inode->i_private;
8558 ret = tracing_check_open_get_tr(topt->tr);
8562 filp->private_data = inode->i_private;
8566 static int tracing_release_options(struct inode *inode, struct file *file)
8568 struct trace_option_dentry *topt = file->private_data;
8570 trace_array_put(topt->tr);
8574 static const struct file_operations trace_options_fops = {
8575 .open = tracing_open_options,
8576 .read = trace_options_read,
8577 .write = trace_options_write,
8578 .llseek = generic_file_llseek,
8579 .release = tracing_release_options,
8583 * In order to pass in both the trace_array descriptor as well as the index
8584 * to the flag that the trace option file represents, the trace_array
8585 * has a character array of trace_flags_index[], which holds the index
8586 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8587 * The address of this character array is passed to the flag option file
8588 * read/write callbacks.
8590 * In order to extract both the index and the trace_array descriptor,
8591 * get_tr_index() uses the following algorithm.
8595 * As the pointer itself contains the address of the index (remember
8596 * index[1] == 1), dereferencing the pointer yields the index: idx = *ptr.
8598 * Then to get the trace_array descriptor, by subtracting that index
8599 * from the ptr, we get to the start of the index itself.
8601 * ptr - idx == &index[0]
8603 * Then a simple container_of() from that pointer gets us to the
8604 * trace_array descriptor.
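 *
 * Worked example: if data points at tr->trace_flags_index[3], then
 * *pindex == 3, and data - 3 == &tr->trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.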
8606 static void get_tr_index(void *data, struct trace_array **ptr,
8607 unsigned int *pindex)
8609 *pindex = *(unsigned char *)data;
8611 *ptr = container_of(data - *pindex, struct trace_array,
8616 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8619 void *tr_index = filp->private_data;
8620 struct trace_array *tr;
8624 get_tr_index(tr_index, &tr, &index);
8626 if (tr->trace_flags & (1 << index))
8631 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8635 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8638 void *tr_index = filp->private_data;
8639 struct trace_array *tr;
8644 get_tr_index(tr_index, &tr, &index);
8646 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8650 if (val != 0 && val != 1)
8653 mutex_lock(&event_mutex);
8654 mutex_lock(&trace_types_lock);
8655 ret = set_tracer_flag(tr, 1 << index, val);
8656 mutex_unlock(&trace_types_lock);
8657 mutex_unlock(&event_mutex);
8667 static const struct file_operations trace_options_core_fops = {
8668 .open = tracing_open_generic,
8669 .read = trace_options_core_read,
8670 .write = trace_options_core_write,
8671 .llseek = generic_file_llseek,
8674 struct dentry *trace_create_file(const char *name,
8676 struct dentry *parent,
8678 const struct file_operations *fops)
8682 ret = tracefs_create_file(name, mode, parent, data, fops);
8684 pr_warn("Could not create tracefs '%s' entry\n", name);
8690 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8692 struct dentry *d_tracer;
8697 d_tracer = tracing_get_dentry(tr);
8698 if (IS_ERR(d_tracer))
8701 tr->options = tracefs_create_dir("options", d_tracer);
8703 pr_warn("Could not create tracefs directory 'options'\n");
8711 create_trace_option_file(struct trace_array *tr,
8712 struct trace_option_dentry *topt,
8713 struct tracer_flags *flags,
8714 struct tracer_opt *opt)
8716 struct dentry *t_options;
8718 t_options = trace_options_init_dentry(tr);
8722 topt->flags = flags;
8726 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8727 t_options, topt, &trace_options_fops);
8732 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8734 struct trace_option_dentry *topts;
8735 struct trace_options *tr_topts;
8736 struct tracer_flags *flags;
8737 struct tracer_opt *opts;
8744 flags = tracer->flags;
8746 if (!flags || !flags->opts)
8750 * If this is an instance, only create flags for tracers
8751 * the instance may have.
8753 if (!trace_ok_for_array(tracer, tr))
8756 for (i = 0; i < tr->nr_topts; i++) {
8757 /* Make sure there's no duplicate flags. */
8758 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8764 for (cnt = 0; opts[cnt].name; cnt++)
8767 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8771 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8778 tr->topts = tr_topts;
8779 tr->topts[tr->nr_topts].tracer = tracer;
8780 tr->topts[tr->nr_topts].topts = topts;
8783 for (cnt = 0; opts[cnt].name; cnt++) {
8784 create_trace_option_file(tr, &topts[cnt], flags,
8786 MEM_FAIL(topts[cnt].entry == NULL,
8787 "Failed to create trace option: %s",
8792 static struct dentry *
8793 create_trace_option_core_file(struct trace_array *tr,
8794 const char *option, long index)
8796 struct dentry *t_options;
8798 t_options = trace_options_init_dentry(tr);
8802 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8803 (void *)&tr->trace_flags_index[index],
8804 &trace_options_core_fops);
8807 static void create_trace_options_dir(struct trace_array *tr)
8809 struct dentry *t_options;
8810 bool top_level = tr == &global_trace;
8813 t_options = trace_options_init_dentry(tr);
8817 for (i = 0; trace_options[i]; i++) {
8819 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8820 create_trace_option_core_file(tr, trace_options[i], i);
8825 rb_simple_read(struct file *filp, char __user *ubuf,
8826 size_t cnt, loff_t *ppos)
8828 struct trace_array *tr = filp->private_data;
8832 r = tracer_tracing_is_on(tr);
8833 r = sprintf(buf, "%d\n", r);
8835 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8839 rb_simple_write(struct file *filp, const char __user *ubuf,
8840 size_t cnt, loff_t *ppos)
8842 struct trace_array *tr = filp->private_data;
8843 struct trace_buffer *buffer = tr->array_buffer.buffer;
8847 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8852 mutex_lock(&trace_types_lock);
8853 if (!!val == tracer_tracing_is_on(tr)) {
8854 val = 0; /* do nothing */
8856 tracer_tracing_on(tr);
8857 if (tr->current_trace->start)
8858 tr->current_trace->start(tr);
8860 tracer_tracing_off(tr);
8861 if (tr->current_trace->stop)
8862 tr->current_trace->stop(tr);
8863 /* Wake up any waiters */
8864 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
8866 mutex_unlock(&trace_types_lock);
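/*
 * These handlers back the per-instance "tracing_on" file (see
 * init_tracer_tracefs() below), so from user space:
 *
 *	echo 0 > tracing_on	- stop writing into the ring buffer
 *	echo 1 > tracing_on	- resume writing
 */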
8874 static const struct file_operations rb_simple_fops = {
8875 .open = tracing_open_generic_tr,
8876 .read = rb_simple_read,
8877 .write = rb_simple_write,
8878 .release = tracing_release_generic_tr,
8879 .llseek = default_llseek,
8883 buffer_percent_read(struct file *filp, char __user *ubuf,
8884 size_t cnt, loff_t *ppos)
8886 struct trace_array *tr = filp->private_data;
8890 r = tr->buffer_percent;
8891 r = sprintf(buf, "%d\n", r);
8893 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8897 buffer_percent_write(struct file *filp, const char __user *ubuf,
8898 size_t cnt, loff_t *ppos)
8900 struct trace_array *tr = filp->private_data;
8904 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8911 tr->buffer_percent = val;
8918 static const struct file_operations buffer_percent_fops = {
8919 .open = tracing_open_generic_tr,
8920 .read = buffer_percent_read,
8921 .write = buffer_percent_write,
8922 .release = tracing_release_generic_tr,
8923 .llseek = default_llseek,
8927 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
8929 struct trace_array *tr = filp->private_data;
8935 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
8936 size = (PAGE_SIZE << order) / 1024;
8938 r = sprintf(buf, "%zd\n", size);
8940 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8944 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
8945 size_t cnt, loff_t *ppos)
8947 struct trace_array *tr = filp->private_data;
8954 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8958 val *= 1024; /* value passed in is in KB */
8960 pages = DIV_ROUND_UP(val, PAGE_SIZE);
8961 order = fls(pages - 1);
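/*
 * Worked example (assuming 4K pages): writing "8" means 8 KB, so
 * pages = DIV_ROUND_UP(8192, 4096) = 2 and order = fls(1) = 1,
 * i.e. a two-page (8 KB) sub-buffer.
 */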
8963 /* limit between 1 and 128 system pages */
8964 if (order < 0 || order > 7)
8967 /* Do not allow tracing while changing the order of the ring buffer */
8968 tracing_stop_tr(tr);
8970 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
8971 if (old_order == order)
8974 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
8978 #ifdef CONFIG_TRACER_MAX_TRACE
8980 if (!tr->allocated_snapshot)
8983 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
8985 /* Put back the old order */
8986 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
8987 if (WARN_ON_ONCE(cnt)) {
8989 * AARGH! We are left with different orders!
8990 * The max buffer is our "snapshot" buffer.
8991 * When a tracer needs a snapshot (one of the
8992 * latency tracers), it swaps the max buffer
8993 * with the saved snapshot. We succeeded in
8994 * updating the order of the main buffer, but failed to
8995 * update the order of the max buffer. But when we tried
8996 * to reset the main buffer to the original size, we
8997 * failed there too. This is very unlikely to
8998 * happen, but if it does, warn and kill all
9001 tracing_disabled = 1;
9011 tracing_start_tr(tr);
9015 static const struct file_operations buffer_subbuf_size_fops = {
9016 .open = tracing_open_generic_tr,
9017 .read = buffer_subbuf_size_read,
9018 .write = buffer_subbuf_size_write,
9019 .release = tracing_release_generic_tr,
9020 .llseek = default_llseek,
9023 static struct dentry *trace_instance_dir;
9026 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9029 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9031 enum ring_buffer_flags rb_flags;
9033 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9037 buf->buffer = ring_buffer_alloc(size, rb_flags);
9041 buf->data = alloc_percpu(struct trace_array_cpu);
9043 ring_buffer_free(buf->buffer);
9048 /* Allocate the first page for all buffers */
9049 set_buffer_entries(&tr->array_buffer,
9050 ring_buffer_size(tr->array_buffer.buffer, 0));
9055 static void free_trace_buffer(struct array_buffer *buf)
9058 ring_buffer_free(buf->buffer);
9060 free_percpu(buf->data);
9065 static int allocate_trace_buffers(struct trace_array *tr, int size)
9069 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9073 #ifdef CONFIG_TRACER_MAX_TRACE
9074 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9075 allocate_snapshot ? size : 1);
9076 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9077 free_trace_buffer(&tr->array_buffer);
9080 tr->allocated_snapshot = allocate_snapshot;
9082 allocate_snapshot = false;
9088 static void free_trace_buffers(struct trace_array *tr)
9093 free_trace_buffer(&tr->array_buffer);
9095 #ifdef CONFIG_TRACER_MAX_TRACE
9096 free_trace_buffer(&tr->max_buffer);
9100 static void init_trace_flags_index(struct trace_array *tr)
9104 /* Used by the trace options files */
9105 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9106 tr->trace_flags_index[i] = i;
9109 static void __update_tracer_options(struct trace_array *tr)
9113 for (t = trace_types; t; t = t->next)
9114 add_tracer_options(tr, t);
9117 static void update_tracer_options(struct trace_array *tr)
9119 mutex_lock(&trace_types_lock);
9120 tracer_options_updated = true;
9121 __update_tracer_options(tr);
9122 mutex_unlock(&trace_types_lock);
9125 /* Must have trace_types_lock held */
9126 struct trace_array *trace_array_find(const char *instance)
9128 struct trace_array *tr, *found = NULL;
9130 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9131 if (tr->name && strcmp(tr->name, instance) == 0) {
9140 struct trace_array *trace_array_find_get(const char *instance)
9142 struct trace_array *tr;
9144 mutex_lock(&trace_types_lock);
9145 tr = trace_array_find(instance);
9148 mutex_unlock(&trace_types_lock);
9153 static int trace_array_create_dir(struct trace_array *tr)
9157 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9161 ret = event_trace_add_tracer(tr->dir, tr);
9163 tracefs_remove(tr->dir);
9167 init_tracer_tracefs(tr, tr->dir);
9168 __update_tracer_options(tr);
9173 static struct trace_array *
9174 trace_array_create_systems(const char *name, const char *systems)
9176 struct trace_array *tr;
9180 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9182 return ERR_PTR(ret);
9184 tr->name = kstrdup(name, GFP_KERNEL);
9188 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9191 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9195 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9196 if (!tr->system_names)
9200 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9202 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9204 raw_spin_lock_init(&tr->start_lock);
9206 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9208 tr->current_trace = &nop_trace;
9210 INIT_LIST_HEAD(&tr->systems);
9211 INIT_LIST_HEAD(&tr->events);
9212 INIT_LIST_HEAD(&tr->hist_vars);
9213 INIT_LIST_HEAD(&tr->err_log);
9215 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9218 /* The ring buffer is expanded by default */
9219 trace_set_ring_buffer_expanded(tr);
9221 if (ftrace_allocate_ftrace_ops(tr) < 0)
9224 ftrace_init_trace_array(tr);
9226 init_trace_flags_index(tr);
9228 if (trace_instance_dir) {
9229 ret = trace_array_create_dir(tr);
9233 __trace_early_add_events(tr);
9235 list_add(&tr->list, &ftrace_trace_arrays);
9242 ftrace_free_ftrace_ops(tr);
9243 free_trace_buffers(tr);
9244 free_cpumask_var(tr->pipe_cpumask);
9245 free_cpumask_var(tr->tracing_cpumask);
9246 kfree_const(tr->system_names);
9250 return ERR_PTR(ret);
9253 static struct trace_array *trace_array_create(const char *name)
9255 return trace_array_create_systems(name, NULL);
9258 static int instance_mkdir(const char *name)
9260 struct trace_array *tr;
9263 mutex_lock(&event_mutex);
9264 mutex_lock(&trace_types_lock);
9267 if (trace_array_find(name))
9270 tr = trace_array_create(name);
9272 ret = PTR_ERR_OR_ZERO(tr);
9275 mutex_unlock(&trace_types_lock);
9276 mutex_unlock(&event_mutex);
9281 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9282 * @name: The name of the trace array to be looked up/created.
9283 * @systems: A list of systems to create event directories for (NULL for all)
9285 * Returns a pointer to the trace array with the given name, or
9286 * NULL if it cannot be created.
9288 * NOTE: This function increments the reference counter associated with the
9289 * trace array returned. This makes sure it cannot be freed while in use.
9290 * Use trace_array_put() once the trace array is no longer needed.
9291 * If the trace_array is to be freed, trace_array_destroy() needs to
9292 * be called after the trace_array_put(), or simply let user space delete
9293 * it from the tracefs instances directory. But until the
9294 * trace_array_put() is called, user space cannot delete it.
9297 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9299 struct trace_array *tr;
9301 mutex_lock(&event_mutex);
9302 mutex_lock(&trace_types_lock);
9304 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9305 if (tr->name && strcmp(tr->name, name) == 0)
9309 tr = trace_array_create_systems(name, systems);
9317 mutex_unlock(&trace_types_lock);
9318 mutex_unlock(&event_mutex);
9321 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
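/*
 * Minimal sketch of the intended calling pattern for a kernel module
 * (error handling trimmed; "my_instance" is just an example name):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (tr) {
 *		...			use tr, e.g. trace_array_printk(tr, ...)
 *		trace_array_put(tr);
 *		trace_array_destroy(tr);	only if the instance should go away
 *	}
 *
 * See samples/ftrace/sample-trace-array.c for a complete module example.
 */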
9323 static int __remove_instance(struct trace_array *tr)
9327 /* Reference counter for a newly created trace array = 1. */
9328 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9331 list_del(&tr->list);
9333 /* Disable all the flags that were enabled coming in */
9334 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9335 if ((1 << i) & ZEROED_TRACE_FLAGS)
9336 set_tracer_flag(tr, 1 << i, 0);
9339 tracing_set_nop(tr);
9340 clear_ftrace_function_probes(tr);
9341 event_trace_del_tracer(tr);
9342 ftrace_clear_pids(tr);
9343 ftrace_destroy_function_files(tr);
9344 tracefs_remove(tr->dir);
9345 free_percpu(tr->last_func_repeats);
9346 free_trace_buffers(tr);
9347 clear_tracing_err_log(tr);
9349 for (i = 0; i < tr->nr_topts; i++) {
9350 kfree(tr->topts[i].topts);
9354 free_cpumask_var(tr->pipe_cpumask);
9355 free_cpumask_var(tr->tracing_cpumask);
9356 kfree_const(tr->system_names);
9363 int trace_array_destroy(struct trace_array *this_tr)
9365 struct trace_array *tr;
9371 mutex_lock(&event_mutex);
9372 mutex_lock(&trace_types_lock);
9376 /* Making sure trace array exists before destroying it. */
9377 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9378 if (tr == this_tr) {
9379 ret = __remove_instance(tr);
9384 mutex_unlock(&trace_types_lock);
9385 mutex_unlock(&event_mutex);
9389 EXPORT_SYMBOL_GPL(trace_array_destroy);
9391 static int instance_rmdir(const char *name)
9393 struct trace_array *tr;
9396 mutex_lock(&event_mutex);
9397 mutex_lock(&trace_types_lock);
9400 tr = trace_array_find(name);
9402 ret = __remove_instance(tr);
9404 mutex_unlock(&trace_types_lock);
9405 mutex_unlock(&event_mutex);
9410 static __init void create_trace_instances(struct dentry *d_tracer)
9412 struct trace_array *tr;
9414 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9417 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9420 mutex_lock(&event_mutex);
9421 mutex_lock(&trace_types_lock);
9423 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9426 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9427 "Failed to create instance directory\n"))
9431 mutex_unlock(&trace_types_lock);
9432 mutex_unlock(&event_mutex);
9436 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9440 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9441 tr, &show_traces_fops);
9443 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9444 tr, &set_tracer_fops);
9446 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9447 tr, &tracing_cpumask_fops);
9449 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9450 tr, &tracing_iter_fops);
9452 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9455 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9456 tr, &tracing_pipe_fops);
9458 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9459 tr, &tracing_entries_fops);
9461 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9462 tr, &tracing_total_entries_fops);
9464 trace_create_file("free_buffer", 0200, d_tracer,
9465 tr, &tracing_free_buffer_fops);
9467 trace_create_file("trace_marker", 0220, d_tracer,
9468 tr, &tracing_mark_fops);
9470 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9472 trace_create_file("trace_marker_raw", 0220, d_tracer,
9473 tr, &tracing_mark_raw_fops);
9475 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9478 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9479 tr, &rb_simple_fops);
9481 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9482 &trace_time_stamp_mode_fops);
9484 tr->buffer_percent = 50;
9486 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9487 tr, &buffer_percent_fops);
9489 trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9490 tr, &buffer_subbuf_size_fops);
9492 create_trace_options_dir(tr);
9494 #ifdef CONFIG_TRACER_MAX_TRACE
9495 trace_create_maxlat_file(tr, d_tracer);
9498 if (ftrace_create_function_files(tr, d_tracer))
9499 MEM_FAIL(1, "Could not allocate function filter files");
9501 #ifdef CONFIG_TRACER_SNAPSHOT
9502 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9503 tr, &snapshot_fops);
9506 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9507 tr, &tracing_err_log_fops);
9509 for_each_tracing_cpu(cpu)
9510 tracing_init_tracefs_percpu(tr, cpu);
9512 ftrace_init_tracefs(tr, d_tracer);
9515 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9517 struct vfsmount *mnt;
9518 struct file_system_type *type;
9521 * To maintain backward compatibility for tools that mount
9522 * debugfs to get to the tracing facility, tracefs is automatically
9523 * mounted to the debugfs/tracing directory.
9525 type = get_fs_type("tracefs");
9528 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9529 put_filesystem(type);
9538 * tracing_init_dentry - initialize top level trace array
9540 * This is called when creating files or directories in the tracing
9541 * directory. It is called via fs_initcall() by any of the boot up code
9542 * and expects to return the dentry of the top level tracing directory.
9544 int tracing_init_dentry(void)
9546 struct trace_array *tr = &global_trace;
9548 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9549 pr_warn("Tracing disabled due to lockdown\n");
9553 /* The top level trace array uses NULL as parent */
9557 if (WARN_ON(!tracefs_initialized()))
9561 * As there may still be users that expect the tracing
9562 * files to exist in debugfs/tracing, we must automount
9563 * the tracefs file system there, so older tools still
9564 * work with the newer kernel.
9566 tr->dir = debugfs_create_automount("tracing", NULL,
9567 trace_automount, NULL);
9572 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9573 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9575 static struct workqueue_struct *eval_map_wq __initdata;
9576 static struct work_struct eval_map_work __initdata;
9577 static struct work_struct tracerfs_init_work __initdata;
9579 static void __init eval_map_work_func(struct work_struct *work)
9583 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9584 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9587 static int __init trace_eval_init(void)
9589 INIT_WORK(&eval_map_work, eval_map_work_func);
9591 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9593 pr_err("Unable to allocate eval_map_wq\n");
9595 eval_map_work_func(&eval_map_work);
9599 queue_work(eval_map_wq, &eval_map_work);
9603 subsys_initcall(trace_eval_init);
9605 static int __init trace_eval_sync(void)
9607 /* Make sure the eval map updates are finished */
9609 destroy_workqueue(eval_map_wq);
9613 late_initcall_sync(trace_eval_sync);
9616 #ifdef CONFIG_MODULES
9617 static void trace_module_add_evals(struct module *mod)
9619 if (!mod->num_trace_evals)
9623 * Modules with bad taint do not have events created, do
9624 * not bother with enums either.
9626 if (trace_module_has_bad_taint(mod))
9629 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9632 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9633 static void trace_module_remove_evals(struct module *mod)
9635 union trace_eval_map_item *map;
9636 union trace_eval_map_item **last = &trace_eval_maps;
9638 if (!mod->num_trace_evals)
9641 mutex_lock(&trace_eval_mutex);
9643 map = trace_eval_maps;
9646 if (map->head.mod == mod)
9648 map = trace_eval_jmp_to_tail(map);
9649 last = &map->tail.next;
9650 map = map->tail.next;
9655 *last = trace_eval_jmp_to_tail(map)->tail.next;
9658 mutex_unlock(&trace_eval_mutex);
9661 static inline void trace_module_remove_evals(struct module *mod) { }
9662 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9664 static int trace_module_notify(struct notifier_block *self,
9665 unsigned long val, void *data)
9667 struct module *mod = data;
9670 case MODULE_STATE_COMING:
9671 trace_module_add_evals(mod);
9673 case MODULE_STATE_GOING:
9674 trace_module_remove_evals(mod);
9681 static struct notifier_block trace_module_nb = {
9682 .notifier_call = trace_module_notify,
9685 #endif /* CONFIG_MODULES */
9687 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9692 init_tracer_tracefs(&global_trace, NULL);
9693 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9695 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9696 &global_trace, &tracing_thresh_fops);
9698 trace_create_file("README", TRACE_MODE_READ, NULL,
9699 NULL, &tracing_readme_fops);
9701 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9702 NULL, &tracing_saved_cmdlines_fops);
9704 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9705 NULL, &tracing_saved_cmdlines_size_fops);
9707 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9708 NULL, &tracing_saved_tgids_fops);
9710 trace_create_eval_file(NULL);
9712 #ifdef CONFIG_MODULES
9713 register_module_notifier(&trace_module_nb);
9716 #ifdef CONFIG_DYNAMIC_FTRACE
9717 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9718 NULL, &tracing_dyn_info_fops);
9721 create_trace_instances(NULL);
9723 update_tracer_options(&global_trace);
9726 static __init int tracer_init_tracefs(void)
9730 trace_access_lock_init();
9732 ret = tracing_init_dentry();
9737 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9738 queue_work(eval_map_wq, &tracerfs_init_work);
9740 tracer_init_tracefs_work_func(NULL);
9743 rv_init_interface();
9748 fs_initcall(tracer_init_tracefs);
9750 static int trace_die_panic_handler(struct notifier_block *self,
9751 unsigned long ev, void *unused);
9753 static struct notifier_block trace_panic_notifier = {
9754 .notifier_call = trace_die_panic_handler,
9755 .priority = INT_MAX - 1,
9758 static struct notifier_block trace_die_notifier = {
9759 .notifier_call = trace_die_panic_handler,
9760 .priority = INT_MAX - 1,
9764 * The idea is to execute the following die/panic callback early, in order
9765 * to avoid showing irrelevant information in the trace (like other panic
9766 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
9767 * warnings get disabled (to prevent potential log flooding).
9769 static int trace_die_panic_handler(struct notifier_block *self,
9770 unsigned long ev, void *unused)
9772 if (!ftrace_dump_on_oops)
9775 /* The die notifier requires DIE_OOPS to trigger */
9776 if (self == &trace_die_notifier && ev != DIE_OOPS)
9779 ftrace_dump(ftrace_dump_on_oops);
9785 * printk is set to max of 1024, we really don't need it that big.
9786 * Nothing should be printing 1000 characters anyway.
9788 #define TRACE_MAX_PRINT 1000
9791 * Define here KERN_TRACE so that we have one place to modify
9792 * it if we decide to change what log level the ftrace dump
9795 #define KERN_TRACE KERN_EMERG
9798 trace_printk_seq(struct trace_seq *s)
9800 /* Probably should print a warning here. */
9801 if (s->seq.len >= TRACE_MAX_PRINT)
9802 s->seq.len = TRACE_MAX_PRINT;
9805 * More paranoid code. Although the buffer size is set to
9806 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9807 * an extra layer of protection.
9809 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9810 s->seq.len = s->seq.size - 1;
9812 /* should be NUL-terminated, but we are paranoid. */
9813 s->buffer[s->seq.len] = 0;
9815 printk(KERN_TRACE "%s", s->buffer);
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
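/*
 * The static temp/fmt buffers are used above because this iterator is set
 * up on the dump path, which can run in panic/NMI-like contexts where
 * allocating memory is not an option.
 */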
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
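/*
 * Illustrative use only (not part of this file): a module that wants the
 * trace buffer on the console when it detects an unexpected state could do
 * something like:
 *
 *	if (WARN_ON(in_bad_state))
 *		ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG would instead dump only the CPU that called ftrace_dump(), and
 * DUMP_NONE makes the call a no-op.
 */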
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
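/*
 * Sketch of a typical caller (names are illustrative only): a tracefs write
 * handler that accepts newline separated commands can simply forward to this
 * helper,
 *
 *	static ssize_t probes_write(struct file *file, const char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buf, count, ppos,
 *					       create_probe_command);
 *	}
 *
 * and create_probe_command() is then invoked once per line, with trailing
 * '#' comments already stripped.
 */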
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) == NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
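/*
 * boot_snapshot_info holds the tab separated instance names handed in on
 * the command line (each entry is stored with a trailing tab), which is why
 * the lookup above matches either "name\t" at the start of the string or
 * "\tname\t" anywhere inside it.
 */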
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok, NULL);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
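/*
 * Rough example of the boot syntax consumed here (see
 * Documentation/admin-guide/kernel-parameters.txt for the authoritative
 * form): something like
 *
 *	trace_instance=foo,sched:sched_switch,irq:irq_handler_entry
 *
 * creates an instance named "foo" and enables the listed events in it
 * before user space is up.
 */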
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (global_trace.ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	trace_free_saved_cmdlines_buffer();
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
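/*
 * The error labels above unwind strictly in the reverse order of the
 * allocations made earlier in tracer_alloc_buffers(), so a failure at any
 * step frees only what was already set up.
 */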
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
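/*
 * snapshot_at_boot is set from the "ftrace_boot_snapshot" kernel command
 * line parameter; when it is set, every instance that allocated a snapshot
 * buffer gets one taken here so the boot-time trace is preserved.
 */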
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}
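/*
 * tracepoint_printk is normally switched on with the "tp_printk" kernel
 * command line parameter; if the iterator above cannot be allocated, the
 * feature is simply disabled again rather than failing early init.
 */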
void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default boot-time tracer name lives in an init-section buffer.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);
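/*
 * late_initcall_sync() runs after every plain late_initcall, so by the time
 * this executes sched_clock_stable() has settled and any boot-time tracer
 * that was going to register has had its chance to do so.
 */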