1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
8 * Originally taken from the RT patch by:
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring buffer to count the
65 * entries inserted during the selftest, although some concurrent
66 * insertions into the ring buffer, such as trace_printk(), could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing (tracers/events set up via the kernel cmdline)
73 * is running, we do not want to run the selftests.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. That is the only place that sets
115 * this back to zero.
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
125 * is set, then ftrace_dump() is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
128 * serial console.
130 * It is off by default, but you can enable it either by specifying
131 * "ftrace_dump_on_oops" on the kernel command line, or by setting
132 * /proc/sys/kernel/ftrace_dump_on_oops:
133 * Set to 1 to dump the buffers of all CPUs.
134 * Set to 2 to dump only the buffer of the CPU that triggered the oops.
137 enum ftrace_dump_mode ftrace_dump_on_oops;
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL, as it must be different
154 * from "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
190 static int __init set_cmdline_ftrace(char *str)
192 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
193 default_bootup_tracer = bootup_tracer_buf;
194 /* We are using ftrace early, expand it */
195 ring_buffer_expanded = true;
198 __setup("ftrace=", set_cmdline_ftrace);
200 static int __init set_ftrace_dump_on_oops(char *str)
202 if (*str++ != '=' || !*str || !strcmp("1", str)) {
203 ftrace_dump_on_oops = DUMP_ALL;
207 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
208 ftrace_dump_on_oops = DUMP_ORIG;
214 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
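/*
 * Illustrative example (editor's note, not part of the original source):
 * the dump-on-oops behaviour described above can be enabled either on the
 * kernel command line or at run time via the sysctl, e.g.:
 *
 *	ftrace_dump_on_oops			(dump the buffers of all CPUs)
 *	ftrace_dump_on_oops=orig_cpu		(dump only the CPU that oopsed)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 */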
216 static int __init stop_trace_on_warning(char *str)
218 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
219 __disable_trace_on_warning = 1;
222 __setup("traceoff_on_warning", stop_trace_on_warning);
224 static int __init boot_alloc_snapshot(char *str)
226 allocate_snapshot = true;
227 /* We also need the main ring buffer expanded */
228 ring_buffer_expanded = true;
231 __setup("alloc_snapshot", boot_alloc_snapshot);
234 static int __init boot_snapshot(char *str)
236 snapshot_at_boot = true;
237 boot_alloc_snapshot(str);
240 __setup("ftrace_boot_snapshot", boot_snapshot);
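/*
 * Illustrative example (editor's note, not part of the original source):
 * combined with a boot-time tracer, the two parameters above can be used
 * to capture the boot process itself, e.g.:
 *
 *	ftrace=function alloc_snapshot
 *	ftrace=function_graph ftrace_boot_snapshot
 *
 * "alloc_snapshot" only allocates the spare buffer, while
 * "ftrace_boot_snapshot" also takes a snapshot at the end of boot.
 */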
243 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
245 static int __init set_trace_boot_options(char *str)
247 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
250 __setup("trace_options=", set_trace_boot_options);
252 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
253 static char *trace_boot_clock __initdata;
255 static int __init set_trace_boot_clock(char *str)
257 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
258 trace_boot_clock = trace_boot_clock_buf;
261 __setup("trace_clock=", set_trace_boot_clock);
263 static int __init set_tracepoint_printk(char *str)
265 /* Ignore the "tp_printk_stop_on_boot" param */
269 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
270 tracepoint_printk = 1;
273 __setup("tp_printk", set_tracepoint_printk);
275 static int __init set_tracepoint_printk_stop(char *str)
277 tracepoint_printk_stop_on_boot = true;
280 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
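/*
 * Illustrative example (editor's note, not part of the original source):
 * the boot parameters handled above can be combined on the kernel command
 * line, e.g.:
 *
 *	ftrace=function trace_options=sym-offset trace_clock=global
 *	tp_printk tp_printk_stop_on_boot
 *
 * This starts the function tracer, sets a trace option and the trace
 * clock, pipes tracepoints to printk during boot, and stops doing so once
 * booting has finished.
 */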
282 unsigned long long ns2usecs(u64 nsec)
290 trace_process_export(struct trace_export *export,
291 struct ring_buffer_event *event, int flag)
293 struct trace_entry *entry;
294 unsigned int size = 0;
296 if (export->flags & flag) {
297 entry = ring_buffer_event_data(event);
298 size = ring_buffer_event_length(event);
299 export->write(export, entry, size);
303 static DEFINE_MUTEX(ftrace_export_lock);
305 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
307 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
308 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
309 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
311 static inline void ftrace_exports_enable(struct trace_export *export)
313 if (export->flags & TRACE_EXPORT_FUNCTION)
314 static_branch_inc(&trace_function_exports_enabled);
316 if (export->flags & TRACE_EXPORT_EVENT)
317 static_branch_inc(&trace_event_exports_enabled);
319 if (export->flags & TRACE_EXPORT_MARKER)
320 static_branch_inc(&trace_marker_exports_enabled);
323 static inline void ftrace_exports_disable(struct trace_export *export)
325 if (export->flags & TRACE_EXPORT_FUNCTION)
326 static_branch_dec(&trace_function_exports_enabled);
328 if (export->flags & TRACE_EXPORT_EVENT)
329 static_branch_dec(&trace_event_exports_enabled);
331 if (export->flags & TRACE_EXPORT_MARKER)
332 static_branch_dec(&trace_marker_exports_enabled);
335 static void ftrace_exports(struct ring_buffer_event *event, int flag)
337 struct trace_export *export;
339 preempt_disable_notrace();
341 export = rcu_dereference_raw_check(ftrace_exports_list);
343 trace_process_export(export, event, flag);
344 export = rcu_dereference_raw_check(export->next);
347 preempt_enable_notrace();
351 add_trace_export(struct trace_export **list, struct trace_export *export)
353 rcu_assign_pointer(export->next, *list);
355 * We are entering export into the list but another
356 * CPU might be walking that list. We need to make sure
357 * the export->next pointer is valid before another CPU sees
358 * the export pointer included into the list.
360 rcu_assign_pointer(*list, export);
364 rm_trace_export(struct trace_export **list, struct trace_export *export)
366 struct trace_export **p;
368 for (p = list; *p != NULL; p = &(*p)->next)
375 rcu_assign_pointer(*p, (*p)->next);
381 add_ftrace_export(struct trace_export **list, struct trace_export *export)
383 ftrace_exports_enable(export);
385 add_trace_export(list, export);
389 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
393 ret = rm_trace_export(list, export);
394 ftrace_exports_disable(export);
399 int register_ftrace_export(struct trace_export *export)
401 if (WARN_ON_ONCE(!export->write))
404 mutex_lock(&ftrace_export_lock);
406 add_ftrace_export(&ftrace_exports_list, export);
408 mutex_unlock(&ftrace_export_lock);
412 EXPORT_SYMBOL_GPL(register_ftrace_export);
414 int unregister_ftrace_export(struct trace_export *export)
418 mutex_lock(&ftrace_export_lock);
420 ret = rm_ftrace_export(&ftrace_exports_list, export);
422 mutex_unlock(&ftrace_export_lock);
426 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
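/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal trace_export client. The "example_" names are hypothetical;
 * the callback signature and the TRACE_EXPORT_* flags come from
 * <linux/trace.h>. The write() callback receives the raw binary trace
 * entry and its length for the event classes selected by ->flags.
 */
#if 0	/* illustrative only, not compiled */
#include <linux/trace.h>

static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Forward the raw entry to some out-of-band channel. */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}
#endif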
428 /* trace_flags holds trace_options default values */
429 #define TRACE_DEFAULT_FLAGS \
430 (FUNCTION_DEFAULT_FLAGS | \
431 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
432 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
433 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
434 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
437 /* trace_options that are only supported by global_trace */
438 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
439 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
441 /* trace_flags that are default zero for instances */
442 #define ZEROED_TRACE_FLAGS \
443 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
446 * The global_trace is the descriptor that holds the top-level tracing
447 * buffers for the live tracing.
449 static struct trace_array global_trace = {
450 .trace_flags = TRACE_DEFAULT_FLAGS,
453 LIST_HEAD(ftrace_trace_arrays);
455 int trace_array_get(struct trace_array *this_tr)
457 struct trace_array *tr;
460 mutex_lock(&trace_types_lock);
461 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
468 mutex_unlock(&trace_types_lock);
473 static void __trace_array_put(struct trace_array *this_tr)
475 WARN_ON(!this_tr->ref);
480 * trace_array_put - Decrement the reference counter for this trace array.
481 * @this_tr : pointer to the trace array
483 * NOTE: Use this when we no longer need the trace array returned by
484 * trace_array_get_by_name(). This ensures the trace array can be later
488 void trace_array_put(struct trace_array *this_tr)
493 mutex_lock(&trace_types_lock);
494 __trace_array_put(this_tr);
495 mutex_unlock(&trace_types_lock);
497 EXPORT_SYMBOL_GPL(trace_array_put);
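/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the get/put pattern the comment above refers to, as used by in-kernel
 * consumers of tracing instances. "example_" names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
#include <linux/trace.h>

static void example_use_instance(void)
{
	struct trace_array *tr;

	/* Returns the instance with its reference count elevated. */
	tr = trace_array_get_by_name("example_instance");
	if (!tr)
		return;

	/* ... use the instance, e.g. trace_array_printk(tr, ...) ... */

	/* Drop the reference so the instance can be removed later. */
	trace_array_put(tr);
}
#endif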
499 int tracing_check_open_get_tr(struct trace_array *tr)
503 ret = security_locked_down(LOCKDOWN_TRACEFS);
507 if (tracing_disabled)
510 if (tr && trace_array_get(tr) < 0)
516 int call_filter_check_discard(struct trace_event_call *call, void *rec,
517 struct trace_buffer *buffer,
518 struct ring_buffer_event *event)
520 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
521 !filter_match_preds(call->filter, rec)) {
522 __trace_event_discard_commit(buffer, event);
530 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
531 * @filtered_pids: The list of pids to check
532 * @search_pid: The PID to find in @filtered_pids
534 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
537 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
539 return trace_pid_list_is_set(filtered_pids, search_pid);
543 * trace_ignore_this_task - should a task be ignored for tracing
544 * @filtered_pids: The list of pids to check
545 * @filtered_no_pids: The list of pids not to be traced
546 * @task: The task that should be ignored if not filtered
548 * Checks if @task should be traced or not from @filtered_pids.
549 * Returns true if @task should *NOT* be traced.
550 * Returns false if @task should be traced.
553 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
554 struct trace_pid_list *filtered_no_pids,
555 struct task_struct *task)
558 * If filtered_no_pids is not empty, and the task's pid is listed
559 * in filtered_no_pids, then return true.
560 * Otherwise, if filtered_pids is empty, that means we can
561 * trace all tasks. If it has content, then only trace pids
562 * within filtered_pids.
565 return (filtered_pids &&
566 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
568 trace_find_filtered_pid(filtered_no_pids, task->pid));
572 * trace_filter_add_remove_task - Add or remove a task from a pid_list
573 * @pid_list: The list to modify
574 * @self: The current task for fork or NULL for exit
575 * @task: The task to add or remove
577 * When adding a task and @self is defined, the task is only added if @self
578 * is also included in @pid_list. This happens on fork, where tasks should
579 * only be added when the parent is listed. If @self is NULL, then the
580 * @task pid will be removed from the list, which happens on exit.
583 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
584 struct task_struct *self,
585 struct task_struct *task)
590 /* For forks, we only add if the forking task is listed */
592 if (!trace_find_filtered_pid(pid_list, self->pid))
596 /* "self" is set for forks, and NULL for exits */
598 trace_pid_list_set(pid_list, task->pid);
600 trace_pid_list_clear(pid_list, task->pid);
604 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
605 * @pid_list: The pid list to show
606 * @v: The last pid that was shown (the actual pid plus one, so that zero can be displayed)
607 * @pos: The position of the file
609 * This is used by the seq_file "next" operation to iterate the pids
610 * listed in a trace_pid_list structure.
612 * Returns the pid+1 as we want to display pid of zero, but NULL would
613 * stop the iteration.
615 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
617 long pid = (unsigned long)v;
622 /* pid already is +1 of the actual previous bit */
623 if (trace_pid_list_next(pid_list, pid, &next) < 0)
628 /* Return pid + 1 to allow zero to be represented */
629 return (void *)(pid + 1);
633 * trace_pid_start - Used for seq_file to start reading pid lists
634 * @pid_list: The pid list to show
635 * @pos: The position of the file
637 * This is used by seq_file "start" operation to start the iteration
640 * Returns the pid+1 as we want to display pid of zero, but NULL would
641 * stop the iteration.
643 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
649 if (trace_pid_list_first(pid_list, &first) < 0)
654 /* Return pid + 1 so that zero can be the exit value */
655 for (pid++; pid && l < *pos;
656 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
662 * trace_pid_show - show the current pid in seq_file processing
663 * @m: The seq_file structure to write into
664 * @v: A void pointer of the pid (+1) value to display
666 * Can be directly used by seq_file operations to display the current pid value.
669 int trace_pid_show(struct seq_file *m, void *v)
671 unsigned long pid = (unsigned long)v - 1;
673 seq_printf(m, "%lu\n", pid);
677 /* 128 should be much more than enough */
678 #define PID_BUF_SIZE 127
680 int trace_pid_write(struct trace_pid_list *filtered_pids,
681 struct trace_pid_list **new_pid_list,
682 const char __user *ubuf, size_t cnt)
684 struct trace_pid_list *pid_list;
685 struct trace_parser parser;
693 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
697 * Always recreate a new array. The write is an all-or-nothing
698 * operation: when adding new pids from user space, a new array is
699 * always created. If the operation fails, the current list is
700 * not modified.
702 pid_list = trace_pid_list_alloc();
704 trace_parser_put(&parser);
709 /* copy the current bits to the new max */
710 ret = trace_pid_list_first(filtered_pids, &pid);
712 trace_pid_list_set(pid_list, pid);
713 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
723 ret = trace_get_user(&parser, ubuf, cnt, &pos);
731 if (!trace_parser_loaded(&parser))
735 if (kstrtoul(parser.buffer, 0, &val))
740 if (trace_pid_list_set(pid_list, pid) < 0) {
746 trace_parser_clear(&parser);
749 trace_parser_put(&parser);
752 trace_pid_list_free(pid_list);
757 /* Cleared the list of pids */
758 trace_pid_list_free(pid_list);
762 *new_pid_list = pid_list;
767 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
771 /* Early boot up does not have a buffer yet */
773 return trace_clock_local();
775 ts = ring_buffer_time_stamp(buf->buffer);
776 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
781 u64 ftrace_now(int cpu)
783 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
787 * tracing_is_enabled - Show if global_trace has been enabled
789 * Shows if the global trace has been enabled or not. It uses the
790 * mirror flag "buffer_disabled" to be used in fast paths such as for
791 * the irqsoff tracer. But it may be inaccurate due to races. If you
792 * need to know the accurate state, use tracing_is_on() which is a little
793 * slower, but accurate.
795 int tracing_is_enabled(void)
798 * For quick access (irqsoff uses this in fast path), just
799 * return the mirror variable of the state of the ring buffer.
800 * It's a little racy, but we don't really care.
803 return !global_trace.buffer_disabled;
807 * trace_buf_size is the size in bytes that is allocated
808 * for a buffer. Note, the number of bytes is always rounded
809 * up to the page size.
811 * This number is purposely set to a low value (16384) so that,
812 * if a dump on oops happens, we do not have to wait for an
813 * excessive amount of output. It is configurable at both
814 * boot time and run time anyway.
816 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
818 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
820 /* trace_types holds a link list of available tracers. */
821 static struct tracer *trace_types __read_mostly;
824 * trace_types_lock is used to protect the trace_types list.
826 DEFINE_MUTEX(trace_types_lock);
829 * Serialize access to the ring buffer.
831 * The ring buffer serializes readers, but that is only low-level protection.
832 * The validity of the events (returned by ring_buffer_peek(), etc.)
833 * is not protected by the ring buffer.
835 * The content of events may become garbage if we allow another process to
836 * consume these events concurrently:
837 * A) the page of the consumed events may become a normal page
838 * (not a reader page) in the ring buffer, and this page will be rewritten
839 * by the event producer.
840 * B) the page of the consumed events may become a page for splice_read,
841 * and this page will be returned to the system.
843 * These primitives allow multi-process access to different per-CPU ring
844 * buffers concurrently.
846 * These primitives don't distinguish read-only from read-consume access.
847 * Multiple read-only accesses are also serialized.
851 static DECLARE_RWSEM(all_cpu_access_lock);
852 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
854 static inline void trace_access_lock(int cpu)
856 if (cpu == RING_BUFFER_ALL_CPUS) {
857 /* gain it for accessing the whole ring buffer. */
858 down_write(&all_cpu_access_lock);
860 /* gain it for accessing a cpu ring buffer. */
862 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
863 down_read(&all_cpu_access_lock);
865 /* Secondly block other access to this @cpu ring buffer. */
866 mutex_lock(&per_cpu(cpu_access_lock, cpu));
870 static inline void trace_access_unlock(int cpu)
872 if (cpu == RING_BUFFER_ALL_CPUS) {
873 up_write(&all_cpu_access_lock);
875 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
876 up_read(&all_cpu_access_lock);
880 static inline void trace_access_lock_init(void)
884 for_each_possible_cpu(cpu)
885 mutex_init(&per_cpu(cpu_access_lock, cpu));
890 static DEFINE_MUTEX(access_lock);
892 static inline void trace_access_lock(int cpu)
895 mutex_lock(&access_lock);
898 static inline void trace_access_unlock(int cpu)
901 mutex_unlock(&access_lock);
904 static inline void trace_access_lock_init(void)
910 #ifdef CONFIG_STACKTRACE
911 static void __ftrace_trace_stack(struct trace_buffer *buffer,
912 unsigned int trace_ctx,
913 int skip, struct pt_regs *regs);
914 static inline void ftrace_trace_stack(struct trace_array *tr,
915 struct trace_buffer *buffer,
916 unsigned int trace_ctx,
917 int skip, struct pt_regs *regs);
920 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
921 unsigned int trace_ctx,
922 int skip, struct pt_regs *regs)
925 static inline void ftrace_trace_stack(struct trace_array *tr,
926 struct trace_buffer *buffer,
927 unsigned long trace_ctx,
928 int skip, struct pt_regs *regs)
934 static __always_inline void
935 trace_event_setup(struct ring_buffer_event *event,
936 int type, unsigned int trace_ctx)
938 struct trace_entry *ent = ring_buffer_event_data(event);
940 tracing_generic_entry_update(ent, type, trace_ctx);
943 static __always_inline struct ring_buffer_event *
944 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
947 unsigned int trace_ctx)
949 struct ring_buffer_event *event;
951 event = ring_buffer_lock_reserve(buffer, len);
953 trace_event_setup(event, type, trace_ctx);
958 void tracer_tracing_on(struct trace_array *tr)
960 if (tr->array_buffer.buffer)
961 ring_buffer_record_on(tr->array_buffer.buffer);
963 * This flag is looked at when buffers haven't been allocated
964 * yet, or by some tracers (like irqsoff) that just want to
965 * know if the ring buffer has been disabled, but can handle
966 * races where it gets disabled while we still do a record.
967 * As the check is in the fast path of the tracers, it is more
968 * important to be fast than accurate.
970 tr->buffer_disabled = 0;
971 /* Make the flag seen by readers */
976 * tracing_on - enable tracing buffers
978 * This function enables tracing buffers that may have been
979 * disabled with tracing_off.
981 void tracing_on(void)
983 tracer_tracing_on(&global_trace);
985 EXPORT_SYMBOL_GPL(tracing_on);
988 static __always_inline void
989 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
991 __this_cpu_write(trace_taskinfo_save, true);
993 /* If this is the temp buffer, we need to commit fully */
994 if (this_cpu_read(trace_buffered_event) == event) {
995 /* Length is in event->array[0] */
996 ring_buffer_write(buffer, event->array[0], &event->array[1]);
997 /* Release the temp buffer */
998 this_cpu_dec(trace_buffered_event_cnt);
999 /* ring_buffer_unlock_commit() enables preemption */
1000 preempt_enable_notrace();
1002 ring_buffer_unlock_commit(buffer, event);
1006 * __trace_puts - write a constant string into the trace buffer.
1007 * @ip: The address of the caller
1008 * @str: The constant string to write
1009 * @size: The size of the string.
1011 int __trace_puts(unsigned long ip, const char *str, int size)
1013 struct ring_buffer_event *event;
1014 struct trace_buffer *buffer;
1015 struct print_entry *entry;
1016 unsigned int trace_ctx;
1019 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1022 if (unlikely(tracing_selftest_running || tracing_disabled))
1025 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1027 trace_ctx = tracing_gen_ctx();
1028 buffer = global_trace.array_buffer.buffer;
1029 ring_buffer_nest_start(buffer);
1030 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1037 entry = ring_buffer_event_data(event);
1040 memcpy(&entry->buf, str, size);
1042 /* Add a newline if necessary */
1043 if (entry->buf[size - 1] != '\n') {
1044 entry->buf[size] = '\n';
1045 entry->buf[size + 1] = '\0';
1047 entry->buf[size] = '\0';
1049 __buffer_unlock_commit(buffer, event);
1050 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1052 ring_buffer_nest_end(buffer);
1055 EXPORT_SYMBOL_GPL(__trace_puts);
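/*
 * Editor's note (illustrative, not part of the original file):
 * __trace_puts() is normally reached through the trace_puts() macro,
 * which picks __trace_puts() or __trace_bputs() depending on whether the
 * string is a built-in constant. A minimal sketch:
 */
#if 0	/* illustrative only, not compiled */
static void example_mark_slow_path(void)
{
	/* Writes the literal into the ring buffer with very low overhead. */
	trace_puts("example: reached the slow path\n");
}
#endif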
1058 * __trace_bputs - write the pointer to a constant string into trace buffer
1059 * @ip: The address of the caller
1060 * @str: The constant string whose address is written into the buffer
1062 int __trace_bputs(unsigned long ip, const char *str)
1064 struct ring_buffer_event *event;
1065 struct trace_buffer *buffer;
1066 struct bputs_entry *entry;
1067 unsigned int trace_ctx;
1068 int size = sizeof(struct bputs_entry);
1071 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1074 if (unlikely(tracing_selftest_running || tracing_disabled))
1077 trace_ctx = tracing_gen_ctx();
1078 buffer = global_trace.array_buffer.buffer;
1080 ring_buffer_nest_start(buffer);
1081 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1086 entry = ring_buffer_event_data(event);
1090 __buffer_unlock_commit(buffer, event);
1091 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1095 ring_buffer_nest_end(buffer);
1098 EXPORT_SYMBOL_GPL(__trace_bputs);
1100 #ifdef CONFIG_TRACER_SNAPSHOT
1101 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1104 struct tracer *tracer = tr->current_trace;
1105 unsigned long flags;
1108 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1109 internal_trace_puts("*** snapshot is being ignored ***\n");
1113 if (!tr->allocated_snapshot) {
1114 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1115 internal_trace_puts("*** stopping trace here! ***\n");
1120 /* Note, the snapshot cannot be used when the tracer uses it */
1121 if (tracer->use_max_tr) {
1122 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1123 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1127 local_irq_save(flags);
1128 update_max_tr(tr, current, smp_processor_id(), cond_data);
1129 local_irq_restore(flags);
1132 void tracing_snapshot_instance(struct trace_array *tr)
1134 tracing_snapshot_instance_cond(tr, NULL);
1138 * tracing_snapshot - take a snapshot of the current buffer.
1140 * This causes a swap between the snapshot buffer and the current live
1141 * tracing buffer. You can use this to take snapshots of the live
1142 * trace when some condition is triggered, but continue to trace.
1144 * Note, make sure to allocate the snapshot either with
1145 * tracing_snapshot_alloc(), or manually with:
1146 * echo 1 > /sys/kernel/debug/tracing/snapshot
1148 * If the snapshot buffer is not allocated, this will stop tracing,
1149 * basically making a permanent snapshot.
1151 void tracing_snapshot(void)
1153 struct trace_array *tr = &global_trace;
1155 tracing_snapshot_instance(tr);
1157 EXPORT_SYMBOL_GPL(tracing_snapshot);
1160 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1161 * @tr: The tracing instance to snapshot
1162 * @cond_data: The data to be tested conditionally, and possibly saved
1164 * This is the same as tracing_snapshot() except that the snapshot is
1165 * conditional - the snapshot will only happen if the
1166 * cond_snapshot.update() implementation receiving the cond_data
1167 * returns true, which means that the trace array's cond_snapshot
1168 * update() operation used the cond_data to determine whether the
1169 * snapshot should be taken, and if it was, presumably saved it along
1170 * with the snapshot.
1172 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1174 tracing_snapshot_instance_cond(tr, cond_data);
1176 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1179 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1180 * @tr: The tracing instance
1182 * When the user enables a conditional snapshot using
1183 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1184 * with the snapshot. This accessor is used to retrieve it.
1186 * Should not be called from cond_snapshot.update(), since it takes
1187 * the tr->max_lock lock, which the code calling
1188 * cond_snapshot.update() has already done.
1190 * Returns the cond_data associated with the trace array's snapshot.
1192 void *tracing_cond_snapshot_data(struct trace_array *tr)
1194 void *cond_data = NULL;
1196 arch_spin_lock(&tr->max_lock);
1198 if (tr->cond_snapshot)
1199 cond_data = tr->cond_snapshot->cond_data;
1201 arch_spin_unlock(&tr->max_lock);
1205 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1207 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1208 struct array_buffer *size_buf, int cpu_id);
1209 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1211 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1215 if (!tr->allocated_snapshot) {
1217 /* allocate spare buffer */
1218 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1219 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1223 tr->allocated_snapshot = true;
1229 static void free_snapshot(struct trace_array *tr)
1232 * We don't free the ring buffer; instead, we resize it because
1233 * the max_tr ring buffer has some state (e.g. ring->clock) that
1234 * we want to preserve.
1236 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1237 set_buffer_entries(&tr->max_buffer, 1);
1238 tracing_reset_online_cpus(&tr->max_buffer);
1239 tr->allocated_snapshot = false;
1243 * tracing_alloc_snapshot - allocate snapshot buffer.
1245 * This only allocates the snapshot buffer if it isn't already
1246 * allocated - it doesn't also take a snapshot.
1248 * This is meant to be used in cases where the snapshot buffer needs
1249 * to be set up for events that can't sleep but need to be able to
1250 * trigger a snapshot.
1252 int tracing_alloc_snapshot(void)
1254 struct trace_array *tr = &global_trace;
1257 ret = tracing_alloc_snapshot_instance(tr);
1262 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1265 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1267 * This is similar to tracing_snapshot(), but it will allocate the
1268 * snapshot buffer if it isn't already allocated. Use this only
1269 * where it is safe to sleep, as the allocation may sleep.
1271 * This causes a swap between the snapshot buffer and the current live
1272 * tracing buffer. You can use this to take snapshots of the live
1273 * trace when some condition is triggered, but continue to trace.
1275 void tracing_snapshot_alloc(void)
1279 ret = tracing_alloc_snapshot();
1285 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
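/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * typical use of the snapshot API documented above. Allocate the spare
 * buffer from a context that may sleep, then take snapshots later from
 * almost any context. "example_" names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int __init example_snapshot_setup(void)
{
	/* May sleep: do this once, e.g. from driver/module init. */
	return tracing_alloc_snapshot();
}

static void example_on_interesting_event(void)
{
	/* Safe in atomic context once the spare buffer is allocated. */
	tracing_snapshot();
}
#endif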
1288 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1289 * @tr: The tracing instance
1290 * @cond_data: User data to associate with the snapshot
1291 * @update: Implementation of the cond_snapshot update function
1293 * Check whether the conditional snapshot for the given instance has
1294 * already been enabled, or if the current tracer is already using a
1295 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1296 * save the cond_data and update function inside.
1298 * Returns 0 if successful, error otherwise.
1300 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1301 cond_update_fn_t update)
1303 struct cond_snapshot *cond_snapshot;
1306 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1310 cond_snapshot->cond_data = cond_data;
1311 cond_snapshot->update = update;
1313 mutex_lock(&trace_types_lock);
1315 ret = tracing_alloc_snapshot_instance(tr);
1319 if (tr->current_trace->use_max_tr) {
1325 * The cond_snapshot can only change to NULL without the
1326 * trace_types_lock. We don't care if we race with it going
1327 * to NULL, but we want to make sure that it's not set to
1328 * something other than NULL when we get here, which we can
1329 * do safely with only holding the trace_types_lock and not
1330 * having to take the max_lock.
1332 if (tr->cond_snapshot) {
1337 arch_spin_lock(&tr->max_lock);
1338 tr->cond_snapshot = cond_snapshot;
1339 arch_spin_unlock(&tr->max_lock);
1341 mutex_unlock(&trace_types_lock);
1346 mutex_unlock(&trace_types_lock);
1347 kfree(cond_snapshot);
1350 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1353 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1354 * @tr: The tracing instance
1356 * Check whether the conditional snapshot for the given instance is
1357 * enabled; if so, free the cond_snapshot associated with it,
1358 * otherwise return -EINVAL.
1360 * Returns 0 if successful, error otherwise.
1362 int tracing_snapshot_cond_disable(struct trace_array *tr)
1366 arch_spin_lock(&tr->max_lock);
1368 if (!tr->cond_snapshot)
1371 kfree(tr->cond_snapshot);
1372 tr->cond_snapshot = NULL;
1375 arch_spin_unlock(&tr->max_lock);
1379 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
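/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a conditional snapshot user following the semantics documented above.
 * The update() callback decides, from the cond_data passed at snapshot
 * time, whether the swap actually happens. "example_" names and the
 * threshold are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static u64 example_threshold = 1000;	/* hypothetical unit */

static bool example_update(struct trace_array *tr, void *cond_data)
{
	u64 *measured = cond_data;

	/* Only take the snapshot when the measured value is large enough. */
	return measured && *measured > example_threshold;
}

static int example_enable(struct trace_array *tr)
{
	/* No user data is associated with the snapshot here (NULL). */
	return tracing_snapshot_cond_enable(tr, NULL, example_update);
}

static void example_trigger(struct trace_array *tr, u64 measured)
{
	/* update() decides, based on "measured", whether to snapshot. */
	tracing_snapshot_cond(tr, &measured);
}
#endif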
1381 void tracing_snapshot(void)
1383 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1385 EXPORT_SYMBOL_GPL(tracing_snapshot);
1386 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1388 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1390 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1391 int tracing_alloc_snapshot(void)
1393 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1396 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1397 void tracing_snapshot_alloc(void)
1402 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1403 void *tracing_cond_snapshot_data(struct trace_array *tr)
1407 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1408 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1412 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1413 int tracing_snapshot_cond_disable(struct trace_array *tr)
1417 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1418 #endif /* CONFIG_TRACER_SNAPSHOT */
1420 void tracer_tracing_off(struct trace_array *tr)
1422 if (tr->array_buffer.buffer)
1423 ring_buffer_record_off(tr->array_buffer.buffer);
1425 * This flag is looked at when buffers haven't been allocated
1426 * yet, or by some tracers (like irqsoff) that just want to
1427 * know if the ring buffer has been disabled, but can handle
1428 * races where it gets disabled while we still do a record.
1429 * As the check is in the fast path of the tracers, it is more
1430 * important to be fast than accurate.
1432 tr->buffer_disabled = 1;
1433 /* Make the flag seen by readers */
1438 * tracing_off - turn off tracing buffers
1440 * This function stops the tracing buffers from recording data.
1441 * It does not disable any overhead the tracers themselves may
1442 * be causing. This function simply causes all recording to
1443 * the ring buffers to fail.
1445 void tracing_off(void)
1447 tracer_tracing_off(&global_trace);
1449 EXPORT_SYMBOL_GPL(tracing_off);
1451 void disable_trace_on_warning(void)
1453 if (__disable_trace_on_warning) {
1454 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1455 "Disabling tracing due to warning\n");
1461 * tracer_tracing_is_on - show real state of ring buffer enabled
1462 * @tr : the trace array to know if ring buffer is enabled
1464 * Shows real state of the ring buffer if it is enabled or not.
1466 bool tracer_tracing_is_on(struct trace_array *tr)
1468 if (tr->array_buffer.buffer)
1469 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1470 return !tr->buffer_disabled;
1474 * tracing_is_on - show state of ring buffers enabled
1476 int tracing_is_on(void)
1478 return tracer_tracing_is_on(&global_trace);
1480 EXPORT_SYMBOL_GPL(tracing_is_on);
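/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * tracing_on()/tracing_off() are the in-kernel equivalent of writing to
 * the "tracing_on" file and are typically used to freeze the ring buffer
 * around a suspected problem. "example_" names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_capture_window(bool problem_detected)
{
	if (problem_detected)
		tracing_off();	/* freeze the buffer for post-mortem */

	if (!tracing_is_on())
		pr_info("example: trace buffer frozen for inspection\n");
}
#endif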
1482 static int __init set_buf_size(char *str)
1484 unsigned long buf_size;
1488 buf_size = memparse(str, &str);
1490 * nr_entries can not be zero and the startup
1491 * tests require some buffer space. Therefore
1492 * ensure we have at least 4096 bytes of buffer.
1494 trace_buf_size = max(4096UL, buf_size);
1497 __setup("trace_buf_size=", set_buf_size);
1499 static int __init set_tracing_thresh(char *str)
1501 unsigned long threshold;
1506 ret = kstrtoul(str, 0, &threshold);
1509 tracing_thresh = threshold * 1000;
1512 __setup("tracing_thresh=", set_tracing_thresh);
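/*
 * Illustrative example (editor's note, not part of the original source):
 * both of the above are plain boot parameters, e.g.:
 *
 *	trace_buf_size=4M	(per-CPU buffer size; memparse() suffixes
 *				 such as K, M and G are accepted)
 *	tracing_thresh=100	(latency threshold in microseconds, stored
 *				 internally in nanoseconds)
 */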
1514 unsigned long nsecs_to_usecs(unsigned long nsecs)
1516 return nsecs / 1000;
1520 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1521 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1522 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1523 * of strings in the order that the evals (enum) were defined.
1528 /* These must match the bit positions in trace_iterator_flags */
1529 static const char *trace_options[] = {
1537 int in_ns; /* is this clock in nanoseconds? */
1538 } trace_clocks[] = {
1539 { trace_clock_local, "local", 1 },
1540 { trace_clock_global, "global", 1 },
1541 { trace_clock_counter, "counter", 0 },
1542 { trace_clock_jiffies, "uptime", 0 },
1543 { trace_clock, "perf", 1 },
1544 { ktime_get_mono_fast_ns, "mono", 1 },
1545 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1546 { ktime_get_boot_fast_ns, "boot", 1 },
1547 { ktime_get_tai_fast_ns, "tai", 1 },
1551 bool trace_clock_in_ns(struct trace_array *tr)
1553 if (trace_clocks[tr->clock_id].in_ns)
1560 * trace_parser_get_init - gets the buffer for trace parser
1562 int trace_parser_get_init(struct trace_parser *parser, int size)
1564 memset(parser, 0, sizeof(*parser));
1566 parser->buffer = kmalloc(size, GFP_KERNEL);
1567 if (!parser->buffer)
1570 parser->size = size;
1575 * trace_parser_put - frees the buffer for trace parser
1577 void trace_parser_put(struct trace_parser *parser)
1579 kfree(parser->buffer);
1580 parser->buffer = NULL;
1584 * trace_get_user - reads the user input string separated by space
1585 * (matched by isspace(ch))
1587 * For each string found the 'struct trace_parser' is updated,
1588 * and the function returns.
1590 * Returns number of bytes read.
1592 * See kernel/trace/trace.h for 'struct trace_parser' details.
1594 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1595 size_t cnt, loff_t *ppos)
1602 trace_parser_clear(parser);
1604 ret = get_user(ch, ubuf++);
1612 * The parser is not finished with the last write,
1613 * continue reading the user input without skipping spaces.
1615 if (!parser->cont) {
1616 /* skip white space */
1617 while (cnt && isspace(ch)) {
1618 ret = get_user(ch, ubuf++);
1627 /* only spaces were written */
1628 if (isspace(ch) || !ch) {
1635 /* read the non-space input */
1636 while (cnt && !isspace(ch) && ch) {
1637 if (parser->idx < parser->size - 1)
1638 parser->buffer[parser->idx++] = ch;
1643 ret = get_user(ch, ubuf++);
1650 /* We either got finished input or we have to wait for another call. */
1651 if (isspace(ch) || !ch) {
1652 parser->buffer[parser->idx] = 0;
1653 parser->cont = false;
1654 } else if (parser->idx < parser->size - 1) {
1655 parser->cont = true;
1656 parser->buffer[parser->idx++] = ch;
1657 /* Make sure the parsed string always terminates with '\0'. */
1658 parser->buffer[parser->idx] = 0;
1671 /* TODO add a seq_buf_to_buffer() */
1672 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1676 if (trace_seq_used(s) <= s->seq.readpos)
1679 len = trace_seq_used(s) - s->seq.readpos;
1682 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1684 s->seq.readpos += cnt;
1688 unsigned long __read_mostly tracing_thresh;
1689 static const struct file_operations tracing_max_lat_fops;
1691 #ifdef LATENCY_FS_NOTIFY
1693 static struct workqueue_struct *fsnotify_wq;
1695 static void latency_fsnotify_workfn(struct work_struct *work)
1697 struct trace_array *tr = container_of(work, struct trace_array,
1699 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1702 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1704 struct trace_array *tr = container_of(iwork, struct trace_array,
1706 queue_work(fsnotify_wq, &tr->fsnotify_work);
1709 static void trace_create_maxlat_file(struct trace_array *tr,
1710 struct dentry *d_tracer)
1712 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1713 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1714 tr->d_max_latency = trace_create_file("tracing_max_latency",
1716 d_tracer, &tr->max_latency,
1717 &tracing_max_lat_fops);
1720 __init static int latency_fsnotify_init(void)
1722 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1723 WQ_UNBOUND | WQ_HIGHPRI, 0);
1725 pr_err("Unable to allocate tr_max_lat_wq\n");
1731 late_initcall_sync(latency_fsnotify_init);
1733 void latency_fsnotify(struct trace_array *tr)
1738 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1739 * possible that we are called from __schedule() or do_idle(), which
1740 * could cause a deadlock.
1742 irq_work_queue(&tr->fsnotify_irqwork);
1745 #elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1746 || defined(CONFIG_OSNOISE_TRACER)
1748 #define trace_create_maxlat_file(tr, d_tracer) \
1749 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1750 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1753 #define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
1756 #ifdef CONFIG_TRACER_MAX_TRACE
1758 * Copy the new maximum trace into the separate maximum-trace
1759 * structure. (this way the maximum trace is permanently saved,
1760 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1763 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1765 struct array_buffer *trace_buf = &tr->array_buffer;
1766 struct array_buffer *max_buf = &tr->max_buffer;
1767 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1768 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1771 max_buf->time_start = data->preempt_timestamp;
1773 max_data->saved_latency = tr->max_latency;
1774 max_data->critical_start = data->critical_start;
1775 max_data->critical_end = data->critical_end;
1777 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1778 max_data->pid = tsk->pid;
1780 * If tsk == current, then use current_uid(), as that does not use
1781 * RCU. The irq tracer can be called out of RCU scope.
1784 max_data->uid = current_uid();
1786 max_data->uid = task_uid(tsk);
1788 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1789 max_data->policy = tsk->policy;
1790 max_data->rt_priority = tsk->rt_priority;
1792 /* record this task's comm */
1793 tracing_record_cmdline(tsk);
1794 latency_fsnotify(tr);
1798 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1800 * @tsk: the task with the latency
1801 * @cpu: The cpu that initiated the trace.
1802 * @cond_data: User data associated with a conditional snapshot
1804 * Flip the buffers between the @tr and the max_tr and record information
1805 * about which task was the cause of this latency.
1808 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1814 WARN_ON_ONCE(!irqs_disabled());
1816 if (!tr->allocated_snapshot) {
1817 /* Only the nop tracer should hit this when disabling */
1818 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1822 arch_spin_lock(&tr->max_lock);
1824 /* Inherit the recordable setting from array_buffer */
1825 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1826 ring_buffer_record_on(tr->max_buffer.buffer);
1828 ring_buffer_record_off(tr->max_buffer.buffer);
1830 #ifdef CONFIG_TRACER_SNAPSHOT
1831 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1834 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1836 __update_max_tr(tr, tsk, cpu);
1839 arch_spin_unlock(&tr->max_lock);
1843 * update_max_tr_single - only copy one trace over, and reset the rest
1845 * @tsk: task with the latency
1846 * @cpu: the cpu of the buffer to copy.
1848 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1851 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1858 WARN_ON_ONCE(!irqs_disabled());
1859 if (!tr->allocated_snapshot) {
1860 /* Only the nop tracer should hit this when disabling */
1861 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1865 arch_spin_lock(&tr->max_lock);
1867 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1869 if (ret == -EBUSY) {
1871 * We failed to swap the buffer due to a commit taking
1872 * place on this CPU. We fail to record, but we reset
1873 * the max trace buffer (no one writes directly to it)
1874 * and flag that it failed.
1876 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1877 "Failed to swap buffers due to commit in progress\n");
1880 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1882 __update_max_tr(tr, tsk, cpu);
1883 arch_spin_unlock(&tr->max_lock);
1885 #endif /* CONFIG_TRACER_MAX_TRACE */
1887 static int wait_on_pipe(struct trace_iterator *iter, int full)
1889 /* Iterators are static, they should be filled or empty */
1890 if (trace_buffer_iter(iter, iter->cpu_file))
1893 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1897 #ifdef CONFIG_FTRACE_STARTUP_TEST
1898 static bool selftests_can_run;
1900 struct trace_selftests {
1901 struct list_head list;
1902 struct tracer *type;
1905 static LIST_HEAD(postponed_selftests);
1907 static int save_selftest(struct tracer *type)
1909 struct trace_selftests *selftest;
1911 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1915 selftest->type = type;
1916 list_add(&selftest->list, &postponed_selftests);
1920 static int run_tracer_selftest(struct tracer *type)
1922 struct trace_array *tr = &global_trace;
1923 struct tracer *saved_tracer = tr->current_trace;
1926 if (!type->selftest || tracing_selftest_disabled)
1930 * If a tracer registers early in boot up (before scheduling is
1931 * initialized and such), then do not run its selftest yet.
1932 * Instead, run it a little later in the boot process.
1934 if (!selftests_can_run)
1935 return save_selftest(type);
1937 if (!tracing_is_on()) {
1938 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1944 * Run a selftest on this tracer.
1945 * Here we reset the trace buffer, and set the current
1946 * tracer to be this tracer. The tracer can then run some
1947 * internal tracing to verify that everything is in order.
1948 * If we fail, we do not register this tracer.
1950 tracing_reset_online_cpus(&tr->array_buffer);
1952 tr->current_trace = type;
1954 #ifdef CONFIG_TRACER_MAX_TRACE
1955 if (type->use_max_tr) {
1956 /* If we expanded the buffers, make sure the max is expanded too */
1957 if (ring_buffer_expanded)
1958 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1959 RING_BUFFER_ALL_CPUS);
1960 tr->allocated_snapshot = true;
1964 /* the test is responsible for initializing and enabling */
1965 pr_info("Testing tracer %s: ", type->name);
1966 ret = type->selftest(type, tr);
1967 /* the test is responsible for resetting too */
1968 tr->current_trace = saved_tracer;
1970 printk(KERN_CONT "FAILED!\n");
1971 /* Add the warning after printing 'FAILED' */
1975 /* Only reset on passing, to avoid touching corrupted buffers */
1976 tracing_reset_online_cpus(&tr->array_buffer);
1978 #ifdef CONFIG_TRACER_MAX_TRACE
1979 if (type->use_max_tr) {
1980 tr->allocated_snapshot = false;
1982 /* Shrink the max buffer again */
1983 if (ring_buffer_expanded)
1984 ring_buffer_resize(tr->max_buffer.buffer, 1,
1985 RING_BUFFER_ALL_CPUS);
1989 printk(KERN_CONT "PASSED\n");
1993 static __init int init_trace_selftests(void)
1995 struct trace_selftests *p, *n;
1996 struct tracer *t, **last;
1999 selftests_can_run = true;
2001 mutex_lock(&trace_types_lock);
2003 if (list_empty(&postponed_selftests))
2006 pr_info("Running postponed tracer tests:\n");
2008 tracing_selftest_running = true;
2009 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2010 /* This loop can take minutes when sanitizers are enabled, so
2011 * let's make sure we allow RCU processing.
2014 ret = run_tracer_selftest(p->type);
2015 /* If the test fails, then warn and remove from available_tracers */
2017 WARN(1, "tracer: %s failed selftest, disabling\n",
2019 last = &trace_types;
2020 for (t = trace_types; t; t = t->next) {
2031 tracing_selftest_running = false;
2034 mutex_unlock(&trace_types_lock);
2038 core_initcall(init_trace_selftests);
2040 static inline int run_tracer_selftest(struct tracer *type)
2044 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2046 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2048 static void __init apply_trace_boot_options(void);
2051 * register_tracer - register a tracer with the ftrace system.
2052 * @type: the plugin for the tracer
2054 * Register a new plugin tracer.
2056 int __init register_tracer(struct tracer *type)
2062 pr_info("Tracer must have a name\n");
2066 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2067 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2071 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2072 pr_warn("Can not register tracer %s due to lockdown\n",
2077 mutex_lock(&trace_types_lock);
2079 tracing_selftest_running = true;
2081 for (t = trace_types; t; t = t->next) {
2082 if (strcmp(type->name, t->name) == 0) {
2084 pr_info("Tracer %s already registered\n",
2091 if (!type->set_flag)
2092 type->set_flag = &dummy_set_flag;
2094 /* Allocate a dummy tracer_flags */
2095 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2100 type->flags->val = 0;
2101 type->flags->opts = dummy_tracer_opt;
2103 if (!type->flags->opts)
2104 type->flags->opts = dummy_tracer_opt;
2106 /* store the tracer for __set_tracer_option */
2107 type->flags->trace = type;
2109 ret = run_tracer_selftest(type);
2113 type->next = trace_types;
2115 add_tracer_options(&global_trace, type);
2118 tracing_selftest_running = false;
2119 mutex_unlock(&trace_types_lock);
2121 if (ret || !default_bootup_tracer)
2124 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2127 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2128 /* Do we want this tracer to start on bootup? */
2129 tracing_set_tracer(&global_trace, type->name);
2130 default_bootup_tracer = NULL;
2132 apply_trace_boot_options();
2134 /* Disable other selftests, since this will break them. */
2135 disable_tracing_selftest("running a tracer");
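/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the minimal shape of a tracer plugin handed to register_tracer(). Real
 * tracers (see e.g. kernel/trace/trace_functions.c) fill in many more
 * callbacks; "example_" names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_tracer_init(struct trace_array *tr)
{
	/* Start whatever instrumentation this tracer needs. */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* Undo everything done in init(). */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif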
2141 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2143 struct trace_buffer *buffer = buf->buffer;
2148 ring_buffer_record_disable(buffer);
2150 /* Make sure all commits have finished */
2152 ring_buffer_reset_cpu(buffer, cpu);
2154 ring_buffer_record_enable(buffer);
2157 void tracing_reset_online_cpus(struct array_buffer *buf)
2159 struct trace_buffer *buffer = buf->buffer;
2164 ring_buffer_record_disable(buffer);
2166 /* Make sure all commits have finished */
2169 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2171 ring_buffer_reset_online_cpus(buffer);
2173 ring_buffer_record_enable(buffer);
2176 /* Must have trace_types_lock held */
2177 void tracing_reset_all_online_cpus(void)
2179 struct trace_array *tr;
2181 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2182 if (!tr->clear_trace)
2184 tr->clear_trace = false;
2185 tracing_reset_online_cpus(&tr->array_buffer);
2186 #ifdef CONFIG_TRACER_MAX_TRACE
2187 tracing_reset_online_cpus(&tr->max_buffer);
2193 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2194 * is the tgid last observed corresponding to pid=i.
2196 static int *tgid_map;
2198 /* The maximum valid index into tgid_map. */
2199 static size_t tgid_map_max;
2201 #define SAVED_CMDLINES_DEFAULT 128
2202 #define NO_CMDLINE_MAP UINT_MAX
2203 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2204 struct saved_cmdlines_buffer {
2205 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2206 unsigned *map_cmdline_to_pid;
2207 unsigned cmdline_num;
2209 char *saved_cmdlines;
2211 static struct saved_cmdlines_buffer *savedcmd;
2213 static inline char *get_saved_cmdlines(int idx)
2215 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2218 static inline void set_cmdline(int idx, const char *cmdline)
2220 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2223 static int allocate_cmdlines_buffer(unsigned int val,
2224 struct saved_cmdlines_buffer *s)
2226 s->map_cmdline_to_pid = kmalloc_array(val,
2227 sizeof(*s->map_cmdline_to_pid),
2229 if (!s->map_cmdline_to_pid)
2232 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2233 if (!s->saved_cmdlines) {
2234 kfree(s->map_cmdline_to_pid);
2239 s->cmdline_num = val;
2240 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2241 sizeof(s->map_pid_to_cmdline));
2242 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2243 val * sizeof(*s->map_cmdline_to_pid));
2248 static int trace_create_savedcmd(void)
2252 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2256 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2266 int is_tracing_stopped(void)
2268 return global_trace.stop_count;
2272 * tracing_start - quick start of the tracer
2274 * If tracing is enabled but was stopped by tracing_stop,
2275 * this will start the tracer back up.
2277 void tracing_start(void)
2279 struct trace_buffer *buffer;
2280 unsigned long flags;
2282 if (tracing_disabled)
2285 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2286 if (--global_trace.stop_count) {
2287 if (global_trace.stop_count < 0) {
2288 /* Someone screwed up their debugging */
2290 global_trace.stop_count = 0;
2295 /* Prevent the buffers from switching */
2296 arch_spin_lock(&global_trace.max_lock);
2298 buffer = global_trace.array_buffer.buffer;
2300 ring_buffer_record_enable(buffer);
2302 #ifdef CONFIG_TRACER_MAX_TRACE
2303 buffer = global_trace.max_buffer.buffer;
2305 ring_buffer_record_enable(buffer);
2308 arch_spin_unlock(&global_trace.max_lock);
2311 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2314 static void tracing_start_tr(struct trace_array *tr)
2316 struct trace_buffer *buffer;
2317 unsigned long flags;
2319 if (tracing_disabled)
2322 /* If global, we need to also start the max tracer */
2323 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2324 return tracing_start();
2326 raw_spin_lock_irqsave(&tr->start_lock, flags);
2328 if (--tr->stop_count) {
2329 if (tr->stop_count < 0) {
2330 /* Someone screwed up their debugging */
2337 buffer = tr->array_buffer.buffer;
2339 ring_buffer_record_enable(buffer);
2342 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2346 * tracing_stop - quick stop of the tracer
2348 * Light weight way to stop tracing. Use in conjunction with
2351 void tracing_stop(void)
2353 struct trace_buffer *buffer;
2354 unsigned long flags;
2356 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2357 if (global_trace.stop_count++)
2360 /* Prevent the buffers from switching */
2361 arch_spin_lock(&global_trace.max_lock);
2363 buffer = global_trace.array_buffer.buffer;
2365 ring_buffer_record_disable(buffer);
2367 #ifdef CONFIG_TRACER_MAX_TRACE
2368 buffer = global_trace.max_buffer.buffer;
2370 ring_buffer_record_disable(buffer);
2373 arch_spin_unlock(&global_trace.max_lock);
2376 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2379 static void tracing_stop_tr(struct trace_array *tr)
2381 struct trace_buffer *buffer;
2382 unsigned long flags;
2384 /* If global, we need to also stop the max tracer */
2385 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2386 return tracing_stop();
2388 raw_spin_lock_irqsave(&tr->start_lock, flags);
2389 if (tr->stop_count++)
2392 buffer = tr->array_buffer.buffer;
2394 ring_buffer_record_disable(buffer);
2397 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2400 static int trace_save_cmdline(struct task_struct *tsk)
2404 /* treat recording of idle task as a success */
2408 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2411 * It's not the end of the world if we don't get
2412 * the lock, but we also don't want to spin
2413 * nor do we want to disable interrupts,
2414 * so if we miss here, then better luck next time.
2416 if (!arch_spin_trylock(&trace_cmdline_lock))
2419 idx = savedcmd->map_pid_to_cmdline[tpid];
2420 if (idx == NO_CMDLINE_MAP) {
2421 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2423 savedcmd->map_pid_to_cmdline[tpid] = idx;
2424 savedcmd->cmdline_idx = idx;
2427 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2428 set_cmdline(idx, tsk->comm);
2430 arch_spin_unlock(&trace_cmdline_lock);
2435 static void __trace_find_cmdline(int pid, char comm[])
2441 strcpy(comm, "<idle>");
2445 if (WARN_ON_ONCE(pid < 0)) {
2446 strcpy(comm, "<XXX>");
2450 tpid = pid & (PID_MAX_DEFAULT - 1);
2451 map = savedcmd->map_pid_to_cmdline[tpid];
2452 if (map != NO_CMDLINE_MAP) {
2453 tpid = savedcmd->map_cmdline_to_pid[map];
2455 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2459 strcpy(comm, "<...>");
2462 void trace_find_cmdline(int pid, char comm[])
2465 arch_spin_lock(&trace_cmdline_lock);
2467 __trace_find_cmdline(pid, comm);
2469 arch_spin_unlock(&trace_cmdline_lock);
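/*
 * Illustrative sketch (example only, not compiled): the saved-cmdline cache
 * is a two-level map. A PID is folded into a fixed-size slot with
 * "pid & (PID_MAX_DEFAULT - 1)", that slot holds an index into a small
 * rotating array of TASK_COMM_LEN strings, and map_cmdline_to_pid[] lets a
 * reader detect that the slot has since been reused by another PID.
 */
#if 0	/* example only */
static const char *example_lookup_comm(int pid)
{
	unsigned int tpid = pid & (PID_MAX_DEFAULT - 1);	/* fold the pid */
	unsigned int map  = savedcmd->map_pid_to_cmdline[tpid];

	if (map == NO_CMDLINE_MAP)
		return "<...>";		/* never recorded */
	if (savedcmd->map_cmdline_to_pid[map] != pid)
		return "<...>";		/* slot reused by another task */
	return get_saved_cmdlines(map);	/* &saved_cmdlines[map * TASK_COMM_LEN] */
}
#endif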
2473 static int *trace_find_tgid_ptr(int pid)
2476 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2477 * if we observe a non-NULL tgid_map then we also observe the correct
2480 int *map = smp_load_acquire(&tgid_map);
2482 if (unlikely(!map || pid > tgid_map_max))
2488 int trace_find_tgid(int pid)
2490 int *ptr = trace_find_tgid_ptr(pid);
2492 return ptr ? *ptr : 0;
2495 static int trace_save_tgid(struct task_struct *tsk)
2499 /* treat recording of idle task as a success */
2503 ptr = trace_find_tgid_ptr(tsk->pid);
2511 static bool tracing_record_taskinfo_skip(int flags)
2513 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2515 if (!__this_cpu_read(trace_taskinfo_save))
2521 * tracing_record_taskinfo - record the task info of a task
2523 * @task: task to record
2524 * @flags: TRACE_RECORD_CMDLINE for recording comm
2525 * TRACE_RECORD_TGID for recording tgid
2527 void tracing_record_taskinfo(struct task_struct *task, int flags)
2531 if (tracing_record_taskinfo_skip(flags))
2535 * Record as much task information as possible. If some fail, continue
2536 * to try to record the others.
2538 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2539 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2541 /* If recording any information failed, retry again soon. */
2545 __this_cpu_write(trace_taskinfo_save, false);
2549 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2551 * @prev: previous task during sched_switch
2552 * @next: next task during sched_switch
2553 * @flags: TRACE_RECORD_CMDLINE for recording comm
2554 * TRACE_RECORD_TGID for recording tgid
2556 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2557 struct task_struct *next, int flags)
2561 if (tracing_record_taskinfo_skip(flags))
2565 * Record as much task information as possible. If some fail, continue
2566 * to try to record the others.
2568 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2569 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2570 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2571 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2573 /* If recording any information failed, retry again soon. */
2577 __this_cpu_write(trace_taskinfo_save, false);
2580 /* Helpers to record a specific task information */
2581 void tracing_record_cmdline(struct task_struct *task)
2583 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2586 void tracing_record_tgid(struct task_struct *task)
2588 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
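/*
 * Illustrative sketch (example only): tracers whose events carry only a PID
 * typically call these helpers from a sched_switch style hook so that the
 * comm/tgid can be resolved later at read time. The probe signature below
 * is made up for the example.
 */
#if 0	/* example only */
static void example_sched_switch_probe(struct task_struct *prev,
				       struct task_struct *next)
{
	tracing_record_taskinfo_sched_switch(prev, next,
					     TRACE_RECORD_CMDLINE |
					     TRACE_RECORD_TGID);
}
#endif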
2592 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2593 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2594 * simplifies those functions and keeps them in sync.
2596 enum print_line_t trace_handle_return(struct trace_seq *s)
2598 return trace_seq_has_overflowed(s) ?
2599 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2601 EXPORT_SYMBOL_GPL(trace_handle_return);
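/*
 * Illustrative sketch (example only): a typical trace_event output callback
 * writes into the trace_seq and lets trace_handle_return() translate any
 * overflow into TRACE_TYPE_PARTIAL_LINE.
 */
#if 0	/* example only */
static enum print_line_t example_trace_output(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example: cpu=%d ts=%llu\n",
			 iter->cpu, (unsigned long long)iter->ts);
	return trace_handle_return(s);
}
#endif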
2603 static unsigned short migration_disable_value(void)
2605 #if defined(CONFIG_SMP)
2606 return current->migration_disabled;
2612 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2614 unsigned int trace_flags = irqs_status;
2617 pc = preempt_count();
2620 trace_flags |= TRACE_FLAG_NMI;
2621 if (pc & HARDIRQ_MASK)
2622 trace_flags |= TRACE_FLAG_HARDIRQ;
2623 if (in_serving_softirq())
2624 trace_flags |= TRACE_FLAG_SOFTIRQ;
2625 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2626 trace_flags |= TRACE_FLAG_BH_OFF;
2628 if (tif_need_resched())
2629 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2630 if (test_preempt_need_resched())
2631 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2632 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2633 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
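/*
 * Illustrative sketch (example only): the packed trace_ctx word built above
 * carries the preempt depth in bits 0-3, the migrate-disable depth in bits
 * 4-7, and the TRACE_FLAG_* bits shifted up by 16.
 */
#if 0	/* example only */
static void example_decode_trace_ctx(unsigned int trace_ctx)
{
	unsigned int preempt_depth = trace_ctx & 0xf;
	unsigned int migrate_depth = (trace_ctx >> 4) & 0xf;
	unsigned int flags = trace_ctx >> 16;

	if (flags & TRACE_FLAG_HARDIRQ)
		pr_debug("hardirq context, preempt=%u migrate=%u\n",
			 preempt_depth, migrate_depth);
}
#endif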
2636 struct ring_buffer_event *
2637 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2640 unsigned int trace_ctx)
2642 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2645 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2646 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2647 static int trace_buffered_event_ref;
2650 * trace_buffered_event_enable - enable buffering events
2652 * When events are being filtered, it is quicker to use a temporary
2653 * buffer to write the event data into if there's a likely chance
2654 * that it will not be committed. The discard of the ring buffer
2655 * is not as fast as committing, and is much slower than copying
2658 * When an event is to be filtered, allocate per cpu buffers to
2659 * write the event data into, and if the event is filtered and discarded
2660 * it is simply dropped, otherwise, the entire data is to be committed
2663 void trace_buffered_event_enable(void)
2665 struct ring_buffer_event *event;
2669 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2671 if (trace_buffered_event_ref++)
2674 for_each_tracing_cpu(cpu) {
2675 page = alloc_pages_node(cpu_to_node(cpu),
2676 GFP_KERNEL | __GFP_NORETRY, 0);
2680 event = page_address(page);
2681 memset(event, 0, sizeof(*event));
2683 per_cpu(trace_buffered_event, cpu) = event;
2686 if (cpu == smp_processor_id() &&
2687 __this_cpu_read(trace_buffered_event) !=
2688 per_cpu(trace_buffered_event, cpu))
2695 trace_buffered_event_disable();
2698 static void enable_trace_buffered_event(void *data)
2700 /* Probably not needed, but do it anyway */
2702 this_cpu_dec(trace_buffered_event_cnt);
2705 static void disable_trace_buffered_event(void *data)
2707 this_cpu_inc(trace_buffered_event_cnt);
2711 * trace_buffered_event_disable - disable buffering events
2713 * When a filter is removed, it is faster to not use the buffered
2714 * events, and to commit directly into the ring buffer. Free up
2715 * the temp buffers when there are no more users. This requires
2716 * special synchronization with current events.
2718 void trace_buffered_event_disable(void)
2722 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2724 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2727 if (--trace_buffered_event_ref)
2731 /* For each CPU, set the buffer as used. */
2732 smp_call_function_many(tracing_buffer_mask,
2733 disable_trace_buffered_event, NULL, 1);
2736 /* Wait for all current users to finish */
2739 for_each_tracing_cpu(cpu) {
2740 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2741 per_cpu(trace_buffered_event, cpu) = NULL;
2744 * Make sure trace_buffered_event is NULL before clearing
2745 * trace_buffered_event_cnt.
2750 /* Do the work on each cpu */
2751 smp_call_function_many(tracing_buffer_mask,
2752 enable_trace_buffered_event, NULL, 1);
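/*
 * Illustrative sketch (example only): the enable/disable pair above is
 * reference counted under event_mutex, so filter setup code simply brackets
 * the lifetime of a filter with it rather than tracking the per-cpu pages
 * itself.
 */
#if 0	/* example only */
static void example_filter_lifetime(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* first user allocates per-cpu pages */
	/* ... install the event filter ... */
	mutex_unlock(&event_mutex);

	mutex_lock(&event_mutex);
	/* ... remove the event filter ... */
	trace_buffered_event_disable();	/* last user frees the pages */
	mutex_unlock(&event_mutex);
}
#endif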
2756 static struct trace_buffer *temp_buffer;
2758 struct ring_buffer_event *
2759 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2760 struct trace_event_file *trace_file,
2761 int type, unsigned long len,
2762 unsigned int trace_ctx)
2764 struct ring_buffer_event *entry;
2765 struct trace_array *tr = trace_file->tr;
2768 *current_rb = tr->array_buffer.buffer;
2770 if (!tr->no_filter_buffering_ref &&
2771 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2772 preempt_disable_notrace();
2774 * Filtering is on, so try to use the per cpu buffer first.
2775 * This buffer will simulate a ring_buffer_event,
2776 * where the type_len is zero and the array[0] will
2777 * hold the full length.
2778 * (see include/linux/ring_buffer.h for details on
2779 * how the ring_buffer_event is structured).
2781 * Using a temp buffer during filtering and copying it
2782 * on a matched filter is quicker than writing directly
2783 * into the ring buffer and then discarding it when
2784 * it doesn't match. That is because the discard
2785 * requires several atomic operations to get right.
2786 * Copying on match and doing nothing on a failed match
2787 * is still quicker than no copy on match, but having
2788 * to discard out of the ring buffer on a failed match.
2790 if ((entry = __this_cpu_read(trace_buffered_event))) {
2791 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2793 val = this_cpu_inc_return(trace_buffered_event_cnt);
2796 * Preemption is disabled, but interrupts and NMIs
2797 * can still come in now. If that happens after
2798 * the above increment, then it will have to go
2799 * back to the old method of allocating the event
2800 * on the ring buffer, and if the filter fails, it
2801 * will have to call ring_buffer_discard_commit()
2804 * Need to also check the unlikely case that the
2805 * length is bigger than the temp buffer size.
2806 * If that happens, then the reserve is pretty much
2807 * guaranteed to fail, as the ring buffer currently
2808 * only allows events less than a page. But that may
2809 * change in the future, so let the ring buffer reserve
2810 * handle the failure in that case.
2812 if (val == 1 && likely(len <= max_len)) {
2813 trace_event_setup(entry, type, trace_ctx);
2814 entry->array[0] = len;
2815 /* Return with preemption disabled */
2818 this_cpu_dec(trace_buffered_event_cnt);
2820 /* __trace_buffer_lock_reserve() disables preemption */
2821 preempt_enable_notrace();
2824 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2827 * If tracing is off, but we have triggers enabled,
2828 * we still need to look at the event data. Use the temp_buffer
2829 * to store the trace event for the trigger to use. It's recursion
2830 * safe and will not be recorded anywhere.
2832 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2833 *current_rb = temp_buffer;
2834 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2839 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2841 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2842 static DEFINE_MUTEX(tracepoint_printk_mutex);
2844 static void output_printk(struct trace_event_buffer *fbuffer)
2846 struct trace_event_call *event_call;
2847 struct trace_event_file *file;
2848 struct trace_event *event;
2849 unsigned long flags;
2850 struct trace_iterator *iter = tracepoint_print_iter;
2852 /* We should never get here if iter is NULL */
2853 if (WARN_ON_ONCE(!iter))
2856 event_call = fbuffer->trace_file->event_call;
2857 if (!event_call || !event_call->event.funcs ||
2858 !event_call->event.funcs->trace)
2861 file = fbuffer->trace_file;
2862 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2863 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2864 !filter_match_preds(file->filter, fbuffer->entry)))
2867 event = &fbuffer->trace_file->event_call->event;
2869 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2870 trace_seq_init(&iter->seq);
2871 iter->ent = fbuffer->entry;
2872 event_call->event.funcs->trace(iter, 0, event);
2873 trace_seq_putc(&iter->seq, 0);
2874 printk("%s", iter->seq.buffer);
2876 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2879 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2880 void *buffer, size_t *lenp,
2883 int save_tracepoint_printk;
2886 mutex_lock(&tracepoint_printk_mutex);
2887 save_tracepoint_printk = tracepoint_printk;
2889 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2892 * This will force exiting early, as tracepoint_printk
2893 * is always zero when tracepoint_print_iter is not allocated
2895 if (!tracepoint_print_iter)
2896 tracepoint_printk = 0;
2898 if (save_tracepoint_printk == tracepoint_printk)
2901 if (tracepoint_printk)
2902 static_key_enable(&tracepoint_printk_key.key);
2904 static_key_disable(&tracepoint_printk_key.key);
2907 mutex_unlock(&tracepoint_printk_mutex);
2912 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2914 enum event_trigger_type tt = ETT_NONE;
2915 struct trace_event_file *file = fbuffer->trace_file;
2917 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2918 fbuffer->entry, &tt))
2921 if (static_key_false(&tracepoint_printk_key.key))
2922 output_printk(fbuffer);
2924 if (static_branch_unlikely(&trace_event_exports_enabled))
2925 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2927 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2928 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2932 event_triggers_post_call(file, tt);
2935 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
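/*
 * Illustrative sketch (example only): the reserve/commit pattern used by the
 * generated TRACE_EVENT() probes. The entry layout and field are made up;
 * only the calling sequence reflects the functions above.
 */
#if 0	/* example only */
static void example_event_probe(struct trace_event_file *trace_file, int value)
{
	struct trace_event_buffer fbuffer;
	struct example_entry {
		struct trace_entry	ent;
		int			value;
	} *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	trace_event_buffer_commit(&fbuffer);	/* triggers, tp_printk, exports, commit */
}
#endif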
2940 * trace_buffer_unlock_commit_regs()
2941 * trace_event_buffer_commit()
2942 * trace_event_raw_event_xxx()
2944 # define STACK_SKIP 3
2946 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2947 struct trace_buffer *buffer,
2948 struct ring_buffer_event *event,
2949 unsigned int trace_ctx,
2950 struct pt_regs *regs)
2952 __buffer_unlock_commit(buffer, event);
2955 * If regs is not set, then skip the necessary functions.
2956 * Note, we can still get here via blktrace, wakeup tracer
2957 * and mmiotrace, but that's ok if they lose a function or
2958 * two. They are not that meaningful.
2960 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2961 ftrace_trace_userstack(tr, buffer, trace_ctx);
2965 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2968 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2969 struct ring_buffer_event *event)
2971 __buffer_unlock_commit(buffer, event);
2975 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2976 parent_ip, unsigned int trace_ctx)
2978 struct trace_event_call *call = &event_function;
2979 struct trace_buffer *buffer = tr->array_buffer.buffer;
2980 struct ring_buffer_event *event;
2981 struct ftrace_entry *entry;
2983 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2987 entry = ring_buffer_event_data(event);
2989 entry->parent_ip = parent_ip;
2991 if (!call_filter_check_discard(call, entry, buffer, event)) {
2992 if (static_branch_unlikely(&trace_function_exports_enabled))
2993 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2994 __buffer_unlock_commit(buffer, event);
2998 #ifdef CONFIG_STACKTRACE
3000 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3001 #define FTRACE_KSTACK_NESTING 4
3003 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3005 struct ftrace_stack {
3006 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3010 struct ftrace_stacks {
3011 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3014 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3015 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3017 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3018 unsigned int trace_ctx,
3019 int skip, struct pt_regs *regs)
3021 struct trace_event_call *call = &event_kernel_stack;
3022 struct ring_buffer_event *event;
3023 unsigned int size, nr_entries;
3024 struct ftrace_stack *fstack;
3025 struct stack_entry *entry;
3029 * Add one, for this function and the call to stack_trace_save().
3030 * If regs is set, then these functions will not be in the way.
3032 #ifndef CONFIG_UNWINDER_ORC
3037 preempt_disable_notrace();
3039 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3041 /* This should never happen. If it does, yell once and skip */
3042 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3046 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3047 * interrupt will either see the value pre increment or post
3048 * increment. If the interrupt happens pre increment it will have
3049 * restored the counter when it returns. We just need a barrier to
3050 * keep gcc from moving things around.
3054 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3055 size = ARRAY_SIZE(fstack->calls);
3058 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3061 nr_entries = stack_trace_save(fstack->calls, size, skip);
3064 size = nr_entries * sizeof(unsigned long);
3065 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3066 (sizeof(*entry) - sizeof(entry->caller)) + size,
3070 entry = ring_buffer_event_data(event);
3072 memcpy(&entry->caller, fstack->calls, size);
3073 entry->size = nr_entries;
3075 if (!call_filter_check_discard(call, entry, buffer, event))
3076 __buffer_unlock_commit(buffer, event);
3079 /* Again, don't let gcc optimize things here */
3081 __this_cpu_dec(ftrace_stack_reserve);
3082 preempt_enable_notrace();
3086 static inline void ftrace_trace_stack(struct trace_array *tr,
3087 struct trace_buffer *buffer,
3088 unsigned int trace_ctx,
3089 int skip, struct pt_regs *regs)
3091 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3094 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3097 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3100 struct trace_buffer *buffer = tr->array_buffer.buffer;
3102 if (rcu_is_watching()) {
3103 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3108 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3109 * but if the above rcu_is_watching() failed, then the NMI
3110 * triggered someplace critical, and rcu_irq_enter() should
3111 * not be called from NMI.
3113 if (unlikely(in_nmi()))
3116 rcu_irq_enter_irqson();
3117 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3118 rcu_irq_exit_irqson();
3122 * trace_dump_stack - record a stack back trace in the trace buffer
3123 * @skip: Number of functions to skip (helper handlers)
3125 void trace_dump_stack(int skip)
3127 if (tracing_disabled || tracing_selftest_running)
3130 #ifndef CONFIG_UNWINDER_ORC
3131 /* Skip 1 to skip this function. */
3134 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3135 tracing_gen_ctx(), skip, NULL);
3137 EXPORT_SYMBOL_GPL(trace_dump_stack);
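/*
 * Illustrative sketch (example only): callers can drop a kernel backtrace
 * into the top-level trace buffer, which is often less disruptive than
 * dumping the stack to the console. The helper name is made up.
 */
#if 0	/* example only */
static void example_mark_slow_path(void)
{
	trace_printk("slow path hit\n");
	trace_dump_stack(0);	/* 0: don't skip any additional frames */
}
#endif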
3139 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3140 static DEFINE_PER_CPU(int, user_stack_count);
3143 ftrace_trace_userstack(struct trace_array *tr,
3144 struct trace_buffer *buffer, unsigned int trace_ctx)
3146 struct trace_event_call *call = &event_user_stack;
3147 struct ring_buffer_event *event;
3148 struct userstack_entry *entry;
3150 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3154 * NMIs cannot handle page faults, even with fixups.
3155 * Saving the user stack can (and often does) fault.
3157 if (unlikely(in_nmi()))
3161 * prevent recursion, since the user stack tracing may
3162 * trigger other kernel events.
3165 if (__this_cpu_read(user_stack_count))
3168 __this_cpu_inc(user_stack_count);
3170 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3171 sizeof(*entry), trace_ctx);
3173 goto out_drop_count;
3174 entry = ring_buffer_event_data(event);
3176 entry->tgid = current->tgid;
3177 memset(&entry->caller, 0, sizeof(entry->caller));
3179 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3180 if (!call_filter_check_discard(call, entry, buffer, event))
3181 __buffer_unlock_commit(buffer, event);
3184 __this_cpu_dec(user_stack_count);
3188 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3189 static void ftrace_trace_userstack(struct trace_array *tr,
3190 struct trace_buffer *buffer,
3191 unsigned int trace_ctx)
3194 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3196 #endif /* CONFIG_STACKTRACE */
3199 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3200 unsigned long long delta)
3202 entry->bottom_delta_ts = delta & U32_MAX;
3203 entry->top_delta_ts = (delta >> 32);
3206 void trace_last_func_repeats(struct trace_array *tr,
3207 struct trace_func_repeats *last_info,
3208 unsigned int trace_ctx)
3210 struct trace_buffer *buffer = tr->array_buffer.buffer;
3211 struct func_repeats_entry *entry;
3212 struct ring_buffer_event *event;
3215 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3216 sizeof(*entry), trace_ctx);
3220 delta = ring_buffer_event_time_stamp(buffer, event) -
3221 last_info->ts_last_call;
3223 entry = ring_buffer_event_data(event);
3224 entry->ip = last_info->ip;
3225 entry->parent_ip = last_info->parent_ip;
3226 entry->count = last_info->count;
3227 func_repeats_set_delta_ts(entry, delta);
3229 __buffer_unlock_commit(buffer, event);
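/*
 * Illustrative sketch (example only): the 64-bit timestamp delta stored by
 * func_repeats_set_delta_ts() is split into a 32-bit bottom half and a top
 * half, and is reassembled on the output side as shown here.
 */
#if 0	/* example only */
static u64 example_read_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}
#endif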
3232 /* created for use with alloc_percpu */
3233 struct trace_buffer_struct {
3235 char buffer[4][TRACE_BUF_SIZE];
3238 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3241 * This allows for lockless recording. If we're nested too deeply, then
3242 * this returns NULL.
3244 static char *get_trace_buf(void)
3246 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3248 if (!trace_percpu_buffer || buffer->nesting >= 4)
3253 /* Interrupts must see nesting incremented before we use the buffer */
3255 return &buffer->buffer[buffer->nesting - 1][0];
3258 static void put_trace_buf(void)
3260 /* Don't let the decrement of nesting leak before this */
3262 this_cpu_dec(trace_percpu_buffer->nesting);
3265 static int alloc_percpu_trace_buffer(void)
3267 struct trace_buffer_struct __percpu *buffers;
3269 if (trace_percpu_buffer)
3272 buffers = alloc_percpu(struct trace_buffer_struct);
3273 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3276 trace_percpu_buffer = buffers;
3280 static int buffers_allocated;
3282 void trace_printk_init_buffers(void)
3284 if (buffers_allocated)
3287 if (alloc_percpu_trace_buffer())
3290 /* trace_printk() is for debug use only. Don't use it in production. */
3293 pr_warn("**********************************************************\n");
3294 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3296 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3298 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3299 pr_warn("** unsafe for production use. **\n");
3301 pr_warn("** If you see this message and you are not debugging **\n");
3302 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3304 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3305 pr_warn("**********************************************************\n");
3307 /* Expand the buffers to set size */
3308 tracing_update_buffers();
3310 buffers_allocated = 1;
3313 * trace_printk_init_buffers() can be called by modules.
3314 * If that happens, then we need to start cmdline recording
3315 * directly here. If the global_trace.buffer is already
3316 * allocated here, then this was called by module code.
3318 if (global_trace.array_buffer.buffer)
3319 tracing_start_cmdline_record();
3321 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3323 void trace_printk_start_comm(void)
3325 /* Start tracing comms if trace printk is set */
3326 if (!buffers_allocated)
3328 tracing_start_cmdline_record();
3331 static void trace_printk_start_stop_comm(int enabled)
3333 if (!buffers_allocated)
3337 tracing_start_cmdline_record();
3339 tracing_stop_cmdline_record();
3343 * trace_vbprintk - write binary msg to tracing buffer
3344 * @ip: The address of the caller
3345 * @fmt: The string format to write to the buffer
3346 * @args: Arguments for @fmt
3348 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3350 struct trace_event_call *call = &event_bprint;
3351 struct ring_buffer_event *event;
3352 struct trace_buffer *buffer;
3353 struct trace_array *tr = &global_trace;
3354 struct bprint_entry *entry;
3355 unsigned int trace_ctx;
3359 if (unlikely(tracing_selftest_running || tracing_disabled))
3362 /* Don't pollute graph traces with trace_vprintk internals */
3363 pause_graph_tracing();
3365 trace_ctx = tracing_gen_ctx();
3366 preempt_disable_notrace();
3368 tbuffer = get_trace_buf();
3374 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3376 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3379 size = sizeof(*entry) + sizeof(u32) * len;
3380 buffer = tr->array_buffer.buffer;
3381 ring_buffer_nest_start(buffer);
3382 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3386 entry = ring_buffer_event_data(event);
3390 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3391 if (!call_filter_check_discard(call, entry, buffer, event)) {
3392 __buffer_unlock_commit(buffer, event);
3393 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3397 ring_buffer_nest_end(buffer);
3402 preempt_enable_notrace();
3403 unpause_graph_tracing();
3407 EXPORT_SYMBOL_GPL(trace_vbprintk);
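/*
 * Illustrative sketch (example only): a trace_printk() call with a constant
 * format typically ends up in trace_vbprintk() above, which stores only the
 * format pointer and the binary arguments in the ring buffer; the string is
 * rendered at read time.
 */
#if 0	/* example only */
static void example_debug_point(int cpu, u64 latency_ns)
{
	trace_printk("cpu %d saw latency %llu ns\n",
		     cpu, (unsigned long long)latency_ns);
}
#endif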
3411 __trace_array_vprintk(struct trace_buffer *buffer,
3412 unsigned long ip, const char *fmt, va_list args)
3414 struct trace_event_call *call = &event_print;
3415 struct ring_buffer_event *event;
3417 struct print_entry *entry;
3418 unsigned int trace_ctx;
3421 if (tracing_disabled || tracing_selftest_running)
3424 /* Don't pollute graph traces with trace_vprintk internals */
3425 pause_graph_tracing();
3427 trace_ctx = tracing_gen_ctx();
3428 preempt_disable_notrace();
3431 tbuffer = get_trace_buf();
3437 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3439 size = sizeof(*entry) + len + 1;
3440 ring_buffer_nest_start(buffer);
3441 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3445 entry = ring_buffer_event_data(event);
3448 memcpy(&entry->buf, tbuffer, len + 1);
3449 if (!call_filter_check_discard(call, entry, buffer, event)) {
3450 __buffer_unlock_commit(buffer, event);
3451 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3455 ring_buffer_nest_end(buffer);
3459 preempt_enable_notrace();
3460 unpause_graph_tracing();
3466 int trace_array_vprintk(struct trace_array *tr,
3467 unsigned long ip, const char *fmt, va_list args)
3469 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3473 * trace_array_printk - Print a message to a specific instance
3474 * @tr: The instance trace_array descriptor
3475 * @ip: The instruction pointer that this is called from.
3476 * @fmt: The format to print (printf format)
3478 * If a subsystem sets up its own instance, they have the right to
3479 * printk strings into their tracing instance buffer using this
3480 * function. Note, this function will not write into the top level
3481 * buffer (use trace_printk() for that), as the top level
3482 * buffer should only hold events that can be individually disabled.
3483 * trace_printk() is only used for debugging a kernel, and should not
3484 * be ever incorporated in normal use.
3486 * trace_array_printk() can be used, as it will not add noise to the
3487 * top level tracing buffer.
3489 * Note, trace_array_init_printk() must be called on @tr before this
3493 int trace_array_printk(struct trace_array *tr,
3494 unsigned long ip, const char *fmt, ...)
3502 /* This is only allowed for created instances */
3503 if (tr == &global_trace)
3506 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3510 ret = trace_array_vprintk(tr, ip, fmt, ap);
3514 EXPORT_SYMBOL_GPL(trace_array_printk);
3517 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3518 * @tr: The trace array to initialize the buffers for
3520 * As trace_array_printk() only writes into instances, such calls are OK to
3521 * have in the kernel (unlike trace_printk()). This needs to be called
3522 * before trace_array_printk() can be used on a trace_array.
3524 int trace_array_init_printk(struct trace_array *tr)
3529 /* This is only allowed for created instances */
3530 if (tr == &global_trace)
3533 return alloc_percpu_trace_buffer();
3535 EXPORT_SYMBOL_GPL(trace_array_init_printk);
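/*
 * Illustrative sketch (example only): the intended calling sequence for a
 * subsystem writing into its own instance. trace_array_get_by_name() is
 * assumed to be available to the caller for creating/looking up the
 * instance; the instance name is made up.
 */
#if 0	/* example only */
static int example_instance_printk(void)
{
	struct trace_array *tr = trace_array_get_by_name("example_subsys");

	if (!tr)
		return -ENOMEM;

	if (!trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "subsystem initialized\n");

	trace_array_put(tr);
	return 0;
}
#endif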
3538 int trace_array_printk_buf(struct trace_buffer *buffer,
3539 unsigned long ip, const char *fmt, ...)
3544 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3548 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3554 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3556 return trace_array_vprintk(&global_trace, ip, fmt, args);
3558 EXPORT_SYMBOL_GPL(trace_vprintk);
3560 static void trace_iterator_increment(struct trace_iterator *iter)
3562 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3566 ring_buffer_iter_advance(buf_iter);
3569 static struct trace_entry *
3570 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3571 unsigned long *lost_events)
3573 struct ring_buffer_event *event;
3574 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3577 event = ring_buffer_iter_peek(buf_iter, ts);
3579 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3580 (unsigned long)-1 : 0;
3582 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3587 iter->ent_size = ring_buffer_event_length(event);
3588 return ring_buffer_event_data(event);
3594 static struct trace_entry *
3595 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3596 unsigned long *missing_events, u64 *ent_ts)
3598 struct trace_buffer *buffer = iter->array_buffer->buffer;
3599 struct trace_entry *ent, *next = NULL;
3600 unsigned long lost_events = 0, next_lost = 0;
3601 int cpu_file = iter->cpu_file;
3602 u64 next_ts = 0, ts;
3608 * If we are in a per_cpu trace file, don't bother iterating over
3609 * all CPUs; just peek at that one CPU directly.
3611 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3612 if (ring_buffer_empty_cpu(buffer, cpu_file))
3614 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3616 *ent_cpu = cpu_file;
3621 for_each_tracing_cpu(cpu) {
3623 if (ring_buffer_empty_cpu(buffer, cpu))
3626 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3629 * Pick the entry with the smallest timestamp:
3631 if (ent && (!next || ts < next_ts)) {
3635 next_lost = lost_events;
3636 next_size = iter->ent_size;
3640 iter->ent_size = next_size;
3643 *ent_cpu = next_cpu;
3649 *missing_events = next_lost;
3654 #define STATIC_FMT_BUF_SIZE 128
3655 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3657 static char *trace_iter_expand_format(struct trace_iterator *iter)
3662 * iter->tr is NULL when used with tp_printk, which means
3663 * this can be called where it is not safe to call krealloc().
3665 if (!iter->tr || iter->fmt == static_fmt_buf)
3668 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3671 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3678 /* Returns true if the string is safe to dereference from an event */
3679 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3682 unsigned long addr = (unsigned long)str;
3683 struct trace_event *trace_event;
3684 struct trace_event_call *event;
3686 /* Ignore strings with no length */
3690 /* OK if part of the event data */
3691 if ((addr >= (unsigned long)iter->ent) &&
3692 (addr < (unsigned long)iter->ent + iter->ent_size))
3695 /* OK if part of the temp seq buffer */
3696 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3697 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3700 /* Core rodata can not be freed */
3701 if (is_kernel_rodata(addr))
3704 if (trace_is_tracepoint_string(str))
3708 * Now this could be a module event, referencing core module
3709 * data, which is OK.
3714 trace_event = ftrace_find_event(iter->ent->type);
3718 event = container_of(trace_event, struct trace_event_call, event);
3719 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3722 /* Would rather have rodata, but this will suffice */
3723 if (within_module_core(addr, event->module))
3729 static const char *show_buffer(struct trace_seq *s)
3731 struct seq_buf *seq = &s->seq;
3733 seq_buf_terminate(seq);
3738 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3740 static int test_can_verify_check(const char *fmt, ...)
3747 * The verifier depends on vsnprintf() modifying the va_list
3748 * passed to it, where it is passed by reference. Some architectures
3749 * (like x86_32) pass it by value, which means that vsnprintf()
3750 * does not modify the va_list passed to it, and the verifier
3751 * would then need to be able to understand all the values that
3752 * vsnprintf can use. If it is passed by value, then the verifier
3756 vsnprintf(buf, 16, "%d", ap);
3757 ret = va_arg(ap, int);
3763 static void test_can_verify(void)
3765 if (!test_can_verify_check("%d %d", 0, 1)) {
3766 pr_info("trace event string verifier disabled\n");
3767 static_branch_inc(&trace_no_verify);
3772 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3773 * @iter: The iterator that holds the seq buffer and the event being printed
3774 * @fmt: The format used to print the event
3775 * @ap: The va_list holding the data to print from @fmt.
3777 * This writes the data into the @iter->seq buffer using the data from
3778 * @fmt and @ap. If the format has a %s, then the source of the string
3779 * is examined to make sure it is safe to print, otherwise it will
3780 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3783 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3786 const char *p = fmt;
3790 if (WARN_ON_ONCE(!fmt))
3793 if (static_branch_unlikely(&trace_no_verify))
3796 /* Don't bother checking when doing a ftrace_dump() */
3797 if (iter->fmt == static_fmt_buf)
3806 /* We only care about %s and variants */
3807 for (i = 0; p[i]; i++) {
3808 if (i + 1 >= iter->fmt_size) {
3810 * If we can't expand the copy buffer,
3813 if (!trace_iter_expand_format(iter))
3817 if (p[i] == '\\' && p[i+1]) {
3822 /* Need to test cases like %08.*s */
3823 for (j = 1; p[i+j]; j++) {
3824 if (isdigit(p[i+j]) ||
3827 if (p[i+j] == '*') {
3839 /* If no %s found then just print normally */
3843 /* Copy up to the %s, and print that */
3844 strncpy(iter->fmt, p, i);
3845 iter->fmt[i] = '\0';
3846 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3849 * If iter->seq is full, the above call no longer guarantees
3850 * that ap is in sync with fmt processing, and further calls
3851 * to va_arg() can return wrong positional arguments.
3853 * Ensure that ap is no longer used in this case.
3855 if (iter->seq.full) {
3861 len = va_arg(ap, int);
3863 /* The ap now points to the string data of the %s */
3864 str = va_arg(ap, const char *);
3867 * If you hit this warning, it is likely that the
3868 * trace event in question used %s on a string that
3869 * was saved at the time of the event, but may not be
3870 * around when the trace is read. Use __string(),
3871 * __assign_str() and __get_str() helpers in the TRACE_EVENT() macro
3872 * instead. See samples/trace_events/trace-events-sample.h
3875 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3876 "fmt: '%s' current_buffer: '%s'",
3877 fmt, show_buffer(&iter->seq))) {
3880 /* Try to safely read the string */
3882 if (len + 1 > iter->fmt_size)
3883 len = iter->fmt_size - 1;
3886 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3890 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3894 trace_seq_printf(&iter->seq, "(0x%px)", str);
3896 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3898 str = "[UNSAFE-MEMORY]";
3899 strcpy(iter->fmt, "%s");
3901 strncpy(iter->fmt, p + i, j + 1);
3902 iter->fmt[j+1] = '\0';
3905 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3907 trace_seq_printf(&iter->seq, iter->fmt, str);
3913 trace_seq_vprintf(&iter->seq, p, ap);
3916 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3918 const char *p, *new_fmt;
3921 if (WARN_ON_ONCE(!fmt))
3924 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3928 new_fmt = q = iter->fmt;
3930 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3931 if (!trace_iter_expand_format(iter))
3934 q += iter->fmt - new_fmt;
3935 new_fmt = iter->fmt;
3940 /* Replace %p with %px */
3944 } else if (p[0] == 'p' && !isalnum(p[1])) {
3955 #define STATIC_TEMP_BUF_SIZE 128
3956 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3958 /* Find the next real entry, without updating the iterator itself */
3959 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3960 int *ent_cpu, u64 *ent_ts)
3962 /* __find_next_entry will reset ent_size */
3963 int ent_size = iter->ent_size;
3964 struct trace_entry *entry;
3967 * If called from ftrace_dump(), then the iter->temp buffer
3968 * will be the static_temp_buf and not created from kmalloc.
3969 * If the entry size is greater than the buffer, we can
3970 * not save it. Just return NULL in that case. This is only
3971 * used to add markers when two consecutive events' time
3972 * stamps have a large delta. See trace_print_lat_context()
3974 if (iter->temp == static_temp_buf &&
3975 STATIC_TEMP_BUF_SIZE < ent_size)
3979 * The __find_next_entry() may call peek_next_entry(), which may
3980 * call ring_buffer_peek() that may make the contents of iter->ent
3981 * undefined. Need to copy iter->ent now.
3983 if (iter->ent && iter->ent != iter->temp) {
3984 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3985 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3987 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3992 iter->temp_size = iter->ent_size;
3994 memcpy(iter->temp, iter->ent, iter->ent_size);
3995 iter->ent = iter->temp;
3997 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3998 /* Put back the original ent_size */
3999 iter->ent_size = ent_size;
4004 /* Find the next real entry, and increment the iterator to the next entry */
4005 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4007 iter->ent = __find_next_entry(iter, &iter->cpu,
4008 &iter->lost_events, &iter->ts);
4011 trace_iterator_increment(iter);
4013 return iter->ent ? iter : NULL;
4016 static void trace_consume(struct trace_iterator *iter)
4018 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4019 &iter->lost_events);
4022 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4024 struct trace_iterator *iter = m->private;
4028 WARN_ON_ONCE(iter->leftover);
4032 /* can't go backwards */
4037 ent = trace_find_next_entry_inc(iter);
4041 while (ent && iter->idx < i)
4042 ent = trace_find_next_entry_inc(iter);
4049 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4051 struct ring_buffer_iter *buf_iter;
4052 unsigned long entries = 0;
4055 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4057 buf_iter = trace_buffer_iter(iter, cpu);
4061 ring_buffer_iter_reset(buf_iter);
4064 * We could have the case with the max latency tracers
4065 * that a reset never took place on a cpu. This is evident
4066 * by the timestamp being before the start of the buffer.
4068 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4069 if (ts >= iter->array_buffer->time_start)
4072 ring_buffer_iter_advance(buf_iter);
4075 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4079 * The current tracer is copied to avoid global locking all around.
4082 static void *s_start(struct seq_file *m, loff_t *pos)
4084 struct trace_iterator *iter = m->private;
4085 struct trace_array *tr = iter->tr;
4086 int cpu_file = iter->cpu_file;
4092 * copy the tracer to avoid using a global lock all around.
4093 * iter->trace is a copy of current_trace, the pointer to the
4094 * name may be used instead of a strcmp(), as iter->trace->name
4095 * will point to the same string as current_trace->name.
4097 mutex_lock(&trace_types_lock);
4098 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4099 *iter->trace = *tr->current_trace;
4100 mutex_unlock(&trace_types_lock);
4102 #ifdef CONFIG_TRACER_MAX_TRACE
4103 if (iter->snapshot && iter->trace->use_max_tr)
4104 return ERR_PTR(-EBUSY);
4107 if (*pos != iter->pos) {
4112 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4113 for_each_tracing_cpu(cpu)
4114 tracing_iter_reset(iter, cpu);
4116 tracing_iter_reset(iter, cpu_file);
4119 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4124 * If we overflowed the seq_file before, then we want
4125 * to just reuse the trace_seq buffer again.
4131 p = s_next(m, p, &l);
4135 trace_event_read_lock();
4136 trace_access_lock(cpu_file);
4140 static void s_stop(struct seq_file *m, void *p)
4142 struct trace_iterator *iter = m->private;
4144 #ifdef CONFIG_TRACER_MAX_TRACE
4145 if (iter->snapshot && iter->trace->use_max_tr)
4149 trace_access_unlock(iter->cpu_file);
4150 trace_event_read_unlock();
4154 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4155 unsigned long *entries, int cpu)
4157 unsigned long count;
4159 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4161 * If this buffer has skipped entries, then we hold all
4162 * entries for the trace and we need to ignore the
4163 * ones before the time stamp.
4165 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4166 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4167 /* total is the same as the entries */
4171 ring_buffer_overrun_cpu(buf->buffer, cpu);
4176 get_total_entries(struct array_buffer *buf,
4177 unsigned long *total, unsigned long *entries)
4185 for_each_tracing_cpu(cpu) {
4186 get_total_entries_cpu(buf, &t, &e, cpu);
4192 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4194 unsigned long total, entries;
4199 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4204 unsigned long trace_total_entries(struct trace_array *tr)
4206 unsigned long total, entries;
4211 get_total_entries(&tr->array_buffer, &total, &entries);
4216 static void print_lat_help_header(struct seq_file *m)
4218 seq_puts(m, "# _------=> CPU# \n"
4219 "# / _-----=> irqs-off/BH-disabled\n"
4220 "# | / _----=> need-resched \n"
4221 "# || / _---=> hardirq/softirq \n"
4222 "# ||| / _--=> preempt-depth \n"
4223 "# |||| / _-=> migrate-disable \n"
4224 "# ||||| / delay \n"
4225 "# cmd pid |||||| time | caller \n"
4226 "# \\ / |||||| \\ | / \n");
4229 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4231 unsigned long total;
4232 unsigned long entries;
4234 get_total_entries(buf, &total, &entries);
4235 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4236 entries, total, num_online_cpus());
4240 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4243 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4245 print_event_info(buf, m);
4247 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4248 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4251 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4254 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4255 static const char space[] = " ";
4256 int prec = tgid ? 12 : 2;
4258 print_event_info(buf, m);
4260 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4261 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4262 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4263 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4264 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4265 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4266 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4267 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4271 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4273 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4274 struct array_buffer *buf = iter->array_buffer;
4275 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4276 struct tracer *type = iter->trace;
4277 unsigned long entries;
4278 unsigned long total;
4279 const char *name = type->name;
4281 get_total_entries(buf, &total, &entries);
4283 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4285 seq_puts(m, "# -----------------------------------"
4286 "---------------------------------\n");
4287 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4288 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4289 nsecs_to_usecs(data->saved_latency),
4293 preempt_model_none() ? "server" :
4294 preempt_model_voluntary() ? "desktop" :
4295 preempt_model_full() ? "preempt" :
4296 preempt_model_rt() ? "preempt_rt" :
4298 /* These are reserved for later use */
4301 seq_printf(m, " #P:%d)\n", num_online_cpus());
4305 seq_puts(m, "# -----------------\n");
4306 seq_printf(m, "# | task: %.16s-%d "
4307 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4308 data->comm, data->pid,
4309 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4310 data->policy, data->rt_priority);
4311 seq_puts(m, "# -----------------\n");
4313 if (data->critical_start) {
4314 seq_puts(m, "# => started at: ");
4315 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4316 trace_print_seq(m, &iter->seq);
4317 seq_puts(m, "\n# => ended at: ");
4318 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4319 trace_print_seq(m, &iter->seq);
4320 seq_puts(m, "\n#\n");
4326 static void test_cpu_buff_start(struct trace_iterator *iter)
4328 struct trace_seq *s = &iter->seq;
4329 struct trace_array *tr = iter->tr;
4331 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4334 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4337 if (cpumask_available(iter->started) &&
4338 cpumask_test_cpu(iter->cpu, iter->started))
4341 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4344 if (cpumask_available(iter->started))
4345 cpumask_set_cpu(iter->cpu, iter->started);
4347 /* Don't print started cpu buffer for the first entry of the trace */
4349 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4353 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4355 struct trace_array *tr = iter->tr;
4356 struct trace_seq *s = &iter->seq;
4357 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4358 struct trace_entry *entry;
4359 struct trace_event *event;
4363 test_cpu_buff_start(iter);
4365 event = ftrace_find_event(entry->type);
4367 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4368 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4369 trace_print_lat_context(iter);
4371 trace_print_context(iter);
4374 if (trace_seq_has_overflowed(s))
4375 return TRACE_TYPE_PARTIAL_LINE;
4378 return event->funcs->trace(iter, sym_flags, event);
4380 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4382 return trace_handle_return(s);
4385 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4387 struct trace_array *tr = iter->tr;
4388 struct trace_seq *s = &iter->seq;
4389 struct trace_entry *entry;
4390 struct trace_event *event;
4394 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4395 trace_seq_printf(s, "%d %d %llu ",
4396 entry->pid, iter->cpu, iter->ts);
4398 if (trace_seq_has_overflowed(s))
4399 return TRACE_TYPE_PARTIAL_LINE;
4401 event = ftrace_find_event(entry->type);
4403 return event->funcs->raw(iter, 0, event);
4405 trace_seq_printf(s, "%d ?\n", entry->type);
4407 return trace_handle_return(s);
4410 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4412 struct trace_array *tr = iter->tr;
4413 struct trace_seq *s = &iter->seq;
4414 unsigned char newline = '\n';
4415 struct trace_entry *entry;
4416 struct trace_event *event;
4420 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4421 SEQ_PUT_HEX_FIELD(s, entry->pid);
4422 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4423 SEQ_PUT_HEX_FIELD(s, iter->ts);
4424 if (trace_seq_has_overflowed(s))
4425 return TRACE_TYPE_PARTIAL_LINE;
4428 event = ftrace_find_event(entry->type);
4430 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4431 if (ret != TRACE_TYPE_HANDLED)
4435 SEQ_PUT_FIELD(s, newline);
4437 return trace_handle_return(s);
4440 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4442 struct trace_array *tr = iter->tr;
4443 struct trace_seq *s = &iter->seq;
4444 struct trace_entry *entry;
4445 struct trace_event *event;
4449 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4450 SEQ_PUT_FIELD(s, entry->pid);
4451 SEQ_PUT_FIELD(s, iter->cpu);
4452 SEQ_PUT_FIELD(s, iter->ts);
4453 if (trace_seq_has_overflowed(s))
4454 return TRACE_TYPE_PARTIAL_LINE;
4457 event = ftrace_find_event(entry->type);
4458 return event ? event->funcs->binary(iter, 0, event) :
4462 int trace_empty(struct trace_iterator *iter)
4464 struct ring_buffer_iter *buf_iter;
4467 /* If we are looking at one CPU buffer, only check that one */
4468 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4469 cpu = iter->cpu_file;
4470 buf_iter = trace_buffer_iter(iter, cpu);
4472 if (!ring_buffer_iter_empty(buf_iter))
4475 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4481 for_each_tracing_cpu(cpu) {
4482 buf_iter = trace_buffer_iter(iter, cpu);
4484 if (!ring_buffer_iter_empty(buf_iter))
4487 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4495 /* Called with trace_event_read_lock() held. */
4496 enum print_line_t print_trace_line(struct trace_iterator *iter)
4498 struct trace_array *tr = iter->tr;
4499 unsigned long trace_flags = tr->trace_flags;
4500 enum print_line_t ret;
4502 if (iter->lost_events) {
4503 if (iter->lost_events == (unsigned long)-1)
4504 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4507 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4508 iter->cpu, iter->lost_events);
4509 if (trace_seq_has_overflowed(&iter->seq))
4510 return TRACE_TYPE_PARTIAL_LINE;
4513 if (iter->trace && iter->trace->print_line) {
4514 ret = iter->trace->print_line(iter);
4515 if (ret != TRACE_TYPE_UNHANDLED)
4519 if (iter->ent->type == TRACE_BPUTS &&
4520 trace_flags & TRACE_ITER_PRINTK &&
4521 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4522 return trace_print_bputs_msg_only(iter);
4524 if (iter->ent->type == TRACE_BPRINT &&
4525 trace_flags & TRACE_ITER_PRINTK &&
4526 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4527 return trace_print_bprintk_msg_only(iter);
4529 if (iter->ent->type == TRACE_PRINT &&
4530 trace_flags & TRACE_ITER_PRINTK &&
4531 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4532 return trace_print_printk_msg_only(iter);
4534 if (trace_flags & TRACE_ITER_BIN)
4535 return print_bin_fmt(iter);
4537 if (trace_flags & TRACE_ITER_HEX)
4538 return print_hex_fmt(iter);
4540 if (trace_flags & TRACE_ITER_RAW)
4541 return print_raw_fmt(iter);
4543 return print_trace_fmt(iter);
4546 void trace_latency_header(struct seq_file *m)
4548 struct trace_iterator *iter = m->private;
4549 struct trace_array *tr = iter->tr;
4551 /* print nothing if the buffers are empty */
4552 if (trace_empty(iter))
4555 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4556 print_trace_header(m, iter);
4558 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4559 print_lat_help_header(m);
4562 void trace_default_header(struct seq_file *m)
4564 struct trace_iterator *iter = m->private;
4565 struct trace_array *tr = iter->tr;
4566 unsigned long trace_flags = tr->trace_flags;
4568 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4571 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4572 /* print nothing if the buffers are empty */
4573 if (trace_empty(iter))
4575 print_trace_header(m, iter);
4576 if (!(trace_flags & TRACE_ITER_VERBOSE))
4577 print_lat_help_header(m);
4579 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4580 if (trace_flags & TRACE_ITER_IRQ_INFO)
4581 print_func_help_header_irq(iter->array_buffer,
4584 print_func_help_header(iter->array_buffer, m,
4590 static void test_ftrace_alive(struct seq_file *m)
4592 if (!ftrace_is_dead())
4594 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4595 "# MAY BE MISSING FUNCTION EVENTS\n");
4598 #ifdef CONFIG_TRACER_MAX_TRACE
4599 static void show_snapshot_main_help(struct seq_file *m)
4601 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4602 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4603 "# Takes a snapshot of the main buffer.\n"
4604 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4605 "# (Doesn't have to be '2' works with any number that\n"
4606 "# is not a '0' or '1')\n");
4609 static void show_snapshot_percpu_help(struct seq_file *m)
4611 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4612 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4613 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4614 "# Takes a snapshot of the main buffer for this cpu.\n");
4616 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4617 "# Must use main snapshot file to allocate.\n");
4619 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4620 "# (Doesn't have to be '2' works with any number that\n"
4621 "# is not a '0' or '1')\n");
4624 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4626 if (iter->tr->allocated_snapshot)
4627 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4629 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4631 seq_puts(m, "# Snapshot commands:\n");
4632 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4633 show_snapshot_main_help(m);
4635 show_snapshot_percpu_help(m);
4638 /* Should never be called */
4639 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4642 static int s_show(struct seq_file *m, void *v)
4644 struct trace_iterator *iter = v;
4647 if (iter->ent == NULL) {
4649 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4651 test_ftrace_alive(m);
4653 if (iter->snapshot && trace_empty(iter))
4654 print_snapshot_help(m, iter);
4655 else if (iter->trace && iter->trace->print_header)
4656 iter->trace->print_header(m);
4658 trace_default_header(m);
4660 } else if (iter->leftover) {
4662 * If we filled the seq_file buffer earlier, we
4663 * want to just show it now.
4665 ret = trace_print_seq(m, &iter->seq);
4667 /* ret should this time be zero, but you never know */
4668 iter->leftover = ret;
4671 print_trace_line(iter);
4672 ret = trace_print_seq(m, &iter->seq);
4674 * If we overflow the seq_file buffer, then it will
4675 * ask us for this data again at start up.
4677 * ret is 0 if seq_file write succeeded.
4680 iter->leftover = ret;
4687 * Should be used after trace_array_get(), trace_types_lock
4688 * ensures that i_cdev was already initialized.
4690 static inline int tracing_get_cpu(struct inode *inode)
4692 if (inode->i_cdev) /* See trace_create_cpu_file() */
4693 return (long)inode->i_cdev - 1;
4694 return RING_BUFFER_ALL_CPUS;
4697 static const struct seq_operations tracer_seq_ops = {
4704 static struct trace_iterator *
4705 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4707 struct trace_array *tr = inode->i_private;
4708 struct trace_iterator *iter;
4711 if (tracing_disabled)
4712 return ERR_PTR(-ENODEV);
4714 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4716 return ERR_PTR(-ENOMEM);
4718 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4720 if (!iter->buffer_iter)
4724 * trace_find_next_entry() may need to save off iter->ent.
4725 * It will place it into the iter->temp buffer. As most
4726 * events are less than 128, allocate a buffer of that size.
4727 * If one is greater, then trace_find_next_entry() will
4728 * allocate a new buffer to adjust for the bigger iter->ent.
4729 * It's not critical if it fails to get allocated here.
4731 iter->temp = kmalloc(128, GFP_KERNEL);
4733 iter->temp_size = 128;
4736 * trace_event_printf() may need to modify given format
4737 * string to replace %p with %px so that it shows real address
4738 * instead of a hash value. However, that is only needed for event
4739 * tracing; other tracers may not need it. Defer the allocation
4740 * until it is needed.
4746 * We make a copy of the current tracer to avoid concurrent
4747 * changes on it while we are reading.
4749 mutex_lock(&trace_types_lock);
4750 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4754 *iter->trace = *tr->current_trace;
4756 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4761 #ifdef CONFIG_TRACER_MAX_TRACE
4762 /* Currently only the top directory has a snapshot */
4763 if (tr->current_trace->print_max || snapshot)
4764 iter->array_buffer = &tr->max_buffer;
4767 iter->array_buffer = &tr->array_buffer;
4768 iter->snapshot = snapshot;
4770 iter->cpu_file = tracing_get_cpu(inode);
4771 mutex_init(&iter->mutex);
4773 /* Notify the tracer early; before we stop tracing. */
4774 if (iter->trace->open)
4775 iter->trace->open(iter);
4777 /* Annotate start of buffers if we had overruns */
4778 if (ring_buffer_overruns(iter->array_buffer->buffer))
4779 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4781 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4782 if (trace_clocks[tr->clock_id].in_ns)
4783 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4786 * If pause-on-trace is enabled, then stop the trace while
4787 * dumping, unless this is the "snapshot" file
4789 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4790 tracing_stop_tr(tr);
4792 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4793 for_each_tracing_cpu(cpu) {
4794 iter->buffer_iter[cpu] =
4795 ring_buffer_read_prepare(iter->array_buffer->buffer,
4798 ring_buffer_read_prepare_sync();
4799 for_each_tracing_cpu(cpu) {
4800 ring_buffer_read_start(iter->buffer_iter[cpu]);
4801 tracing_iter_reset(iter, cpu);
4804 cpu = iter->cpu_file;
4805 iter->buffer_iter[cpu] =
4806 ring_buffer_read_prepare(iter->array_buffer->buffer,
4808 ring_buffer_read_prepare_sync();
4809 ring_buffer_read_start(iter->buffer_iter[cpu]);
4810 tracing_iter_reset(iter, cpu);
4813 mutex_unlock(&trace_types_lock);
4818 mutex_unlock(&trace_types_lock);
4821 kfree(iter->buffer_iter);
4823 seq_release_private(inode, file);
4824 return ERR_PTR(-ENOMEM);
4827 int tracing_open_generic(struct inode *inode, struct file *filp)
4831 ret = tracing_check_open_get_tr(NULL);
4835 filp->private_data = inode->i_private;
4839 bool tracing_is_disabled(void)
4841 return tracing_disabled ? true : false;
4845 * Open and update trace_array ref count.
4846 * Must have the current trace_array passed to it.
4848 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4850 struct trace_array *tr = inode->i_private;
4853 ret = tracing_check_open_get_tr(tr);
4857 filp->private_data = inode->i_private;
4862 static int tracing_mark_open(struct inode *inode, struct file *filp)
4864 stream_open(inode, filp);
4865 return tracing_open_generic_tr(inode, filp);
4868 static int tracing_release(struct inode *inode, struct file *file)
4870 struct trace_array *tr = inode->i_private;
4871 struct seq_file *m = file->private_data;
4872 struct trace_iterator *iter;
4875 if (!(file->f_mode & FMODE_READ)) {
4876 trace_array_put(tr);
4880 /* Writes do not use seq_file */
4882 mutex_lock(&trace_types_lock);
4884 for_each_tracing_cpu(cpu) {
4885 if (iter->buffer_iter[cpu])
4886 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4889 if (iter->trace && iter->trace->close)
4890 iter->trace->close(iter);
4892 if (!iter->snapshot && tr->stop_count)
4893 /* reenable tracing if it was previously enabled */
4894 tracing_start_tr(tr);
4896 __trace_array_put(tr);
4898 mutex_unlock(&trace_types_lock);
4900 mutex_destroy(&iter->mutex);
4901 free_cpumask_var(iter->started);
4905 kfree(iter->buffer_iter);
4906 seq_release_private(inode, file);
4911 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4913 struct trace_array *tr = inode->i_private;
4915 trace_array_put(tr);
4919 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4921 struct trace_array *tr = inode->i_private;
4923 trace_array_put(tr);
4925 return single_release(inode, file);
4928 static int tracing_open(struct inode *inode, struct file *file)
4930 struct trace_array *tr = inode->i_private;
4931 struct trace_iterator *iter;
4934 ret = tracing_check_open_get_tr(tr);
4938 /* If this file was open for write, then erase contents */
4939 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4940 int cpu = tracing_get_cpu(inode);
4941 struct array_buffer *trace_buf = &tr->array_buffer;
4943 #ifdef CONFIG_TRACER_MAX_TRACE
4944 if (tr->current_trace->print_max)
4945 trace_buf = &tr->max_buffer;
4948 if (cpu == RING_BUFFER_ALL_CPUS)
4949 tracing_reset_online_cpus(trace_buf);
4951 tracing_reset_cpu(trace_buf, cpu);
4954 if (file->f_mode & FMODE_READ) {
4955 iter = __tracing_open(inode, file, false);
4957 ret = PTR_ERR(iter);
4958 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4959 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4963 trace_array_put(tr);
4969 * Some tracers are not suitable for instance buffers.
4970 * A tracer is always available for the global array (toplevel)
4971 * or if it explicitly states that it is.
4974 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4976 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4979 /* Find the next tracer that this trace array may use */
4980 static struct tracer *
4981 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4983 while (t && !trace_ok_for_array(t, tr))
4990 t_next(struct seq_file *m, void *v, loff_t *pos)
4992 struct trace_array *tr = m->private;
4993 struct tracer *t = v;
4998 t = get_tracer_for_array(tr, t->next);
5003 static void *t_start(struct seq_file *m, loff_t *pos)
5005 struct trace_array *tr = m->private;
5009 mutex_lock(&trace_types_lock);
5011 t = get_tracer_for_array(tr, trace_types);
5012 for (; t && l < *pos; t = t_next(m, t, &l))
5018 static void t_stop(struct seq_file *m, void *p)
5020 mutex_unlock(&trace_types_lock);
5023 static int t_show(struct seq_file *m, void *v)
5025 struct tracer *t = v;
5030 seq_puts(m, t->name);
5039 static const struct seq_operations show_traces_seq_ops = {
5046 static int show_traces_open(struct inode *inode, struct file *file)
5048 struct trace_array *tr = inode->i_private;
5052 ret = tracing_check_open_get_tr(tr);
5056 ret = seq_open(file, &show_traces_seq_ops);
5058 trace_array_put(tr);
5062 m = file->private_data;
5068 static int show_traces_release(struct inode *inode, struct file *file)
5070 struct trace_array *tr = inode->i_private;
5072 trace_array_put(tr);
5073 return seq_release(inode, file);
5077 tracing_write_stub(struct file *filp, const char __user *ubuf,
5078 size_t count, loff_t *ppos)
5083 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5087 if (file->f_mode & FMODE_READ)
5088 ret = seq_lseek(file, offset, whence);
5090 file->f_pos = ret = 0;
5095 static const struct file_operations tracing_fops = {
5096 .open = tracing_open,
5098 .write = tracing_write_stub,
5099 .llseek = tracing_lseek,
5100 .release = tracing_release,
5103 static const struct file_operations show_traces_fops = {
5104 .open = show_traces_open,
5106 .llseek = seq_lseek,
5107 .release = show_traces_release,
5111 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5112 size_t count, loff_t *ppos)
5114 struct trace_array *tr = file_inode(filp)->i_private;
5118 len = snprintf(NULL, 0, "%*pb\n",
5119 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5120 mask_str = kmalloc(len, GFP_KERNEL);
5124 len = snprintf(mask_str, len, "%*pb\n",
5125 cpumask_pr_args(tr->tracing_cpumask));
5130 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5138 int tracing_set_cpumask(struct trace_array *tr,
5139 cpumask_var_t tracing_cpumask_new)
5146 local_irq_disable();
5147 arch_spin_lock(&tr->max_lock);
5148 for_each_tracing_cpu(cpu) {
5150 * Increase/decrease the disabled counter if we are
5151 * about to flip a bit in the cpumask:
5153 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5154 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5155 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5156 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5158 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5159 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5160 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5161 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5164 arch_spin_unlock(&tr->max_lock);
5167 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5173 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5174 size_t count, loff_t *ppos)
5176 struct trace_array *tr = file_inode(filp)->i_private;
5177 cpumask_var_t tracing_cpumask_new;
5180 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5183 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5187 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5191 free_cpumask_var(tracing_cpumask_new);
5196 free_cpumask_var(tracing_cpumask_new);
5201 static const struct file_operations tracing_cpumask_fops = {
5202 .open = tracing_open_generic_tr,
5203 .read = tracing_cpumask_read,
5204 .write = tracing_cpumask_write,
5205 .release = tracing_release_generic_tr,
5206 .llseek = generic_file_llseek,
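/*
 * Example usage (illustrative, not part of the original source): the mask
 * is parsed by cpumask_parse_user(), so it is written as a hex CPU mask.
 *
 *	# echo 3 > tracing_cpumask	trace only CPUs 0 and 1
 *	# cat tracing_cpumask		show the current mask, e.g. "f"
 */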
5209 static int tracing_trace_options_show(struct seq_file *m, void *v)
5211 struct tracer_opt *trace_opts;
5212 struct trace_array *tr = m->private;
5216 mutex_lock(&trace_types_lock);
5217 tracer_flags = tr->current_trace->flags->val;
5218 trace_opts = tr->current_trace->flags->opts;
5220 for (i = 0; trace_options[i]; i++) {
5221 if (tr->trace_flags & (1 << i))
5222 seq_printf(m, "%s\n", trace_options[i]);
5224 seq_printf(m, "no%s\n", trace_options[i]);
5227 for (i = 0; trace_opts[i].name; i++) {
5228 if (tracer_flags & trace_opts[i].bit)
5229 seq_printf(m, "%s\n", trace_opts[i].name);
5231 seq_printf(m, "no%s\n", trace_opts[i].name);
5233 mutex_unlock(&trace_types_lock);
5238 static int __set_tracer_option(struct trace_array *tr,
5239 struct tracer_flags *tracer_flags,
5240 struct tracer_opt *opts, int neg)
5242 struct tracer *trace = tracer_flags->trace;
5245 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5250 tracer_flags->val &= ~opts->bit;
5252 tracer_flags->val |= opts->bit;
5256 /* Try to assign a tracer specific option */
5257 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5259 struct tracer *trace = tr->current_trace;
5260 struct tracer_flags *tracer_flags = trace->flags;
5261 struct tracer_opt *opts = NULL;
5264 for (i = 0; tracer_flags->opts[i].name; i++) {
5265 opts = &tracer_flags->opts[i];
5267 if (strcmp(cmp, opts->name) == 0)
5268 return __set_tracer_option(tr, trace->flags, opts, neg);
5274 /* Some tracers require overwrite to stay enabled */
5275 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5277 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5283 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5287 if ((mask == TRACE_ITER_RECORD_TGID) ||
5288 (mask == TRACE_ITER_RECORD_CMD))
5289 lockdep_assert_held(&event_mutex);
5291 /* do nothing if flag is already set */
5292 if (!!(tr->trace_flags & mask) == !!enabled)
5295 /* Give the tracer a chance to approve the change */
5296 if (tr->current_trace->flag_changed)
5297 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5301 tr->trace_flags |= mask;
5303 tr->trace_flags &= ~mask;
5305 if (mask == TRACE_ITER_RECORD_CMD)
5306 trace_event_enable_cmd_record(enabled);
5308 if (mask == TRACE_ITER_RECORD_TGID) {
5310 tgid_map_max = pid_max;
5311 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5315 * Pairs with smp_load_acquire() in
5316 * trace_find_tgid_ptr() to ensure that if it observes
5317 * the tgid_map we just allocated then it also observes
5318 * the corresponding tgid_map_max value.
5320 smp_store_release(&tgid_map, map);
5323 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5327 trace_event_enable_tgid_record(enabled);
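/*
 * For reference, a rough sketch of the acquire side in
 * trace_find_tgid_ptr() (defined earlier in this file); the exact body
 * may differ, but the pairing works like this:
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */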
5330 if (mask == TRACE_ITER_EVENT_FORK)
5331 trace_event_follow_fork(tr, enabled);
5333 if (mask == TRACE_ITER_FUNC_FORK)
5334 ftrace_pid_follow_fork(tr, enabled);
5336 if (mask == TRACE_ITER_OVERWRITE) {
5337 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5338 #ifdef CONFIG_TRACER_MAX_TRACE
5339 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5343 if (mask == TRACE_ITER_PRINTK) {
5344 trace_printk_start_stop_comm(enabled);
5345 trace_printk_control(enabled);
5351 int trace_set_options(struct trace_array *tr, char *option)
5356 size_t orig_len = strlen(option);
5359 cmp = strstrip(option);
5361 len = str_has_prefix(cmp, "no");
5367 mutex_lock(&event_mutex);
5368 mutex_lock(&trace_types_lock);
5370 ret = match_string(trace_options, -1, cmp);
5371 /* If no option could be set, test the specific tracer options */
5373 ret = set_tracer_option(tr, cmp, neg);
5375 ret = set_tracer_flag(tr, 1 << ret, !neg);
5377 mutex_unlock(&trace_types_lock);
5378 mutex_unlock(&event_mutex);
5381 * If the first trailing whitespace is replaced with '\0' by strstrip,
5382 * turn it back into a space.
5384 if (orig_len > strlen(option))
5385 option[strlen(option)] = ' ';
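/*
 * Example usage (illustrative): flags are set by writing their name and
 * cleared by prefixing "no", exactly as parsed above.
 *
 *	# echo sym-offset > trace_options
 *	# echo noprint-parent > trace_options
 */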
5390 static void __init apply_trace_boot_options(void)
5392 char *buf = trace_boot_options_buf;
5396 option = strsep(&buf, ",");
5402 trace_set_options(&global_trace, option);
5404 /* Put back the comma to allow this to be called again */
5411 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5412 size_t cnt, loff_t *ppos)
5414 struct seq_file *m = filp->private_data;
5415 struct trace_array *tr = m->private;
5419 if (cnt >= sizeof(buf))
5422 if (copy_from_user(buf, ubuf, cnt))
5427 ret = trace_set_options(tr, buf);
5436 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5438 struct trace_array *tr = inode->i_private;
5441 ret = tracing_check_open_get_tr(tr);
5445 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5447 trace_array_put(tr);
5452 static const struct file_operations tracing_iter_fops = {
5453 .open = tracing_trace_options_open,
5455 .llseek = seq_lseek,
5456 .release = tracing_single_release_tr,
5457 .write = tracing_trace_options_write,
5460 static const char readme_msg[] =
5461 "tracing mini-HOWTO:\n\n"
5462 "# echo 0 > tracing_on : quick way to disable tracing\n"
5463 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5464 " Important files:\n"
5465 " trace\t\t\t- The static contents of the buffer\n"
5466 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5467 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5468 " current_tracer\t- function and latency tracers\n"
5469 " available_tracers\t- list of configured tracers for current_tracer\n"
5470 " error_log\t- error log for failed commands (that support it)\n"
5471 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5472 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5473 " trace_clock\t\t- change the clock used to order events\n"
5474 " local: Per cpu clock but may not be synced across CPUs\n"
5475 " global: Synced across CPUs but slows tracing down.\n"
5476 " counter: Not a clock, but just an increment\n"
5477 " uptime: Jiffy counter from time of boot\n"
5478 " perf: Same clock that perf events use\n"
5479 #ifdef CONFIG_X86_64
5480 " x86-tsc: TSC cycle counter\n"
5482 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5483 " delta: Delta difference against a buffer-wide timestamp\n"
5484 " absolute: Absolute (standalone) timestamp\n"
5485 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5486 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5487 " tracing_cpumask\t- Limit which CPUs to trace\n"
5488 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5489 "\t\t\t Remove sub-buffer with rmdir\n"
5490 " trace_options\t\t- Set format or modify how tracing happens\n"
5491 "\t\t\t Disable an option by prefixing 'no' to the\n"
5492 "\t\t\t option name\n"
5493 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5494 #ifdef CONFIG_DYNAMIC_FTRACE
5495 "\n available_filter_functions - list of functions that can be filtered on\n"
5496 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5497 "\t\t\t functions\n"
5498 "\t accepts: func_full_name or glob-matching-pattern\n"
5499 "\t modules: Can select a group via module\n"
5500 "\t Format: :mod:<module-name>\n"
5501 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5502 "\t triggers: a command to perform when function is hit\n"
5503 "\t Format: <function>:<trigger>[:count]\n"
5504 "\t trigger: traceon, traceoff\n"
5505 "\t\t enable_event:<system>:<event>\n"
5506 "\t\t disable_event:<system>:<event>\n"
5507 #ifdef CONFIG_STACKTRACE
5510 #ifdef CONFIG_TRACER_SNAPSHOT
5515 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5516 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5517 "\t The first one will disable tracing every time do_fault is hit\n"
5518 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5519 "\t The first time do trap is hit and it disables tracing, the\n"
5520 "\t counter will decrement to 2. If tracing is already disabled,\n"
5521 "\t the counter will not decrement. It only decrements when the\n"
5522 "\t trigger did work\n"
5523 "\t To remove trigger without count:\n"
5524 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5525 "\t To remove trigger with a count:\n"
5526 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5527 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5528 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5529 "\t modules: Can select a group via module command :mod:\n"
5530 "\t Does not accept triggers\n"
5531 #endif /* CONFIG_DYNAMIC_FTRACE */
5532 #ifdef CONFIG_FUNCTION_TRACER
5533 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5535 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5539 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5540 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5541 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5543 #ifdef CONFIG_TRACER_SNAPSHOT
5544 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5545 "\t\t\t snapshot buffer. Read the contents for more\n"
5546 "\t\t\t information\n"
5548 #ifdef CONFIG_STACK_TRACER
5549 " stack_trace\t\t- Shows the max stack trace when active\n"
5550 " stack_max_size\t- Shows current max stack size that was traced\n"
5551 "\t\t\t Write into this file to reset the max size (trigger a\n"
5552 "\t\t\t new trace)\n"
5553 #ifdef CONFIG_DYNAMIC_FTRACE
5554 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5557 #endif /* CONFIG_STACK_TRACER */
5558 #ifdef CONFIG_DYNAMIC_EVENTS
5559 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5560 "\t\t\t Write into this file to define/undefine new trace events.\n"
5562 #ifdef CONFIG_KPROBE_EVENTS
5563 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5564 "\t\t\t Write into this file to define/undefine new trace events.\n"
5566 #ifdef CONFIG_UPROBE_EVENTS
5567 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5568 "\t\t\t Write into this file to define/undefine new trace events.\n"
5570 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5571 "\t accepts: event-definitions (one definition per line)\n"
5572 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5573 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5574 #ifdef CONFIG_HIST_TRIGGERS
5575 "\t s:[synthetic/]<event> <field> [<field>]\n"
5577 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
5578 "\t -:[<group>/][<event>]\n"
5579 #ifdef CONFIG_KPROBE_EVENTS
5580 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5581 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5583 #ifdef CONFIG_UPROBE_EVENTS
5584 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5586 "\t args: <name>=fetcharg[:type]\n"
5587 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5588 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5589 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5591 "\t $stack<index>, $stack, $retval, $comm,\n"
5593 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5594 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5595 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5596 "\t <type>\\[<array-size>\\]\n"
5597 #ifdef CONFIG_HIST_TRIGGERS
5598 "\t field: <stype> <name>;\n"
5599 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5600 "\t [unsigned] char/int/long\n"
5602 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5603 "\t of the <attached-group>/<attached-event>.\n"
5605 " events/\t\t- Directory containing all trace event subsystems:\n"
5606 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5607 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5608 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5610 " filter\t\t- If set, only events passing filter are traced\n"
5611 " events/<system>/<event>/\t- Directory containing control files for\n"
5613 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5614 " filter\t\t- If set, only events passing filter are traced\n"
5615 " trigger\t\t- If set, a command to perform when event is hit\n"
5616 "\t Format: <trigger>[:count][if <filter>]\n"
5617 "\t trigger: traceon, traceoff\n"
5618 "\t enable_event:<system>:<event>\n"
5619 "\t disable_event:<system>:<event>\n"
5620 #ifdef CONFIG_HIST_TRIGGERS
5621 "\t enable_hist:<system>:<event>\n"
5622 "\t disable_hist:<system>:<event>\n"
5624 #ifdef CONFIG_STACKTRACE
5627 #ifdef CONFIG_TRACER_SNAPSHOT
5630 #ifdef CONFIG_HIST_TRIGGERS
5631 "\t\t hist (see below)\n"
5633 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5634 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5635 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5636 "\t events/block/block_unplug/trigger\n"
5637 "\t The first disables tracing every time block_unplug is hit.\n"
5638 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5639 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5640 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5641 "\t Like function triggers, the counter is only decremented if it\n"
5642 "\t enabled or disabled tracing.\n"
5643 "\t To remove a trigger without a count:\n"
5644 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5645 "\t To remove a trigger with a count:\n"
5646 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5647 "\t Filters can be ignored when removing a trigger.\n"
5648 #ifdef CONFIG_HIST_TRIGGERS
5649 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5650 "\t Format: hist:keys=<field1[,field2,...]>\n"
5651 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5652 "\t [:values=<field1[,field2,...]>]\n"
5653 "\t [:sort=<field1[,field2,...]>]\n"
5654 "\t [:size=#entries]\n"
5655 "\t [:pause][:continue][:clear]\n"
5656 "\t [:name=histname1]\n"
5657 "\t [:<handler>.<action>]\n"
5658 "\t [if <filter>]\n\n"
5659 "\t Note, special fields can be used as well:\n"
5660 "\t common_timestamp - to record current timestamp\n"
5661 "\t common_cpu - to record the CPU the event happened on\n"
5663 "\t A hist trigger variable can be:\n"
5664 "\t - a reference to a field e.g. x=current_timestamp,\n"
5665 "\t - a reference to another variable e.g. y=$x,\n"
5666 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5667 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5669 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5670 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5671 "\t variable reference, field or numeric literal.\n"
5673 "\t When a matching event is hit, an entry is added to a hash\n"
5674 "\t table using the key(s) and value(s) named, and the value of a\n"
5675 "\t sum called 'hitcount' is incremented. Keys and values\n"
5676 "\t correspond to fields in the event's format description. Keys\n"
5677 "\t can be any field, or the special string 'stacktrace'.\n"
5678 "\t Compound keys consisting of up to two fields can be specified\n"
5679 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5680 "\t fields. Sort keys consisting of up to two fields can be\n"
5681 "\t specified using the 'sort' keyword. The sort direction can\n"
5682 "\t be modified by appending '.descending' or '.ascending' to a\n"
5683 "\t sort field. The 'size' parameter can be used to specify more\n"
5684 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5685 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5686 "\t its histogram data will be shared with other triggers of the\n"
5687 "\t same name, and trigger hits will update this common data.\n\n"
5688 "\t Reading the 'hist' file for the event will dump the hash\n"
5689 "\t table in its entirety to stdout. If there are multiple hist\n"
5690 "\t triggers attached to an event, there will be a table for each\n"
5691 "\t trigger in the output. The table displayed for a named\n"
5692 "\t trigger will be the same as any other instance having the\n"
5693 "\t same name. The default format used to display a given field\n"
5694 "\t can be modified by appending any of the following modifiers\n"
5695 "\t to the field name, as applicable:\n\n"
5696 "\t .hex display a number as a hex value\n"
5697 "\t .sym display an address as a symbol\n"
5698 "\t .sym-offset display an address as a symbol and offset\n"
5699 "\t .execname display a common_pid as a program name\n"
5700 "\t .syscall display a syscall id as a syscall name\n"
5701 "\t .log2 display log2 value rather than raw number\n"
5702 "\t .buckets=size display values in groups of size rather than raw number\n"
5703 "\t .usecs display a common_timestamp in microseconds\n\n"
5704 "\t The 'pause' parameter can be used to pause an existing hist\n"
5705 "\t trigger or to start a hist trigger but not log any events\n"
5706 "\t until told to do so. 'continue' can be used to start or\n"
5707 "\t restart a paused hist trigger.\n\n"
5708 "\t The 'clear' parameter will clear the contents of a running\n"
5709 "\t hist trigger and leave its current paused/active state\n"
5711 "\t The enable_hist and disable_hist triggers can be used to\n"
5712 "\t have one event conditionally start and stop another event's\n"
5713 "\t already-attached hist trigger. The syntax is analogous to\n"
5714 "\t the enable_event and disable_event triggers.\n\n"
5715 "\t Hist trigger handlers and actions are executed whenever a\n"
5716 "\t a histogram entry is added or updated. They take the form:\n\n"
5717 "\t <handler>.<action>\n\n"
5718 "\t The available handlers are:\n\n"
5719 "\t onmatch(matching.event) - invoke on addition or update\n"
5720 "\t onmax(var) - invoke if var exceeds current max\n"
5721 "\t onchange(var) - invoke action if var changes\n\n"
5722 "\t The available actions are:\n\n"
5723 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5724 "\t save(field,...) - save current event fields\n"
5725 #ifdef CONFIG_TRACER_SNAPSHOT
5726 "\t snapshot() - snapshot the trace buffer\n\n"
5728 #ifdef CONFIG_SYNTH_EVENTS
5729 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5730 "\t Write into this file to define/undefine new synthetic events.\n"
5731 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5737 tracing_readme_read(struct file *filp, char __user *ubuf,
5738 size_t cnt, loff_t *ppos)
5740 return simple_read_from_buffer(ubuf, cnt, ppos,
5741 readme_msg, strlen(readme_msg));
5744 static const struct file_operations tracing_readme_fops = {
5745 .open = tracing_open_generic,
5746 .read = tracing_readme_read,
5747 .llseek = generic_file_llseek,
5750 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5754 return trace_find_tgid_ptr(pid);
5757 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5761 return trace_find_tgid_ptr(pid);
5764 static void saved_tgids_stop(struct seq_file *m, void *v)
5768 static int saved_tgids_show(struct seq_file *m, void *v)
5770 int *entry = (int *)v;
5771 int pid = entry - tgid_map;
5777 seq_printf(m, "%d %d\n", pid, tgid);
5781 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5782 .start = saved_tgids_start,
5783 .stop = saved_tgids_stop,
5784 .next = saved_tgids_next,
5785 .show = saved_tgids_show,
5788 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5792 ret = tracing_check_open_get_tr(NULL);
5796 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5800 static const struct file_operations tracing_saved_tgids_fops = {
5801 .open = tracing_saved_tgids_open,
5803 .llseek = seq_lseek,
5804 .release = seq_release,
5807 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5809 unsigned int *ptr = v;
5811 if (*pos || m->count)
5816 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5818 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5827 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5833 arch_spin_lock(&trace_cmdline_lock);
5835 v = &savedcmd->map_cmdline_to_pid[0];
5837 v = saved_cmdlines_next(m, v, &l);
5845 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5847 arch_spin_unlock(&trace_cmdline_lock);
5851 static int saved_cmdlines_show(struct seq_file *m, void *v)
5853 char buf[TASK_COMM_LEN];
5854 unsigned int *pid = v;
5856 __trace_find_cmdline(*pid, buf);
5857 seq_printf(m, "%d %s\n", *pid, buf);
5861 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5862 .start = saved_cmdlines_start,
5863 .next = saved_cmdlines_next,
5864 .stop = saved_cmdlines_stop,
5865 .show = saved_cmdlines_show,
5868 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5872 ret = tracing_check_open_get_tr(NULL);
5876 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5879 static const struct file_operations tracing_saved_cmdlines_fops = {
5880 .open = tracing_saved_cmdlines_open,
5882 .llseek = seq_lseek,
5883 .release = seq_release,
5887 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5888 size_t cnt, loff_t *ppos)
5893 arch_spin_lock(&trace_cmdline_lock);
5894 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5895 arch_spin_unlock(&trace_cmdline_lock);
5897 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5900 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5902 kfree(s->saved_cmdlines);
5903 kfree(s->map_cmdline_to_pid);
5907 static int tracing_resize_saved_cmdlines(unsigned int val)
5909 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5911 s = kmalloc(sizeof(*s), GFP_KERNEL);
5915 if (allocate_cmdlines_buffer(val, s) < 0) {
5920 arch_spin_lock(&trace_cmdline_lock);
5921 savedcmd_temp = savedcmd;
5923 arch_spin_unlock(&trace_cmdline_lock);
5924 free_saved_cmdlines_buffer(savedcmd_temp);
5930 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5931 size_t cnt, loff_t *ppos)
5936 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5940 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5941 if (!val || val > PID_MAX_DEFAULT)
5944 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5953 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5954 .open = tracing_open_generic,
5955 .read = tracing_saved_cmdlines_size_read,
5956 .write = tracing_saved_cmdlines_size_write,
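/*
 * Example usage (illustrative): resize the cached comm/pid list, within
 * the 1..PID_MAX_DEFAULT bounds checked above.
 *
 *	# echo 1024 > saved_cmdlines_size
 */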
5959 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5960 static union trace_eval_map_item *
5961 update_eval_map(union trace_eval_map_item *ptr)
5963 if (!ptr->map.eval_string) {
5964 if (ptr->tail.next) {
5965 ptr = ptr->tail.next;
5966 /* Set ptr to the next real item (skip head) */
5974 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5976 union trace_eval_map_item *ptr = v;
5979 * Paranoid! If ptr points to end, we don't want to increment past it.
5980 * This really should never happen.
5983 ptr = update_eval_map(ptr);
5984 if (WARN_ON_ONCE(!ptr))
5988 ptr = update_eval_map(ptr);
5993 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5995 union trace_eval_map_item *v;
5998 mutex_lock(&trace_eval_mutex);
6000 v = trace_eval_maps;
6004 while (v && l < *pos) {
6005 v = eval_map_next(m, v, &l);
6011 static void eval_map_stop(struct seq_file *m, void *v)
6013 mutex_unlock(&trace_eval_mutex);
6016 static int eval_map_show(struct seq_file *m, void *v)
6018 union trace_eval_map_item *ptr = v;
6020 seq_printf(m, "%s %ld (%s)\n",
6021 ptr->map.eval_string, ptr->map.eval_value,
6027 static const struct seq_operations tracing_eval_map_seq_ops = {
6028 .start = eval_map_start,
6029 .next = eval_map_next,
6030 .stop = eval_map_stop,
6031 .show = eval_map_show,
6034 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6038 ret = tracing_check_open_get_tr(NULL);
6042 return seq_open(filp, &tracing_eval_map_seq_ops);
6045 static const struct file_operations tracing_eval_map_fops = {
6046 .open = tracing_eval_map_open,
6048 .llseek = seq_lseek,
6049 .release = seq_release,
6052 static inline union trace_eval_map_item *
6053 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6055 /* Return tail of array given the head */
6056 return ptr + ptr->head.length + 1;
6060 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6063 struct trace_eval_map **stop;
6064 struct trace_eval_map **map;
6065 union trace_eval_map_item *map_array;
6066 union trace_eval_map_item *ptr;
6071 * The trace_eval_maps contains the map plus a head and tail item,
6072 * where the head holds the module and length of array, and the
6073 * tail holds a pointer to the next list.
6075 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6077 pr_warn("Unable to allocate trace eval mapping\n");
6081 mutex_lock(&trace_eval_mutex);
6083 if (!trace_eval_maps)
6084 trace_eval_maps = map_array;
6086 ptr = trace_eval_maps;
6088 ptr = trace_eval_jmp_to_tail(ptr);
6089 if (!ptr->tail.next)
6091 ptr = ptr->tail.next;
6094 ptr->tail.next = map_array;
6096 map_array->head.mod = mod;
6097 map_array->head.length = len;
6100 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6101 map_array->map = **map;
6104 memset(map_array, 0, sizeof(*map_array));
6106 mutex_unlock(&trace_eval_mutex);
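/*
 * Illustrative layout of one chunk built above, for len == 2 (a sketch
 * following the head/tail description in the comment):
 *
 *	map_array[0].head : { .mod = mod, .length = 2 }
 *	map_array[1].map  : first trace_eval_map
 *	map_array[2].map  : second trace_eval_map
 *	map_array[3].tail : zeroed terminator; .next links the next chunk
 *
 * trace_eval_jmp_to_tail() lands on index head.length + 1, i.e. [3].
 */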
6109 static void trace_create_eval_file(struct dentry *d_tracer)
6111 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6112 NULL, &tracing_eval_map_fops);
6115 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6116 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6117 static inline void trace_insert_eval_map_file(struct module *mod,
6118 struct trace_eval_map **start, int len) { }
6119 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6121 static void trace_insert_eval_map(struct module *mod,
6122 struct trace_eval_map **start, int len)
6124 struct trace_eval_map **map;
6131 trace_event_eval_update(map, len);
6133 trace_insert_eval_map_file(mod, start, len);
6137 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6138 size_t cnt, loff_t *ppos)
6140 struct trace_array *tr = filp->private_data;
6141 char buf[MAX_TRACER_SIZE+2];
6144 mutex_lock(&trace_types_lock);
6145 r = sprintf(buf, "%s\n", tr->current_trace->name);
6146 mutex_unlock(&trace_types_lock);
6148 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6151 int tracer_init(struct tracer *t, struct trace_array *tr)
6153 tracing_reset_online_cpus(&tr->array_buffer);
6157 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6161 for_each_tracing_cpu(cpu)
6162 per_cpu_ptr(buf->data, cpu)->entries = val;
6165 #ifdef CONFIG_TRACER_MAX_TRACE
6166 /* resize @trace_buf's per-CPU entries to match those of @size_buf */
6167 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6168 struct array_buffer *size_buf, int cpu_id)
6172 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6173 for_each_tracing_cpu(cpu) {
6174 ret = ring_buffer_resize(trace_buf->buffer,
6175 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6178 per_cpu_ptr(trace_buf->data, cpu)->entries =
6179 per_cpu_ptr(size_buf->data, cpu)->entries;
6182 ret = ring_buffer_resize(trace_buf->buffer,
6183 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6185 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6186 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6191 #endif /* CONFIG_TRACER_MAX_TRACE */
6193 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6194 unsigned long size, int cpu)
6199 * If the kernel or the user changes the size of the ring buffer,
6200 * we use the size that was given and can forget about
6201 * expanding it later.
6203 ring_buffer_expanded = true;
6205 /* May be called before buffers are initialized */
6206 if (!tr->array_buffer.buffer)
6209 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6213 #ifdef CONFIG_TRACER_MAX_TRACE
6214 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6215 !tr->current_trace->use_max_tr)
6218 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6220 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6221 &tr->array_buffer, cpu);
6224 * AARGH! We are left with different
6225 * size max buffer!!!!
6226 * The max buffer is our "snapshot" buffer.
6227 * When a tracer needs a snapshot (one of the
6228 * latency tracers), it swaps the max buffer
6229 * with the saved snapshot. We succeeded in updating
6230 * the size of the main buffer, but failed to
6231 * update the size of the max buffer. But when we tried
6232 * to reset the main buffer to the original size, we
6233 * failed there too. This is very unlikely to
6234 * happen, but if it does, warn and kill all
6238 tracing_disabled = 1;
6243 if (cpu == RING_BUFFER_ALL_CPUS)
6244 set_buffer_entries(&tr->max_buffer, size);
6246 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6249 #endif /* CONFIG_TRACER_MAX_TRACE */
6251 if (cpu == RING_BUFFER_ALL_CPUS)
6252 set_buffer_entries(&tr->array_buffer, size);
6254 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6259 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6260 unsigned long size, int cpu_id)
6264 mutex_lock(&trace_types_lock);
6266 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6267 /* make sure, this cpu is enabled in the mask */
6268 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6274 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6279 mutex_unlock(&trace_types_lock);
6286 * tracing_update_buffers - used by tracing facility to expand ring buffers
6288 * To save memory when tracing is never used on a system that has it
6289 * configured in, the ring buffers are set to a minimum size. Once
6290 * a user starts to use the tracing facility, they need to grow
6291 * to their default size.
6293 * This function is to be called when a tracer is about to be used.
6295 int tracing_update_buffers(void)
6299 mutex_lock(&trace_types_lock);
6300 if (!ring_buffer_expanded)
6301 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6302 RING_BUFFER_ALL_CPUS);
6303 mutex_unlock(&trace_types_lock);
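/*
 * Typical call pattern (illustrative); see tracing_snapshot_write()
 * below for a real caller:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */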
6308 struct trace_option_dentry;
6311 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6314 * Used to clear out the tracer before deletion of an instance.
6315 * Must have trace_types_lock held.
6317 static void tracing_set_nop(struct trace_array *tr)
6319 if (tr->current_trace == &nop_trace)
6322 tr->current_trace->enabled--;
6324 if (tr->current_trace->reset)
6325 tr->current_trace->reset(tr);
6327 tr->current_trace = &nop_trace;
6330 static bool tracer_options_updated;
6332 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6334 /* Only enable if the directory has been created already. */
6338 /* Only create trace option files after update_tracer_options finish */
6339 if (!tracer_options_updated)
6342 create_trace_option_files(tr, t);
6345 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6348 #ifdef CONFIG_TRACER_MAX_TRACE
6353 mutex_lock(&trace_types_lock);
6355 if (!ring_buffer_expanded) {
6356 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6357 RING_BUFFER_ALL_CPUS);
6363 for (t = trace_types; t; t = t->next) {
6364 if (strcmp(t->name, buf) == 0)
6371 if (t == tr->current_trace)
6374 #ifdef CONFIG_TRACER_SNAPSHOT
6375 if (t->use_max_tr) {
6376 arch_spin_lock(&tr->max_lock);
6377 if (tr->cond_snapshot)
6379 arch_spin_unlock(&tr->max_lock);
6384 /* Some tracers won't work on kernel command line */
6385 if (system_state < SYSTEM_RUNNING && t->noboot) {
6386 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6391 /* Some tracers are only allowed for the top level buffer */
6392 if (!trace_ok_for_array(t, tr)) {
6397 /* If trace pipe files are being read, we can't change the tracer */
6398 if (tr->trace_ref) {
6403 trace_branch_disable();
6405 tr->current_trace->enabled--;
6407 if (tr->current_trace->reset)
6408 tr->current_trace->reset(tr);
6410 /* Current trace needs to be nop_trace before synchronize_rcu */
6411 tr->current_trace = &nop_trace;
6413 #ifdef CONFIG_TRACER_MAX_TRACE
6414 had_max_tr = tr->allocated_snapshot;
6416 if (had_max_tr && !t->use_max_tr) {
6418 * We need to make sure that the update_max_tr sees that
6419 * current_trace changed to nop_trace to keep it from
6420 * swapping the buffers after we resize it.
6421 * The update_max_tr is called with interrupts disabled,
6422 * so a synchronize_rcu() is sufficient.
6428 if (t->use_max_tr && !had_max_tr) {
6429 ret = tracing_alloc_snapshot_instance(tr);
6436 ret = tracer_init(t, tr);
6441 tr->current_trace = t;
6442 tr->current_trace->enabled++;
6443 trace_branch_enable(tr);
6445 mutex_unlock(&trace_types_lock);
6451 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6452 size_t cnt, loff_t *ppos)
6454 struct trace_array *tr = filp->private_data;
6455 char buf[MAX_TRACER_SIZE+1];
6462 if (cnt > MAX_TRACER_SIZE)
6463 cnt = MAX_TRACER_SIZE;
6465 if (copy_from_user(buf, ubuf, cnt))
6472 err = tracing_set_tracer(tr, name);
6482 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6483 size_t cnt, loff_t *ppos)
6488 r = snprintf(buf, sizeof(buf), "%ld\n",
6489 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6490 if (r > sizeof(buf))
6492 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6496 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6497 size_t cnt, loff_t *ppos)
6502 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6512 tracing_thresh_read(struct file *filp, char __user *ubuf,
6513 size_t cnt, loff_t *ppos)
6515 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6519 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6520 size_t cnt, loff_t *ppos)
6522 struct trace_array *tr = filp->private_data;
6525 mutex_lock(&trace_types_lock);
6526 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6530 if (tr->current_trace->update_thresh) {
6531 ret = tr->current_trace->update_thresh(tr);
6538 mutex_unlock(&trace_types_lock);
6543 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6546 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6547 size_t cnt, loff_t *ppos)
6549 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6553 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6554 size_t cnt, loff_t *ppos)
6556 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6561 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6563 struct trace_array *tr = inode->i_private;
6564 struct trace_iterator *iter;
6567 ret = tracing_check_open_get_tr(tr);
6571 mutex_lock(&trace_types_lock);
6573 /* create a buffer to store the information to pass to userspace */
6574 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6577 __trace_array_put(tr);
6581 trace_seq_init(&iter->seq);
6582 iter->trace = tr->current_trace;
6584 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6589 /* trace pipe does not show start of buffer */
6590 cpumask_setall(iter->started);
6592 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6593 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6595 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6596 if (trace_clocks[tr->clock_id].in_ns)
6597 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6600 iter->array_buffer = &tr->array_buffer;
6601 iter->cpu_file = tracing_get_cpu(inode);
6602 mutex_init(&iter->mutex);
6603 filp->private_data = iter;
6605 if (iter->trace->pipe_open)
6606 iter->trace->pipe_open(iter);
6608 nonseekable_open(inode, filp);
6612 mutex_unlock(&trace_types_lock);
6617 __trace_array_put(tr);
6618 mutex_unlock(&trace_types_lock);
6622 static int tracing_release_pipe(struct inode *inode, struct file *file)
6624 struct trace_iterator *iter = file->private_data;
6625 struct trace_array *tr = inode->i_private;
6627 mutex_lock(&trace_types_lock);
6631 if (iter->trace->pipe_close)
6632 iter->trace->pipe_close(iter);
6634 mutex_unlock(&trace_types_lock);
6636 free_cpumask_var(iter->started);
6637 mutex_destroy(&iter->mutex);
6640 trace_array_put(tr);
6646 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6648 struct trace_array *tr = iter->tr;
6650 /* Iterators are static, they should be filled or empty */
6651 if (trace_buffer_iter(iter, iter->cpu_file))
6652 return EPOLLIN | EPOLLRDNORM;
6654 if (tr->trace_flags & TRACE_ITER_BLOCK)
6656 * Always select as readable when in blocking mode
6658 return EPOLLIN | EPOLLRDNORM;
6660 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6665 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6667 struct trace_iterator *iter = filp->private_data;
6669 return trace_poll(iter, filp, poll_table);
6672 /* Must be called with iter->mutex held. */
6673 static int tracing_wait_pipe(struct file *filp)
6675 struct trace_iterator *iter = filp->private_data;
6678 while (trace_empty(iter)) {
6680 if ((filp->f_flags & O_NONBLOCK)) {
6685 * We block until we have read something and tracing is disabled.
6686 * We still block if tracing is disabled but we have never
6687 * read anything. This allows a user to cat this file, and
6688 * then enable tracing. But after we have read something,
6689 * we give an EOF when tracing is again disabled.
6691 * iter->pos will be 0 if we haven't read anything.
6693 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6696 mutex_unlock(&iter->mutex);
6698 ret = wait_on_pipe(iter, 0);
6700 mutex_lock(&iter->mutex);
6713 tracing_read_pipe(struct file *filp, char __user *ubuf,
6714 size_t cnt, loff_t *ppos)
6716 struct trace_iterator *iter = filp->private_data;
6720 * Avoid more than one consumer on a single file descriptor
6721 * This is just a matter of trace coherency: the ring buffer itself
6724 mutex_lock(&iter->mutex);
6726 /* return any leftover data */
6727 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6731 trace_seq_init(&iter->seq);
6733 if (iter->trace->read) {
6734 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6740 sret = tracing_wait_pipe(filp);
6744 /* stop when tracing is finished */
6745 if (trace_empty(iter)) {
6750 if (cnt >= PAGE_SIZE)
6751 cnt = PAGE_SIZE - 1;
6753 /* reset all but tr, trace, and overruns */
6754 trace_iterator_reset(iter);
6755 cpumask_clear(iter->started);
6756 trace_seq_init(&iter->seq);
6758 trace_event_read_lock();
6759 trace_access_lock(iter->cpu_file);
6760 while (trace_find_next_entry_inc(iter) != NULL) {
6761 enum print_line_t ret;
6762 int save_len = iter->seq.seq.len;
6764 ret = print_trace_line(iter);
6765 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6766 /* don't print partial lines */
6767 iter->seq.seq.len = save_len;
6770 if (ret != TRACE_TYPE_NO_CONSUME)
6771 trace_consume(iter);
6773 if (trace_seq_used(&iter->seq) >= cnt)
6777 * Setting the full flag means we reached the trace_seq buffer
6778 * size and should have left via the partial-output condition above.
6779 * One of the trace_seq_* functions is not being used properly.
6781 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6784 trace_access_unlock(iter->cpu_file);
6785 trace_event_read_unlock();
6787 /* Now copy what we have to the user */
6788 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6789 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6790 trace_seq_init(&iter->seq);
6793 * If there was nothing to send to the user, despite consuming trace
6794 * entries, go back and wait for more entries.
6800 mutex_unlock(&iter->mutex);
6805 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6808 __free_page(spd->pages[idx]);
6812 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6818 /* Seq buffer is page-sized, exactly what we need. */
6820 save_len = iter->seq.seq.len;
6821 ret = print_trace_line(iter);
6823 if (trace_seq_has_overflowed(&iter->seq)) {
6824 iter->seq.seq.len = save_len;
6829 * This should not be hit, because it should only
6830 * be set if the iter->seq overflowed. But check it
6831 * anyway to be safe.
6833 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6834 iter->seq.seq.len = save_len;
6838 count = trace_seq_used(&iter->seq) - save_len;
6841 iter->seq.seq.len = save_len;
6845 if (ret != TRACE_TYPE_NO_CONSUME)
6846 trace_consume(iter);
6848 if (!trace_find_next_entry_inc(iter)) {
6858 static ssize_t tracing_splice_read_pipe(struct file *filp,
6860 struct pipe_inode_info *pipe,
6864 struct page *pages_def[PIPE_DEF_BUFFERS];
6865 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6866 struct trace_iterator *iter = filp->private_data;
6867 struct splice_pipe_desc spd = {
6869 .partial = partial_def,
6870 .nr_pages = 0, /* This gets updated below. */
6871 .nr_pages_max = PIPE_DEF_BUFFERS,
6872 .ops = &default_pipe_buf_ops,
6873 .spd_release = tracing_spd_release_pipe,
6879 if (splice_grow_spd(pipe, &spd))
6882 mutex_lock(&iter->mutex);
6884 if (iter->trace->splice_read) {
6885 ret = iter->trace->splice_read(iter, filp,
6886 ppos, pipe, len, flags);
6891 ret = tracing_wait_pipe(filp);
6895 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6900 trace_event_read_lock();
6901 trace_access_lock(iter->cpu_file);
6903 /* Fill as many pages as possible. */
6904 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6905 spd.pages[i] = alloc_page(GFP_KERNEL);
6909 rem = tracing_fill_pipe_page(rem, iter);
6911 /* Copy the data into the page, so we can start over. */
6912 ret = trace_seq_to_buffer(&iter->seq,
6913 page_address(spd.pages[i]),
6914 trace_seq_used(&iter->seq));
6916 __free_page(spd.pages[i]);
6919 spd.partial[i].offset = 0;
6920 spd.partial[i].len = trace_seq_used(&iter->seq);
6922 trace_seq_init(&iter->seq);
6925 trace_access_unlock(iter->cpu_file);
6926 trace_event_read_unlock();
6927 mutex_unlock(&iter->mutex);
6932 ret = splice_to_pipe(pipe, &spd);
6936 splice_shrink_spd(&spd);
6940 mutex_unlock(&iter->mutex);
6945 tracing_entries_read(struct file *filp, char __user *ubuf,
6946 size_t cnt, loff_t *ppos)
6948 struct inode *inode = file_inode(filp);
6949 struct trace_array *tr = inode->i_private;
6950 int cpu = tracing_get_cpu(inode);
6955 mutex_lock(&trace_types_lock);
6957 if (cpu == RING_BUFFER_ALL_CPUS) {
6958 int cpu, buf_size_same;
6963 /* check if all cpu sizes are same */
6964 for_each_tracing_cpu(cpu) {
6965 /* fill in the size from first enabled cpu */
6967 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6968 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6974 if (buf_size_same) {
6975 if (!ring_buffer_expanded)
6976 r = sprintf(buf, "%lu (expanded: %lu)\n",
6978 trace_buf_size >> 10);
6980 r = sprintf(buf, "%lu\n", size >> 10);
6982 r = sprintf(buf, "X\n");
6984 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6986 mutex_unlock(&trace_types_lock);
6988 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6993 tracing_entries_write(struct file *filp, const char __user *ubuf,
6994 size_t cnt, loff_t *ppos)
6996 struct inode *inode = file_inode(filp);
6997 struct trace_array *tr = inode->i_private;
7001 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7005 /* must have at least 1 entry */
7009 /* value is in KB */
7011 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
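/*
 * Example usage (illustrative): the value written is in KB, as noted
 * above; per-CPU files resize only that CPU's buffer.
 *
 *	# echo 4096 > buffer_size_kb
 *	# echo 1024 > per_cpu/cpu1/buffer_size_kb
 */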
7021 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7022 size_t cnt, loff_t *ppos)
7024 struct trace_array *tr = filp->private_data;
7027 unsigned long size = 0, expanded_size = 0;
7029 mutex_lock(&trace_types_lock);
7030 for_each_tracing_cpu(cpu) {
7031 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7032 if (!ring_buffer_expanded)
7033 expanded_size += trace_buf_size >> 10;
7035 if (ring_buffer_expanded)
7036 r = sprintf(buf, "%lu\n", size);
7038 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7039 mutex_unlock(&trace_types_lock);
7041 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7045 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7046 size_t cnt, loff_t *ppos)
7049 * There is no need to read what the user has written; this function
7050 * just makes sure that there is no error when "echo" is used
7059 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7061 struct trace_array *tr = inode->i_private;
7063 /* disable tracing ? */
7064 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7065 tracer_tracing_off(tr);
7066 /* resize the ring buffer to 0 */
7067 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7069 trace_array_put(tr);
7075 tracing_mark_write(struct file *filp, const char __user *ubuf,
7076 size_t cnt, loff_t *fpos)
7078 struct trace_array *tr = filp->private_data;
7079 struct ring_buffer_event *event;
7080 enum event_trigger_type tt = ETT_NONE;
7081 struct trace_buffer *buffer;
7082 struct print_entry *entry;
7087 /* Used in tracing_mark_raw_write() as well */
7088 #define FAULTED_STR "<faulted>"
7089 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7091 if (tracing_disabled)
7094 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7097 if (cnt > TRACE_BUF_SIZE)
7098 cnt = TRACE_BUF_SIZE;
7100 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7102 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7104 /* If less than "<faulted>", then make sure we can still add that */
7105 if (cnt < FAULTED_SIZE)
7106 size += FAULTED_SIZE - cnt;
7108 buffer = tr->array_buffer.buffer;
7109 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7111 if (unlikely(!event))
7112 /* Ring buffer disabled, return as if not open for write */
7115 entry = ring_buffer_event_data(event);
7116 entry->ip = _THIS_IP_;
7118 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7120 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7126 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7127 /* do not add \n before testing triggers, but add \0 */
7128 entry->buf[cnt] = '\0';
7129 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7132 if (entry->buf[cnt - 1] != '\n') {
7133 entry->buf[cnt] = '\n';
7134 entry->buf[cnt + 1] = '\0';
7136 entry->buf[cnt] = '\0';
7138 if (static_branch_unlikely(&trace_marker_exports_enabled))
7139 ftrace_exports(event, TRACE_EXPORT_MARKER);
7140 __buffer_unlock_commit(buffer, event);
7143 event_triggers_post_call(tr->trace_marker_file, tt);
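/*
 * Example usage (illustrative): anything written to trace_marker shows
 * up in the trace output as a print entry.
 *
 *	# echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 */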
7148 /* Limit it for now to 3K (including tag) */
7149 #define RAW_DATA_MAX_SIZE (1024*3)
7152 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7153 size_t cnt, loff_t *fpos)
7155 struct trace_array *tr = filp->private_data;
7156 struct ring_buffer_event *event;
7157 struct trace_buffer *buffer;
7158 struct raw_data_entry *entry;
7163 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7165 if (tracing_disabled)
7168 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7171 /* The marker must at least have a tag id */
7172 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7175 if (cnt > TRACE_BUF_SIZE)
7176 cnt = TRACE_BUF_SIZE;
7178 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7180 size = sizeof(*entry) + cnt;
7181 if (cnt < FAULT_SIZE_ID)
7182 size += FAULT_SIZE_ID - cnt;
7184 buffer = tr->array_buffer.buffer;
7185 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7188 /* Ring buffer disabled, return as if not open for write */
7191 entry = ring_buffer_event_data(event);
7193 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7196 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7201 __buffer_unlock_commit(buffer, event);
7206 static int tracing_clock_show(struct seq_file *m, void *v)
7208 struct trace_array *tr = m->private;
7211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7213 "%s%s%s%s", i ? " " : "",
7214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7215 i == tr->clock_id ? "]" : "");
7221 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7225 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7226 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7229 if (i == ARRAY_SIZE(trace_clocks))
7232 mutex_lock(&trace_types_lock);
7236 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7239 * New clock may not be consistent with the previous clock.
7240 * Reset the buffer so that it doesn't have incomparable timestamps.
7242 tracing_reset_online_cpus(&tr->array_buffer);
7244 #ifdef CONFIG_TRACER_MAX_TRACE
7245 if (tr->max_buffer.buffer)
7246 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7247 tracing_reset_online_cpus(&tr->max_buffer);
7250 mutex_unlock(&trace_types_lock);
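/*
 * tracing_set_clock() is also usable from kernel code (the boot-time
 * trace_boot_clock handling below does exactly this).  A minimal sketch,
 * assuming a valid trace_array pointer and that "mono" is one of the
 * trace_clocks[] entries:
 *
 *	int err = tracing_set_clock(tr, "mono");
 *
 *	if (err)
 *		pr_warn("failed to set trace clock: %d\n", err);
 */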
7255 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7256 size_t cnt, loff_t *fpos)
7258 struct seq_file *m = filp->private_data;
7259 struct trace_array *tr = m->private;
7261 const char *clockstr;
7264 if (cnt >= sizeof(buf))
7267 if (copy_from_user(buf, ubuf, cnt))
7272 clockstr = strstrip(buf);
7274 ret = tracing_set_clock(tr, clockstr);
7283 static int tracing_clock_open(struct inode *inode, struct file *file)
7285 struct trace_array *tr = inode->i_private;
7288 ret = tracing_check_open_get_tr(tr);
7292 ret = single_open(file, tracing_clock_show, inode->i_private);
7294 trace_array_put(tr);
7299 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7301 struct trace_array *tr = m->private;
7303 mutex_lock(&trace_types_lock);
7305 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7306 seq_puts(m, "delta [absolute]\n");
7308 seq_puts(m, "[delta] absolute\n");
7310 mutex_unlock(&trace_types_lock);
7315 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7317 struct trace_array *tr = inode->i_private;
7320 ret = tracing_check_open_get_tr(tr);
7324 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7326 trace_array_put(tr);
7331 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7333 if (rbe == this_cpu_read(trace_buffered_event))
7334 return ring_buffer_time_stamp(buffer);
7336 return ring_buffer_event_time_stamp(buffer, rbe);
7340 * Set or disable using the per CPU trace_buffered_event when possible.
7342 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7346 mutex_lock(&trace_types_lock);
7348 if (set && tr->no_filter_buffering_ref++)
7352 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7357 --tr->no_filter_buffering_ref;
7360 mutex_unlock(&trace_types_lock);
7365 struct ftrace_buffer_info {
7366 struct trace_iterator iter;
7368 unsigned int spare_cpu;
7372 #ifdef CONFIG_TRACER_SNAPSHOT
7373 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7375 struct trace_array *tr = inode->i_private;
7376 struct trace_iterator *iter;
7380 ret = tracing_check_open_get_tr(tr);
7384 if (file->f_mode & FMODE_READ) {
7385 iter = __tracing_open(inode, file, true);
7387 ret = PTR_ERR(iter);
7389 /* Writes still need the seq_file to hold the private data */
7391 m = kzalloc(sizeof(*m), GFP_KERNEL);
7394 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7402 iter->array_buffer = &tr->max_buffer;
7403 iter->cpu_file = tracing_get_cpu(inode);
7405 file->private_data = m;
7409 trace_array_put(tr);
7415 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7418 struct seq_file *m = filp->private_data;
7419 struct trace_iterator *iter = m->private;
7420 struct trace_array *tr = iter->tr;
7424 ret = tracing_update_buffers();
7428 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7432 mutex_lock(&trace_types_lock);
7434 if (tr->current_trace->use_max_tr) {
7439 arch_spin_lock(&tr->max_lock);
7440 if (tr->cond_snapshot)
7442 arch_spin_unlock(&tr->max_lock);
7448 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7452 if (tr->allocated_snapshot)
7456 /* Only allow per-cpu swap if the ring buffer supports it */
7457 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7458 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7463 if (tr->allocated_snapshot)
7464 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7465 &tr->array_buffer, iter->cpu_file);
7467 ret = tracing_alloc_snapshot_instance(tr);
7470 local_irq_disable();
7471 /* Now, we're going to swap */
7472 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7473 update_max_tr(tr, current, smp_processor_id(), NULL);
7475 update_max_tr_single(tr, current, iter->cpu_file);
7479 if (tr->allocated_snapshot) {
7480 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7481 tracing_reset_online_cpus(&tr->max_buffer);
7483 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7493 mutex_unlock(&trace_types_lock);
7497 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7499 struct seq_file *m = file->private_data;
7502 ret = tracing_release(inode, file);
7504 if (file->f_mode & FMODE_READ)
7507 /* If write only, the seq_file is just a stub */
7515 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7516 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7517 size_t count, loff_t *ppos);
7518 static int tracing_buffers_release(struct inode *inode, struct file *file);
7519 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7520 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7522 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7524 struct ftrace_buffer_info *info;
7527 /* The following checks for tracefs lockdown */
7528 ret = tracing_buffers_open(inode, filp);
7532 info = filp->private_data;
7534 if (info->iter.trace->use_max_tr) {
7535 tracing_buffers_release(inode, filp);
7539 info->iter.snapshot = true;
7540 info->iter.array_buffer = &info->iter.tr->max_buffer;
7545 #endif /* CONFIG_TRACER_SNAPSHOT */
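/*
 * Kernel code normally drives the snapshot mechanism above through the
 * tracing_snapshot() family of helpers rather than these file ops.  A
 * rough sketch (assumes CONFIG_TRACER_SNAPSHOT and ignores errors):
 *
 *	tracing_snapshot_alloc();	   make sure the snapshot buffer exists
 *	...
 *	tracing_snapshot();		   swap the live buffer into max_buffer
 *
 * The result can then be read back through the tracefs "snapshot" file
 * served by snapshot_fops below.
 */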
7548 static const struct file_operations tracing_thresh_fops = {
7549 .open = tracing_open_generic,
7550 .read = tracing_thresh_read,
7551 .write = tracing_thresh_write,
7552 .llseek = generic_file_llseek,
7555 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7556 static const struct file_operations tracing_max_lat_fops = {
7557 .open = tracing_open_generic,
7558 .read = tracing_max_lat_read,
7559 .write = tracing_max_lat_write,
7560 .llseek = generic_file_llseek,
7564 static const struct file_operations set_tracer_fops = {
7565 .open = tracing_open_generic,
7566 .read = tracing_set_trace_read,
7567 .write = tracing_set_trace_write,
7568 .llseek = generic_file_llseek,
7571 static const struct file_operations tracing_pipe_fops = {
7572 .open = tracing_open_pipe,
7573 .poll = tracing_poll_pipe,
7574 .read = tracing_read_pipe,
7575 .splice_read = tracing_splice_read_pipe,
7576 .release = tracing_release_pipe,
7577 .llseek = no_llseek,
7580 static const struct file_operations tracing_entries_fops = {
7581 .open = tracing_open_generic_tr,
7582 .read = tracing_entries_read,
7583 .write = tracing_entries_write,
7584 .llseek = generic_file_llseek,
7585 .release = tracing_release_generic_tr,
7588 static const struct file_operations tracing_total_entries_fops = {
7589 .open = tracing_open_generic_tr,
7590 .read = tracing_total_entries_read,
7591 .llseek = generic_file_llseek,
7592 .release = tracing_release_generic_tr,
7595 static const struct file_operations tracing_free_buffer_fops = {
7596 .open = tracing_open_generic_tr,
7597 .write = tracing_free_buffer_write,
7598 .release = tracing_free_buffer_release,
7601 static const struct file_operations tracing_mark_fops = {
7602 .open = tracing_mark_open,
7603 .write = tracing_mark_write,
7604 .release = tracing_release_generic_tr,
7607 static const struct file_operations tracing_mark_raw_fops = {
7608 .open = tracing_mark_open,
7609 .write = tracing_mark_raw_write,
7610 .release = tracing_release_generic_tr,
7613 static const struct file_operations trace_clock_fops = {
7614 .open = tracing_clock_open,
7616 .llseek = seq_lseek,
7617 .release = tracing_single_release_tr,
7618 .write = tracing_clock_write,
7621 static const struct file_operations trace_time_stamp_mode_fops = {
7622 .open = tracing_time_stamp_mode_open,
7624 .llseek = seq_lseek,
7625 .release = tracing_single_release_tr,
7628 #ifdef CONFIG_TRACER_SNAPSHOT
7629 static const struct file_operations snapshot_fops = {
7630 .open = tracing_snapshot_open,
7632 .write = tracing_snapshot_write,
7633 .llseek = tracing_lseek,
7634 .release = tracing_snapshot_release,
7637 static const struct file_operations snapshot_raw_fops = {
7638 .open = snapshot_raw_open,
7639 .read = tracing_buffers_read,
7640 .release = tracing_buffers_release,
7641 .splice_read = tracing_buffers_splice_read,
7642 .llseek = no_llseek,
7645 #endif /* CONFIG_TRACER_SNAPSHOT */
7648 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7649 * @filp: The active open file structure
7650 * @ubuf: The user space buffer holding the value to be written
7651 * @cnt: The maximum number of bytes to read from @ubuf
7652 * @ppos: The current "file" position
7654 * This function implements the write interface for a struct trace_min_max_param.
7655 * The filp->private_data must point to a trace_min_max_param structure that
7656 * defines where to write the value, the min and the max acceptable values,
7657 * and a lock to protect the write.
7660 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7662 struct trace_min_max_param *param = filp->private_data;
7669 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7674 mutex_lock(param->lock);
7676 if (param->min && val < *param->min)
7679 if (param->max && val > *param->max)
7686 mutex_unlock(param->lock);
7695 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7696 * @filp: The active open file structure
7697 * @ubuf: The userspace provided buffer to read value into
7698 * @cnt: The maximum number of bytes to read
7699 * @ppos: The current "file" position
7701 * This function implements the read interface for a struct trace_min_max_param.
7702 * The filp->private_data must point to a trace_min_max_param struct with valid
7706 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7708 struct trace_min_max_param *param = filp->private_data;
7709 char buf[U64_STR_SIZE];
7718 if (cnt > sizeof(buf))
7721 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7723 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7726 const struct file_operations trace_min_max_fops = {
7727 .open = tracing_open_generic,
7728 .read = trace_min_max_read,
7729 .write = trace_min_max_write,
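/*
 * A sketch of wiring a u64 tunable to trace_min_max_fops, assuming the
 * struct's lock/val/min/max pointer fields as used by the handlers above
 * (all names here are purely illustrative):
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent, &my_param,
 *			  &trace_min_max_fops);
 */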
7732 #define TRACING_LOG_ERRS_MAX 8
7733 #define TRACING_LOG_LOC_MAX 128
7735 #define CMD_PREFIX " Command: "
7738 const char **errs; /* ptr to loc-specific array of err strings */
7739 u8 type; /* index into errs -> specific err string */
7740 u16 pos; /* caret position */
7744 struct tracing_log_err {
7745 struct list_head list;
7746 struct err_info info;
7747 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7748 char *cmd; /* what caused err */
7751 static DEFINE_MUTEX(tracing_err_log_lock);
7753 static struct tracing_log_err *alloc_tracing_log_err(int len)
7755 struct tracing_log_err *err;
7757 err = kzalloc(sizeof(*err), GFP_KERNEL);
7759 return ERR_PTR(-ENOMEM);
7761 err->cmd = kzalloc(len, GFP_KERNEL);
7764 return ERR_PTR(-ENOMEM);
7770 static void free_tracing_log_err(struct tracing_log_err *err)
7776 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7779 struct tracing_log_err *err;
7781 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7782 err = alloc_tracing_log_err(len);
7783 if (PTR_ERR(err) != -ENOMEM)
7784 tr->n_err_log_entries++;
7789 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7791 err->cmd = kzalloc(len, GFP_KERNEL);
7793 return ERR_PTR(-ENOMEM);
7794 list_del(&err->list);
7800 * err_pos - find the position of a string within a command for error careting
7801 * @cmd: The tracing command that caused the error
7802 * @str: The string to position the caret at within @cmd
7804 * Finds the position of the first occurrence of @str within @cmd. The
7805 * return value can be passed to tracing_log_err() for caret placement
7808 * Returns the index within @cmd of the first occurrence of @str or 0
7809 * if @str was not found.
7811 unsigned int err_pos(char *cmd, const char *str)
7815 if (WARN_ON(!strlen(cmd)))
7818 found = strstr(cmd, str);
7826 * tracing_log_err - write an error to the tracing error log
7827 * @tr: The associated trace array for the error (NULL for top level array)
7828 * @loc: A string describing where the error occurred
7829 * @cmd: The tracing command that caused the error
7830 * @errs: The array of loc-specific static error strings
7831 * @type: The index into errs[], which produces the specific static err string
7832 * @pos: The position the caret should be placed in the cmd
7834 * Writes an error into tracing/error_log of the form:
7836 * <loc>: error: <text>
7840 * tracing/error_log is a small log file containing the last
7841 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7842 * unless there has been a tracing error, and the error log can be
7843 * cleared and have its memory freed by writing the empty string in
7844 * truncation mode to it i.e. echo > tracing/error_log.
7846 * NOTE: the @errs array, along with the @type param, is used to
7847 * produce a static error string - this string is not copied and saved
7848 * when the error is logged - only a pointer to it is saved. See
7849 * existing callers for examples of how static strings are typically
7850 * defined for use with tracing_log_err().
7852 void tracing_log_err(struct trace_array *tr,
7853 const char *loc, const char *cmd,
7854 const char **errs, u8 type, u16 pos)
7856 struct tracing_log_err *err;
7862 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7864 mutex_lock(&tracing_err_log_lock);
7865 err = get_tracing_log_err(tr, len);
7866 if (PTR_ERR(err) == -ENOMEM) {
7867 mutex_unlock(&tracing_err_log_lock);
7871 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7872 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7874 err->info.errs = errs;
7875 err->info.type = type;
7876 err->info.pos = pos;
7877 err->info.ts = local_clock();
7879 list_add_tail(&err->list, &tr->err_log);
7880 mutex_unlock(&tracing_err_log_lock);
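/*
 * A sketch of the caller pattern described above: users keep a static
 * array of error strings and pass in the index plus a caret position.
 * (The error table, command string and names below are made up; see the
 * real callers for the actual tables.)
 *
 *	static const char *my_errs[] = { "Bad argument", "Duplicate name" };
 *	enum { MY_ERR_BAD_ARG, MY_ERR_DUP_NAME };
 *
 *	tracing_log_err(tr, "my_cmd", cmd, my_errs, MY_ERR_DUP_NAME,
 *			err_pos(cmd, "name"));
 */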
7883 static void clear_tracing_err_log(struct trace_array *tr)
7885 struct tracing_log_err *err, *next;
7887 mutex_lock(&tracing_err_log_lock);
7888 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7889 list_del(&err->list);
7890 free_tracing_log_err(err);
7893 tr->n_err_log_entries = 0;
7894 mutex_unlock(&tracing_err_log_lock);
7897 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7899 struct trace_array *tr = m->private;
7901 mutex_lock(&tracing_err_log_lock);
7903 return seq_list_start(&tr->err_log, *pos);
7906 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7908 struct trace_array *tr = m->private;
7910 return seq_list_next(v, &tr->err_log, pos);
7913 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7915 mutex_unlock(&tracing_err_log_lock);
7918 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7922 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7924 for (i = 0; i < pos; i++)
7929 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7931 struct tracing_log_err *err = v;
7934 const char *err_text = err->info.errs[err->info.type];
7935 u64 sec = err->info.ts;
7938 nsec = do_div(sec, NSEC_PER_SEC);
7939 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7940 err->loc, err_text);
7941 seq_printf(m, "%s", err->cmd);
7942 tracing_err_log_show_pos(m, err->info.pos);
7948 static const struct seq_operations tracing_err_log_seq_ops = {
7949 .start = tracing_err_log_seq_start,
7950 .next = tracing_err_log_seq_next,
7951 .stop = tracing_err_log_seq_stop,
7952 .show = tracing_err_log_seq_show
7955 static int tracing_err_log_open(struct inode *inode, struct file *file)
7957 struct trace_array *tr = inode->i_private;
7960 ret = tracing_check_open_get_tr(tr);
7964 /* If this file was opened for write, then erase contents */
7965 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7966 clear_tracing_err_log(tr);
7968 if (file->f_mode & FMODE_READ) {
7969 ret = seq_open(file, &tracing_err_log_seq_ops);
7971 struct seq_file *m = file->private_data;
7974 trace_array_put(tr);
7980 static ssize_t tracing_err_log_write(struct file *file,
7981 const char __user *buffer,
7982 size_t count, loff_t *ppos)
7987 static int tracing_err_log_release(struct inode *inode, struct file *file)
7989 struct trace_array *tr = inode->i_private;
7991 trace_array_put(tr);
7993 if (file->f_mode & FMODE_READ)
7994 seq_release(inode, file);
7999 static const struct file_operations tracing_err_log_fops = {
8000 .open = tracing_err_log_open,
8001 .write = tracing_err_log_write,
8003 .llseek = seq_lseek,
8004 .release = tracing_err_log_release,
8007 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8009 struct trace_array *tr = inode->i_private;
8010 struct ftrace_buffer_info *info;
8013 ret = tracing_check_open_get_tr(tr);
8017 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8019 trace_array_put(tr);
8023 mutex_lock(&trace_types_lock);
8026 info->iter.cpu_file = tracing_get_cpu(inode);
8027 info->iter.trace = tr->current_trace;
8028 info->iter.array_buffer = &tr->array_buffer;
8030 /* Force reading ring buffer for first read */
8031 info->read = (unsigned int)-1;
8033 filp->private_data = info;
8037 mutex_unlock(&trace_types_lock);
8039 ret = nonseekable_open(inode, filp);
8041 trace_array_put(tr);
8047 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8049 struct ftrace_buffer_info *info = filp->private_data;
8050 struct trace_iterator *iter = &info->iter;
8052 return trace_poll(iter, filp, poll_table);
8056 tracing_buffers_read(struct file *filp, char __user *ubuf,
8057 size_t count, loff_t *ppos)
8059 struct ftrace_buffer_info *info = filp->private_data;
8060 struct trace_iterator *iter = &info->iter;
8067 #ifdef CONFIG_TRACER_MAX_TRACE
8068 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8073 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8075 if (IS_ERR(info->spare)) {
8076 ret = PTR_ERR(info->spare);
8079 info->spare_cpu = iter->cpu_file;
8085 /* Do we have previous read data to read? */
8086 if (info->read < PAGE_SIZE)
8090 trace_access_lock(iter->cpu_file);
8091 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8095 trace_access_unlock(iter->cpu_file);
8098 if (trace_empty(iter)) {
8099 if ((filp->f_flags & O_NONBLOCK))
8102 ret = wait_on_pipe(iter, 0);
8113 size = PAGE_SIZE - info->read;
8117 ret = copy_to_user(ubuf, info->spare + info->read, size);
8129 static int tracing_buffers_release(struct inode *inode, struct file *file)
8131 struct ftrace_buffer_info *info = file->private_data;
8132 struct trace_iterator *iter = &info->iter;
8134 mutex_lock(&trace_types_lock);
8136 iter->tr->trace_ref--;
8138 __trace_array_put(iter->tr);
8141 ring_buffer_free_read_page(iter->array_buffer->buffer,
8142 info->spare_cpu, info->spare);
8145 mutex_unlock(&trace_types_lock);
8151 struct trace_buffer *buffer;
8154 refcount_t refcount;
8157 static void buffer_ref_release(struct buffer_ref *ref)
8159 if (!refcount_dec_and_test(&ref->refcount))
8161 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8165 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8166 struct pipe_buffer *buf)
8168 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8170 buffer_ref_release(ref);
8174 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8175 struct pipe_buffer *buf)
8177 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8179 if (refcount_read(&ref->refcount) > INT_MAX/2)
8182 refcount_inc(&ref->refcount);
8186 /* Pipe buffer operations for a buffer. */
8187 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8188 .release = buffer_pipe_buf_release,
8189 .get = buffer_pipe_buf_get,
8193 * Callback from splice_to_pipe(), if we need to release some pages
8194 * at the end of the spd in case we errored out while filling the pipe.
8196 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8198 struct buffer_ref *ref =
8199 (struct buffer_ref *)spd->partial[i].private;
8201 buffer_ref_release(ref);
8202 spd->partial[i].private = 0;
8206 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8207 struct pipe_inode_info *pipe, size_t len,
8210 struct ftrace_buffer_info *info = file->private_data;
8211 struct trace_iterator *iter = &info->iter;
8212 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8213 struct page *pages_def[PIPE_DEF_BUFFERS];
8214 struct splice_pipe_desc spd = {
8216 .partial = partial_def,
8217 .nr_pages_max = PIPE_DEF_BUFFERS,
8218 .ops = &buffer_pipe_buf_ops,
8219 .spd_release = buffer_spd_release,
8221 struct buffer_ref *ref;
8225 #ifdef CONFIG_TRACER_MAX_TRACE
8226 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8230 if (*ppos & (PAGE_SIZE - 1))
8233 if (len & (PAGE_SIZE - 1)) {
8234 if (len < PAGE_SIZE)
8239 if (splice_grow_spd(pipe, &spd))
8243 trace_access_lock(iter->cpu_file);
8244 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8246 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8250 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8256 refcount_set(&ref->refcount, 1);
8257 ref->buffer = iter->array_buffer->buffer;
8258 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8259 if (IS_ERR(ref->page)) {
8260 ret = PTR_ERR(ref->page);
8265 ref->cpu = iter->cpu_file;
8267 r = ring_buffer_read_page(ref->buffer, &ref->page,
8268 len, iter->cpu_file, 1);
8270 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8276 page = virt_to_page(ref->page);
8278 spd.pages[i] = page;
8279 spd.partial[i].len = PAGE_SIZE;
8280 spd.partial[i].offset = 0;
8281 spd.partial[i].private = (unsigned long)ref;
8285 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8288 trace_access_unlock(iter->cpu_file);
8291 /* did we read anything? */
8292 if (!spd.nr_pages) {
8297 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8300 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8307 ret = splice_to_pipe(pipe, &spd);
8309 splice_shrink_spd(&spd);
8314 static const struct file_operations tracing_buffers_fops = {
8315 .open = tracing_buffers_open,
8316 .read = tracing_buffers_read,
8317 .poll = tracing_buffers_poll,
8318 .release = tracing_buffers_release,
8319 .splice_read = tracing_buffers_splice_read,
8320 .llseek = no_llseek,
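/*
 * trace_pipe_raw hands out whole ring-buffer pages, so splices must be
 * page aligned and at least one page long (see the checks in
 * tracing_buffers_splice_read() above).  A rough user-space sketch using
 * splice(2); paths, the 4096 page size and error handling are
 * illustrative only:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(fd, NULL, pfd[1], NULL, 4096, 0)) > 0)
 *		splice(pfd[0], NULL, out, NULL, n, 0);
 */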
8324 tracing_stats_read(struct file *filp, char __user *ubuf,
8325 size_t count, loff_t *ppos)
8327 struct inode *inode = file_inode(filp);
8328 struct trace_array *tr = inode->i_private;
8329 struct array_buffer *trace_buf = &tr->array_buffer;
8330 int cpu = tracing_get_cpu(inode);
8331 struct trace_seq *s;
8333 unsigned long long t;
8334 unsigned long usec_rem;
8336 s = kmalloc(sizeof(*s), GFP_KERNEL);
8342 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8343 trace_seq_printf(s, "entries: %ld\n", cnt);
8345 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8346 trace_seq_printf(s, "overrun: %ld\n", cnt);
8348 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8349 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8351 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8352 trace_seq_printf(s, "bytes: %ld\n", cnt);
8354 if (trace_clocks[tr->clock_id].in_ns) {
8355 /* local or global for trace_clock */
8356 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8357 usec_rem = do_div(t, USEC_PER_SEC);
8358 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8361 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8362 usec_rem = do_div(t, USEC_PER_SEC);
8363 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8365 /* counter or tsc mode for trace_clock */
8366 trace_seq_printf(s, "oldest event ts: %llu\n",
8367 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8369 trace_seq_printf(s, "now ts: %llu\n",
8370 ring_buffer_time_stamp(trace_buf->buffer));
8373 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8374 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8376 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8377 trace_seq_printf(s, "read events: %ld\n", cnt);
8379 count = simple_read_from_buffer(ubuf, count, ppos,
8380 s->buffer, trace_seq_used(s));
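/*
 * Based on the trace_seq_printf() calls above, a read of
 * per_cpu/cpuN/stats produces output along these lines (values are, of
 * course, made up):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65536
 *	oldest event ts: 5134.000123
 *	now ts: 5160.000456
 *	dropped events: 0
 *	read events: 342
 */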
8387 static const struct file_operations tracing_stats_fops = {
8388 .open = tracing_open_generic_tr,
8389 .read = tracing_stats_read,
8390 .llseek = generic_file_llseek,
8391 .release = tracing_release_generic_tr,
8394 #ifdef CONFIG_DYNAMIC_FTRACE
8397 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8398 size_t cnt, loff_t *ppos)
8404 /* 256 should be plenty to hold the amount needed */
8405 buf = kmalloc(256, GFP_KERNEL);
8409 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8410 ftrace_update_tot_cnt,
8411 ftrace_number_of_pages,
8412 ftrace_number_of_groups);
8414 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8419 static const struct file_operations tracing_dyn_info_fops = {
8420 .open = tracing_open_generic,
8421 .read = tracing_read_dyn_info,
8422 .llseek = generic_file_llseek,
8424 #endif /* CONFIG_DYNAMIC_FTRACE */
8426 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8428 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8429 struct trace_array *tr, struct ftrace_probe_ops *ops,
8432 tracing_snapshot_instance(tr);
8436 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8437 struct trace_array *tr, struct ftrace_probe_ops *ops,
8440 struct ftrace_func_mapper *mapper = data;
8444 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8454 tracing_snapshot_instance(tr);
8458 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8459 struct ftrace_probe_ops *ops, void *data)
8461 struct ftrace_func_mapper *mapper = data;
8464 seq_printf(m, "%ps:", (void *)ip);
8466 seq_puts(m, "snapshot");
8469 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8472 seq_printf(m, ":count=%ld\n", *count);
8474 seq_puts(m, ":unlimited\n");
8480 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8481 unsigned long ip, void *init_data, void **data)
8483 struct ftrace_func_mapper *mapper = *data;
8486 mapper = allocate_ftrace_func_mapper();
8492 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8496 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8497 unsigned long ip, void *data)
8499 struct ftrace_func_mapper *mapper = data;
8504 free_ftrace_func_mapper(mapper, NULL);
8508 ftrace_func_mapper_remove_ip(mapper, ip);
8511 static struct ftrace_probe_ops snapshot_probe_ops = {
8512 .func = ftrace_snapshot,
8513 .print = ftrace_snapshot_print,
8516 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8517 .func = ftrace_count_snapshot,
8518 .print = ftrace_snapshot_print,
8519 .init = ftrace_snapshot_init,
8520 .free = ftrace_snapshot_free,
8524 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8525 char *glob, char *cmd, char *param, int enable)
8527 struct ftrace_probe_ops *ops;
8528 void *count = (void *)-1;
8535 /* hash funcs only work with set_ftrace_filter */
8539 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8542 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8547 number = strsep(&param, ":");
8549 if (!strlen(number))
8553 * We use the callback data field (which is a pointer)
8556 ret = kstrtoul(number, 0, (unsigned long *)&count);
8561 ret = tracing_alloc_snapshot_instance(tr);
8565 ret = register_ftrace_function_probe(glob, tr, ops, count);
8568 return ret < 0 ? ret : 0;
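/*
 * The callback above implements the "snapshot" function-probe command of
 * set_ftrace_filter.  A sketch of how the arguments map ("schedule" is
 * just an example function name):
 *
 *	schedule:snapshot	snapshot on every hit (snapshot_probe_ops)
 *	schedule:snapshot:3	param "3" becomes count (snapshot_count_probe_ops)
 *	!schedule:snapshot	glob starts with '!', unregister the probe
 */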
8571 static struct ftrace_func_command ftrace_snapshot_cmd = {
8573 .func = ftrace_trace_snapshot_callback,
8576 static __init int register_snapshot_cmd(void)
8578 return register_ftrace_command(&ftrace_snapshot_cmd);
8581 static inline __init int register_snapshot_cmd(void) { return 0; }
8582 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8584 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8586 if (WARN_ON(!tr->dir))
8587 return ERR_PTR(-ENODEV);
8589 /* Top directory uses NULL as the parent */
8590 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8593 /* All sub buffers have a descriptor */
8597 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8599 struct dentry *d_tracer;
8602 return tr->percpu_dir;
8604 d_tracer = tracing_get_dentry(tr);
8605 if (IS_ERR(d_tracer))
8608 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8610 MEM_FAIL(!tr->percpu_dir,
8611 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8613 return tr->percpu_dir;
8616 static struct dentry *
8617 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8618 void *data, long cpu, const struct file_operations *fops)
8620 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8622 if (ret) /* See tracing_get_cpu() */
8623 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8628 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8630 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8631 struct dentry *d_cpu;
8632 char cpu_dir[30]; /* 30 characters should be more than enough */
8637 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8638 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8640 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8644 /* per cpu trace_pipe */
8645 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8646 tr, cpu, &tracing_pipe_fops);
8649 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8650 tr, cpu, &tracing_fops);
8652 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8653 tr, cpu, &tracing_buffers_fops);
8655 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8656 tr, cpu, &tracing_stats_fops);
8658 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8659 tr, cpu, &tracing_entries_fops);
8661 #ifdef CONFIG_TRACER_SNAPSHOT
8662 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8663 tr, cpu, &snapshot_fops);
8665 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8666 tr, cpu, &snapshot_raw_fops);
8670 #ifdef CONFIG_FTRACE_SELFTEST
8671 /* Let selftest have access to static functions in this file */
8672 #include "trace_selftest.c"
8676 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8679 struct trace_option_dentry *topt = filp->private_data;
8682 if (topt->flags->val & topt->opt->bit)
8687 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8691 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8694 struct trace_option_dentry *topt = filp->private_data;
8698 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8702 if (val != 0 && val != 1)
8705 if (!!(topt->flags->val & topt->opt->bit) != val) {
8706 mutex_lock(&trace_types_lock);
8707 ret = __set_tracer_option(topt->tr, topt->flags,
8709 mutex_unlock(&trace_types_lock);
8720 static const struct file_operations trace_options_fops = {
8721 .open = tracing_open_generic,
8722 .read = trace_options_read,
8723 .write = trace_options_write,
8724 .llseek = generic_file_llseek,
8728 * In order to pass in both the trace_array descriptor as well as the index
8729 * to the flag that the trace option file represents, the trace_array
8730 * has a character array of trace_flags_index[], which holds the index
8731 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8732 * The address of this character array is passed to the flag option file
8733 * read/write callbacks.
8735 * In order to extract both the index and the trace_array descriptor,
8736 * get_tr_index() uses the following algorithm.
8740 * As the pointer itself contains the address of the index (remember
8743 * Then, to get the trace_array descriptor, we subtract that index
8744 * from the ptr, which takes us to the start of the index array itself.
8746 * ptr - idx == &index[0]
8748 * Then a simple container_of() from that pointer gets us to the
8749 * trace_array descriptor.
8751 static void get_tr_index(void *data, struct trace_array **ptr,
8752 unsigned int *pindex)
8754 *pindex = *(unsigned char *)data;
8756 *ptr = container_of(data - *pindex, struct trace_array,
8761 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8764 void *tr_index = filp->private_data;
8765 struct trace_array *tr;
8769 get_tr_index(tr_index, &tr, &index);
8771 if (tr->trace_flags & (1 << index))
8776 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8780 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8783 void *tr_index = filp->private_data;
8784 struct trace_array *tr;
8789 get_tr_index(tr_index, &tr, &index);
8791 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8795 if (val != 0 && val != 1)
8798 mutex_lock(&event_mutex);
8799 mutex_lock(&trace_types_lock);
8800 ret = set_tracer_flag(tr, 1 << index, val);
8801 mutex_unlock(&trace_types_lock);
8802 mutex_unlock(&event_mutex);
8812 static const struct file_operations trace_options_core_fops = {
8813 .open = tracing_open_generic,
8814 .read = trace_options_core_read,
8815 .write = trace_options_core_write,
8816 .llseek = generic_file_llseek,
8819 struct dentry *trace_create_file(const char *name,
8821 struct dentry *parent,
8823 const struct file_operations *fops)
8827 ret = tracefs_create_file(name, mode, parent, data, fops);
8829 pr_warn("Could not create tracefs '%s' entry\n", name);
8835 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8837 struct dentry *d_tracer;
8842 d_tracer = tracing_get_dentry(tr);
8843 if (IS_ERR(d_tracer))
8846 tr->options = tracefs_create_dir("options", d_tracer);
8848 pr_warn("Could not create tracefs directory 'options'\n");
8856 create_trace_option_file(struct trace_array *tr,
8857 struct trace_option_dentry *topt,
8858 struct tracer_flags *flags,
8859 struct tracer_opt *opt)
8861 struct dentry *t_options;
8863 t_options = trace_options_init_dentry(tr);
8867 topt->flags = flags;
8871 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8872 t_options, topt, &trace_options_fops);
8877 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8879 struct trace_option_dentry *topts;
8880 struct trace_options *tr_topts;
8881 struct tracer_flags *flags;
8882 struct tracer_opt *opts;
8889 flags = tracer->flags;
8891 if (!flags || !flags->opts)
8895 * If this is an instance, only create flags for tracers
8896 * the instance may have.
8898 if (!trace_ok_for_array(tracer, tr))
8901 for (i = 0; i < tr->nr_topts; i++) {
8902 /* Make sure there are no duplicate flags. */
8903 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8909 for (cnt = 0; opts[cnt].name; cnt++)
8912 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8916 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8923 tr->topts = tr_topts;
8924 tr->topts[tr->nr_topts].tracer = tracer;
8925 tr->topts[tr->nr_topts].topts = topts;
8928 for (cnt = 0; opts[cnt].name; cnt++) {
8929 create_trace_option_file(tr, &topts[cnt], flags,
8931 MEM_FAIL(topts[cnt].entry == NULL,
8932 "Failed to create trace option: %s",
8937 static struct dentry *
8938 create_trace_option_core_file(struct trace_array *tr,
8939 const char *option, long index)
8941 struct dentry *t_options;
8943 t_options = trace_options_init_dentry(tr);
8947 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8948 (void *)&tr->trace_flags_index[index],
8949 &trace_options_core_fops);
8952 static void create_trace_options_dir(struct trace_array *tr)
8954 struct dentry *t_options;
8955 bool top_level = tr == &global_trace;
8958 t_options = trace_options_init_dentry(tr);
8962 for (i = 0; trace_options[i]; i++) {
8964 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8965 create_trace_option_core_file(tr, trace_options[i], i);
8970 rb_simple_read(struct file *filp, char __user *ubuf,
8971 size_t cnt, loff_t *ppos)
8973 struct trace_array *tr = filp->private_data;
8977 r = tracer_tracing_is_on(tr);
8978 r = sprintf(buf, "%d\n", r);
8980 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8984 rb_simple_write(struct file *filp, const char __user *ubuf,
8985 size_t cnt, loff_t *ppos)
8987 struct trace_array *tr = filp->private_data;
8988 struct trace_buffer *buffer = tr->array_buffer.buffer;
8992 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8997 mutex_lock(&trace_types_lock);
8998 if (!!val == tracer_tracing_is_on(tr)) {
8999 val = 0; /* do nothing */
9001 tracer_tracing_on(tr);
9002 if (tr->current_trace->start)
9003 tr->current_trace->start(tr);
9005 tracer_tracing_off(tr);
9006 if (tr->current_trace->stop)
9007 tr->current_trace->stop(tr);
9009 mutex_unlock(&trace_types_lock);
9017 static const struct file_operations rb_simple_fops = {
9018 .open = tracing_open_generic_tr,
9019 .read = rb_simple_read,
9020 .write = rb_simple_write,
9021 .release = tracing_release_generic_tr,
9022 .llseek = default_llseek,
9026 buffer_percent_read(struct file *filp, char __user *ubuf,
9027 size_t cnt, loff_t *ppos)
9029 struct trace_array *tr = filp->private_data;
9033 r = tr->buffer_percent;
9034 r = sprintf(buf, "%d\n", r);
9036 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9040 buffer_percent_write(struct file *filp, const char __user *ubuf,
9041 size_t cnt, loff_t *ppos)
9043 struct trace_array *tr = filp->private_data;
9047 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9057 tr->buffer_percent = val;
9064 static const struct file_operations buffer_percent_fops = {
9065 .open = tracing_open_generic_tr,
9066 .read = buffer_percent_read,
9067 .write = buffer_percent_write,
9068 .release = tracing_release_generic_tr,
9069 .llseek = default_llseek,
9072 static struct dentry *trace_instance_dir;
9075 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9078 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9080 enum ring_buffer_flags rb_flags;
9082 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9086 buf->buffer = ring_buffer_alloc(size, rb_flags);
9090 buf->data = alloc_percpu(struct trace_array_cpu);
9092 ring_buffer_free(buf->buffer);
9097 /* Allocate the first page for all buffers */
9098 set_buffer_entries(&tr->array_buffer,
9099 ring_buffer_size(tr->array_buffer.buffer, 0));
9104 static int allocate_trace_buffers(struct trace_array *tr, int size)
9108 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9112 #ifdef CONFIG_TRACER_MAX_TRACE
9113 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9114 allocate_snapshot ? size : 1);
9115 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9116 ring_buffer_free(tr->array_buffer.buffer);
9117 tr->array_buffer.buffer = NULL;
9118 free_percpu(tr->array_buffer.data);
9119 tr->array_buffer.data = NULL;
9122 tr->allocated_snapshot = allocate_snapshot;
9125 * Only the top level trace array gets its snapshot allocated
9126 * from the kernel command line.
9128 allocate_snapshot = false;
9134 static void free_trace_buffer(struct array_buffer *buf)
9137 ring_buffer_free(buf->buffer);
9139 free_percpu(buf->data);
9144 static void free_trace_buffers(struct trace_array *tr)
9149 free_trace_buffer(&tr->array_buffer);
9151 #ifdef CONFIG_TRACER_MAX_TRACE
9152 free_trace_buffer(&tr->max_buffer);
9156 static void init_trace_flags_index(struct trace_array *tr)
9160 /* Used by the trace options files */
9161 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9162 tr->trace_flags_index[i] = i;
9165 static void __update_tracer_options(struct trace_array *tr)
9169 for (t = trace_types; t; t = t->next)
9170 add_tracer_options(tr, t);
9173 static void update_tracer_options(struct trace_array *tr)
9175 mutex_lock(&trace_types_lock);
9176 tracer_options_updated = true;
9177 __update_tracer_options(tr);
9178 mutex_unlock(&trace_types_lock);
9181 /* Must have trace_types_lock held */
9182 struct trace_array *trace_array_find(const char *instance)
9184 struct trace_array *tr, *found = NULL;
9186 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9187 if (tr->name && strcmp(tr->name, instance) == 0) {
9196 struct trace_array *trace_array_find_get(const char *instance)
9198 struct trace_array *tr;
9200 mutex_lock(&trace_types_lock);
9201 tr = trace_array_find(instance);
9204 mutex_unlock(&trace_types_lock);
9209 static int trace_array_create_dir(struct trace_array *tr)
9213 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9217 ret = event_trace_add_tracer(tr->dir, tr);
9219 tracefs_remove(tr->dir);
9223 init_tracer_tracefs(tr, tr->dir);
9224 __update_tracer_options(tr);
9229 static struct trace_array *trace_array_create(const char *name)
9231 struct trace_array *tr;
9235 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9237 return ERR_PTR(ret);
9239 tr->name = kstrdup(name, GFP_KERNEL);
9243 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9246 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9248 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9250 raw_spin_lock_init(&tr->start_lock);
9252 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9254 tr->current_trace = &nop_trace;
9256 INIT_LIST_HEAD(&tr->systems);
9257 INIT_LIST_HEAD(&tr->events);
9258 INIT_LIST_HEAD(&tr->hist_vars);
9259 INIT_LIST_HEAD(&tr->err_log);
9261 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9264 if (ftrace_allocate_ftrace_ops(tr) < 0)
9267 ftrace_init_trace_array(tr);
9269 init_trace_flags_index(tr);
9271 if (trace_instance_dir) {
9272 ret = trace_array_create_dir(tr);
9276 __trace_early_add_events(tr);
9278 list_add(&tr->list, &ftrace_trace_arrays);
9285 ftrace_free_ftrace_ops(tr);
9286 free_trace_buffers(tr);
9287 free_cpumask_var(tr->tracing_cpumask);
9291 return ERR_PTR(ret);
9294 static int instance_mkdir(const char *name)
9296 struct trace_array *tr;
9299 mutex_lock(&event_mutex);
9300 mutex_lock(&trace_types_lock);
9303 if (trace_array_find(name))
9306 tr = trace_array_create(name);
9308 ret = PTR_ERR_OR_ZERO(tr);
9311 mutex_unlock(&trace_types_lock);
9312 mutex_unlock(&event_mutex);
9317 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9318 * @name: The name of the trace array to be looked up/created.
9320 * Returns pointer to trace array with given name.
9321 * NULL, if it cannot be created.
9323 * NOTE: This function increments the reference counter associated with the
9324 * trace array returned. This makes sure it cannot be freed while in use.
9325 * Use trace_array_put() once the trace array is no longer needed.
9326 * If the trace_array is to be freed, trace_array_destroy() needs to
9327 * be called after the trace_array_put(), or simply let user space delete
9328 * it from the tracefs instances directory. But until the
9329 * trace_array_put() is called, user space can not delete it.
9332 struct trace_array *trace_array_get_by_name(const char *name)
9334 struct trace_array *tr;
9336 mutex_lock(&event_mutex);
9337 mutex_lock(&trace_types_lock);
9339 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9340 if (tr->name && strcmp(tr->name, name) == 0)
9344 tr = trace_array_create(name);
9352 mutex_unlock(&trace_types_lock);
9353 mutex_unlock(&event_mutex);
9356 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
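/*
 * A sketch of the lookup/teardown sequence described in the comment
 * above, roughly as an in-kernel user (e.g. a module) would do it; the
 * instance name is illustrative:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr) {
 *		... use the instance, e.g. trace_array_printk(tr, ...) ...
 *		trace_array_put(tr);
 *		trace_array_destroy(tr);	only if it should be removed
 *	}
 */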
9358 static int __remove_instance(struct trace_array *tr)
9362 /* Reference counter for a newly created trace array = 1. */
9363 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9366 list_del(&tr->list);
9368 /* Disable all the flags that were enabled coming in */
9369 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9370 if ((1 << i) & ZEROED_TRACE_FLAGS)
9371 set_tracer_flag(tr, 1 << i, 0);
9374 tracing_set_nop(tr);
9375 clear_ftrace_function_probes(tr);
9376 event_trace_del_tracer(tr);
9377 ftrace_clear_pids(tr);
9378 ftrace_destroy_function_files(tr);
9379 tracefs_remove(tr->dir);
9380 free_percpu(tr->last_func_repeats);
9381 free_trace_buffers(tr);
9383 for (i = 0; i < tr->nr_topts; i++) {
9384 kfree(tr->topts[i].topts);
9388 free_cpumask_var(tr->tracing_cpumask);
9395 int trace_array_destroy(struct trace_array *this_tr)
9397 struct trace_array *tr;
9403 mutex_lock(&event_mutex);
9404 mutex_lock(&trace_types_lock);
9408 /* Make sure the trace array exists before destroying it. */
9409 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9410 if (tr == this_tr) {
9411 ret = __remove_instance(tr);
9416 mutex_unlock(&trace_types_lock);
9417 mutex_unlock(&event_mutex);
9421 EXPORT_SYMBOL_GPL(trace_array_destroy);
9423 static int instance_rmdir(const char *name)
9425 struct trace_array *tr;
9428 mutex_lock(&event_mutex);
9429 mutex_lock(&trace_types_lock);
9432 tr = trace_array_find(name);
9434 ret = __remove_instance(tr);
9436 mutex_unlock(&trace_types_lock);
9437 mutex_unlock(&event_mutex);
9442 static __init void create_trace_instances(struct dentry *d_tracer)
9444 struct trace_array *tr;
9446 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9449 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9452 mutex_lock(&event_mutex);
9453 mutex_lock(&trace_types_lock);
9455 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9458 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9459 "Failed to create instance directory\n"))
9463 mutex_unlock(&trace_types_lock);
9464 mutex_unlock(&event_mutex);
9468 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9470 struct trace_event_file *file;
9473 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9474 tr, &show_traces_fops);
9476 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9477 tr, &set_tracer_fops);
9479 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9480 tr, &tracing_cpumask_fops);
9482 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9483 tr, &tracing_iter_fops);
9485 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9488 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9489 tr, &tracing_pipe_fops);
9491 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9492 tr, &tracing_entries_fops);
9494 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9495 tr, &tracing_total_entries_fops);
9497 trace_create_file("free_buffer", 0200, d_tracer,
9498 tr, &tracing_free_buffer_fops);
9500 trace_create_file("trace_marker", 0220, d_tracer,
9501 tr, &tracing_mark_fops);
9503 file = __find_event_file(tr, "ftrace", "print");
9504 if (file && file->dir)
9505 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9506 file, &event_trigger_fops);
9507 tr->trace_marker_file = file;
9509 trace_create_file("trace_marker_raw", 0220, d_tracer,
9510 tr, &tracing_mark_raw_fops);
9512 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9515 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9516 tr, &rb_simple_fops);
9518 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9519 &trace_time_stamp_mode_fops);
9521 tr->buffer_percent = 50;
9523 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9524 tr, &buffer_percent_fops);
9526 create_trace_options_dir(tr);
9528 trace_create_maxlat_file(tr, d_tracer);
9530 if (ftrace_create_function_files(tr, d_tracer))
9531 MEM_FAIL(1, "Could not allocate function filter files");
9533 #ifdef CONFIG_TRACER_SNAPSHOT
9534 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9535 tr, &snapshot_fops);
9538 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9539 tr, &tracing_err_log_fops);
9541 for_each_tracing_cpu(cpu)
9542 tracing_init_tracefs_percpu(tr, cpu);
9544 ftrace_init_tracefs(tr, d_tracer);
9547 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9549 struct vfsmount *mnt;
9550 struct file_system_type *type;
9553 * To maintain backward compatibility for tools that mount
9554 * debugfs to get to the tracing facility, tracefs is automatically
9555 * mounted to the debugfs/tracing directory.
9557 type = get_fs_type("tracefs");
9560 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9561 put_filesystem(type);
9570 * tracing_init_dentry - initialize top level trace array
9572 * This is called when creating files or directories in the tracing
9573 * directory. It is called via fs_initcall() by any of the boot up code
9574 * and returns zero once the top level tracing directory has been set up.
9576 int tracing_init_dentry(void)
9578 struct trace_array *tr = &global_trace;
9580 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9581 pr_warn("Tracing disabled due to lockdown\n");
9585 /* The top level trace array uses NULL as parent */
9589 if (WARN_ON(!tracefs_initialized()))
9593 * As there may still be users that expect the tracing
9594 * files to exist in debugfs/tracing, we must automount
9595 * the tracefs file system there, so older tools still
9596 * work with the newer kernel.
9598 tr->dir = debugfs_create_automount("tracing", NULL,
9599 trace_automount, NULL);
9604 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9605 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9607 static struct workqueue_struct *eval_map_wq __initdata;
9608 static struct work_struct eval_map_work __initdata;
9609 static struct work_struct tracerfs_init_work __initdata;
9611 static void __init eval_map_work_func(struct work_struct *work)
9615 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9616 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9619 static int __init trace_eval_init(void)
9621 INIT_WORK(&eval_map_work, eval_map_work_func);
9623 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9625 pr_err("Unable to allocate eval_map_wq\n");
9627 eval_map_work_func(&eval_map_work);
9631 queue_work(eval_map_wq, &eval_map_work);
9635 subsys_initcall(trace_eval_init);
9637 static int __init trace_eval_sync(void)
9639 /* Make sure the eval map updates are finished */
9641 destroy_workqueue(eval_map_wq);
9645 late_initcall_sync(trace_eval_sync);
9648 #ifdef CONFIG_MODULES
9649 static void trace_module_add_evals(struct module *mod)
9651 if (!mod->num_trace_evals)
9655 * Modules with bad taint do not have events created, so do
9656 * not bother with their eval maps (enums) either.
9658 if (trace_module_has_bad_taint(mod))
9661 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9664 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9665 static void trace_module_remove_evals(struct module *mod)
9667 union trace_eval_map_item *map;
9668 union trace_eval_map_item **last = &trace_eval_maps;
9670 if (!mod->num_trace_evals)
9673 mutex_lock(&trace_eval_mutex);
9675 map = trace_eval_maps;
9678 if (map->head.mod == mod)
9680 map = trace_eval_jmp_to_tail(map);
9681 last = &map->tail.next;
9682 map = map->tail.next;
9687 *last = trace_eval_jmp_to_tail(map)->tail.next;
9690 mutex_unlock(&trace_eval_mutex);
9693 static inline void trace_module_remove_evals(struct module *mod) { }
9694 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9696 static int trace_module_notify(struct notifier_block *self,
9697 unsigned long val, void *data)
9699 struct module *mod = data;
9702 case MODULE_STATE_COMING:
9703 trace_module_add_evals(mod);
9705 case MODULE_STATE_GOING:
9706 trace_module_remove_evals(mod);
9713 static struct notifier_block trace_module_nb = {
9714 .notifier_call = trace_module_notify,
9717 #endif /* CONFIG_MODULES */
9719 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9724 init_tracer_tracefs(&global_trace, NULL);
9725 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9727 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9728 &global_trace, &tracing_thresh_fops);
9730 trace_create_file("README", TRACE_MODE_READ, NULL,
9731 NULL, &tracing_readme_fops);
9733 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9734 NULL, &tracing_saved_cmdlines_fops);
9736 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9737 NULL, &tracing_saved_cmdlines_size_fops);
9739 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9740 NULL, &tracing_saved_tgids_fops);
9742 trace_create_eval_file(NULL);
9744 #ifdef CONFIG_MODULES
9745 register_module_notifier(&trace_module_nb);
9748 #ifdef CONFIG_DYNAMIC_FTRACE
9749 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9750 NULL, &tracing_dyn_info_fops);
9753 create_trace_instances(NULL);
9755 update_tracer_options(&global_trace);
9758 static __init int tracer_init_tracefs(void)
9762 trace_access_lock_init();
9764 ret = tracing_init_dentry();
9769 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9770 queue_work(eval_map_wq, &tracerfs_init_work);
9772 tracer_init_tracefs_work_func(NULL);
9778 fs_initcall(tracer_init_tracefs);
9780 static int trace_panic_handler(struct notifier_block *this,
9781 unsigned long event, void *unused)
9783 if (ftrace_dump_on_oops)
9784 ftrace_dump(ftrace_dump_on_oops);
9788 static struct notifier_block trace_panic_notifier = {
9789 .notifier_call = trace_panic_handler,
9791 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9794 static int trace_die_handler(struct notifier_block *self,
9800 if (ftrace_dump_on_oops)
9801 ftrace_dump(ftrace_dump_on_oops);
9809 static struct notifier_block trace_die_notifier = {
9810 .notifier_call = trace_die_handler,
9815 * printk is set to a max of 1024, but we really don't need it that big.
9816 * Nothing should be printing 1000 characters anyway.
9818 #define TRACE_MAX_PRINT 1000
9821 * Define here KERN_TRACE so that we have one place to modify
9822 * it if we decide to change what log level the ftrace dump
9825 #define KERN_TRACE KERN_EMERG
9828 trace_printk_seq(struct trace_seq *s)
9830 /* Probably should print a warning here. */
9831 if (s->seq.len >= TRACE_MAX_PRINT)
9832 s->seq.len = TRACE_MAX_PRINT;
9835 * More paranoid code. Although the buffer size is set to
9836 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9837 * an extra layer of protection.
9839 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9840 s->seq.len = s->seq.size - 1;
9842 /* should already be zero terminated, but we are paranoid. */
9843 s->buffer[s->seq.len] = 0;
9845 printk(KERN_TRACE "%s", s->buffer);
9850 void trace_init_global_iter(struct trace_iterator *iter)
9852 iter->tr = &global_trace;
9853 iter->trace = iter->tr->current_trace;
9854 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9855 iter->array_buffer = &global_trace.array_buffer;
9857 if (iter->trace && iter->trace->open)
9858 iter->trace->open(iter);
9860 /* Annotate start of buffers if we had overruns */
9861 if (ring_buffer_overruns(iter->array_buffer->buffer))
9862 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9864 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9865 if (trace_clocks[iter->tr->clock_id].in_ns)
9866 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9868 /* Can not use kmalloc for iter.temp and iter.fmt */
9869 iter->temp = static_temp_buf;
9870 iter->temp_size = STATIC_TEMP_BUF_SIZE;
9871 iter->fmt = static_fmt_buf;
9872 iter->fmt_size = STATIC_FMT_BUF_SIZE;
9875 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9877 /* use static because iter can be a bit big for the stack */
9878 static struct trace_iterator iter;
9879 static atomic_t dump_running;
9880 struct trace_array *tr = &global_trace;
9881 unsigned int old_userobj;
9882 unsigned long flags;
9885 /* Only allow one dump user at a time. */
9886 if (atomic_inc_return(&dump_running) != 1) {
9887 atomic_dec(&dump_running);
9892 * Always turn off tracing when we dump.
9893 * We don't need to show trace output of what happens
9894 * between multiple crashes.
9896 * If the user does a sysrq-z, then they can re-enable
9897 * tracing with echo 1 > tracing_on.
9901 local_irq_save(flags);
9903 /* Simulate the iterator */
9904 trace_init_global_iter(&iter);
9906 for_each_tracing_cpu(cpu) {
9907 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9910 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9912 /* don't look at user memory in panic mode */
9913 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9915 switch (oops_dump_mode) {
9917 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9920 iter.cpu_file = raw_smp_processor_id();
9925 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9926 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9929 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9931 /* Did function tracer already get disabled? */
9932 if (ftrace_is_dead()) {
9933 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9934 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9938 * We need to stop all tracing on all CPUS to read
9939 * the next buffer. This is a bit expensive, but is
9940 * not done often. We read everything we can,
9941 * and then release the locks again.
9944 while (!trace_empty(&iter)) {
9947 printk(KERN_TRACE "---------------------------------\n");
9951 trace_iterator_reset(&iter);
9952 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9954 if (trace_find_next_entry_inc(&iter) != NULL) {
9957 ret = print_trace_line(&iter);
9958 if (ret != TRACE_TYPE_NO_CONSUME)
9959 trace_consume(&iter);
9961 touch_nmi_watchdog();
9963 trace_printk_seq(&iter.seq);
9967 printk(KERN_TRACE " (ftrace buffer empty)\n");
9969 printk(KERN_TRACE "---------------------------------\n");
9972 tr->trace_flags |= old_userobj;
9974 for_each_tracing_cpu(cpu) {
9975 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9977 atomic_dec(&dump_running);
9978 local_irq_restore(flags);
9980 EXPORT_SYMBOL_GPL(ftrace_dump);
#define WRITE_BUFSIZE  4096
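
/*
 * trace_parse_run_command - run a callback on each line written by user space
 *
 * The user buffer is copied in WRITE_BUFSIZE chunks, split on newlines,
 * '#' comments are stripped, and each remaining line is handed to @createfn.
 * Used by tracefs command files such as dynamic_events and kprobe_events.
 *
 * Sketch of a write handler built on top of it (my_create_fn() is a
 * hypothetical per-line parser, not part of this file):
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t cnt, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, ubuf, cnt, ppos,
 *					       my_create_fn);
 *	}
 */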
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
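
/*
 * Allocate the global trace buffers and register the core pieces of the
 * tracing infrastructure. Called once from early_trace_init().
 */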
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
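
/* Take a snapshot at the end of boot if it was requested on the command line. */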
void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}
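
/*
 * First stage of tracing init, called very early from start_kernel():
 * allocate the ring buffers and, if tracepoints are to be echoed to
 * printk ("tp_printk"), the iterator used for that.
 */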
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
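
/* Second stage of tracing init: set up the trace event infrastructure. */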
void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an __init section.
	 * This function runs from a late initcall. If the boot tracer
	 * was never registered, clear the pointer so that later
	 * registrations do not access memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
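
/*
 * Final boot-time fixups, run as a late initcall: stop tracepoint-to-printk
 * output if that was requested on the command line, pick the default trace
 * clock, and clear any boot tracer that never got registered.
 */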
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);