1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
8 * Originally taken from the RT patch by:
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring buffer to count the
64 * entries inserted during the selftest, although concurrent
65 * insertions into the ring buffer, such as trace_printk(), could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If a tracer is running, we do not want to run SELFTEST.
73 bool __read_mostly tracing_selftest_disabled;
75 /* Pipe tracepoints to printk */
76 struct trace_iterator *tracepoint_print_iter;
77 int tracepoint_printk;
78 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
80 /* For tracers that don't implement custom flags */
81 static struct tracer_opt dummy_tracer_opt[] = {
86 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
92 * To prevent the comm cache from being overwritten when no
93 * tracing is active, only save the comm when a trace event
96 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
99 * Kill all tracing for good (never come back).
100 * It is initialized to 1 but will turn to zero if the initialization
101 * of the tracer is successful. But that is the only place that sets
104 static int tracing_disabled = 1;
106 cpumask_var_t __read_mostly tracing_buffer_mask;
109 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
111 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112 * is set, then ftrace_dump is called. This will output the contents
113 * of the ftrace buffers to the console. This is very useful for
114 * capturing traces that lead to crashes and outputting them to a
117 * It is off by default, but you can enable it either by specifying
118 * "ftrace_dump_on_oops" on the kernel command line, or by setting
119 * /proc/sys/kernel/ftrace_dump_on_oops
120 * Set it to 1 to dump the buffers of all CPUs.
121 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
124 enum ftrace_dump_mode ftrace_dump_on_oops;
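/*
 * Illustrative examples (not part of the original source) of the two
 * configuration paths described above; exact syntax depends on the
 * distribution and bootloader:
 *
 *	# dump the buffers of all CPUs on an oops (DUMP_ALL):
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 *	# or, on the kernel command line, dump only the oopsing CPU (DUMP_ORIG):
 *	ftrace_dump_on_oops=orig_cpu
 */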
126 /* When set, tracing will stop when a WARN*() is hit */
127 int __disable_trace_on_warning;
129 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
130 /* Map of enums to their values, for "eval_map" file */
131 struct trace_eval_map_head {
133 unsigned long length;
136 union trace_eval_map_item;
138 struct trace_eval_map_tail {
140 * "end" is first and points to NULL as it must be different
141 * than "mod" or "eval_string"
143 union trace_eval_map_item *next;
144 const char *end; /* points to NULL */
147 static DEFINE_MUTEX(trace_eval_mutex);
150 * The trace_eval_maps are saved in an array with two extra elements,
151 * one at the beginning, and one at the end. The beginning item contains
152 * the count of the saved maps (head.length), and the module they
153 * belong to if not built in (head.mod). The ending item contains a
154 * pointer to the next array of saved eval_map items.
156 union trace_eval_map_item {
157 struct trace_eval_map map;
158 struct trace_eval_map_head head;
159 struct trace_eval_map_tail tail;
162 static union trace_eval_map_item *trace_eval_maps;
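/*
 * Illustrative layout (not from the original source) of one saved array of
 * N maps, following the description above:
 *
 *	trace_eval_maps --> [0]   head  (head.length = N, head.mod = owning module)
 *	                    [1]   map   (eval_string -> eval_value)
 *	                    ...
 *	                    [N]   map
 *	                    [N+1] tail  (tail.next -> next saved array, or NULL)
 */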
163 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
165 int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 static void ftrace_trace_userstack(struct trace_buffer *buffer,
167 unsigned long flags, int pc);
169 #define MAX_TRACER_SIZE 100
170 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
171 static char *default_bootup_tracer;
173 static bool allocate_snapshot;
175 static int __init set_cmdline_ftrace(char *str)
177 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
178 default_bootup_tracer = bootup_tracer_buf;
179 /* We are using ftrace early, expand it */
180 ring_buffer_expanded = true;
183 __setup("ftrace=", set_cmdline_ftrace);
185 static int __init set_ftrace_dump_on_oops(char *str)
187 if (*str++ != '=' || !*str) {
188 ftrace_dump_on_oops = DUMP_ALL;
192 if (!strcmp("orig_cpu", str)) {
193 ftrace_dump_on_oops = DUMP_ORIG;
199 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
201 static int __init stop_trace_on_warning(char *str)
203 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
204 __disable_trace_on_warning = 1;
207 __setup("traceoff_on_warning", stop_trace_on_warning);
209 static int __init boot_alloc_snapshot(char *str)
211 allocate_snapshot = true;
212 /* We also need the main ring buffer expanded */
213 ring_buffer_expanded = true;
216 __setup("alloc_snapshot", boot_alloc_snapshot);
219 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
221 static int __init set_trace_boot_options(char *str)
223 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
226 __setup("trace_options=", set_trace_boot_options);
228 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
229 static char *trace_boot_clock __initdata;
231 static int __init set_trace_boot_clock(char *str)
233 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
234 trace_boot_clock = trace_boot_clock_buf;
237 __setup("trace_clock=", set_trace_boot_clock);
239 static int __init set_tracepoint_printk(char *str)
241 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
242 tracepoint_printk = 1;
245 __setup("tp_printk", set_tracepoint_printk);
247 unsigned long long ns2usecs(u64 nsec)
254 /* trace_flags holds trace_options default values */
255 #define TRACE_DEFAULT_FLAGS \
256 (FUNCTION_DEFAULT_FLAGS | \
257 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
258 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
259 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
260 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
262 /* trace_options that are only supported by global_trace */
263 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
264 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
266 /* trace_flags that are default zero for instances */
267 #define ZEROED_TRACE_FLAGS \
268 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
271 * The global_trace is the descriptor that holds the top-level tracing
272 * buffers for the live tracing.
274 static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
278 LIST_HEAD(ftrace_trace_arrays);
280 int trace_array_get(struct trace_array *this_tr)
282 struct trace_array *tr;
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
293 mutex_unlock(&trace_types_lock);
298 static void __trace_array_put(struct trace_array *this_tr)
300 WARN_ON(!this_tr->ref);
305 * trace_array_put - Decrement the reference counter for this trace array.
307 * NOTE: Use this when we no longer need the trace array returned by
308 * trace_array_get_by_name(). This ensures the trace array can be later
312 void trace_array_put(struct trace_array *this_tr)
317 mutex_lock(&trace_types_lock);
318 __trace_array_put(this_tr);
319 mutex_unlock(&trace_types_lock);
321 EXPORT_SYMBOL_GPL(trace_array_put);
323 int tracing_check_open_get_tr(struct trace_array *tr)
327 ret = security_locked_down(LOCKDOWN_TRACEFS);
331 if (tracing_disabled)
334 if (tr && trace_array_get(tr) < 0)
340 int call_filter_check_discard(struct trace_event_call *call, void *rec,
341 struct trace_buffer *buffer,
342 struct ring_buffer_event *event)
344 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
345 !filter_match_preds(call->filter, rec)) {
346 __trace_event_discard_commit(buffer, event);
353 void trace_free_pid_list(struct trace_pid_list *pid_list)
355 vfree(pid_list->pids);
360 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
361 * @filtered_pids: The list of pids to check
362 * @search_pid: The PID to find in @filtered_pids
364 * Returns true if @search_pid is found in @filtered_pids, false otherwise.
367 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
370 * If pid_max changed after filtered_pids was created, we
371 * by default ignore all pids greater than the previous pid_max.
373 if (search_pid >= filtered_pids->pid_max)
376 return test_bit(search_pid, filtered_pids->pids);
380 * trace_ignore_this_task - should a task be ignored for tracing
381 * @filtered_pids: The list of pids to check
382 * @task: The task that should be ignored if not filtered
384 * Checks if @task should be traced or not from @filtered_pids.
385 * Returns true if @task should *NOT* be traced.
386 * Returns false if @task should be traced.
389 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
390 struct trace_pid_list *filtered_no_pids,
391 struct task_struct *task)
394 * If filtered_no_pids is not empty, and the task's pid is listed
395 * in filtered_no_pids, then return true.
396 * Otherwise, if filtered_pids is empty, that means we can
397 * trace all tasks. If it has content, then only trace pids
398 * within filtered_pids.
401 return (filtered_pids &&
402 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
404 trace_find_filtered_pid(filtered_no_pids, task->pid));
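/*
 * Minimal illustrative sketch (not from the original source) of how a
 * tracing hook might consult the helper above; "my_pids" and "my_no_pids"
 * are hypothetical trace_pid_list pointers owned by the caller:
 *
 *	static void my_event_hook(struct trace_pid_list *my_pids,
 *				  struct trace_pid_list *my_no_pids)
 *	{
 *		if (trace_ignore_this_task(my_pids, my_no_pids, current))
 *			return;		// filtered out, do not record
 *		// ... record the event for "current" ...
 *	}
 */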
408 * trace_filter_add_remove_task - Add or remove a task from a pid_list
409 * @pid_list: The list to modify
410 * @self: The current task for fork or NULL for exit
411 * @task: The task to add or remove
413 * If adding a task and @self is defined, the task is only added if @self
414 * is also included in @pid_list. This happens on fork, and tasks should
415 * only be added when the parent is listed. If @self is NULL, then the
416 * @task pid will be removed from the list, which would happen on exit
419 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
420 struct task_struct *self,
421 struct task_struct *task)
426 /* For forks, we only add if the forking task is listed */
428 if (!trace_find_filtered_pid(pid_list, self->pid))
432 /* Sorry, but we don't support pid_max changing after setting */
433 if (task->pid >= pid_list->pid_max)
436 /* "self" is set for forks, and NULL for exits */
438 set_bit(task->pid, pid_list->pids);
440 clear_bit(task->pid, pid_list->pids);
444 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
445 * @pid_list: The pid list to show
446 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
447 * @pos: The position of the file
449 * This is used by the seq_file "next" operation to iterate the pids
450 * listed in a trace_pid_list structure.
452 * Returns the pid+1 as we want to display pid of zero, but NULL would
453 * stop the iteration.
455 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
457 unsigned long pid = (unsigned long)v;
461 /* pid is already +1 of the actual previous bit */
462 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
464 /* Return pid + 1 to allow zero to be represented */
465 if (pid < pid_list->pid_max)
466 return (void *)(pid + 1);
472 * trace_pid_start - Used for seq_file to start reading pid lists
473 * @pid_list: The pid list to show
474 * @pos: The position of the file
476 * This is used by seq_file "start" operation to start the iteration
479 * Returns the pid+1 as we want to display pid of zero, but NULL would
480 * stop the iteration.
482 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
487 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
488 if (pid >= pid_list->pid_max)
491 /* Return pid + 1 so that zero can be the exit value */
492 for (pid++; pid && l < *pos;
493 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
499 * trace_pid_show - show the current pid in seq_file processing
500 * @m: The seq_file structure to write into
501 * @v: A void pointer of the pid (+1) value to display
503 * Can be directly used by seq_file operations to display the current
506 int trace_pid_show(struct seq_file *m, void *v)
508 unsigned long pid = (unsigned long)v - 1;
510 seq_printf(m, "%lu\n", pid);
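/*
 * Minimal illustrative sketch (not from the original source) of how these
 * helpers are typically wired into a seq_file; "my_pid_list" is a
 * hypothetical accessor for the list behind the file, and "my_pid_seq_stop"
 * a hypothetical callback that drops whatever locks ->start() took:
 *
 *	static void *my_pid_seq_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list(m), pos);
 *	}
 *
 *	static void *my_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list(m), v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_sops = {
 *		.start	= my_pid_seq_start,
 *		.next	= my_pid_seq_next,
 *		.stop	= my_pid_seq_stop,
 *		.show	= trace_pid_show,
 *	};
 */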
514 /* 128 should be much more than enough */
515 #define PID_BUF_SIZE 127
517 int trace_pid_write(struct trace_pid_list *filtered_pids,
518 struct trace_pid_list **new_pid_list,
519 const char __user *ubuf, size_t cnt)
521 struct trace_pid_list *pid_list;
522 struct trace_parser parser;
530 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
534 * Always recreate a new array. The write is an all or nothing
535 * operation. Always create a new array when adding new pids by
536 * the user. If the operation fails, then the current list is
539 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
541 trace_parser_put(&parser);
545 pid_list->pid_max = READ_ONCE(pid_max);
547 /* Only truncating will shrink pid_max */
548 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
549 pid_list->pid_max = filtered_pids->pid_max;
551 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
552 if (!pid_list->pids) {
553 trace_parser_put(&parser);
559 /* copy the current bits to the new max */
560 for_each_set_bit(pid, filtered_pids->pids,
561 filtered_pids->pid_max) {
562 set_bit(pid, pid_list->pids);
571 ret = trace_get_user(&parser, ubuf, cnt, &pos);
572 if (ret < 0 || !trace_parser_loaded(&parser))
580 if (kstrtoul(parser.buffer, 0, &val))
582 if (val >= pid_list->pid_max)
587 set_bit(pid, pid_list->pids);
590 trace_parser_clear(&parser);
593 trace_parser_put(&parser);
596 trace_free_pid_list(pid_list);
601 /* Cleared the list of pids */
602 trace_free_pid_list(pid_list);
607 *new_pid_list = pid_list;
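/*
 * Minimal illustrative sketch (not from the original source) of how a
 * tracefs "set_*_pid" write handler might use trace_pid_write();
 * "my_current_pid_list()" is a hypothetical accessor, and publishing the
 * new list (e.g. via rcu_assign_pointer()) plus freeing the old one are
 * left to the caller:
 *
 *	static ssize_t my_pid_write(struct file *filp, const char __user *ubuf,
 *				    size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_pid_list *new_list = NULL;
 *		int ret;
 *
 *		ret = trace_pid_write(my_current_pid_list(filp), &new_list,
 *				      ubuf, cnt);
 *		if (ret < 0)
 *			return ret;
 *		// publish new_list and schedule the old list for freeing
 *		*ppos += ret;
 *		return ret;
 *	}
 */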
612 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
616 /* Early boot up does not have a buffer yet */
618 return trace_clock_local();
620 ts = ring_buffer_time_stamp(buf->buffer, cpu);
621 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
626 u64 ftrace_now(int cpu)
628 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
632 * tracing_is_enabled - Show if global_trace has been disabled
634 * Shows if the global trace has been enabled or not. It uses the
635 * mirror flag "buffer_disabled" to be used in fast paths such as for
636 * the irqsoff tracer. But it may be inaccurate due to races. If you
637 * need to know the accurate state, use tracing_is_on() which is a little
638 * slower, but accurate.
640 int tracing_is_enabled(void)
643 * For quick access (irqsoff uses this in fast path), just
644 * return the mirror variable of the state of the ring buffer.
645 * It's a little racy, but we don't really care.
648 return !global_trace.buffer_disabled;
652 * trace_buf_size is the size in bytes that is allocated
653 * for a buffer. Note, the number of bytes is always rounded
656 * This is purposely set to the low value of 16384 entries.
657 * If the dump on oops happens, it will be much appreciated
658 * to not have to wait for all that output. Anyway, this is
659 * configurable at both boot time and run time.
661 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
663 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
665 /* trace_types holds a link list of available tracers. */
666 static struct tracer *trace_types __read_mostly;
669 * trace_types_lock is used to protect the trace_types list.
671 DEFINE_MUTEX(trace_types_lock);
674 * serialize the access of the ring buffer
676 * The ring buffer serializes readers, but that is only low-level protection.
677 * The validity of the events (returned by ring_buffer_peek(), etc.)
678 * is not protected by the ring buffer.
680 * The content of events may become garbage if we allow another process to
681 * consume these events concurrently:
682 * A) the page of the consumed events may become a normal page
683 * (not a reader page) in the ring buffer, and this page will be rewritten
684 * by the event producer.
685 * B) the page of the consumed events may become a page for splice_read,
686 * and this page will be returned to the system.
688 * These primitives allow multi-process access to different cpu ring buffers.
691 * These primitives don't distinguish read-only and read-consume access.
692 * Multiple read-only accesses are also serialized.
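/*
 * Minimal illustrative sketch (not from the original source) of the
 * consumer-side pattern these locks support; "cpu" is either a specific
 * CPU id or RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	// ... consume events from the chosen per-cpu ring buffer(s),
 *	//     e.g. via ring_buffer_consume() or the splice path ...
 *	trace_access_unlock(cpu);
 */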
696 static DECLARE_RWSEM(all_cpu_access_lock);
697 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
699 static inline void trace_access_lock(int cpu)
701 if (cpu == RING_BUFFER_ALL_CPUS) {
702 /* gain it for accessing the whole ring buffer. */
703 down_write(&all_cpu_access_lock);
705 /* gain it for accessing a cpu ring buffer. */
707 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
708 down_read(&all_cpu_access_lock);
710 /* Secondly block other access to this @cpu ring buffer. */
711 mutex_lock(&per_cpu(cpu_access_lock, cpu));
715 static inline void trace_access_unlock(int cpu)
717 if (cpu == RING_BUFFER_ALL_CPUS) {
718 up_write(&all_cpu_access_lock);
720 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
721 up_read(&all_cpu_access_lock);
725 static inline void trace_access_lock_init(void)
729 for_each_possible_cpu(cpu)
730 mutex_init(&per_cpu(cpu_access_lock, cpu));
735 static DEFINE_MUTEX(access_lock);
737 static inline void trace_access_lock(int cpu)
740 mutex_lock(&access_lock);
743 static inline void trace_access_unlock(int cpu)
746 mutex_unlock(&access_lock);
749 static inline void trace_access_lock_init(void)
755 #ifdef CONFIG_STACKTRACE
756 static void __ftrace_trace_stack(struct trace_buffer *buffer,
758 int skip, int pc, struct pt_regs *regs);
759 static inline void ftrace_trace_stack(struct trace_array *tr,
760 struct trace_buffer *buffer,
762 int skip, int pc, struct pt_regs *regs);
765 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
767 int skip, int pc, struct pt_regs *regs)
770 static inline void ftrace_trace_stack(struct trace_array *tr,
771 struct trace_buffer *buffer,
773 int skip, int pc, struct pt_regs *regs)
779 static __always_inline void
780 trace_event_setup(struct ring_buffer_event *event,
781 int type, unsigned long flags, int pc)
783 struct trace_entry *ent = ring_buffer_event_data(event);
785 tracing_generic_entry_update(ent, type, flags, pc);
788 static __always_inline struct ring_buffer_event *
789 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
792 unsigned long flags, int pc)
794 struct ring_buffer_event *event;
796 event = ring_buffer_lock_reserve(buffer, len);
798 trace_event_setup(event, type, flags, pc);
803 void tracer_tracing_on(struct trace_array *tr)
805 if (tr->array_buffer.buffer)
806 ring_buffer_record_on(tr->array_buffer.buffer);
808 * This flag is looked at when buffers haven't been allocated
809 * yet, or by some tracers (like irqsoff), that just want to
810 * know if the ring buffer has been disabled, but it can handle
811 * races where it gets disabled while we still do a record.
812 * As the check is in the fast path of the tracers, it is more
813 * important to be fast than accurate.
815 tr->buffer_disabled = 0;
816 /* Make the flag seen by readers */
821 * tracing_on - enable tracing buffers
823 * This function enables tracing buffers that may have been
824 * disabled with tracing_off.
826 void tracing_on(void)
828 tracer_tracing_on(&global_trace);
830 EXPORT_SYMBOL_GPL(tracing_on);
833 static __always_inline void
834 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
836 __this_cpu_write(trace_taskinfo_save, true);
838 /* If this is the temp buffer, we need to commit fully */
839 if (this_cpu_read(trace_buffered_event) == event) {
840 /* Length is in event->array[0] */
841 ring_buffer_write(buffer, event->array[0], &event->array[1]);
842 /* Release the temp buffer */
843 this_cpu_dec(trace_buffered_event_cnt);
845 ring_buffer_unlock_commit(buffer, event);
849 * __trace_puts - write a constant string into the trace buffer.
850 * @ip: The address of the caller
851 * @str: The constant string to write
852 * @size: The size of the string.
854 int __trace_puts(unsigned long ip, const char *str, int size)
856 struct ring_buffer_event *event;
857 struct trace_buffer *buffer;
858 struct print_entry *entry;
859 unsigned long irq_flags;
863 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
866 pc = preempt_count();
868 if (unlikely(tracing_selftest_running || tracing_disabled))
871 alloc = sizeof(*entry) + size + 2; /* possible \n added */
873 local_save_flags(irq_flags);
874 buffer = global_trace.array_buffer.buffer;
875 ring_buffer_nest_start(buffer);
876 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
883 entry = ring_buffer_event_data(event);
886 memcpy(&entry->buf, str, size);
888 /* Add a newline if necessary */
889 if (entry->buf[size - 1] != '\n') {
890 entry->buf[size] = '\n';
891 entry->buf[size + 1] = '\0';
893 entry->buf[size] = '\0';
895 __buffer_unlock_commit(buffer, event);
896 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
898 ring_buffer_nest_end(buffer);
901 EXPORT_SYMBOL_GPL(__trace_puts);
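/*
 * Illustrative usage (not from the original source): callers normally reach
 * __trace_puts()/__trace_bputs() through the trace_puts() macro, which picks
 * the cheaper "bputs" form for strings the compiler can prove constant:
 *
 *	trace_puts("entering my critical section\n");
 */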
904 * __trace_bputs - write the pointer to a constant string into trace buffer
905 * @ip: The address of the caller
906 * @str: The constant string to write to the buffer to
908 int __trace_bputs(unsigned long ip, const char *str)
910 struct ring_buffer_event *event;
911 struct trace_buffer *buffer;
912 struct bputs_entry *entry;
913 unsigned long irq_flags;
914 int size = sizeof(struct bputs_entry);
918 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
921 pc = preempt_count();
923 if (unlikely(tracing_selftest_running || tracing_disabled))
926 local_save_flags(irq_flags);
927 buffer = global_trace.array_buffer.buffer;
929 ring_buffer_nest_start(buffer);
930 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
935 entry = ring_buffer_event_data(event);
939 __buffer_unlock_commit(buffer, event);
940 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
944 ring_buffer_nest_end(buffer);
947 EXPORT_SYMBOL_GPL(__trace_bputs);
949 #ifdef CONFIG_TRACER_SNAPSHOT
950 static void tracing_snapshot_instance_cond(struct trace_array *tr,
953 struct tracer *tracer = tr->current_trace;
957 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
958 internal_trace_puts("*** snapshot is being ignored ***\n");
962 if (!tr->allocated_snapshot) {
963 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
964 internal_trace_puts("*** stopping trace here! ***\n");
969 /* Note, snapshot can not be used when the tracer uses it */
970 if (tracer->use_max_tr) {
971 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
972 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
976 local_irq_save(flags);
977 update_max_tr(tr, current, smp_processor_id(), cond_data);
978 local_irq_restore(flags);
981 void tracing_snapshot_instance(struct trace_array *tr)
983 tracing_snapshot_instance_cond(tr, NULL);
987 * tracing_snapshot - take a snapshot of the current buffer.
989 * This causes a swap between the snapshot buffer and the current live
990 * tracing buffer. You can use this to take snapshots of the live
991 * trace when some condition is triggered, but continue to trace.
993 * Note, make sure to allocate the snapshot either with
994 * tracing_snapshot_alloc(), or manually with:
995 * echo 1 > /sys/kernel/debug/tracing/snapshot
997 * If the snapshot buffer is not allocated, it will stop tracing.
998 * Basically making a permanent snapshot.
1000 void tracing_snapshot(void)
1002 struct trace_array *tr = &global_trace;
1004 tracing_snapshot_instance(tr);
1006 EXPORT_SYMBOL_GPL(tracing_snapshot);
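/*
 * Illustrative usage (not from the original source) of the plain snapshot
 * API from kernel code, following the note above about allocating the
 * snapshot buffer first:
 *
 *	if (tracing_alloc_snapshot() == 0)	// may sleep
 *		tracing_snapshot();		// swap live buffer and snapshot
 */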
1009 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1010 * @tr: The tracing instance to snapshot
1011 * @cond_data: The data to be tested conditionally, and possibly saved
1013 * This is the same as tracing_snapshot() except that the snapshot is
1014 * conditional - the snapshot will only happen if the
1015 * cond_snapshot.update() implementation receiving the cond_data
1016 * returns true, which means that the trace array's cond_snapshot
1017 * update() operation used the cond_data to determine whether the
1018 * snapshot should be taken, and if it was, presumably saved it along
1019 * with the snapshot.
1021 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1023 tracing_snapshot_instance_cond(tr, cond_data);
1025 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1028 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1029 * @tr: The tracing instance
1031 * When the user enables a conditional snapshot using
1032 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1033 * with the snapshot. This accessor is used to retrieve it.
1035 * Should not be called from cond_snapshot.update(), since it takes
1036 * the tr->max_lock lock, which the code calling
1037 * cond_snapshot.update() has already done.
1039 * Returns the cond_data associated with the trace array's snapshot.
1041 void *tracing_cond_snapshot_data(struct trace_array *tr)
1043 void *cond_data = NULL;
1045 arch_spin_lock(&tr->max_lock);
1047 if (tr->cond_snapshot)
1048 cond_data = tr->cond_snapshot->cond_data;
1050 arch_spin_unlock(&tr->max_lock);
1054 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1056 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1057 struct array_buffer *size_buf, int cpu_id);
1058 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1060 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1064 if (!tr->allocated_snapshot) {
1066 /* allocate spare buffer */
1067 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1068 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1072 tr->allocated_snapshot = true;
1078 static void free_snapshot(struct trace_array *tr)
1081 * We don't free the ring buffer; instead, resize it because
1082 * the max_tr ring buffer has some state (e.g. ring->clock) and
1083 * we want to preserve it.
1085 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1086 set_buffer_entries(&tr->max_buffer, 1);
1087 tracing_reset_online_cpus(&tr->max_buffer);
1088 tr->allocated_snapshot = false;
1092 * tracing_alloc_snapshot - allocate snapshot buffer.
1094 * This only allocates the snapshot buffer if it isn't already
1095 * allocated - it doesn't also take a snapshot.
1097 * This is meant to be used in cases where the snapshot buffer needs
1098 * to be set up for events that can't sleep but need to be able to
1099 * trigger a snapshot.
1101 int tracing_alloc_snapshot(void)
1103 struct trace_array *tr = &global_trace;
1106 ret = tracing_alloc_snapshot_instance(tr);
1111 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1114 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1116 * This is similar to tracing_snapshot(), but it will allocate the
1117 * snapshot buffer if it isn't already allocated. Use this only
1118 * where it is safe to sleep, as the allocation may sleep.
1120 * This causes a swap between the snapshot buffer and the current live
1121 * tracing buffer. You can use this to take snapshots of the live
1122 * trace when some condition is triggered, but continue to trace.
1124 void tracing_snapshot_alloc(void)
1128 ret = tracing_alloc_snapshot();
1134 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1137 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1138 * @tr: The tracing instance
1139 * @cond_data: User data to associate with the snapshot
1140 * @update: Implementation of the cond_snapshot update function
1142 * Check whether the conditional snapshot for the given instance has
1143 * already been enabled, or if the current tracer is already using a
1144 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1145 * save the cond_data and update function inside.
1147 * Returns 0 if successful, error otherwise.
1149 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1150 cond_update_fn_t update)
1152 struct cond_snapshot *cond_snapshot;
1155 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1159 cond_snapshot->cond_data = cond_data;
1160 cond_snapshot->update = update;
1162 mutex_lock(&trace_types_lock);
1164 ret = tracing_alloc_snapshot_instance(tr);
1168 if (tr->current_trace->use_max_tr) {
1174 * The cond_snapshot can only change to NULL without the
1175 * trace_types_lock. We don't care if we race with it going
1176 * to NULL, but we want to make sure that it's not set to
1177 * something other than NULL when we get here, which we can
1178 * do safely with only holding the trace_types_lock and not
1179 * having to take the max_lock.
1181 if (tr->cond_snapshot) {
1186 arch_spin_lock(&tr->max_lock);
1187 tr->cond_snapshot = cond_snapshot;
1188 arch_spin_unlock(&tr->max_lock);
1190 mutex_unlock(&trace_types_lock);
1195 mutex_unlock(&trace_types_lock);
1196 kfree(cond_snapshot);
1199 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
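/*
 * Minimal illustrative sketch (not from the original source) of the
 * conditional snapshot API; "tr" and "my_data" (an int in the caller) are
 * assumed to exist, and "my_update" is a hypothetical cond_update_fn_t
 * deciding whether the snapshot should actually be taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return *(int *)cond_data > 0;
 *	}
 *
 *	// setup (may sleep, takes trace_types_lock internally):
 *	if (!tracing_snapshot_cond_enable(tr, &my_data, my_update)) {
 *		// hot path: snapshot only when my_update() returns true
 *		tracing_snapshot_cond(tr, &my_data);
 *		// teardown:
 *		tracing_snapshot_cond_disable(tr);
 *	}
 */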
1202 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1203 * @tr: The tracing instance
1205 * Check whether the conditional snapshot for the given instance is
1206 * enabled; if so, free the cond_snapshot associated with it,
1207 * otherwise return -EINVAL.
1209 * Returns 0 if successful, error otherwise.
1211 int tracing_snapshot_cond_disable(struct trace_array *tr)
1215 arch_spin_lock(&tr->max_lock);
1217 if (!tr->cond_snapshot)
1220 kfree(tr->cond_snapshot);
1221 tr->cond_snapshot = NULL;
1224 arch_spin_unlock(&tr->max_lock);
1228 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1230 void tracing_snapshot(void)
1232 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1234 EXPORT_SYMBOL_GPL(tracing_snapshot);
1235 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1237 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1239 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1240 int tracing_alloc_snapshot(void)
1242 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1245 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1246 void tracing_snapshot_alloc(void)
1251 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1252 void *tracing_cond_snapshot_data(struct trace_array *tr)
1256 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1257 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1261 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1262 int tracing_snapshot_cond_disable(struct trace_array *tr)
1266 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1267 #endif /* CONFIG_TRACER_SNAPSHOT */
1269 void tracer_tracing_off(struct trace_array *tr)
1271 if (tr->array_buffer.buffer)
1272 ring_buffer_record_off(tr->array_buffer.buffer);
1274 * This flag is looked at when buffers haven't been allocated
1275 * yet, or by some tracers (like irqsoff), that just want to
1276 * know if the ring buffer has been disabled, but it can handle
1277 * races where it gets disabled while we still do a record.
1278 * As the check is in the fast path of the tracers, it is more
1279 * important to be fast than accurate.
1281 tr->buffer_disabled = 1;
1282 /* Make the flag seen by readers */
1287 * tracing_off - turn off tracing buffers
1289 * This function stops the tracing buffers from recording data.
1290 * It does not disable any overhead the tracers themselves may
1291 * be causing. This function simply causes all recording to
1292 * the ring buffers to fail.
1294 void tracing_off(void)
1296 tracer_tracing_off(&global_trace);
1298 EXPORT_SYMBOL_GPL(tracing_off);
1300 void disable_trace_on_warning(void)
1302 if (__disable_trace_on_warning) {
1303 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1304 "Disabling tracing due to warning\n");
1310 * tracer_tracing_is_on - show real state of ring buffer enabled
1311 * @tr : the trace array to know if ring buffer is enabled
1313 * Shows real state of the ring buffer if it is enabled or not.
1315 bool tracer_tracing_is_on(struct trace_array *tr)
1317 if (tr->array_buffer.buffer)
1318 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1319 return !tr->buffer_disabled;
1323 * tracing_is_on - show state of ring buffers enabled
1325 int tracing_is_on(void)
1327 return tracer_tracing_is_on(&global_trace);
1329 EXPORT_SYMBOL_GPL(tracing_is_on);
1331 static int __init set_buf_size(char *str)
1333 unsigned long buf_size;
1337 buf_size = memparse(str, &str);
1338 /* nr_entries can not be zero */
1341 trace_buf_size = buf_size;
1344 __setup("trace_buf_size=", set_buf_size);
1346 static int __init set_tracing_thresh(char *str)
1348 unsigned long threshold;
1353 ret = kstrtoul(str, 0, &threshold);
1356 tracing_thresh = threshold * 1000;
1359 __setup("tracing_thresh=", set_tracing_thresh);
1361 unsigned long nsecs_to_usecs(unsigned long nsecs)
1363 return nsecs / 1000;
1367 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1368 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1369 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1370 * of strings in the order that the evals (enum) were defined.
1375 /* These must match the bit positions in trace_iterator_flags */
1376 static const char *trace_options[] = {
1384 int in_ns; /* is this clock in nanoseconds? */
1385 } trace_clocks[] = {
1386 { trace_clock_local, "local", 1 },
1387 { trace_clock_global, "global", 1 },
1388 { trace_clock_counter, "counter", 0 },
1389 { trace_clock_jiffies, "uptime", 0 },
1390 { trace_clock, "perf", 1 },
1391 { ktime_get_mono_fast_ns, "mono", 1 },
1392 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1393 { ktime_get_boot_fast_ns, "boot", 1 },
1397 bool trace_clock_in_ns(struct trace_array *tr)
1399 if (trace_clocks[tr->clock_id].in_ns)
1406 * trace_parser_get_init - gets the buffer for trace parser
1408 int trace_parser_get_init(struct trace_parser *parser, int size)
1410 memset(parser, 0, sizeof(*parser));
1412 parser->buffer = kmalloc(size, GFP_KERNEL);
1413 if (!parser->buffer)
1416 parser->size = size;
1421 * trace_parser_put - frees the buffer for trace parser
1423 void trace_parser_put(struct trace_parser *parser)
1425 kfree(parser->buffer);
1426 parser->buffer = NULL;
1430 * trace_get_user - reads the user input string separated by space
1431 * (matched by isspace(ch))
1433 * For each string found the 'struct trace_parser' is updated,
1434 * and the function returns.
1436 * Returns number of bytes read.
1438 * See kernel/trace/trace.h for 'struct trace_parser' details.
1440 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1441 size_t cnt, loff_t *ppos)
1448 trace_parser_clear(parser);
1450 ret = get_user(ch, ubuf++);
1458 * The parser is not finished with the last write,
1459 * continue reading the user input without skipping spaces.
1461 if (!parser->cont) {
1462 /* skip white space */
1463 while (cnt && isspace(ch)) {
1464 ret = get_user(ch, ubuf++);
1473 /* only spaces were written */
1474 if (isspace(ch) || !ch) {
1481 /* read the non-space input */
1482 while (cnt && !isspace(ch) && ch) {
1483 if (parser->idx < parser->size - 1)
1484 parser->buffer[parser->idx++] = ch;
1489 ret = get_user(ch, ubuf++);
1496 /* We either got finished input or we have to wait for another call. */
1497 if (isspace(ch) || !ch) {
1498 parser->buffer[parser->idx] = 0;
1499 parser->cont = false;
1500 } else if (parser->idx < parser->size - 1) {
1501 parser->cont = true;
1502 parser->buffer[parser->idx++] = ch;
1503 /* Make sure the parsed string always terminates with '\0'. */
1504 parser->buffer[parser->idx] = 0;
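/*
 * Minimal illustrative sketch (not from the original source) of the typical
 * parse loop built on these helpers, mirroring trace_pid_write() earlier in
 * this file; "ubuf" and "cnt" come from the enclosing write handler and
 * SOME_BUF_SIZE is a placeholder:
 *
 *	struct trace_parser parser;
 *	loff_t pos;
 *	int ret;
 *
 *	if (trace_parser_get_init(&parser, SOME_BUF_SIZE))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		// parser.buffer now holds one NUL-terminated token
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */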
1517 /* TODO add a seq_buf_to_buffer() */
1518 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1522 if (trace_seq_used(s) <= s->seq.readpos)
1525 len = trace_seq_used(s) - s->seq.readpos;
1528 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1530 s->seq.readpos += cnt;
1534 unsigned long __read_mostly tracing_thresh;
1535 static const struct file_operations tracing_max_lat_fops;
1537 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1538 defined(CONFIG_FSNOTIFY)
1540 static struct workqueue_struct *fsnotify_wq;
1542 static void latency_fsnotify_workfn(struct work_struct *work)
1544 struct trace_array *tr = container_of(work, struct trace_array,
1546 fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
1547 tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
1550 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1552 struct trace_array *tr = container_of(iwork, struct trace_array,
1554 queue_work(fsnotify_wq, &tr->fsnotify_work);
1557 static void trace_create_maxlat_file(struct trace_array *tr,
1558 struct dentry *d_tracer)
1560 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1561 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1562 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1563 d_tracer, &tr->max_latency,
1564 &tracing_max_lat_fops);
1567 __init static int latency_fsnotify_init(void)
1569 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1570 WQ_UNBOUND | WQ_HIGHPRI, 0);
1572 pr_err("Unable to allocate tr_max_lat_wq\n");
1578 late_initcall_sync(latency_fsnotify_init);
1580 void latency_fsnotify(struct trace_array *tr)
1585 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1586 * possible that we are called from __schedule() or do_idle(), which
1587 * could cause a deadlock.
1589 irq_work_queue(&tr->fsnotify_irqwork);
1593 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1594 * defined(CONFIG_FSNOTIFY)
1598 #define trace_create_maxlat_file(tr, d_tracer) \
1599 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1600 &tr->max_latency, &tracing_max_lat_fops)
1604 #ifdef CONFIG_TRACER_MAX_TRACE
1606 * Copy the new maximum trace into the separate maximum-trace
1607 * structure. (this way the maximum trace is permanently saved,
1608 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1611 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1613 struct array_buffer *trace_buf = &tr->array_buffer;
1614 struct array_buffer *max_buf = &tr->max_buffer;
1615 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1616 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1619 max_buf->time_start = data->preempt_timestamp;
1621 max_data->saved_latency = tr->max_latency;
1622 max_data->critical_start = data->critical_start;
1623 max_data->critical_end = data->critical_end;
1625 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1626 max_data->pid = tsk->pid;
1628 * If tsk == current, then use current_uid(), as that does not use
1629 * RCU. The irq tracer can be called out of RCU scope.
1632 max_data->uid = current_uid();
1634 max_data->uid = task_uid(tsk);
1636 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1637 max_data->policy = tsk->policy;
1638 max_data->rt_priority = tsk->rt_priority;
1640 /* record this task's comm */
1641 tracing_record_cmdline(tsk);
1642 latency_fsnotify(tr);
1646 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1648 * @tsk: the task with the latency
1649 * @cpu: The cpu that initiated the trace.
1650 * @cond_data: User data associated with a conditional snapshot
1652 * Flip the buffers between the @tr and the max_tr and record information
1653 * about which task was the cause of this latency.
1656 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1662 WARN_ON_ONCE(!irqs_disabled());
1664 if (!tr->allocated_snapshot) {
1665 /* Only the nop tracer should hit this when disabling */
1666 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1670 arch_spin_lock(&tr->max_lock);
1672 /* Inherit the recordable setting from array_buffer */
1673 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1674 ring_buffer_record_on(tr->max_buffer.buffer);
1676 ring_buffer_record_off(tr->max_buffer.buffer);
1678 #ifdef CONFIG_TRACER_SNAPSHOT
1679 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1682 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1684 __update_max_tr(tr, tsk, cpu);
1687 arch_spin_unlock(&tr->max_lock);
1691 * update_max_tr_single - only copy one trace over, and reset the rest
1693 * @tsk: task with the latency
1694 * @cpu: the cpu of the buffer to copy.
1696 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1699 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1706 WARN_ON_ONCE(!irqs_disabled());
1707 if (!tr->allocated_snapshot) {
1708 /* Only the nop tracer should hit this when disabling */
1709 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1713 arch_spin_lock(&tr->max_lock);
1715 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1717 if (ret == -EBUSY) {
1719 * We failed to swap the buffer due to a commit taking
1720 * place on this CPU. We fail to record, but we reset
1721 * the max trace buffer (no one writes directly to it)
1722 * and flag that it failed.
1724 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1725 "Failed to swap buffers due to commit in progress\n");
1728 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1730 __update_max_tr(tr, tsk, cpu);
1731 arch_spin_unlock(&tr->max_lock);
1733 #endif /* CONFIG_TRACER_MAX_TRACE */
1735 static int wait_on_pipe(struct trace_iterator *iter, int full)
1737 /* Iterators are static, they should be filled or empty */
1738 if (trace_buffer_iter(iter, iter->cpu_file))
1741 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1745 #ifdef CONFIG_FTRACE_STARTUP_TEST
1746 static bool selftests_can_run;
1748 struct trace_selftests {
1749 struct list_head list;
1750 struct tracer *type;
1753 static LIST_HEAD(postponed_selftests);
1755 static int save_selftest(struct tracer *type)
1757 struct trace_selftests *selftest;
1759 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1763 selftest->type = type;
1764 list_add(&selftest->list, &postponed_selftests);
1768 static int run_tracer_selftest(struct tracer *type)
1770 struct trace_array *tr = &global_trace;
1771 struct tracer *saved_tracer = tr->current_trace;
1774 if (!type->selftest || tracing_selftest_disabled)
1778 * If a tracer registers early in boot up (before scheduling is
1779 * initialized and such), then do not run its selftests yet.
1780 * Instead, run it a little later in the boot process.
1782 if (!selftests_can_run)
1783 return save_selftest(type);
1786 * Run a selftest on this tracer.
1787 * Here we reset the trace buffer, and set the current
1788 * tracer to be this tracer. The tracer can then run some
1789 * internal tracing to verify that everything is in order.
1790 * If we fail, we do not register this tracer.
1792 tracing_reset_online_cpus(&tr->array_buffer);
1794 tr->current_trace = type;
1796 #ifdef CONFIG_TRACER_MAX_TRACE
1797 if (type->use_max_tr) {
1798 /* If we expanded the buffers, make sure the max is expanded too */
1799 if (ring_buffer_expanded)
1800 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1801 RING_BUFFER_ALL_CPUS);
1802 tr->allocated_snapshot = true;
1806 /* the test is responsible for initializing and enabling */
1807 pr_info("Testing tracer %s: ", type->name);
1808 ret = type->selftest(type, tr);
1809 /* the test is responsible for resetting too */
1810 tr->current_trace = saved_tracer;
1812 printk(KERN_CONT "FAILED!\n");
1813 /* Add the warning after printing 'FAILED' */
1817 /* Only reset on passing, to avoid touching corrupted buffers */
1818 tracing_reset_online_cpus(&tr->array_buffer);
1820 #ifdef CONFIG_TRACER_MAX_TRACE
1821 if (type->use_max_tr) {
1822 tr->allocated_snapshot = false;
1824 /* Shrink the max buffer again */
1825 if (ring_buffer_expanded)
1826 ring_buffer_resize(tr->max_buffer.buffer, 1,
1827 RING_BUFFER_ALL_CPUS);
1831 printk(KERN_CONT "PASSED\n");
1835 static __init int init_trace_selftests(void)
1837 struct trace_selftests *p, *n;
1838 struct tracer *t, **last;
1841 selftests_can_run = true;
1843 mutex_lock(&trace_types_lock);
1845 if (list_empty(&postponed_selftests))
1848 pr_info("Running postponed tracer tests:\n");
1850 tracing_selftest_running = true;
1851 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1852 /* This loop can take minutes when sanitizers are enabled, so
1853 * let's make sure we allow RCU processing.
1856 ret = run_tracer_selftest(p->type);
1857 /* If the test fails, then warn and remove from available_tracers */
1859 WARN(1, "tracer: %s failed selftest, disabling\n",
1861 last = &trace_types;
1862 for (t = trace_types; t; t = t->next) {
1873 tracing_selftest_running = false;
1876 mutex_unlock(&trace_types_lock);
1880 core_initcall(init_trace_selftests);
1882 static inline int run_tracer_selftest(struct tracer *type)
1886 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1888 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1890 static void __init apply_trace_boot_options(void);
1893 * register_tracer - register a tracer with the ftrace system.
1894 * @type: the plugin for the tracer
1896 * Register a new plugin tracer.
1898 int __init register_tracer(struct tracer *type)
1904 pr_info("Tracer must have a name\n");
1908 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1909 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1913 if (security_locked_down(LOCKDOWN_TRACEFS)) {
1914 pr_warn("Can not register tracer %s due to lockdown\n",
1919 mutex_lock(&trace_types_lock);
1921 tracing_selftest_running = true;
1923 for (t = trace_types; t; t = t->next) {
1924 if (strcmp(type->name, t->name) == 0) {
1926 pr_info("Tracer %s already registered\n",
1933 if (!type->set_flag)
1934 type->set_flag = &dummy_set_flag;
1936 /* allocate a dummy tracer_flags */
1937 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1942 type->flags->val = 0;
1943 type->flags->opts = dummy_tracer_opt;
1945 if (!type->flags->opts)
1946 type->flags->opts = dummy_tracer_opt;
1948 /* store the tracer for __set_tracer_option */
1949 type->flags->trace = type;
1951 ret = run_tracer_selftest(type);
1955 type->next = trace_types;
1957 add_tracer_options(&global_trace, type);
1960 tracing_selftest_running = false;
1961 mutex_unlock(&trace_types_lock);
1963 if (ret || !default_bootup_tracer)
1966 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1969 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1970 /* Do we want this tracer to start on bootup? */
1971 tracing_set_tracer(&global_trace, type->name);
1972 default_bootup_tracer = NULL;
1974 apply_trace_boot_options();
1976 /* disable other selftests, since this will break them. */
1977 tracing_selftest_disabled = true;
1978 #ifdef CONFIG_FTRACE_STARTUP_TEST
1979 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
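/*
 * Minimal illustrative sketch (not from the original source) of registering
 * a tracer plugin as described above; only a couple of the struct tracer
 * callbacks are shown and the example tracer does nothing useful:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;	// start whatever the tracer needs here
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *		// undo my_tracer_init()
 *	}
 *
 *	static struct tracer my_tracer = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_setup(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_setup);
 */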
1987 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
1989 struct trace_buffer *buffer = buf->buffer;
1994 ring_buffer_record_disable(buffer);
1996 /* Make sure all commits have finished */
1998 ring_buffer_reset_cpu(buffer, cpu);
2000 ring_buffer_record_enable(buffer);
2003 void tracing_reset_online_cpus(struct array_buffer *buf)
2005 struct trace_buffer *buffer = buf->buffer;
2011 ring_buffer_record_disable(buffer);
2013 /* Make sure all commits have finished */
2016 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2018 for_each_online_cpu(cpu)
2019 ring_buffer_reset_cpu(buffer, cpu);
2021 ring_buffer_record_enable(buffer);
2024 /* Must have trace_types_lock held */
2025 void tracing_reset_all_online_cpus(void)
2027 struct trace_array *tr;
2029 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2030 if (!tr->clear_trace)
2032 tr->clear_trace = false;
2033 tracing_reset_online_cpus(&tr->array_buffer);
2034 #ifdef CONFIG_TRACER_MAX_TRACE
2035 tracing_reset_online_cpus(&tr->max_buffer);
2040 static int *tgid_map;
2042 #define SAVED_CMDLINES_DEFAULT 128
2043 #define NO_CMDLINE_MAP UINT_MAX
2044 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2045 struct saved_cmdlines_buffer {
2046 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2047 unsigned *map_cmdline_to_pid;
2048 unsigned cmdline_num;
2050 char *saved_cmdlines;
2052 static struct saved_cmdlines_buffer *savedcmd;
2054 /* temporarily disable recording */
2055 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2057 static inline char *get_saved_cmdlines(int idx)
2059 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2062 static inline void set_cmdline(int idx, const char *cmdline)
2064 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2067 static int allocate_cmdlines_buffer(unsigned int val,
2068 struct saved_cmdlines_buffer *s)
2070 s->map_cmdline_to_pid = kmalloc_array(val,
2071 sizeof(*s->map_cmdline_to_pid),
2073 if (!s->map_cmdline_to_pid)
2076 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2077 if (!s->saved_cmdlines) {
2078 kfree(s->map_cmdline_to_pid);
2083 s->cmdline_num = val;
2084 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2085 sizeof(s->map_pid_to_cmdline));
2086 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2087 val * sizeof(*s->map_cmdline_to_pid));
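/*
 * Illustrative note (not from the original source): the saved_cmdlines
 * buffer is a small two-way mapping,
 *
 *	map_pid_to_cmdline[pid]		-> slot index (or NO_CMDLINE_MAP)
 *	map_cmdline_to_pid[slot]	-> pid        (or NO_CMDLINE_MAP)
 *	saved_cmdlines + slot * TASK_COMM_LEN -> the saved comm string
 *
 * so a bounded number of comms can be cached without per-task storage.
 */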
2092 static int trace_create_savedcmd(void)
2096 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2100 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2110 int is_tracing_stopped(void)
2112 return global_trace.stop_count;
2116 * tracing_start - quick start of the tracer
2118 * If tracing is enabled but was stopped by tracing_stop,
2119 * this will start the tracer back up.
2121 void tracing_start(void)
2123 struct trace_buffer *buffer;
2124 unsigned long flags;
2126 if (tracing_disabled)
2129 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2130 if (--global_trace.stop_count) {
2131 if (global_trace.stop_count < 0) {
2132 /* Someone screwed up their debugging */
2134 global_trace.stop_count = 0;
2139 /* Prevent the buffers from switching */
2140 arch_spin_lock(&global_trace.max_lock);
2142 buffer = global_trace.array_buffer.buffer;
2144 ring_buffer_record_enable(buffer);
2146 #ifdef CONFIG_TRACER_MAX_TRACE
2147 buffer = global_trace.max_buffer.buffer;
2149 ring_buffer_record_enable(buffer);
2152 arch_spin_unlock(&global_trace.max_lock);
2155 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2158 static void tracing_start_tr(struct trace_array *tr)
2160 struct trace_buffer *buffer;
2161 unsigned long flags;
2163 if (tracing_disabled)
2166 /* If global, we need to also start the max tracer */
2167 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2168 return tracing_start();
2170 raw_spin_lock_irqsave(&tr->start_lock, flags);
2172 if (--tr->stop_count) {
2173 if (tr->stop_count < 0) {
2174 /* Someone screwed up their debugging */
2181 buffer = tr->array_buffer.buffer;
2183 ring_buffer_record_enable(buffer);
2186 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2190 * tracing_stop - quick stop of the tracer
2192 * Light weight way to stop tracing. Use in conjunction with
2195 void tracing_stop(void)
2197 struct trace_buffer *buffer;
2198 unsigned long flags;
2200 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2201 if (global_trace.stop_count++)
2204 /* Prevent the buffers from switching */
2205 arch_spin_lock(&global_trace.max_lock);
2207 buffer = global_trace.array_buffer.buffer;
2209 ring_buffer_record_disable(buffer);
2211 #ifdef CONFIG_TRACER_MAX_TRACE
2212 buffer = global_trace.max_buffer.buffer;
2214 ring_buffer_record_disable(buffer);
2217 arch_spin_unlock(&global_trace.max_lock);
2220 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2223 static void tracing_stop_tr(struct trace_array *tr)
2225 struct trace_buffer *buffer;
2226 unsigned long flags;
2228 /* If global, we need to also stop the max tracer */
2229 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2230 return tracing_stop();
2232 raw_spin_lock_irqsave(&tr->start_lock, flags);
2233 if (tr->stop_count++)
2236 buffer = tr->array_buffer.buffer;
2238 ring_buffer_record_disable(buffer);
2241 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2244 static int trace_save_cmdline(struct task_struct *tsk)
2248 /* treat recording of idle task as a success */
2252 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2256 * It's not the end of the world if we don't get
2257 * the lock, but we also don't want to spin
2258 * nor do we want to disable interrupts,
2259 * so if we miss here, then better luck next time.
2261 if (!arch_spin_trylock(&trace_cmdline_lock))
2264 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2265 if (idx == NO_CMDLINE_MAP) {
2266 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2269 * Check whether the cmdline buffer at idx has a pid
2270 * mapped. We are going to overwrite that entry so we
2271 * need to clear the map_pid_to_cmdline. Otherwise we
2272 * would read the new comm for the old pid.
2274 pid = savedcmd->map_cmdline_to_pid[idx];
2275 if (pid != NO_CMDLINE_MAP)
2276 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2278 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2279 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2281 savedcmd->cmdline_idx = idx;
2284 set_cmdline(idx, tsk->comm);
2286 arch_spin_unlock(&trace_cmdline_lock);
2291 static void __trace_find_cmdline(int pid, char comm[])
2296 strcpy(comm, "<idle>");
2300 if (WARN_ON_ONCE(pid < 0)) {
2301 strcpy(comm, "<XXX>");
2305 if (pid > PID_MAX_DEFAULT) {
2306 strcpy(comm, "<...>");
2310 map = savedcmd->map_pid_to_cmdline[pid];
2311 if (map != NO_CMDLINE_MAP)
2312 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2314 strcpy(comm, "<...>");
2317 void trace_find_cmdline(int pid, char comm[])
2320 arch_spin_lock(&trace_cmdline_lock);
2322 __trace_find_cmdline(pid, comm);
2324 arch_spin_unlock(&trace_cmdline_lock);
2328 int trace_find_tgid(int pid)
2330 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2333 return tgid_map[pid];
2336 static int trace_save_tgid(struct task_struct *tsk)
2338 /* treat recording of idle task as a success */
2342 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2345 tgid_map[tsk->pid] = tsk->tgid;
2349 static bool tracing_record_taskinfo_skip(int flags)
2351 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2353 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2355 if (!__this_cpu_read(trace_taskinfo_save))
2361 * tracing_record_taskinfo - record the task info of a task
2363 * @task: task to record
2364 * @flags: TRACE_RECORD_CMDLINE for recording comm
2365 * TRACE_RECORD_TGID for recording tgid
2367 void tracing_record_taskinfo(struct task_struct *task, int flags)
2371 if (tracing_record_taskinfo_skip(flags))
2375 * Record as much task information as possible. If some fail, continue
2376 * to try to record the others.
2378 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2379 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2381 /* If recording any information failed, retry again soon. */
2385 __this_cpu_write(trace_taskinfo_save, false);
2389 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2391 * @prev: previous task during sched_switch
2392 * @next: next task during sched_switch
2393 * @flags: TRACE_RECORD_CMDLINE for recording comm
2394 * TRACE_RECORD_TGID for recording tgid
2396 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2397 struct task_struct *next, int flags)
2401 if (tracing_record_taskinfo_skip(flags))
2405 * Record as much task information as possible. If some fail, continue
2406 * to try to record the others.
2408 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2409 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2410 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2411 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2413 /* If recording any information failed, retry again soon. */
2417 __this_cpu_write(trace_taskinfo_save, false);
2420 /* Helpers to record a specific task information */
2421 void tracing_record_cmdline(struct task_struct *task)
2423 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2426 void tracing_record_tgid(struct task_struct *task)
2428 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2432 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2433 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2434 * simplifies those functions and keeps them in sync.
2436 enum print_line_t trace_handle_return(struct trace_seq *s)
2438 return trace_seq_has_overflowed(s) ?
2439 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2441 EXPORT_SYMBOL_GPL(trace_handle_return);
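/*
 * Illustrative sketch (not part of the original file): a typical event
 * print callback ends with trace_handle_return() so an overflowed
 * trace_seq is reported as TRACE_TYPE_PARTIAL_LINE without open coding
 * the check. The callback name and the message are made up.
 */
static __maybe_unused enum print_line_t
example_trace_output(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event on cpu %d\n", iter->cpu);

	return trace_handle_return(s);
}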
2444 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2445 unsigned long flags, int pc)
2447 struct task_struct *tsk = current;
2449 entry->preempt_count = pc & 0xff;
2450 entry->pid = (tsk) ? tsk->pid : 0;
2453 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2454 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2456 TRACE_FLAG_IRQS_NOSUPPORT |
2458 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2459 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2460 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2461 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2462 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2464 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2466 struct ring_buffer_event *
2467 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2470 unsigned long flags, int pc)
2472 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2475 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2476 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2477 static int trace_buffered_event_ref;
2480 * trace_buffered_event_enable - enable buffering events
2482 * When events are being filtered, it is quicker to use a temporary
2483 * buffer to write the event data into if there's a likely chance
2484 * that it will not be committed. The discard of the ring buffer
2485 * is not as fast as committing, and is much slower than copying
2488 * When an event is to be filtered, allocate per cpu buffers to
2489 * write the event data into, and if the event is filtered and discarded
2490 * it is simply dropped, otherwise, the entire data is to be committed
2493 void trace_buffered_event_enable(void)
2495 struct ring_buffer_event *event;
2499 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2501 if (trace_buffered_event_ref++)
2504 for_each_tracing_cpu(cpu) {
2505 page = alloc_pages_node(cpu_to_node(cpu),
2506 GFP_KERNEL | __GFP_NORETRY, 0);
2510 event = page_address(page);
2511 memset(event, 0, sizeof(*event));
2513 per_cpu(trace_buffered_event, cpu) = event;
2516 if (cpu == smp_processor_id() &&
2517 this_cpu_read(trace_buffered_event) !=
2518 per_cpu(trace_buffered_event, cpu))
2525 trace_buffered_event_disable();
2528 static void enable_trace_buffered_event(void *data)
2530 /* Probably not needed, but do it anyway */
2532 this_cpu_dec(trace_buffered_event_cnt);
2535 static void disable_trace_buffered_event(void *data)
2537 this_cpu_inc(trace_buffered_event_cnt);
2541 * trace_buffered_event_disable - disable buffering events
2543 * When a filter is removed, it is faster to not use the buffered
2544 * events, and to commit directly into the ring buffer. Free up
2545 * the temp buffers when there are no more users. This requires
2546 * special synchronization with current events.
2548 void trace_buffered_event_disable(void)
2552 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2554 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2557 if (--trace_buffered_event_ref)
2561 /* For each CPU, set the buffer as used. */
2562 smp_call_function_many(tracing_buffer_mask,
2563 disable_trace_buffered_event, NULL, 1);
2566 /* Wait for all current users to finish */
2569 for_each_tracing_cpu(cpu) {
2570 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2571 per_cpu(trace_buffered_event, cpu) = NULL;
2574 * Make sure trace_buffered_event is NULL before clearing
2575 * trace_buffered_event_cnt.
2580 /* Do the work on each cpu */
2581 smp_call_function_many(tracing_buffer_mask,
2582 enable_trace_buffered_event, NULL, 1);
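/*
 * Illustrative sketch (not part of the original file): the enable and
 * disable calls above are reference counted and assert event_mutex, so
 * a caller (the event filter code, for instance) is expected to keep
 * them balanced and to hold the mutex around each call. The helper
 * below is made up to show that pairing.
 */
static __maybe_unused void example_toggle_buffered_events(bool filtering)
{
	mutex_lock(&event_mutex);
	if (filtering)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}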
2586 static struct trace_buffer *temp_buffer;
2588 struct ring_buffer_event *
2589 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2590 struct trace_event_file *trace_file,
2591 int type, unsigned long len,
2592 unsigned long flags, int pc)
2594 struct ring_buffer_event *entry;
2597 *current_rb = trace_file->tr->array_buffer.buffer;
2599 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2600 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2601 (entry = this_cpu_read(trace_buffered_event))) {
2602 /* Try to use the per cpu buffer first */
2603 val = this_cpu_inc_return(trace_buffered_event_cnt);
2605 trace_event_setup(entry, type, flags, pc);
2606 entry->array[0] = len;
2609 this_cpu_dec(trace_buffered_event_cnt);
2612 entry = __trace_buffer_lock_reserve(*current_rb,
2613 type, len, flags, pc);
2615 * If tracing is off, but we have triggers enabled
2616 * we still need to look at the event data. Use the temp_buffer
2617 * to store the trace event for the trigger to use. It's recursion
2618 * safe and will not be recorded anywhere.
2620 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2621 *current_rb = temp_buffer;
2622 entry = __trace_buffer_lock_reserve(*current_rb,
2623 type, len, flags, pc);
2627 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2629 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2630 static DEFINE_MUTEX(tracepoint_printk_mutex);
2632 static void output_printk(struct trace_event_buffer *fbuffer)
2634 struct trace_event_call *event_call;
2635 struct trace_event_file *file;
2636 struct trace_event *event;
2637 unsigned long flags;
2638 struct trace_iterator *iter = tracepoint_print_iter;
2640 /* We should never get here if iter is NULL */
2641 if (WARN_ON_ONCE(!iter))
2644 event_call = fbuffer->trace_file->event_call;
2645 if (!event_call || !event_call->event.funcs ||
2646 !event_call->event.funcs->trace)
2649 file = fbuffer->trace_file;
2650 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2651 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2652 !filter_match_preds(file->filter, fbuffer->entry)))
2655 event = &fbuffer->trace_file->event_call->event;
2657 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2658 trace_seq_init(&iter->seq);
2659 iter->ent = fbuffer->entry;
2660 event_call->event.funcs->trace(iter, 0, event);
2661 trace_seq_putc(&iter->seq, 0);
2662 printk("%s", iter->seq.buffer);
2664 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2667 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2668 void *buffer, size_t *lenp,
2671 int save_tracepoint_printk;
2674 mutex_lock(&tracepoint_printk_mutex);
2675 save_tracepoint_printk = tracepoint_printk;
2677 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2680 * This will force exiting early, as tracepoint_printk
2681 * is always zero when tracepoint_print_iter is not allocated
2683 if (!tracepoint_print_iter)
2684 tracepoint_printk = 0;
2686 if (save_tracepoint_printk == tracepoint_printk)
2689 if (tracepoint_printk)
2690 static_key_enable(&tracepoint_printk_key.key);
2692 static_key_disable(&tracepoint_printk_key.key);
2695 mutex_unlock(&tracepoint_printk_mutex);
2700 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2702 if (static_key_false(&tracepoint_printk_key.key))
2703 output_printk(fbuffer);
2705 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2706 fbuffer->event, fbuffer->entry,
2707 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2709 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2714 * trace_buffer_unlock_commit_regs()
2715 * trace_event_buffer_commit()
2716 * trace_event_raw_event_xxx()
2718 # define STACK_SKIP 3
2720 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2721 struct trace_buffer *buffer,
2722 struct ring_buffer_event *event,
2723 unsigned long flags, int pc,
2724 struct pt_regs *regs)
2726 __buffer_unlock_commit(buffer, event);
2729 * If regs is not set, then skip the necessary functions.
2730 * Note, we can still get here via blktrace, wakeup tracer
2731 * and mmiotrace, but that's ok if they lose a function or
2732 * two. They are not that meaningful.
2734 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2735 ftrace_trace_userstack(buffer, flags, pc);
2739 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2742 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2743 struct ring_buffer_event *event)
2745 __buffer_unlock_commit(buffer, event);
2749 trace_process_export(struct trace_export *export,
2750 struct ring_buffer_event *event)
2752 struct trace_entry *entry;
2753 unsigned int size = 0;
2755 entry = ring_buffer_event_data(event);
2756 size = ring_buffer_event_length(event);
2757 export->write(export, entry, size);
2760 static DEFINE_MUTEX(ftrace_export_lock);
2762 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2764 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2766 static inline void ftrace_exports_enable(void)
2768 static_branch_enable(&ftrace_exports_enabled);
2771 static inline void ftrace_exports_disable(void)
2773 static_branch_disable(&ftrace_exports_enabled);
2776 static void ftrace_exports(struct ring_buffer_event *event)
2778 struct trace_export *export;
2780 preempt_disable_notrace();
2782 export = rcu_dereference_raw_check(ftrace_exports_list);
2784 trace_process_export(export, event);
2785 export = rcu_dereference_raw_check(export->next);
2788 preempt_enable_notrace();
2792 add_trace_export(struct trace_export **list, struct trace_export *export)
2794 rcu_assign_pointer(export->next, *list);
2796 * We are entering export into the list but another
2797 * CPU might be walking that list. We need to make sure
2798 * the export->next pointer is valid before another CPU sees
2799 * the export pointer included into the list.
2801 rcu_assign_pointer(*list, export);
2805 rm_trace_export(struct trace_export **list, struct trace_export *export)
2807 struct trace_export **p;
2809 for (p = list; *p != NULL; p = &(*p)->next)
2816 rcu_assign_pointer(*p, (*p)->next);
2822 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2825 ftrace_exports_enable();
2827 add_trace_export(list, export);
2831 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2835 ret = rm_trace_export(list, export);
2837 ftrace_exports_disable();
2842 int register_ftrace_export(struct trace_export *export)
2844 if (WARN_ON_ONCE(!export->write))
2847 mutex_lock(&ftrace_export_lock);
2849 add_ftrace_export(&ftrace_exports_list, export);
2851 mutex_unlock(&ftrace_export_lock);
2855 EXPORT_SYMBOL_GPL(register_ftrace_export);
2857 int unregister_ftrace_export(struct trace_export *export)
2861 mutex_lock(&ftrace_export_lock);
2863 ret = rm_ftrace_export(&ftrace_exports_list, export);
2865 mutex_unlock(&ftrace_export_lock);
2869 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
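/*
 * Illustrative sketch (not part of the original file): a minimal user
 * of the ftrace export hooks above. This assumes the struct trace_export
 * layout from <linux/trace.h>, whose write() callback receives the raw
 * entry and its length as trace_process_export() passes them. A real
 * exporter would forward the bytes to hardware or another transport
 * instead of printing a debug line.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	pr_debug("exporting %u byte trace entry\n", size);
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static __maybe_unused int example_export_register(void)
{
	return register_ftrace_export(&example_export);
}

static __maybe_unused void example_export_unregister(void)
{
	unregister_ftrace_export(&example_export);
}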
2872 trace_function(struct trace_array *tr,
2873 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2876 struct trace_event_call *call = &event_function;
2877 struct trace_buffer *buffer = tr->array_buffer.buffer;
2878 struct ring_buffer_event *event;
2879 struct ftrace_entry *entry;
2881 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2885 entry = ring_buffer_event_data(event);
2887 entry->parent_ip = parent_ip;
2889 if (!call_filter_check_discard(call, entry, buffer, event)) {
2890 if (static_branch_unlikely(&ftrace_exports_enabled))
2891 ftrace_exports(event);
2892 __buffer_unlock_commit(buffer, event);
2896 #ifdef CONFIG_STACKTRACE
2898 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2899 #define FTRACE_KSTACK_NESTING 4
2901 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2903 struct ftrace_stack {
2904 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2908 struct ftrace_stacks {
2909 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2912 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2913 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2915 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2916 unsigned long flags,
2917 int skip, int pc, struct pt_regs *regs)
2919 struct trace_event_call *call = &event_kernel_stack;
2920 struct ring_buffer_event *event;
2921 unsigned int size, nr_entries;
2922 struct ftrace_stack *fstack;
2923 struct stack_entry *entry;
2927 * Add one, for this function and the call to save_stack_trace()
2928 * If regs is set, then these functions will not be in the way.
2930 #ifndef CONFIG_UNWINDER_ORC
2936 * Since events can happen in NMIs there's no safe way to
2937 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2938 * or NMI comes in, it will just have to use the default
2939 * FTRACE_STACK_SIZE.
2941 preempt_disable_notrace();
2943 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2945 /* This should never happen. If it does, yell once and skip */
2946 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2950 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2951 * interrupt will either see the value pre increment or post
2952 * increment. If the interrupt happens pre increment it will have
2953 * restored the counter when it returns. We just need a barrier to
2954 * keep gcc from moving things around.
2958 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2959 size = ARRAY_SIZE(fstack->calls);
2962 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2965 nr_entries = stack_trace_save(fstack->calls, size, skip);
2968 size = nr_entries * sizeof(unsigned long);
2969 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2970 sizeof(*entry) + size, flags, pc);
2973 entry = ring_buffer_event_data(event);
2975 memcpy(&entry->caller, fstack->calls, size);
2976 entry->size = nr_entries;
2978 if (!call_filter_check_discard(call, entry, buffer, event))
2979 __buffer_unlock_commit(buffer, event);
2982 /* Again, don't let gcc optimize things here */
2984 __this_cpu_dec(ftrace_stack_reserve);
2985 preempt_enable_notrace();
2989 static inline void ftrace_trace_stack(struct trace_array *tr,
2990 struct trace_buffer *buffer,
2991 unsigned long flags,
2992 int skip, int pc, struct pt_regs *regs)
2994 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2997 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
3000 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3003 struct trace_buffer *buffer = tr->array_buffer.buffer;
3005 if (rcu_is_watching()) {
3006 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3011 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3012 * but if the above rcu_is_watching() failed, then the NMI
3013 * triggered someplace critical, and rcu_irq_enter() should
3014 * not be called from NMI.
3016 if (unlikely(in_nmi()))
3019 rcu_irq_enter_irqson();
3020 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3021 rcu_irq_exit_irqson();
3025 * trace_dump_stack - record a stack back trace in the trace buffer
3026 * @skip: Number of functions to skip (helper handlers)
3028 void trace_dump_stack(int skip)
3030 unsigned long flags;
3032 if (tracing_disabled || tracing_selftest_running)
3035 local_save_flags(flags);
3037 #ifndef CONFIG_UNWINDER_ORC
3038 /* Skip 1 to skip this function. */
3041 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3042 flags, skip, preempt_count(), NULL);
3044 EXPORT_SYMBOL_GPL(trace_dump_stack);
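/*
 * Illustrative sketch (not part of the original file): trace_dump_stack()
 * is meant to be dropped into code under investigation so the call chain
 * lands in the trace buffer instead of the console. A skip of 0 keeps
 * every caller; a positive skip hides that many helper frames.
 */
static __maybe_unused void example_debug_hook(void)
{
	trace_dump_stack(0);
}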
3046 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3047 static DEFINE_PER_CPU(int, user_stack_count);
3050 ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
3052 struct trace_event_call *call = &event_user_stack;
3053 struct ring_buffer_event *event;
3054 struct userstack_entry *entry;
3056 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
3060 * NMIs can not handle page faults, even with fix ups.
3061 * Saving the user stack can (and often does) fault.
3063 if (unlikely(in_nmi()))
3067 * prevent recursion, since the user stack tracing may
3068 * trigger other kernel events.
3071 if (__this_cpu_read(user_stack_count))
3074 __this_cpu_inc(user_stack_count);
3076 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3077 sizeof(*entry), flags, pc);
3079 goto out_drop_count;
3080 entry = ring_buffer_event_data(event);
3082 entry->tgid = current->tgid;
3083 memset(&entry->caller, 0, sizeof(entry->caller));
3085 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3086 if (!call_filter_check_discard(call, entry, buffer, event))
3087 __buffer_unlock_commit(buffer, event);
3090 __this_cpu_dec(user_stack_count);
3094 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3095 static void ftrace_trace_userstack(struct trace_buffer *buffer,
3096 unsigned long flags, int pc)
3099 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3101 #endif /* CONFIG_STACKTRACE */
3103 /* created for use with alloc_percpu */
3104 struct trace_buffer_struct {
3106 char buffer[4][TRACE_BUF_SIZE];
3109 static struct trace_buffer_struct *trace_percpu_buffer;
3112 * This allows for lockless recording. If we're nested too deeply, then
3113 * this returns NULL.
3115 static char *get_trace_buf(void)
3117 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3119 if (!buffer || buffer->nesting >= 4)
3124 /* Interrupts must see nesting incremented before we use the buffer */
3126 return &buffer->buffer[buffer->nesting][0];
3129 static void put_trace_buf(void)
3131 /* Don't let the decrement of nesting leak before this */
3133 this_cpu_dec(trace_percpu_buffer->nesting);
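/*
 * Illustrative sketch (not part of the original file): the expected
 * pairing when borrowing one of the per-cpu trace_printk buffers.
 * Preemption stays disabled between get_trace_buf() and put_trace_buf()
 * so the nesting count and the buffer belong to the same CPU, exactly
 * as trace_vbprintk() below does for real.
 */
static __maybe_unused int example_use_trace_buf(const char *msg)
{
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer)
		goto out;	/* nested more than four levels deep */

	len = scnprintf(tbuffer, TRACE_BUF_SIZE, "%s", msg);
	/* ... hand the formatted text to the ring buffer here ... */

	put_trace_buf();
out:
	preempt_enable_notrace();
	return len;
}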
3136 static int alloc_percpu_trace_buffer(void)
3138 struct trace_buffer_struct *buffers;
3140 buffers = alloc_percpu(struct trace_buffer_struct);
3141 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3144 trace_percpu_buffer = buffers;
3148 static int buffers_allocated;
3150 void trace_printk_init_buffers(void)
3152 if (buffers_allocated)
3155 if (alloc_percpu_trace_buffer())
3158 /* trace_printk() is for debug use only. Don't use it in production. */
3161 pr_warn("**********************************************************\n");
3162 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3164 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3166 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3167 pr_warn("** unsafe for production use. **\n");
3169 pr_warn("** If you see this message and you are not debugging **\n");
3170 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3172 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3173 pr_warn("**********************************************************\n");
3175 /* Expand the buffers to set size */
3176 tracing_update_buffers();
3178 buffers_allocated = 1;
3181 * trace_printk_init_buffers() can be called by modules.
3182 * If that happens, then we need to start cmdline recording
3183 * directly here. If the global_trace.buffer is already
3184 * allocated here, then this was called by module code.
3186 if (global_trace.array_buffer.buffer)
3187 tracing_start_cmdline_record();
3189 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3191 void trace_printk_start_comm(void)
3193 /* Start tracing comms if trace printk is set */
3194 if (!buffers_allocated)
3196 tracing_start_cmdline_record();
3199 static void trace_printk_start_stop_comm(int enabled)
3201 if (!buffers_allocated)
3205 tracing_start_cmdline_record();
3207 tracing_stop_cmdline_record();
3211 * trace_vbprintk - write binary msg to tracing buffer
3212 * @ip: The address of the caller
3213 * @fmt: The string format to write to the buffer
3214 * @args: Arguments for @fmt
3216 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3218 struct trace_event_call *call = &event_bprint;
3219 struct ring_buffer_event *event;
3220 struct trace_buffer *buffer;
3221 struct trace_array *tr = &global_trace;
3222 struct bprint_entry *entry;
3223 unsigned long flags;
3225 int len = 0, size, pc;
3227 if (unlikely(tracing_selftest_running || tracing_disabled))
3230 /* Don't pollute graph traces with trace_vprintk internals */
3231 pause_graph_tracing();
3233 pc = preempt_count();
3234 preempt_disable_notrace();
3236 tbuffer = get_trace_buf();
3242 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3244 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3247 local_save_flags(flags);
3248 size = sizeof(*entry) + sizeof(u32) * len;
3249 buffer = tr->array_buffer.buffer;
3250 ring_buffer_nest_start(buffer);
3251 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3255 entry = ring_buffer_event_data(event);
3259 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3260 if (!call_filter_check_discard(call, entry, buffer, event)) {
3261 __buffer_unlock_commit(buffer, event);
3262 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3266 ring_buffer_nest_end(buffer);
3271 preempt_enable_notrace();
3272 unpause_graph_tracing();
3276 EXPORT_SYMBOL_GPL(trace_vbprintk);
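/*
 * Illustrative sketch (not part of the original file): trace_vbprintk()
 * is normally reached through the trace_printk() macro when the format
 * string is a compile-time constant, so only a pointer to the format
 * plus the binary arguments travel through the path above. The function
 * and its arguments are made up for the example.
 */
static __maybe_unused void example_trace_printk(int pid, unsigned long state)
{
	/* Constant format: typically recorded via the bprint path above. */
	trace_printk("processing pid %d, state 0x%lx\n", pid, state);
}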
3280 __trace_array_vprintk(struct trace_buffer *buffer,
3281 unsigned long ip, const char *fmt, va_list args)
3283 struct trace_event_call *call = &event_print;
3284 struct ring_buffer_event *event;
3285 int len = 0, size, pc;
3286 struct print_entry *entry;
3287 unsigned long flags;
3290 if (tracing_disabled || tracing_selftest_running)
3293 /* Don't pollute graph traces with trace_vprintk internals */
3294 pause_graph_tracing();
3296 pc = preempt_count();
3297 preempt_disable_notrace();
3300 tbuffer = get_trace_buf();
3306 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3308 local_save_flags(flags);
3309 size = sizeof(*entry) + len + 1;
3310 ring_buffer_nest_start(buffer);
3311 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3315 entry = ring_buffer_event_data(event);
3318 memcpy(&entry->buf, tbuffer, len + 1);
3319 if (!call_filter_check_discard(call, entry, buffer, event)) {
3320 __buffer_unlock_commit(buffer, event);
3321 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3325 ring_buffer_nest_end(buffer);
3329 preempt_enable_notrace();
3330 unpause_graph_tracing();
3336 int trace_array_vprintk(struct trace_array *tr,
3337 unsigned long ip, const char *fmt, va_list args)
3339 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3343 int trace_array_printk(struct trace_array *tr,
3344 unsigned long ip, const char *fmt, ...)
3349 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3356 ret = trace_array_vprintk(tr, ip, fmt, ap);
3360 EXPORT_SYMBOL_GPL(trace_array_printk);
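/*
 * Illustrative sketch (not part of the original file): one possible use
 * of the exported trace_array_printk() from code that owns a tracing
 * instance. trace_array_get_by_name() and trace_array_put() are assumed
 * to be the instance lookup/refcount helpers declared in <linux/trace.h>;
 * the instance name is made up.
 */
static __maybe_unused void example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example-instance");
	if (!tr)
		return;

	trace_array_printk(tr, _THIS_IP_, "hello from %s\n", __func__);

	trace_array_put(tr);
}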
3363 int trace_array_printk_buf(struct trace_buffer *buffer,
3364 unsigned long ip, const char *fmt, ...)
3369 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3373 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3379 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3381 return trace_array_vprintk(&global_trace, ip, fmt, args);
3383 EXPORT_SYMBOL_GPL(trace_vprintk);
3385 static void trace_iterator_increment(struct trace_iterator *iter)
3387 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3391 ring_buffer_iter_advance(buf_iter);
3394 static struct trace_entry *
3395 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3396 unsigned long *lost_events)
3398 struct ring_buffer_event *event;
3399 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3402 event = ring_buffer_iter_peek(buf_iter, ts);
3404 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3405 (unsigned long)-1 : 0;
3407 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3412 iter->ent_size = ring_buffer_event_length(event);
3413 return ring_buffer_event_data(event);
3419 static struct trace_entry *
3420 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3421 unsigned long *missing_events, u64 *ent_ts)
3423 struct trace_buffer *buffer = iter->array_buffer->buffer;
3424 struct trace_entry *ent, *next = NULL;
3425 unsigned long lost_events = 0, next_lost = 0;
3426 int cpu_file = iter->cpu_file;
3427 u64 next_ts = 0, ts;
3433 * If we are in a per_cpu trace file, don't bother by iterating over
3434 * all cpu and peek directly.
3436 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3437 if (ring_buffer_empty_cpu(buffer, cpu_file))
3439 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3441 *ent_cpu = cpu_file;
3446 for_each_tracing_cpu(cpu) {
3448 if (ring_buffer_empty_cpu(buffer, cpu))
3451 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3454 * Pick the entry with the smallest timestamp:
3456 if (ent && (!next || ts < next_ts)) {
3460 next_lost = lost_events;
3461 next_size = iter->ent_size;
3465 iter->ent_size = next_size;
3468 *ent_cpu = next_cpu;
3474 *missing_events = next_lost;
3479 #define STATIC_TEMP_BUF_SIZE 128
3480 static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3482 /* Find the next real entry, without updating the iterator itself */
3483 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3484 int *ent_cpu, u64 *ent_ts)
3486 /* __find_next_entry will reset ent_size */
3487 int ent_size = iter->ent_size;
3488 struct trace_entry *entry;
3491 * If called from ftrace_dump(), then the iter->temp buffer
3492 * will be the static_temp_buf and not created from kmalloc.
3493 * If the entry size is greater than the buffer, we can
3494 * not save it. Just return NULL in that case. This is only
3495 * used to add markers when two consecutive events' time
3496 * stamps have a large delta. See trace_print_lat_context()
3498 if (iter->temp == static_temp_buf &&
3499 STATIC_TEMP_BUF_SIZE < ent_size)
3503 * The __find_next_entry() may call peek_next_entry(), which may
3504 * call ring_buffer_peek() that may make the contents of iter->ent
3505 * undefined. Need to copy iter->ent now.
3507 if (iter->ent && iter->ent != iter->temp) {
3508 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3509 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3511 iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
3515 memcpy(iter->temp, iter->ent, iter->ent_size);
3516 iter->temp_size = iter->ent_size;
3517 iter->ent = iter->temp;
3519 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3520 /* Put back the original ent_size */
3521 iter->ent_size = ent_size;
3526 /* Find the next real entry, and increment the iterator to the next entry */
3527 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3529 iter->ent = __find_next_entry(iter, &iter->cpu,
3530 &iter->lost_events, &iter->ts);
3533 trace_iterator_increment(iter);
3535 return iter->ent ? iter : NULL;
3538 static void trace_consume(struct trace_iterator *iter)
3540 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3541 &iter->lost_events);
3544 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3546 struct trace_iterator *iter = m->private;
3550 WARN_ON_ONCE(iter->leftover);
3554 /* can't go backwards */
3559 ent = trace_find_next_entry_inc(iter);
3563 while (ent && iter->idx < i)
3564 ent = trace_find_next_entry_inc(iter);
3571 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3573 struct ring_buffer_iter *buf_iter;
3574 unsigned long entries = 0;
3577 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3579 buf_iter = trace_buffer_iter(iter, cpu);
3583 ring_buffer_iter_reset(buf_iter);
3586 * We could have the case with the max latency tracers
3587 * that a reset never took place on a cpu. This is evident
3588 * by the timestamp being before the start of the buffer.
3590 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3591 if (ts >= iter->array_buffer->time_start)
3594 ring_buffer_iter_advance(buf_iter);
3597 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3601 * The current tracer is copied to avoid a global locking
3604 static void *s_start(struct seq_file *m, loff_t *pos)
3606 struct trace_iterator *iter = m->private;
3607 struct trace_array *tr = iter->tr;
3608 int cpu_file = iter->cpu_file;
3614 * copy the tracer to avoid using a global lock all around.
3615 * iter->trace is a copy of current_trace, the pointer to the
3616 * name may be used instead of a strcmp(), as iter->trace->name
3617 * will point to the same string as current_trace->name.
3619 mutex_lock(&trace_types_lock);
3620 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3621 *iter->trace = *tr->current_trace;
3622 mutex_unlock(&trace_types_lock);
3624 #ifdef CONFIG_TRACER_MAX_TRACE
3625 if (iter->snapshot && iter->trace->use_max_tr)
3626 return ERR_PTR(-EBUSY);
3629 if (!iter->snapshot)
3630 atomic_inc(&trace_record_taskinfo_disabled);
3632 if (*pos != iter->pos) {
3637 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3638 for_each_tracing_cpu(cpu)
3639 tracing_iter_reset(iter, cpu);
3641 tracing_iter_reset(iter, cpu_file);
3644 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3649 * If we overflowed the seq_file before, then we want
3650 * to just reuse the trace_seq buffer again.
3656 p = s_next(m, p, &l);
3660 trace_event_read_lock();
3661 trace_access_lock(cpu_file);
3665 static void s_stop(struct seq_file *m, void *p)
3667 struct trace_iterator *iter = m->private;
3669 #ifdef CONFIG_TRACER_MAX_TRACE
3670 if (iter->snapshot && iter->trace->use_max_tr)
3674 if (!iter->snapshot)
3675 atomic_dec(&trace_record_taskinfo_disabled);
3677 trace_access_unlock(iter->cpu_file);
3678 trace_event_read_unlock();
3682 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3683 unsigned long *entries, int cpu)
3685 unsigned long count;
3687 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3689 * If this buffer has skipped entries, then we hold all
3690 * entries for the trace and we need to ignore the
3691 * ones before the time stamp.
3693 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3694 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3695 /* total is the same as the entries */
3699 ring_buffer_overrun_cpu(buf->buffer, cpu);
3704 get_total_entries(struct array_buffer *buf,
3705 unsigned long *total, unsigned long *entries)
3713 for_each_tracing_cpu(cpu) {
3714 get_total_entries_cpu(buf, &t, &e, cpu);
3720 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3722 unsigned long total, entries;
3727 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3732 unsigned long trace_total_entries(struct trace_array *tr)
3734 unsigned long total, entries;
3739 get_total_entries(&tr->array_buffer, &total, &entries);
3744 static void print_lat_help_header(struct seq_file *m)
3746 seq_puts(m, "# _------=> CPU# \n"
3747 "# / _-----=> irqs-off \n"
3748 "# | / _----=> need-resched \n"
3749 "# || / _---=> hardirq/softirq \n"
3750 "# ||| / _--=> preempt-depth \n"
3752 "# cmd pid ||||| time | caller \n"
3753 "# \\ / ||||| \\ | / \n");
3756 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3758 unsigned long total;
3759 unsigned long entries;
3761 get_total_entries(buf, &total, &entries);
3762 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3763 entries, total, num_online_cpus());
3767 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3770 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3772 print_event_info(buf, m);
3774 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3775 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3778 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3781 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3782 const char *space = " ";
3783 int prec = tgid ? 10 : 2;
3785 print_event_info(buf, m);
3787 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3788 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3789 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3790 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3791 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3792 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3793 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3797 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3799 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3800 struct array_buffer *buf = iter->array_buffer;
3801 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3802 struct tracer *type = iter->trace;
3803 unsigned long entries;
3804 unsigned long total;
3805 const char *name = "preemption";
3809 get_total_entries(buf, &total, &entries);
3811 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3813 seq_puts(m, "# -----------------------------------"
3814 "---------------------------------\n");
3815 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3816 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3817 nsecs_to_usecs(data->saved_latency),
3821 #if defined(CONFIG_PREEMPT_NONE)
3823 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3825 #elif defined(CONFIG_PREEMPT)
3827 #elif defined(CONFIG_PREEMPT_RT)
3832 /* These are reserved for later use */
3835 seq_printf(m, " #P:%d)\n", num_online_cpus());
3839 seq_puts(m, "# -----------------\n");
3840 seq_printf(m, "# | task: %.16s-%d "
3841 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3842 data->comm, data->pid,
3843 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3844 data->policy, data->rt_priority);
3845 seq_puts(m, "# -----------------\n");
3847 if (data->critical_start) {
3848 seq_puts(m, "# => started at: ");
3849 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3850 trace_print_seq(m, &iter->seq);
3851 seq_puts(m, "\n# => ended at: ");
3852 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3853 trace_print_seq(m, &iter->seq);
3854 seq_puts(m, "\n#\n");
3860 static void test_cpu_buff_start(struct trace_iterator *iter)
3862 struct trace_seq *s = &iter->seq;
3863 struct trace_array *tr = iter->tr;
3865 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3868 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3871 if (cpumask_available(iter->started) &&
3872 cpumask_test_cpu(iter->cpu, iter->started))
3875 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3878 if (cpumask_available(iter->started))
3879 cpumask_set_cpu(iter->cpu, iter->started);
3881 /* Don't print started cpu buffer for the first entry of the trace */
3883 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3887 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3889 struct trace_array *tr = iter->tr;
3890 struct trace_seq *s = &iter->seq;
3891 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3892 struct trace_entry *entry;
3893 struct trace_event *event;
3897 test_cpu_buff_start(iter);
3899 event = ftrace_find_event(entry->type);
3901 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3902 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3903 trace_print_lat_context(iter);
3905 trace_print_context(iter);
3908 if (trace_seq_has_overflowed(s))
3909 return TRACE_TYPE_PARTIAL_LINE;
3912 return event->funcs->trace(iter, sym_flags, event);
3914 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3916 return trace_handle_return(s);
3919 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3921 struct trace_array *tr = iter->tr;
3922 struct trace_seq *s = &iter->seq;
3923 struct trace_entry *entry;
3924 struct trace_event *event;
3928 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3929 trace_seq_printf(s, "%d %d %llu ",
3930 entry->pid, iter->cpu, iter->ts);
3932 if (trace_seq_has_overflowed(s))
3933 return TRACE_TYPE_PARTIAL_LINE;
3935 event = ftrace_find_event(entry->type);
3937 return event->funcs->raw(iter, 0, event);
3939 trace_seq_printf(s, "%d ?\n", entry->type);
3941 return trace_handle_return(s);
3944 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3946 struct trace_array *tr = iter->tr;
3947 struct trace_seq *s = &iter->seq;
3948 unsigned char newline = '\n';
3949 struct trace_entry *entry;
3950 struct trace_event *event;
3954 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3955 SEQ_PUT_HEX_FIELD(s, entry->pid);
3956 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3957 SEQ_PUT_HEX_FIELD(s, iter->ts);
3958 if (trace_seq_has_overflowed(s))
3959 return TRACE_TYPE_PARTIAL_LINE;
3962 event = ftrace_find_event(entry->type);
3964 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3965 if (ret != TRACE_TYPE_HANDLED)
3969 SEQ_PUT_FIELD(s, newline);
3971 return trace_handle_return(s);
3974 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3976 struct trace_array *tr = iter->tr;
3977 struct trace_seq *s = &iter->seq;
3978 struct trace_entry *entry;
3979 struct trace_event *event;
3983 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3984 SEQ_PUT_FIELD(s, entry->pid);
3985 SEQ_PUT_FIELD(s, iter->cpu);
3986 SEQ_PUT_FIELD(s, iter->ts);
3987 if (trace_seq_has_overflowed(s))
3988 return TRACE_TYPE_PARTIAL_LINE;
3991 event = ftrace_find_event(entry->type);
3992 return event ? event->funcs->binary(iter, 0, event) :
3996 int trace_empty(struct trace_iterator *iter)
3998 struct ring_buffer_iter *buf_iter;
4001 /* If we are looking at one CPU buffer, only check that one */
4002 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4003 cpu = iter->cpu_file;
4004 buf_iter = trace_buffer_iter(iter, cpu);
4006 if (!ring_buffer_iter_empty(buf_iter))
4009 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4015 for_each_tracing_cpu(cpu) {
4016 buf_iter = trace_buffer_iter(iter, cpu);
4018 if (!ring_buffer_iter_empty(buf_iter))
4021 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4029 /* Called with trace_event_read_lock() held. */
4030 enum print_line_t print_trace_line(struct trace_iterator *iter)
4032 struct trace_array *tr = iter->tr;
4033 unsigned long trace_flags = tr->trace_flags;
4034 enum print_line_t ret;
4036 if (iter->lost_events) {
4037 if (iter->lost_events == (unsigned long)-1)
4038 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4041 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4042 iter->cpu, iter->lost_events);
4043 if (trace_seq_has_overflowed(&iter->seq))
4044 return TRACE_TYPE_PARTIAL_LINE;
4047 if (iter->trace && iter->trace->print_line) {
4048 ret = iter->trace->print_line(iter);
4049 if (ret != TRACE_TYPE_UNHANDLED)
4053 if (iter->ent->type == TRACE_BPUTS &&
4054 trace_flags & TRACE_ITER_PRINTK &&
4055 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4056 return trace_print_bputs_msg_only(iter);
4058 if (iter->ent->type == TRACE_BPRINT &&
4059 trace_flags & TRACE_ITER_PRINTK &&
4060 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4061 return trace_print_bprintk_msg_only(iter);
4063 if (iter->ent->type == TRACE_PRINT &&
4064 trace_flags & TRACE_ITER_PRINTK &&
4065 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4066 return trace_print_printk_msg_only(iter);
4068 if (trace_flags & TRACE_ITER_BIN)
4069 return print_bin_fmt(iter);
4071 if (trace_flags & TRACE_ITER_HEX)
4072 return print_hex_fmt(iter);
4074 if (trace_flags & TRACE_ITER_RAW)
4075 return print_raw_fmt(iter);
4077 return print_trace_fmt(iter);
4080 void trace_latency_header(struct seq_file *m)
4082 struct trace_iterator *iter = m->private;
4083 struct trace_array *tr = iter->tr;
4085 /* print nothing if the buffers are empty */
4086 if (trace_empty(iter))
4089 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4090 print_trace_header(m, iter);
4092 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4093 print_lat_help_header(m);
4096 void trace_default_header(struct seq_file *m)
4098 struct trace_iterator *iter = m->private;
4099 struct trace_array *tr = iter->tr;
4100 unsigned long trace_flags = tr->trace_flags;
4102 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4105 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4106 /* print nothing if the buffers are empty */
4107 if (trace_empty(iter))
4109 print_trace_header(m, iter);
4110 if (!(trace_flags & TRACE_ITER_VERBOSE))
4111 print_lat_help_header(m);
4113 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4114 if (trace_flags & TRACE_ITER_IRQ_INFO)
4115 print_func_help_header_irq(iter->array_buffer,
4118 print_func_help_header(iter->array_buffer, m,
4124 static void test_ftrace_alive(struct seq_file *m)
4126 if (!ftrace_is_dead())
4128 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4129 "# MAY BE MISSING FUNCTION EVENTS\n");
4132 #ifdef CONFIG_TRACER_MAX_TRACE
4133 static void show_snapshot_main_help(struct seq_file *m)
4135 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4136 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4137 "# Takes a snapshot of the main buffer.\n"
4138 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4139 "# (Doesn't have to be '2', works with any number that\n"
4140 "# is not a '0' or '1')\n");
4143 static void show_snapshot_percpu_help(struct seq_file *m)
4145 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4146 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4147 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4148 "# Takes a snapshot of the main buffer for this cpu.\n");
4150 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4151 "# Must use main snapshot file to allocate.\n");
4153 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4154 "# (Doesn't have to be '2', works with any number that\n"
4155 "# is not a '0' or '1')\n");
4158 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4160 if (iter->tr->allocated_snapshot)
4161 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4163 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4165 seq_puts(m, "# Snapshot commands:\n");
4166 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4167 show_snapshot_main_help(m);
4169 show_snapshot_percpu_help(m);
4172 /* Should never be called */
4173 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4176 static int s_show(struct seq_file *m, void *v)
4178 struct trace_iterator *iter = v;
4181 if (iter->ent == NULL) {
4183 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4185 test_ftrace_alive(m);
4187 if (iter->snapshot && trace_empty(iter))
4188 print_snapshot_help(m, iter);
4189 else if (iter->trace && iter->trace->print_header)
4190 iter->trace->print_header(m);
4192 trace_default_header(m);
4194 } else if (iter->leftover) {
4196 * If we filled the seq_file buffer earlier, we
4197 * want to just show it now.
4199 ret = trace_print_seq(m, &iter->seq);
4201 /* ret should this time be zero, but you never know */
4202 iter->leftover = ret;
4205 print_trace_line(iter);
4206 ret = trace_print_seq(m, &iter->seq);
4208 * If we overflow the seq_file buffer, then it will
4209 * ask us for this data again at start up.
4211 * ret is 0 if seq_file write succeeded.
4214 iter->leftover = ret;
4221 * Should be used after trace_array_get(), trace_types_lock
4222 * ensures that i_cdev was already initialized.
4224 static inline int tracing_get_cpu(struct inode *inode)
4226 if (inode->i_cdev) /* See trace_create_cpu_file() */
4227 return (long)inode->i_cdev - 1;
4228 return RING_BUFFER_ALL_CPUS;
4231 static const struct seq_operations tracer_seq_ops = {
4238 static struct trace_iterator *
4239 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4241 struct trace_array *tr = inode->i_private;
4242 struct trace_iterator *iter;
4245 if (tracing_disabled)
4246 return ERR_PTR(-ENODEV);
4248 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4250 return ERR_PTR(-ENOMEM);
4252 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4254 if (!iter->buffer_iter)
4258 * trace_find_next_entry() may need to save off iter->ent.
4259 * It will place it into the iter->temp buffer. As most
4260 * events are less than 128, allocate a buffer of that size.
4261 * If one is greater, then trace_find_next_entry() will
4262 * allocate a new buffer to adjust for the bigger iter->ent.
4263 * It's not critical if it fails to get allocated here.
4265 iter->temp = kmalloc(128, GFP_KERNEL);
4267 iter->temp_size = 128;
4270 * We make a copy of the current tracer to avoid concurrent
4271 * changes on it while we are reading.
4273 mutex_lock(&trace_types_lock);
4274 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4278 *iter->trace = *tr->current_trace;
4280 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4285 #ifdef CONFIG_TRACER_MAX_TRACE
4286 /* Currently only the top directory has a snapshot */
4287 if (tr->current_trace->print_max || snapshot)
4288 iter->array_buffer = &tr->max_buffer;
4291 iter->array_buffer = &tr->array_buffer;
4292 iter->snapshot = snapshot;
4294 iter->cpu_file = tracing_get_cpu(inode);
4295 mutex_init(&iter->mutex);
4297 /* Notify the tracer early; before we stop tracing. */
4298 if (iter->trace->open)
4299 iter->trace->open(iter);
4301 /* Annotate start of buffers if we had overruns */
4302 if (ring_buffer_overruns(iter->array_buffer->buffer))
4303 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4305 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4306 if (trace_clocks[tr->clock_id].in_ns)
4307 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4310 * If pause-on-trace is enabled, then stop the trace while
4311 * dumping, unless this is the "snapshot" file
4313 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4314 tracing_stop_tr(tr);
4316 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4317 for_each_tracing_cpu(cpu) {
4318 iter->buffer_iter[cpu] =
4319 ring_buffer_read_prepare(iter->array_buffer->buffer,
4322 ring_buffer_read_prepare_sync();
4323 for_each_tracing_cpu(cpu) {
4324 ring_buffer_read_start(iter->buffer_iter[cpu]);
4325 tracing_iter_reset(iter, cpu);
4328 cpu = iter->cpu_file;
4329 iter->buffer_iter[cpu] =
4330 ring_buffer_read_prepare(iter->array_buffer->buffer,
4332 ring_buffer_read_prepare_sync();
4333 ring_buffer_read_start(iter->buffer_iter[cpu]);
4334 tracing_iter_reset(iter, cpu);
4337 mutex_unlock(&trace_types_lock);
4342 mutex_unlock(&trace_types_lock);
4345 kfree(iter->buffer_iter);
4347 seq_release_private(inode, file);
4348 return ERR_PTR(-ENOMEM);
4351 int tracing_open_generic(struct inode *inode, struct file *filp)
4355 ret = tracing_check_open_get_tr(NULL);
4359 filp->private_data = inode->i_private;
4363 bool tracing_is_disabled(void)
4365 return (tracing_disabled) ? true : false;
4369 * Open and update trace_array ref count.
4370 * Must have the current trace_array passed to it.
4372 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4374 struct trace_array *tr = inode->i_private;
4377 ret = tracing_check_open_get_tr(tr);
4381 filp->private_data = inode->i_private;
4386 static int tracing_release(struct inode *inode, struct file *file)
4388 struct trace_array *tr = inode->i_private;
4389 struct seq_file *m = file->private_data;
4390 struct trace_iterator *iter;
4393 if (!(file->f_mode & FMODE_READ)) {
4394 trace_array_put(tr);
4398 /* Writes do not use seq_file */
4400 mutex_lock(&trace_types_lock);
4402 for_each_tracing_cpu(cpu) {
4403 if (iter->buffer_iter[cpu])
4404 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4407 if (iter->trace && iter->trace->close)
4408 iter->trace->close(iter);
4410 if (!iter->snapshot && tr->stop_count)
4411 /* reenable tracing if it was previously enabled */
4412 tracing_start_tr(tr);
4414 __trace_array_put(tr);
4416 mutex_unlock(&trace_types_lock);
4418 mutex_destroy(&iter->mutex);
4419 free_cpumask_var(iter->started);
4422 kfree(iter->buffer_iter);
4423 seq_release_private(inode, file);
4428 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4430 struct trace_array *tr = inode->i_private;
4432 trace_array_put(tr);
4436 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4438 struct trace_array *tr = inode->i_private;
4440 trace_array_put(tr);
4442 return single_release(inode, file);
4445 static int tracing_open(struct inode *inode, struct file *file)
4447 struct trace_array *tr = inode->i_private;
4448 struct trace_iterator *iter;
4451 ret = tracing_check_open_get_tr(tr);
4455 /* If this file was open for write, then erase contents */
4456 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4457 int cpu = tracing_get_cpu(inode);
4458 struct array_buffer *trace_buf = &tr->array_buffer;
4460 #ifdef CONFIG_TRACER_MAX_TRACE
4461 if (tr->current_trace->print_max)
4462 trace_buf = &tr->max_buffer;
4465 if (cpu == RING_BUFFER_ALL_CPUS)
4466 tracing_reset_online_cpus(trace_buf);
4468 tracing_reset_cpu(trace_buf, cpu);
4471 if (file->f_mode & FMODE_READ) {
4472 iter = __tracing_open(inode, file, false);
4474 ret = PTR_ERR(iter);
4475 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4476 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4480 trace_array_put(tr);
4486 * Some tracers are not suitable for instance buffers.
4487 * A tracer is always available for the global array (toplevel)
4488 * or if it explicitly states that it is.
4491 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4493 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4496 /* Find the next tracer that this trace array may use */
4497 static struct tracer *
4498 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4500 while (t && !trace_ok_for_array(t, tr))
4507 t_next(struct seq_file *m, void *v, loff_t *pos)
4509 struct trace_array *tr = m->private;
4510 struct tracer *t = v;
4515 t = get_tracer_for_array(tr, t->next);
4520 static void *t_start(struct seq_file *m, loff_t *pos)
4522 struct trace_array *tr = m->private;
4526 mutex_lock(&trace_types_lock);
4528 t = get_tracer_for_array(tr, trace_types);
4529 for (; t && l < *pos; t = t_next(m, t, &l))
4535 static void t_stop(struct seq_file *m, void *p)
4537 mutex_unlock(&trace_types_lock);
4540 static int t_show(struct seq_file *m, void *v)
4542 struct tracer *t = v;
4547 seq_puts(m, t->name);
4556 static const struct seq_operations show_traces_seq_ops = {
4563 static int show_traces_open(struct inode *inode, struct file *file)
4565 struct trace_array *tr = inode->i_private;
4569 ret = tracing_check_open_get_tr(tr);
4573 ret = seq_open(file, &show_traces_seq_ops);
4575 trace_array_put(tr);
4579 m = file->private_data;
4585 static int show_traces_release(struct inode *inode, struct file *file)
4587 struct trace_array *tr = inode->i_private;
4589 trace_array_put(tr);
4590 return seq_release(inode, file);
4594 tracing_write_stub(struct file *filp, const char __user *ubuf,
4595 size_t count, loff_t *ppos)
4600 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4604 if (file->f_mode & FMODE_READ)
4605 ret = seq_lseek(file, offset, whence);
4607 file->f_pos = ret = 0;
4612 static const struct file_operations tracing_fops = {
4613 .open = tracing_open,
4615 .write = tracing_write_stub,
4616 .llseek = tracing_lseek,
4617 .release = tracing_release,
4620 static const struct file_operations show_traces_fops = {
4621 .open = show_traces_open,
4623 .llseek = seq_lseek,
4624 .release = show_traces_release,
4628 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4629 size_t count, loff_t *ppos)
4631 struct trace_array *tr = file_inode(filp)->i_private;
4635 len = snprintf(NULL, 0, "%*pb\n",
4636 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4637 mask_str = kmalloc(len, GFP_KERNEL);
4641 len = snprintf(mask_str, len, "%*pb\n",
4642 cpumask_pr_args(tr->tracing_cpumask));
4647 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4655 int tracing_set_cpumask(struct trace_array *tr,
4656 cpumask_var_t tracing_cpumask_new)
4663 local_irq_disable();
4664 arch_spin_lock(&tr->max_lock);
4665 for_each_tracing_cpu(cpu) {
4667 * Increase/decrease the disabled counter if we are
4668 * about to flip a bit in the cpumask:
4670 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4671 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4672 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4673 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4675 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4676 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4677 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4678 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4681 arch_spin_unlock(&tr->max_lock);
4684 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4690 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4691 size_t count, loff_t *ppos)
4693 struct trace_array *tr = file_inode(filp)->i_private;
4694 cpumask_var_t tracing_cpumask_new;
4697 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4700 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4704 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4708 free_cpumask_var(tracing_cpumask_new);
4713 free_cpumask_var(tracing_cpumask_new);
4718 static const struct file_operations tracing_cpumask_fops = {
4719 .open = tracing_open_generic_tr,
4720 .read = tracing_cpumask_read,
4721 .write = tracing_cpumask_write,
4722 .release = tracing_release_generic_tr,
4723 .llseek = generic_file_llseek,
4726 static int tracing_trace_options_show(struct seq_file *m, void *v)
4728 struct tracer_opt *trace_opts;
4729 struct trace_array *tr = m->private;
4733 mutex_lock(&trace_types_lock);
4734 tracer_flags = tr->current_trace->flags->val;
4735 trace_opts = tr->current_trace->flags->opts;
4737 for (i = 0; trace_options[i]; i++) {
4738 if (tr->trace_flags & (1 << i))
4739 seq_printf(m, "%s\n", trace_options[i]);
4741 seq_printf(m, "no%s\n", trace_options[i]);
4744 for (i = 0; trace_opts[i].name; i++) {
4745 if (tracer_flags & trace_opts[i].bit)
4746 seq_printf(m, "%s\n", trace_opts[i].name);
4748 seq_printf(m, "no%s\n", trace_opts[i].name);
4750 mutex_unlock(&trace_types_lock);
4755 static int __set_tracer_option(struct trace_array *tr,
4756 struct tracer_flags *tracer_flags,
4757 struct tracer_opt *opts, int neg)
4759 struct tracer *trace = tracer_flags->trace;
4762 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4767 tracer_flags->val &= ~opts->bit;
4769 tracer_flags->val |= opts->bit;
4773 /* Try to assign a tracer specific option */
4774 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4776 struct tracer *trace = tr->current_trace;
4777 struct tracer_flags *tracer_flags = trace->flags;
4778 struct tracer_opt *opts = NULL;
4781 for (i = 0; tracer_flags->opts[i].name; i++) {
4782 opts = &tracer_flags->opts[i];
4784 if (strcmp(cmp, opts->name) == 0)
4785 return __set_tracer_option(tr, trace->flags, opts, neg);
4791 /* Some tracers require overwrite to stay enabled */
4792 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4794 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4800 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4802 if ((mask == TRACE_ITER_RECORD_TGID) ||
4803 (mask == TRACE_ITER_RECORD_CMD))
4804 lockdep_assert_held(&event_mutex);
4806 /* do nothing if flag is already set */
4807 if (!!(tr->trace_flags & mask) == !!enabled)
4810 /* Give the tracer a chance to approve the change */
4811 if (tr->current_trace->flag_changed)
4812 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4816 tr->trace_flags |= mask;
4818 tr->trace_flags &= ~mask;
4820 if (mask == TRACE_ITER_RECORD_CMD)
4821 trace_event_enable_cmd_record(enabled);
4823 if (mask == TRACE_ITER_RECORD_TGID) {
4825 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4829 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4833 trace_event_enable_tgid_record(enabled);
4836 if (mask == TRACE_ITER_EVENT_FORK)
4837 trace_event_follow_fork(tr, enabled);
4839 if (mask == TRACE_ITER_FUNC_FORK)
4840 ftrace_pid_follow_fork(tr, enabled);
4842 if (mask == TRACE_ITER_OVERWRITE) {
4843 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4844 #ifdef CONFIG_TRACER_MAX_TRACE
4845 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4849 if (mask == TRACE_ITER_PRINTK) {
4850 trace_printk_start_stop_comm(enabled);
4851 trace_printk_control(enabled);
4857 int trace_set_options(struct trace_array *tr, char *option)
4862 size_t orig_len = strlen(option);
4865 cmp = strstrip(option);
4867 len = str_has_prefix(cmp, "no");
4873 mutex_lock(&event_mutex);
4874 mutex_lock(&trace_types_lock);
4876 ret = match_string(trace_options, -1, cmp);
4877 /* If no option could be set, test the specific tracer options */
4879 ret = set_tracer_option(tr, cmp, neg);
4881 ret = set_tracer_flag(tr, 1 << ret, !neg);
4883 mutex_unlock(&trace_types_lock);
4884 mutex_unlock(&event_mutex);
4887 * If the first trailing whitespace is replaced with '\0' by strstrip,
4888 * turn it back into a space.
4890 if (orig_len > strlen(option))
4891 option[strlen(option)] = ' ';
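/*
 * Illustrative user-space sketch (not part of the original file): writes to
 * the trace_options file end up in trace_set_options() above, including the
 * "no" prefix handling. The mount point /sys/kernel/tracing is an assumption
 * (older setups use /sys/kernel/debug/tracing).
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static int write_trace_option(const char *opt)
 *   {
 *           int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *           if (fd < 0)
 *                   return -1;
 *           // e.g. "overwrite" to set the flag, "nooverwrite" to clear it
 *           if (write(fd, opt, strlen(opt)) < 0) {
 *                   close(fd);
 *                   return -1;
 *           }
 *           return close(fd);
 *   }
 */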
4896 static void __init apply_trace_boot_options(void)
4898 char *buf = trace_boot_options_buf;
4902 option = strsep(&buf, ",");
4908 trace_set_options(&global_trace, option);
4910 /* Put back the comma to allow this to be called again */
4917 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4918 size_t cnt, loff_t *ppos)
4920 struct seq_file *m = filp->private_data;
4921 struct trace_array *tr = m->private;
4925 if (cnt >= sizeof(buf))
4928 if (copy_from_user(buf, ubuf, cnt))
4933 ret = trace_set_options(tr, buf);
4942 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4944 struct trace_array *tr = inode->i_private;
4947 ret = tracing_check_open_get_tr(tr);
4951 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4953 trace_array_put(tr);
4958 static const struct file_operations tracing_iter_fops = {
4959 .open = tracing_trace_options_open,
4961 .llseek = seq_lseek,
4962 .release = tracing_single_release_tr,
4963 .write = tracing_trace_options_write,
4966 static const char readme_msg[] =
4967 "tracing mini-HOWTO:\n\n"
4968 "# echo 0 > tracing_on : quick way to disable tracing\n"
4969 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4970 " Important files:\n"
4971 " trace\t\t\t- The static contents of the buffer\n"
4972 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4973 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4974 " current_tracer\t- function and latency tracers\n"
4975 " available_tracers\t- list of configured tracers for current_tracer\n"
4976 " error_log\t- error log for failed commands (that support it)\n"
4977 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4978 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4979 " trace_clock\t\t-change the clock used to order events\n"
4980 " local: Per cpu clock but may not be synced across CPUs\n"
4981 " global: Synced across CPUs but slows tracing down.\n"
4982 " counter: Not a clock, but just an increment\n"
4983 " uptime: Jiffy counter from time of boot\n"
4984 " perf: Same clock that perf events use\n"
4985 #ifdef CONFIG_X86_64
4986 " x86-tsc: TSC cycle counter\n"
4988 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4989 " delta: Delta difference against a buffer-wide timestamp\n"
4990 " absolute: Absolute (standalone) timestamp\n"
4991 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4992 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4993 " tracing_cpumask\t- Limit which CPUs to trace\n"
4994 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4995 "\t\t\t Remove sub-buffer with rmdir\n"
4996 " trace_options\t\t- Set format or modify how tracing happens\n"
4997 "\t\t\t Disable an option by prefixing 'no' to the\n"
4998 "\t\t\t option name\n"
4999 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5000 #ifdef CONFIG_DYNAMIC_FTRACE
5001 "\n available_filter_functions - list of functions that can be filtered on\n"
5002 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5003 "\t\t\t functions\n"
5004 "\t accepts: func_full_name or glob-matching-pattern\n"
5005 "\t modules: Can select a group via module\n"
5006 "\t Format: :mod:<module-name>\n"
5007 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5008 "\t triggers: a command to perform when function is hit\n"
5009 "\t Format: <function>:<trigger>[:count]\n"
5010 "\t trigger: traceon, traceoff\n"
5011 "\t\t enable_event:<system>:<event>\n"
5012 "\t\t disable_event:<system>:<event>\n"
5013 #ifdef CONFIG_STACKTRACE
5016 #ifdef CONFIG_TRACER_SNAPSHOT
5021 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5022 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5023 "\t The first one will disable tracing every time do_fault is hit\n"
5024 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5025 "\t The first time do trap is hit and it disables tracing, the\n"
5026 "\t counter will decrement to 2. If tracing is already disabled,\n"
5027 "\t the counter will not decrement. It only decrements when the\n"
5028 "\t trigger did work\n"
5029 "\t To remove trigger without count:\n"
5030 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5031 "\t To remove trigger with a count:\n"
5032 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5033 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5034 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5035 "\t modules: Can select a group via module command :mod:\n"
5036 "\t Does not accept triggers\n"
5037 #endif /* CONFIG_DYNAMIC_FTRACE */
5038 #ifdef CONFIG_FUNCTION_TRACER
5039 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5041 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5044 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5045 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5046 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5047 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5049 #ifdef CONFIG_TRACER_SNAPSHOT
5050 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5051 "\t\t\t snapshot buffer. Read the contents for more\n"
5052 "\t\t\t information\n"
5054 #ifdef CONFIG_STACK_TRACER
5055 " stack_trace\t\t- Shows the max stack trace when active\n"
5056 " stack_max_size\t- Shows current max stack size that was traced\n"
5057 "\t\t\t Write into this file to reset the max size (trigger a\n"
5058 "\t\t\t new trace)\n"
5059 #ifdef CONFIG_DYNAMIC_FTRACE
5060 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5063 #endif /* CONFIG_STACK_TRACER */
5064 #ifdef CONFIG_DYNAMIC_EVENTS
5065 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5066 "\t\t\t Write into this file to define/undefine new trace events.\n"
5068 #ifdef CONFIG_KPROBE_EVENTS
5069 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5070 "\t\t\t Write into this file to define/undefine new trace events.\n"
5072 #ifdef CONFIG_UPROBE_EVENTS
5073 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5074 "\t\t\t Write into this file to define/undefine new trace events.\n"
5076 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5077 "\t accepts: event-definitions (one definition per line)\n"
5078 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5079 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5080 #ifdef CONFIG_HIST_TRIGGERS
5081 "\t s:[synthetic/]<event> <field> [<field>]\n"
5083 "\t -:[<group>/]<event>\n"
5084 #ifdef CONFIG_KPROBE_EVENTS
5085 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5086 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5088 #ifdef CONFIG_UPROBE_EVENTS
5089 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
5091 "\t args: <name>=fetcharg[:type]\n"
5092 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5093 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5094 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5096 "\t $stack<index>, $stack, $retval, $comm,\n"
5098 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5099 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5100 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5101 "\t <type>\\[<array-size>\\]\n"
5102 #ifdef CONFIG_HIST_TRIGGERS
5103 "\t field: <stype> <name>;\n"
5104 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5105 "\t [unsigned] char/int/long\n"
5108 " events/\t\t- Directory containing all trace event subsystems:\n"
5109 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5110 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5111 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5113 " filter\t\t- If set, only events passing filter are traced\n"
5114 " events/<system>/<event>/\t- Directory containing control files for\n"
5116 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5117 " filter\t\t- If set, only events passing filter are traced\n"
5118 " trigger\t\t- If set, a command to perform when event is hit\n"
5119 "\t Format: <trigger>[:count][if <filter>]\n"
5120 "\t trigger: traceon, traceoff\n"
5121 "\t enable_event:<system>:<event>\n"
5122 "\t disable_event:<system>:<event>\n"
5123 #ifdef CONFIG_HIST_TRIGGERS
5124 "\t enable_hist:<system>:<event>\n"
5125 "\t disable_hist:<system>:<event>\n"
5127 #ifdef CONFIG_STACKTRACE
5130 #ifdef CONFIG_TRACER_SNAPSHOT
5133 #ifdef CONFIG_HIST_TRIGGERS
5134 "\t\t hist (see below)\n"
5136 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5137 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5138 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5139 "\t events/block/block_unplug/trigger\n"
5140 "\t The first disables tracing every time block_unplug is hit.\n"
5141 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5142 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5143 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5144 "\t Like function triggers, the counter is only decremented if it\n"
5145 "\t enabled or disabled tracing.\n"
5146 "\t To remove a trigger without a count:\n"
5147 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5148 "\t To remove a trigger with a count:\n"
5149 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5150 "\t Filters can be ignored when removing a trigger.\n"
5151 #ifdef CONFIG_HIST_TRIGGERS
5152 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5153 "\t Format: hist:keys=<field1[,field2,...]>\n"
5154 "\t [:values=<field1[,field2,...]>]\n"
5155 "\t [:sort=<field1[,field2,...]>]\n"
5156 "\t [:size=#entries]\n"
5157 "\t [:pause][:continue][:clear]\n"
5158 "\t [:name=histname1]\n"
5159 "\t [:<handler>.<action>]\n"
5160 "\t [if <filter>]\n\n"
5161 "\t When a matching event is hit, an entry is added to a hash\n"
5162 "\t table using the key(s) and value(s) named, and the value of a\n"
5163 "\t sum called 'hitcount' is incremented. Keys and values\n"
5164 "\t correspond to fields in the event's format description. Keys\n"
5165 "\t can be any field, or the special string 'stacktrace'.\n"
5166 "\t Compound keys consisting of up to two fields can be specified\n"
5167 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5168 "\t fields. Sort keys consisting of up to two fields can be\n"
5169 "\t specified using the 'sort' keyword. The sort direction can\n"
5170 "\t be modified by appending '.descending' or '.ascending' to a\n"
5171 "\t sort field. The 'size' parameter can be used to specify more\n"
5172 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5173 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5174 "\t its histogram data will be shared with other triggers of the\n"
5175 "\t same name, and trigger hits will update this common data.\n\n"
5176 "\t Reading the 'hist' file for the event will dump the hash\n"
5177 "\t table in its entirety to stdout. If there are multiple hist\n"
5178 "\t triggers attached to an event, there will be a table for each\n"
5179 "\t trigger in the output. The table displayed for a named\n"
5180 "\t trigger will be the same as any other instance having the\n"
5181 "\t same name. The default format used to display a given field\n"
5182 "\t can be modified by appending any of the following modifiers\n"
5183 "\t to the field name, as applicable:\n\n"
5184 "\t .hex display a number as a hex value\n"
5185 "\t .sym display an address as a symbol\n"
5186 "\t .sym-offset display an address as a symbol and offset\n"
5187 "\t .execname display a common_pid as a program name\n"
5188 "\t .syscall display a syscall id as a syscall name\n"
5189 "\t .log2 display log2 value rather than raw number\n"
5190 "\t .usecs display a common_timestamp in microseconds\n\n"
5191 "\t The 'pause' parameter can be used to pause an existing hist\n"
5192 "\t trigger or to start a hist trigger but not log any events\n"
5193 "\t until told to do so. 'continue' can be used to start or\n"
5194 "\t restart a paused hist trigger.\n\n"
5195 "\t The 'clear' parameter will clear the contents of a running\n"
5196 "\t hist trigger and leave its current paused/active state\n"
5198 "\t The enable_hist and disable_hist triggers can be used to\n"
5199 "\t have one event conditionally start and stop another event's\n"
5200 "\t already-attached hist trigger. The syntax is analogous to\n"
5201 "\t the enable_event and disable_event triggers.\n\n"
5202 "\t Hist trigger handlers and actions are executed whenever a\n"
5203 "\t a histogram entry is added or updated. They take the form:\n\n"
5204 "\t <handler>.<action>\n\n"
5205 "\t The available handlers are:\n\n"
5206 "\t onmatch(matching.event) - invoke on addition or update\n"
5207 "\t onmax(var) - invoke if var exceeds current max\n"
5208 "\t onchange(var) - invoke action if var changes\n\n"
5209 "\t The available actions are:\n\n"
5210 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5211 "\t save(field,...) - save current event fields\n"
5212 #ifdef CONFIG_TRACER_SNAPSHOT
5213 "\t snapshot() - snapshot the trace buffer\n"
5219 tracing_readme_read(struct file *filp, char __user *ubuf,
5220 size_t cnt, loff_t *ppos)
5222 return simple_read_from_buffer(ubuf, cnt, ppos,
5223 readme_msg, strlen(readme_msg));
5226 static const struct file_operations tracing_readme_fops = {
5227 .open = tracing_open_generic,
5228 .read = tracing_readme_read,
5229 .llseek = generic_file_llseek,
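/*
 * Illustrative user-space sketch (not part of the original file) of the basic
 * flow the mini-HOWTO above describes: enable tracing via tracing_on, then
 * read the trace file. The mount point /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           char buf[4096];
 *           ssize_t n;
 *           int fd;
 *
 *           fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);
 *           if (fd < 0 || write(fd, "1", 1) != 1)
 *                   return 1;
 *           close(fd);
 *
 *           fd = open("/sys/kernel/tracing/trace", O_RDONLY);
 *           if (fd < 0)
 *                   return 1;
 *           while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                   fwrite(buf, 1, n, stdout);      // static buffer contents
 *           close(fd);
 *           return 0;
 *   }
 */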
5232 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5236 if (*pos || m->count)
5241 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5242 if (trace_find_tgid(*ptr))
5249 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5259 v = saved_tgids_next(m, v, &l);
5267 static void saved_tgids_stop(struct seq_file *m, void *v)
5271 static int saved_tgids_show(struct seq_file *m, void *v)
5273 int pid = (int *)v - tgid_map;
5275 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5279 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5280 .start = saved_tgids_start,
5281 .stop = saved_tgids_stop,
5282 .next = saved_tgids_next,
5283 .show = saved_tgids_show,
5286 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5290 ret = tracing_check_open_get_tr(NULL);
5294 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5298 static const struct file_operations tracing_saved_tgids_fops = {
5299 .open = tracing_saved_tgids_open,
5301 .llseek = seq_lseek,
5302 .release = seq_release,
5305 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5307 unsigned int *ptr = v;
5309 if (*pos || m->count)
5314 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5316 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5325 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5331 arch_spin_lock(&trace_cmdline_lock);
5333 v = &savedcmd->map_cmdline_to_pid[0];
5335 v = saved_cmdlines_next(m, v, &l);
5343 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5345 arch_spin_unlock(&trace_cmdline_lock);
5349 static int saved_cmdlines_show(struct seq_file *m, void *v)
5351 char buf[TASK_COMM_LEN];
5352 unsigned int *pid = v;
5354 __trace_find_cmdline(*pid, buf);
5355 seq_printf(m, "%d %s\n", *pid, buf);
5359 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5360 .start = saved_cmdlines_start,
5361 .next = saved_cmdlines_next,
5362 .stop = saved_cmdlines_stop,
5363 .show = saved_cmdlines_show,
5366 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5370 ret = tracing_check_open_get_tr(NULL);
5374 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5377 static const struct file_operations tracing_saved_cmdlines_fops = {
5378 .open = tracing_saved_cmdlines_open,
5380 .llseek = seq_lseek,
5381 .release = seq_release,
5385 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5386 size_t cnt, loff_t *ppos)
5391 arch_spin_lock(&trace_cmdline_lock);
5392 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5393 arch_spin_unlock(&trace_cmdline_lock);
5395 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5398 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5400 kfree(s->saved_cmdlines);
5401 kfree(s->map_cmdline_to_pid);
5405 static int tracing_resize_saved_cmdlines(unsigned int val)
5407 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5409 s = kmalloc(sizeof(*s), GFP_KERNEL);
5413 if (allocate_cmdlines_buffer(val, s) < 0) {
5418 arch_spin_lock(&trace_cmdline_lock);
5419 savedcmd_temp = savedcmd;
5421 arch_spin_unlock(&trace_cmdline_lock);
5422 free_saved_cmdlines_buffer(savedcmd_temp);
5428 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5429 size_t cnt, loff_t *ppos)
5434 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5438 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5439 if (!val || val > PID_MAX_DEFAULT)
5442 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5451 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5452 .open = tracing_open_generic,
5453 .read = tracing_saved_cmdlines_size_read,
5454 .write = tracing_saved_cmdlines_size_write,
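/*
 * Illustrative user-space sketch (not part of the original file): writing a
 * count to saved_cmdlines_size reaches tracing_saved_cmdlines_size_write()
 * above and resizes the cached comm-pid list, which can then be read back
 * from saved_cmdlines. The mount point /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   static void dump_saved_cmdlines(void)
 *   {
 *           char buf[4096];
 *           ssize_t n;
 *           int fd;
 *
 *           fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);
 *           if (fd >= 0) {
 *                   write(fd, "1024", 4);           // keep up to 1024 comms
 *                   close(fd);
 *           }
 *
 *           fd = open("/sys/kernel/tracing/saved_cmdlines", O_RDONLY);
 *           if (fd < 0)
 *                   return;
 *           while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                   fwrite(buf, 1, n, stdout);      // "<pid> <comm>" lines
 *           close(fd);
 *   }
 */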
5457 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5458 static union trace_eval_map_item *
5459 update_eval_map(union trace_eval_map_item *ptr)
5461 if (!ptr->map.eval_string) {
5462 if (ptr->tail.next) {
5463 ptr = ptr->tail.next;
5464 /* Set ptr to the next real item (skip head) */
5472 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5474 union trace_eval_map_item *ptr = v;
5477 * Paranoid! If ptr points to end, we don't want to increment past it.
5478 * This really should never happen.
5481 ptr = update_eval_map(ptr);
5482 if (WARN_ON_ONCE(!ptr))
5486 ptr = update_eval_map(ptr);
5491 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5493 union trace_eval_map_item *v;
5496 mutex_lock(&trace_eval_mutex);
5498 v = trace_eval_maps;
5502 while (v && l < *pos) {
5503 v = eval_map_next(m, v, &l);
5509 static void eval_map_stop(struct seq_file *m, void *v)
5511 mutex_unlock(&trace_eval_mutex);
5514 static int eval_map_show(struct seq_file *m, void *v)
5516 union trace_eval_map_item *ptr = v;
5518 seq_printf(m, "%s %ld (%s)\n",
5519 ptr->map.eval_string, ptr->map.eval_value,
5525 static const struct seq_operations tracing_eval_map_seq_ops = {
5526 .start = eval_map_start,
5527 .next = eval_map_next,
5528 .stop = eval_map_stop,
5529 .show = eval_map_show,
5532 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5536 ret = tracing_check_open_get_tr(NULL);
5540 return seq_open(filp, &tracing_eval_map_seq_ops);
5543 static const struct file_operations tracing_eval_map_fops = {
5544 .open = tracing_eval_map_open,
5546 .llseek = seq_lseek,
5547 .release = seq_release,
5550 static inline union trace_eval_map_item *
5551 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5553 /* Return tail of array given the head */
5554 return ptr + ptr->head.length + 1;
5558 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5561 struct trace_eval_map **stop;
5562 struct trace_eval_map **map;
5563 union trace_eval_map_item *map_array;
5564 union trace_eval_map_item *ptr;
5569 * The trace_eval_maps contains the map plus a head and tail item,
5570 * where the head holds the module and length of array, and the
5571 * tail holds a pointer to the next list.
5573 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5575 pr_warn("Unable to allocate trace eval mapping\n");
5579 mutex_lock(&trace_eval_mutex);
5581 if (!trace_eval_maps)
5582 trace_eval_maps = map_array;
5584 ptr = trace_eval_maps;
5586 ptr = trace_eval_jmp_to_tail(ptr);
5587 if (!ptr->tail.next)
5589 ptr = ptr->tail.next;
5592 ptr->tail.next = map_array;
5594 map_array->head.mod = mod;
5595 map_array->head.length = len;
5598 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5599 map_array->map = **map;
5602 memset(map_array, 0, sizeof(*map_array));
5604 mutex_unlock(&trace_eval_mutex);
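/*
 * Descriptive note (added for clarity): after trace_insert_eval_map_file()
 * each module's allocation is laid out as
 *
 *   [ head (mod, length) ][ map 0 ] ... [ map len - 1 ][ tail (next) ]
 *
 * and trace_eval_jmp_to_tail() skips the head plus "length" map entries to
 * reach the tail, whose ->tail.next chains in the next module's array.
 */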
5607 static void trace_create_eval_file(struct dentry *d_tracer)
5609 trace_create_file("eval_map", 0444, d_tracer,
5610 NULL, &tracing_eval_map_fops);
5613 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5614 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5615 static inline void trace_insert_eval_map_file(struct module *mod,
5616 struct trace_eval_map **start, int len) { }
5617 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5619 static void trace_insert_eval_map(struct module *mod,
5620 struct trace_eval_map **start, int len)
5622 struct trace_eval_map **map;
5629 trace_event_eval_update(map, len);
5631 trace_insert_eval_map_file(mod, start, len);
5635 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5636 size_t cnt, loff_t *ppos)
5638 struct trace_array *tr = filp->private_data;
5639 char buf[MAX_TRACER_SIZE+2];
5642 mutex_lock(&trace_types_lock);
5643 r = sprintf(buf, "%s\n", tr->current_trace->name);
5644 mutex_unlock(&trace_types_lock);
5646 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5649 int tracer_init(struct tracer *t, struct trace_array *tr)
5651 tracing_reset_online_cpus(&tr->array_buffer);
5655 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5659 for_each_tracing_cpu(cpu)
5660 per_cpu_ptr(buf->data, cpu)->entries = val;
5663 #ifdef CONFIG_TRACER_MAX_TRACE
5664 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5665 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5666 struct array_buffer *size_buf, int cpu_id)
5670 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5671 for_each_tracing_cpu(cpu) {
5672 ret = ring_buffer_resize(trace_buf->buffer,
5673 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5676 per_cpu_ptr(trace_buf->data, cpu)->entries =
5677 per_cpu_ptr(size_buf->data, cpu)->entries;
5680 ret = ring_buffer_resize(trace_buf->buffer,
5681 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5683 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5684 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5689 #endif /* CONFIG_TRACER_MAX_TRACE */
5691 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5692 unsigned long size, int cpu)
5697 * If kernel or user changes the size of the ring buffer
5698 * we use the size that was given, and we can forget about
5699 * expanding it later.
5701 ring_buffer_expanded = true;
5703 /* May be called before buffers are initialized */
5704 if (!tr->array_buffer.buffer)
5707 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5711 #ifdef CONFIG_TRACER_MAX_TRACE
5712 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5713 !tr->current_trace->use_max_tr)
5716 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5718 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5719 &tr->array_buffer, cpu);
5722 * AARGH! We are left with a max buffer of a
5723 * different size!
5724 * The max buffer is our "snapshot" buffer.
5725 * When a tracer needs a snapshot (one of the
5726 * latency tracers), it swaps the max buffer
5727 * with the saved snapshot. We succeeded in updating
5728 * the size of the main buffer, but failed to
5729 * update the size of the max buffer. But when we tried
5730 * to reset the main buffer to the original size, we
5731 * failed there too. This is very unlikely to
5732 * happen, but if it does, warn and kill all
5736 tracing_disabled = 1;
5741 if (cpu == RING_BUFFER_ALL_CPUS)
5742 set_buffer_entries(&tr->max_buffer, size);
5744 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5747 #endif /* CONFIG_TRACER_MAX_TRACE */
5749 if (cpu == RING_BUFFER_ALL_CPUS)
5750 set_buffer_entries(&tr->array_buffer, size);
5752 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5757 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5758 unsigned long size, int cpu_id)
5762 mutex_lock(&trace_types_lock);
5764 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5765 /* make sure this CPU is enabled in the mask */
5766 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5772 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5777 mutex_unlock(&trace_types_lock);
5784 * tracing_update_buffers - used by tracing facility to expand ring buffers
5786 * To save memory when tracing is never used on a system that has it
5787 * configured in, the ring buffers are set to a minimum size. Once a
5788 * user starts to use the tracing facility, they need to grow to
5789 * their default size.
5791 * This function is to be called when a tracer is about to be used.
5793 int tracing_update_buffers(void)
5797 mutex_lock(&trace_types_lock);
5798 if (!ring_buffer_expanded)
5799 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5800 RING_BUFFER_ALL_CPUS);
5801 mutex_unlock(&trace_types_lock);
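/*
 * Illustrative user-space sketch (not part of the original file): besides the
 * implicit expansion done by tracing_update_buffers(), the buffers can be
 * sized explicitly by writing a per-CPU size in KB to buffer_size_kb, which
 * reaches tracing_resize_ring_buffer() above. The mount point
 * /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static int set_buffer_size_kb(const char *kb)
 *   {
 *           int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *           if (fd < 0)
 *                   return -1;
 *           if (write(fd, kb, strlen(kb)) < 0) {    // e.g. "4096"
 *                   close(fd);
 *                   return -1;
 *           }
 *           return close(fd);
 *   }
 */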
5806 struct trace_option_dentry;
5809 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5812 * Used to clear out the tracer before deletion of an instance.
5813 * Must have trace_types_lock held.
5815 static void tracing_set_nop(struct trace_array *tr)
5817 if (tr->current_trace == &nop_trace)
5820 tr->current_trace->enabled--;
5822 if (tr->current_trace->reset)
5823 tr->current_trace->reset(tr);
5825 tr->current_trace = &nop_trace;
5828 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5830 /* Only enable if the directory has been created already. */
5834 create_trace_option_files(tr, t);
5837 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5840 #ifdef CONFIG_TRACER_MAX_TRACE
5845 mutex_lock(&trace_types_lock);
5847 if (!ring_buffer_expanded) {
5848 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5849 RING_BUFFER_ALL_CPUS);
5855 for (t = trace_types; t; t = t->next) {
5856 if (strcmp(t->name, buf) == 0)
5863 if (t == tr->current_trace)
5866 #ifdef CONFIG_TRACER_SNAPSHOT
5867 if (t->use_max_tr) {
5868 arch_spin_lock(&tr->max_lock);
5869 if (tr->cond_snapshot)
5871 arch_spin_unlock(&tr->max_lock);
5876 /* Some tracers won't work from the kernel command line */
5877 if (system_state < SYSTEM_RUNNING && t->noboot) {
5878 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5883 /* Some tracers are only allowed for the top level buffer */
5884 if (!trace_ok_for_array(t, tr)) {
5889 /* If trace pipe files are being read, we can't change the tracer */
5890 if (tr->current_trace->ref) {
5895 trace_branch_disable();
5897 tr->current_trace->enabled--;
5899 if (tr->current_trace->reset)
5900 tr->current_trace->reset(tr);
5902 /* Current trace needs to be nop_trace before synchronize_rcu */
5903 tr->current_trace = &nop_trace;
5905 #ifdef CONFIG_TRACER_MAX_TRACE
5906 had_max_tr = tr->allocated_snapshot;
5908 if (had_max_tr && !t->use_max_tr) {
5910 * We need to make sure that the update_max_tr sees that
5911 * current_trace changed to nop_trace to keep it from
5912 * swapping the buffers after we resize it.
5913 * The update_max_tr() is called with interrupts disabled,
5914 * so a synchronize_rcu() is sufficient.
5921 #ifdef CONFIG_TRACER_MAX_TRACE
5922 if (t->use_max_tr && !had_max_tr) {
5923 ret = tracing_alloc_snapshot_instance(tr);
5930 ret = tracer_init(t, tr);
5935 tr->current_trace = t;
5936 tr->current_trace->enabled++;
5937 trace_branch_enable(tr);
5939 mutex_unlock(&trace_types_lock);
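/*
 * Illustrative user-space sketch (not part of the original file): writing a
 * name from available_tracers into current_tracer lands in
 * tracing_set_tracer() above. "function" is only an example tracer name.
 * The mount point /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static int select_tracer(const char *name)
 *   {
 *           int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *           int ret;
 *
 *           if (fd < 0)
 *                   return -1;
 *           ret = write(fd, name, strlen(name)) < 0 ? -1 : 0;
 *           close(fd);
 *           return ret;                     // e.g. select_tracer("function")
 *   }
 */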
5945 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5946 size_t cnt, loff_t *ppos)
5948 struct trace_array *tr = filp->private_data;
5949 char buf[MAX_TRACER_SIZE+1];
5956 if (cnt > MAX_TRACER_SIZE)
5957 cnt = MAX_TRACER_SIZE;
5959 if (copy_from_user(buf, ubuf, cnt))
5964 /* strip ending whitespace. */
5965 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5968 err = tracing_set_tracer(tr, buf);
5978 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5979 size_t cnt, loff_t *ppos)
5984 r = snprintf(buf, sizeof(buf), "%ld\n",
5985 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5986 if (r > sizeof(buf))
5988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5992 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5993 size_t cnt, loff_t *ppos)
5998 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6008 tracing_thresh_read(struct file *filp, char __user *ubuf,
6009 size_t cnt, loff_t *ppos)
6011 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6015 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6016 size_t cnt, loff_t *ppos)
6018 struct trace_array *tr = filp->private_data;
6021 mutex_lock(&trace_types_lock);
6022 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6026 if (tr->current_trace->update_thresh) {
6027 ret = tr->current_trace->update_thresh(tr);
6034 mutex_unlock(&trace_types_lock);
6039 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6042 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6043 size_t cnt, loff_t *ppos)
6045 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6049 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6050 size_t cnt, loff_t *ppos)
6052 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6057 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6059 struct trace_array *tr = inode->i_private;
6060 struct trace_iterator *iter;
6063 ret = tracing_check_open_get_tr(tr);
6067 mutex_lock(&trace_types_lock);
6069 /* create a buffer to store the information to pass to userspace */
6070 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6073 __trace_array_put(tr);
6077 trace_seq_init(&iter->seq);
6078 iter->trace = tr->current_trace;
6080 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6085 /* trace pipe does not show start of buffer */
6086 cpumask_setall(iter->started);
6088 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6089 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6091 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6092 if (trace_clocks[tr->clock_id].in_ns)
6093 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6096 iter->array_buffer = &tr->array_buffer;
6097 iter->cpu_file = tracing_get_cpu(inode);
6098 mutex_init(&iter->mutex);
6099 filp->private_data = iter;
6101 if (iter->trace->pipe_open)
6102 iter->trace->pipe_open(iter);
6104 nonseekable_open(inode, filp);
6106 tr->current_trace->ref++;
6108 mutex_unlock(&trace_types_lock);
6113 __trace_array_put(tr);
6114 mutex_unlock(&trace_types_lock);
6118 static int tracing_release_pipe(struct inode *inode, struct file *file)
6120 struct trace_iterator *iter = file->private_data;
6121 struct trace_array *tr = inode->i_private;
6123 mutex_lock(&trace_types_lock);
6125 tr->current_trace->ref--;
6127 if (iter->trace->pipe_close)
6128 iter->trace->pipe_close(iter);
6130 mutex_unlock(&trace_types_lock);
6132 free_cpumask_var(iter->started);
6133 mutex_destroy(&iter->mutex);
6136 trace_array_put(tr);
6142 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6144 struct trace_array *tr = iter->tr;
6146 /* Iterators are static, they should be filled or empty */
6147 if (trace_buffer_iter(iter, iter->cpu_file))
6148 return EPOLLIN | EPOLLRDNORM;
6150 if (tr->trace_flags & TRACE_ITER_BLOCK)
6152 * Always select as readable when in blocking mode
6154 return EPOLLIN | EPOLLRDNORM;
6156 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6161 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6163 struct trace_iterator *iter = filp->private_data;
6165 return trace_poll(iter, filp, poll_table);
6168 /* Must be called with iter->mutex held. */
6169 static int tracing_wait_pipe(struct file *filp)
6171 struct trace_iterator *iter = filp->private_data;
6174 while (trace_empty(iter)) {
6176 if ((filp->f_flags & O_NONBLOCK)) {
6181 * We block until we read something and tracing is disabled.
6182 * We still block if tracing is disabled but we have not yet
6183 * read anything. This allows a user to cat this file and
6184 * then enable tracing. Once we have read something, we give
6185 * an EOF when tracing is disabled again.
6187 * iter->pos will be 0 if we haven't read anything.
6189 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6192 mutex_unlock(&iter->mutex);
6194 ret = wait_on_pipe(iter, 0);
6196 mutex_lock(&iter->mutex);
6209 tracing_read_pipe(struct file *filp, char __user *ubuf,
6210 size_t cnt, loff_t *ppos)
6212 struct trace_iterator *iter = filp->private_data;
6216 * Avoid more than one consumer on a single file descriptor.
6217 * This is just a matter of trace coherency; the ring buffer itself
6220 mutex_lock(&iter->mutex);
6222 /* return any leftover data */
6223 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6227 trace_seq_init(&iter->seq);
6229 if (iter->trace->read) {
6230 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6236 sret = tracing_wait_pipe(filp);
6240 /* stop when tracing is finished */
6241 if (trace_empty(iter)) {
6246 if (cnt >= PAGE_SIZE)
6247 cnt = PAGE_SIZE - 1;
6249 /* reset all but tr, trace, and overruns */
6250 memset(&iter->seq, 0,
6251 sizeof(struct trace_iterator) -
6252 offsetof(struct trace_iterator, seq));
6253 cpumask_clear(iter->started);
6254 trace_seq_init(&iter->seq);
6257 trace_event_read_lock();
6258 trace_access_lock(iter->cpu_file);
6259 while (trace_find_next_entry_inc(iter) != NULL) {
6260 enum print_line_t ret;
6261 int save_len = iter->seq.seq.len;
6263 ret = print_trace_line(iter);
6264 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6265 /* don't print partial lines */
6266 iter->seq.seq.len = save_len;
6269 if (ret != TRACE_TYPE_NO_CONSUME)
6270 trace_consume(iter);
6272 if (trace_seq_used(&iter->seq) >= cnt)
6276 * Setting the full flag means we reached the trace_seq buffer
6277 * size and should have left via the partial-output condition above.
6278 * One of the trace_seq_* functions is not used properly.
6280 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6283 trace_access_unlock(iter->cpu_file);
6284 trace_event_read_unlock();
6286 /* Now copy what we have to the user */
6287 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6288 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6289 trace_seq_init(&iter->seq);
6292 * If there was nothing to send to user, in spite of consuming trace
6293 * entries, go back to wait for more entries.
6299 mutex_unlock(&iter->mutex);
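/*
 * Illustrative user-space sketch (not part of the original file): trace_pipe
 * is a consuming read handled by tracing_read_pipe() above, so this loop
 * drains events as they arrive and blocks when the buffer is empty (or would
 * return EAGAIN with O_NONBLOCK). The mount point /sys/kernel/tracing is an
 * assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   static void drain_trace_pipe(void)
 *   {
 *           char buf[4096];
 *           ssize_t n;
 *           int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *           if (fd < 0)
 *                   return;
 *           while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                   fwrite(buf, 1, n, stdout);      // consumed, not re-readable
 *           close(fd);
 *   }
 */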
6304 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6307 __free_page(spd->pages[idx]);
6311 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6317 /* Seq buffer is page-sized, exactly what we need. */
6319 save_len = iter->seq.seq.len;
6320 ret = print_trace_line(iter);
6322 if (trace_seq_has_overflowed(&iter->seq)) {
6323 iter->seq.seq.len = save_len;
6328 * This should not be hit, because it should only
6329 * be set if the iter->seq overflowed. But check it
6330 * anyway to be safe.
6332 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6333 iter->seq.seq.len = save_len;
6337 count = trace_seq_used(&iter->seq) - save_len;
6340 iter->seq.seq.len = save_len;
6344 if (ret != TRACE_TYPE_NO_CONSUME)
6345 trace_consume(iter);
6347 if (!trace_find_next_entry_inc(iter)) {
6357 static ssize_t tracing_splice_read_pipe(struct file *filp,
6359 struct pipe_inode_info *pipe,
6363 struct page *pages_def[PIPE_DEF_BUFFERS];
6364 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6365 struct trace_iterator *iter = filp->private_data;
6366 struct splice_pipe_desc spd = {
6368 .partial = partial_def,
6369 .nr_pages = 0, /* This gets updated below. */
6370 .nr_pages_max = PIPE_DEF_BUFFERS,
6371 .ops = &default_pipe_buf_ops,
6372 .spd_release = tracing_spd_release_pipe,
6378 if (splice_grow_spd(pipe, &spd))
6381 mutex_lock(&iter->mutex);
6383 if (iter->trace->splice_read) {
6384 ret = iter->trace->splice_read(iter, filp,
6385 ppos, pipe, len, flags);
6390 ret = tracing_wait_pipe(filp);
6394 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6399 trace_event_read_lock();
6400 trace_access_lock(iter->cpu_file);
6402 /* Fill as many pages as possible. */
6403 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6404 spd.pages[i] = alloc_page(GFP_KERNEL);
6408 rem = tracing_fill_pipe_page(rem, iter);
6410 /* Copy the data into the page, so we can start over. */
6411 ret = trace_seq_to_buffer(&iter->seq,
6412 page_address(spd.pages[i]),
6413 trace_seq_used(&iter->seq));
6415 __free_page(spd.pages[i]);
6418 spd.partial[i].offset = 0;
6419 spd.partial[i].len = trace_seq_used(&iter->seq);
6421 trace_seq_init(&iter->seq);
6424 trace_access_unlock(iter->cpu_file);
6425 trace_event_read_unlock();
6426 mutex_unlock(&iter->mutex);
6431 ret = splice_to_pipe(pipe, &spd);
6435 splice_shrink_spd(&spd);
6439 mutex_unlock(&iter->mutex);
6444 tracing_entries_read(struct file *filp, char __user *ubuf,
6445 size_t cnt, loff_t *ppos)
6447 struct inode *inode = file_inode(filp);
6448 struct trace_array *tr = inode->i_private;
6449 int cpu = tracing_get_cpu(inode);
6454 mutex_lock(&trace_types_lock);
6456 if (cpu == RING_BUFFER_ALL_CPUS) {
6457 int cpu, buf_size_same;
6462 /* check if all per-CPU sizes are the same */
6463 for_each_tracing_cpu(cpu) {
6464 /* fill in the size from the first enabled CPU */
6466 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6467 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6473 if (buf_size_same) {
6474 if (!ring_buffer_expanded)
6475 r = sprintf(buf, "%lu (expanded: %lu)\n",
6477 trace_buf_size >> 10);
6479 r = sprintf(buf, "%lu\n", size >> 10);
6481 r = sprintf(buf, "X\n");
6483 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6485 mutex_unlock(&trace_types_lock);
6487 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6492 tracing_entries_write(struct file *filp, const char __user *ubuf,
6493 size_t cnt, loff_t *ppos)
6495 struct inode *inode = file_inode(filp);
6496 struct trace_array *tr = inode->i_private;
6500 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6504 /* must have at least 1 entry */
6508 /* value is in KB */
6510 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6520 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6521 size_t cnt, loff_t *ppos)
6523 struct trace_array *tr = filp->private_data;
6526 unsigned long size = 0, expanded_size = 0;
6528 mutex_lock(&trace_types_lock);
6529 for_each_tracing_cpu(cpu) {
6530 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6531 if (!ring_buffer_expanded)
6532 expanded_size += trace_buf_size >> 10;
6534 if (ring_buffer_expanded)
6535 r = sprintf(buf, "%lu\n", size);
6537 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6538 mutex_unlock(&trace_types_lock);
6540 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6544 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6545 size_t cnt, loff_t *ppos)
6548 * There is no need to read what the user has written; this function
6549 * just makes sure that there is no error when "echo" is used
6558 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6560 struct trace_array *tr = inode->i_private;
6562 /* disable tracing ? */
6563 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6564 tracer_tracing_off(tr);
6565 /* resize the ring buffer to 0 */
6566 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6568 trace_array_put(tr);
6574 tracing_mark_write(struct file *filp, const char __user *ubuf,
6575 size_t cnt, loff_t *fpos)
6577 struct trace_array *tr = filp->private_data;
6578 struct ring_buffer_event *event;
6579 enum event_trigger_type tt = ETT_NONE;
6580 struct trace_buffer *buffer;
6581 struct print_entry *entry;
6582 unsigned long irq_flags;
6587 /* Used in tracing_mark_raw_write() as well */
6588 #define FAULTED_STR "<faulted>"
6589 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6591 if (tracing_disabled)
6594 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6597 if (cnt > TRACE_BUF_SIZE)
6598 cnt = TRACE_BUF_SIZE;
6600 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6602 local_save_flags(irq_flags);
6603 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6605 /* If shorter than "<faulted>", make sure we can still add that string */
6606 if (cnt < FAULTED_SIZE)
6607 size += FAULTED_SIZE - cnt;
6609 buffer = tr->array_buffer.buffer;
6610 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6611 irq_flags, preempt_count());
6612 if (unlikely(!event))
6613 /* Ring buffer disabled, return as if not open for write */
6616 entry = ring_buffer_event_data(event);
6617 entry->ip = _THIS_IP_;
6619 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6621 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6628 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6629 /* do not add \n before testing triggers, but add \0 */
6630 entry->buf[cnt] = '\0';
6631 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6634 if (entry->buf[cnt - 1] != '\n') {
6635 entry->buf[cnt] = '\n';
6636 entry->buf[cnt + 1] = '\0';
6638 entry->buf[cnt] = '\0';
6640 __buffer_unlock_commit(buffer, event);
6643 event_triggers_post_call(tr->trace_marker_file, tt);
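/*
 * Illustrative user-space sketch (not part of the original file): applications
 * inject annotations into the ring buffer by writing a line to trace_marker,
 * which is handled by tracing_mark_write() above. The mount point
 * /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static void trace_mark(const char *msg)
 *   {
 *           int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *           if (fd < 0)
 *                   return;
 *           write(fd, msg, strlen(msg));            // e.g. "frame start\n"
 *           close(fd);
 *   }
 */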
6651 /* Limit it for now to 3K (including tag) */
6652 #define RAW_DATA_MAX_SIZE (1024*3)
6655 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6656 size_t cnt, loff_t *fpos)
6658 struct trace_array *tr = filp->private_data;
6659 struct ring_buffer_event *event;
6660 struct trace_buffer *buffer;
6661 struct raw_data_entry *entry;
6662 unsigned long irq_flags;
6667 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6669 if (tracing_disabled)
6672 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6675 /* The marker must at least have a tag id */
6676 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6679 if (cnt > TRACE_BUF_SIZE)
6680 cnt = TRACE_BUF_SIZE;
6682 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6684 local_save_flags(irq_flags);
6685 size = sizeof(*entry) + cnt;
6686 if (cnt < FAULT_SIZE_ID)
6687 size += FAULT_SIZE_ID - cnt;
6689 buffer = tr->array_buffer.buffer;
6690 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6691 irq_flags, preempt_count());
6693 /* Ring buffer disabled, return as if not open for write */
6696 entry = ring_buffer_event_data(event);
6698 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6701 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6706 __buffer_unlock_commit(buffer, event);
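/*
 * Illustrative user-space sketch (not part of the original file): per the size
 * check in tracing_mark_raw_write() above, a write to trace_marker_raw must
 * start with an unsigned int tag id followed by the binary payload. The tag
 * value 0x1234 is hypothetical. The mount point /sys/kernel/tracing is an
 * assumption.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static void trace_mark_raw(const void *payload, size_t len)
 *   {
 *           unsigned char buf[64];
 *           unsigned int tag = 0x1234;
 *           int fd;
 *
 *           if (len > sizeof(buf) - sizeof(tag))
 *                   return;
 *           memcpy(buf, &tag, sizeof(tag));
 *           memcpy(buf + sizeof(tag), payload, len);
 *
 *           fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *           if (fd < 0)
 *                   return;
 *           write(fd, buf, sizeof(tag) + len);
 *           close(fd);
 *   }
 */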
6714 static int tracing_clock_show(struct seq_file *m, void *v)
6716 struct trace_array *tr = m->private;
6719 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6721 "%s%s%s%s", i ? " " : "",
6722 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6723 i == tr->clock_id ? "]" : "");
6729 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6733 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6734 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6737 if (i == ARRAY_SIZE(trace_clocks))
6740 mutex_lock(&trace_types_lock);
6744 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6747 * New clock may not be consistent with the previous clock.
6748 * Reset the buffer so that it doesn't have incomparable timestamps.
6750 tracing_reset_online_cpus(&tr->array_buffer);
6752 #ifdef CONFIG_TRACER_MAX_TRACE
6753 if (tr->max_buffer.buffer)
6754 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6755 tracing_reset_online_cpus(&tr->max_buffer);
6758 mutex_unlock(&trace_types_lock);
6763 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6764 size_t cnt, loff_t *fpos)
6766 struct seq_file *m = filp->private_data;
6767 struct trace_array *tr = m->private;
6769 const char *clockstr;
6772 if (cnt >= sizeof(buf))
6775 if (copy_from_user(buf, ubuf, cnt))
6780 clockstr = strstrip(buf);
6782 ret = tracing_set_clock(tr, clockstr);
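/*
 * Illustrative user-space sketch (not part of the original file): writing one
 * of the names listed by the trace_clock file (e.g. "global" for cross-CPU
 * ordered timestamps) ends up in tracing_set_clock() above. The mount point
 * /sys/kernel/tracing is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static int set_trace_clock(const char *name)
 *   {
 *           int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *           int ret;
 *
 *           if (fd < 0)
 *                   return -1;
 *           ret = write(fd, name, strlen(name)) < 0 ? -1 : 0;
 *           close(fd);
 *           return ret;                     // e.g. set_trace_clock("global")
 *   }
 */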
6791 static int tracing_clock_open(struct inode *inode, struct file *file)
6793 struct trace_array *tr = inode->i_private;
6796 ret = tracing_check_open_get_tr(tr);
6800 ret = single_open(file, tracing_clock_show, inode->i_private);
6802 trace_array_put(tr);
6807 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6809 struct trace_array *tr = m->private;
6811 mutex_lock(&trace_types_lock);
6813 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6814 seq_puts(m, "delta [absolute]\n");
6816 seq_puts(m, "[delta] absolute\n");
6818 mutex_unlock(&trace_types_lock);
6823 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6825 struct trace_array *tr = inode->i_private;
6828 ret = tracing_check_open_get_tr(tr);
6832 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6834 trace_array_put(tr);
6839 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6843 mutex_lock(&trace_types_lock);
6845 if (abs && tr->time_stamp_abs_ref++)
6849 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6854 if (--tr->time_stamp_abs_ref)
6858 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6860 #ifdef CONFIG_TRACER_MAX_TRACE
6861 if (tr->max_buffer.buffer)
6862 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6865 mutex_unlock(&trace_types_lock);
6870 struct ftrace_buffer_info {
6871 struct trace_iterator iter;
6873 unsigned int spare_cpu;
6877 #ifdef CONFIG_TRACER_SNAPSHOT
6878 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6880 struct trace_array *tr = inode->i_private;
6881 struct trace_iterator *iter;
6885 ret = tracing_check_open_get_tr(tr);
6889 if (file->f_mode & FMODE_READ) {
6890 iter = __tracing_open(inode, file, true);
6892 ret = PTR_ERR(iter);
6894 /* Writes still need the seq_file to hold the private data */
6896 m = kzalloc(sizeof(*m), GFP_KERNEL);
6899 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6907 iter->array_buffer = &tr->max_buffer;
6908 iter->cpu_file = tracing_get_cpu(inode);
6910 file->private_data = m;
6914 trace_array_put(tr);
6920 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6923 struct seq_file *m = filp->private_data;
6924 struct trace_iterator *iter = m->private;
6925 struct trace_array *tr = iter->tr;
6929 ret = tracing_update_buffers();
6933 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6937 mutex_lock(&trace_types_lock);
6939 if (tr->current_trace->use_max_tr) {
6944 arch_spin_lock(&tr->max_lock);
6945 if (tr->cond_snapshot)
6947 arch_spin_unlock(&tr->max_lock);
6953 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6957 if (tr->allocated_snapshot)
6961 /* Only allow per-cpu swap if the ring buffer supports it */
6962 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6963 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6968 if (tr->allocated_snapshot)
6969 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6970 &tr->array_buffer, iter->cpu_file);
6972 ret = tracing_alloc_snapshot_instance(tr);
6975 local_irq_disable();
6976 /* Now, we're going to swap */
6977 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6978 update_max_tr(tr, current, smp_processor_id(), NULL);
6980 update_max_tr_single(tr, current, iter->cpu_file);
6984 if (tr->allocated_snapshot) {
6985 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6986 tracing_reset_online_cpus(&tr->max_buffer);
6988 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
6998 mutex_unlock(&trace_types_lock);
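/*
 * Illustrative user-space sketch (not part of the original file): writing "1"
 * to the snapshot file allocates the snapshot buffer if needed and swaps it
 * with the live buffer, as handled by tracing_snapshot_write() above; the
 * snapshot can then be read like the trace file. Assumes
 * CONFIG_TRACER_SNAPSHOT=y and tracefs mounted at /sys/kernel/tracing.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   static int take_snapshot(void)
 *   {
 *           int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *           int ret;
 *
 *           if (fd < 0)
 *                   return -1;
 *           ret = write(fd, "1", 1) == 1 ? 0 : -1;
 *           close(fd);
 *           return ret;
 *   }
 */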
7002 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7004 struct seq_file *m = file->private_data;
7007 ret = tracing_release(inode, file);
7009 if (file->f_mode & FMODE_READ)
7012 /* If write only, the seq_file is just a stub */
7020 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7021 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7022 size_t count, loff_t *ppos);
7023 static int tracing_buffers_release(struct inode *inode, struct file *file);
7024 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7025 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7027 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7029 struct ftrace_buffer_info *info;
7032 /* The following checks for tracefs lockdown */
7033 ret = tracing_buffers_open(inode, filp);
7037 info = filp->private_data;
7039 if (info->iter.trace->use_max_tr) {
7040 tracing_buffers_release(inode, filp);
7044 info->iter.snapshot = true;
7045 info->iter.array_buffer = &info->iter.tr->max_buffer;
7050 #endif /* CONFIG_TRACER_SNAPSHOT */
7053 static const struct file_operations tracing_thresh_fops = {
7054 .open = tracing_open_generic,
7055 .read = tracing_thresh_read,
7056 .write = tracing_thresh_write,
7057 .llseek = generic_file_llseek,
7060 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7061 static const struct file_operations tracing_max_lat_fops = {
7062 .open = tracing_open_generic,
7063 .read = tracing_max_lat_read,
7064 .write = tracing_max_lat_write,
7065 .llseek = generic_file_llseek,
7069 static const struct file_operations set_tracer_fops = {
7070 .open = tracing_open_generic,
7071 .read = tracing_set_trace_read,
7072 .write = tracing_set_trace_write,
7073 .llseek = generic_file_llseek,
7076 static const struct file_operations tracing_pipe_fops = {
7077 .open = tracing_open_pipe,
7078 .poll = tracing_poll_pipe,
7079 .read = tracing_read_pipe,
7080 .splice_read = tracing_splice_read_pipe,
7081 .release = tracing_release_pipe,
7082 .llseek = no_llseek,
7085 static const struct file_operations tracing_entries_fops = {
7086 .open = tracing_open_generic_tr,
7087 .read = tracing_entries_read,
7088 .write = tracing_entries_write,
7089 .llseek = generic_file_llseek,
7090 .release = tracing_release_generic_tr,
7093 static const struct file_operations tracing_total_entries_fops = {
7094 .open = tracing_open_generic_tr,
7095 .read = tracing_total_entries_read,
7096 .llseek = generic_file_llseek,
7097 .release = tracing_release_generic_tr,
7100 static const struct file_operations tracing_free_buffer_fops = {
7101 .open = tracing_open_generic_tr,
7102 .write = tracing_free_buffer_write,
7103 .release = tracing_free_buffer_release,
7106 static const struct file_operations tracing_mark_fops = {
7107 .open = tracing_open_generic_tr,
7108 .write = tracing_mark_write,
7109 .llseek = generic_file_llseek,
7110 .release = tracing_release_generic_tr,
7113 static const struct file_operations tracing_mark_raw_fops = {
7114 .open = tracing_open_generic_tr,
7115 .write = tracing_mark_raw_write,
7116 .llseek = generic_file_llseek,
7117 .release = tracing_release_generic_tr,
7120 static const struct file_operations trace_clock_fops = {
7121 .open = tracing_clock_open,
7123 .llseek = seq_lseek,
7124 .release = tracing_single_release_tr,
7125 .write = tracing_clock_write,
7128 static const struct file_operations trace_time_stamp_mode_fops = {
7129 .open = tracing_time_stamp_mode_open,
7131 .llseek = seq_lseek,
7132 .release = tracing_single_release_tr,
7135 #ifdef CONFIG_TRACER_SNAPSHOT
7136 static const struct file_operations snapshot_fops = {
7137 .open = tracing_snapshot_open,
7139 .write = tracing_snapshot_write,
7140 .llseek = tracing_lseek,
7141 .release = tracing_snapshot_release,
7144 static const struct file_operations snapshot_raw_fops = {
7145 .open = snapshot_raw_open,
7146 .read = tracing_buffers_read,
7147 .release = tracing_buffers_release,
7148 .splice_read = tracing_buffers_splice_read,
7149 .llseek = no_llseek,
7152 #endif /* CONFIG_TRACER_SNAPSHOT */
7154 #define TRACING_LOG_ERRS_MAX 8
7155 #define TRACING_LOG_LOC_MAX 128
7157 #define CMD_PREFIX " Command: "
7160 const char **errs; /* ptr to loc-specific array of err strings */
7161 u8 type; /* index into errs -> specific err string */
7162 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7166 struct tracing_log_err {
7167 struct list_head list;
7168 struct err_info info;
7169 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7170 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7173 static DEFINE_MUTEX(tracing_err_log_lock);
7175 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7177 struct tracing_log_err *err;
7179 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7180 err = kzalloc(sizeof(*err), GFP_KERNEL);
7182 err = ERR_PTR(-ENOMEM);
7183 tr->n_err_log_entries++;
7188 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7189 list_del(&err->list);
7195 * err_pos - find the position of a string within a command for error caret placement
7196 * @cmd: The tracing command that caused the error
7197 * @str: The string to position the caret at within @cmd
7199 * Finds the position of the first occurrence of @str within @cmd. The
7200 * return value can be passed to tracing_log_err() for caret placement
7203 * Returns the index within @cmd of the first occurrence of @str or 0
7204 * if @str was not found.
7206 unsigned int err_pos(char *cmd, const char *str)
7210 if (WARN_ON(!strlen(cmd)))
7213 found = strstr(cmd, str);
7221 * tracing_log_err - write an error to the tracing error log
7222 * @tr: The associated trace array for the error (NULL for top level array)
7223 * @loc: A string describing where the error occurred
7224 * @cmd: The tracing command that caused the error
7225 * @errs: The array of loc-specific static error strings
7226 * @type: The index into errs[], which produces the specific static err string
7227 * @pos: The position the caret should be placed in the cmd
7229 * Writes an error into tracing/error_log of the form:
7231 * <loc>: error: <text>
7235 * tracing/error_log is a small log file containing the last
7236 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7237 * unless there has been a tracing error, and the error log can be
7238 * cleared and have its memory freed by writing the empty string to
7239 * it in truncation mode, i.e. echo > tracing/error_log.
7241 * NOTE: the @errs array along with the @type param are used to
7242 * produce a static error string - this string is not copied and saved
7243 * when the error is logged - only a pointer to it is saved. See
7244 * existing callers for examples of how static strings are typically
7245 * defined for use with tracing_log_err().
7247 void tracing_log_err(struct trace_array *tr,
7248 const char *loc, const char *cmd,
7249 const char **errs, u8 type, u8 pos)
7251 struct tracing_log_err *err;
7256 mutex_lock(&tracing_err_log_lock);
7257 err = get_tracing_log_err(tr);
7258 if (PTR_ERR(err) == -ENOMEM) {
7259 mutex_unlock(&tracing_err_log_lock);
7263 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7264 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7266 err->info.errs = errs;
7267 err->info.type = type;
7268 err->info.pos = pos;
7269 err->info.ts = local_clock();
7271 list_add_tail(&err->list, &tr->err_log);
7272 mutex_unlock(&tracing_err_log_lock);
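/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * tracing command parser might report a failure through tracing_log_err(),
 * using err_pos() for caret placement. The "my_cmd_errs" array, the location
 * string and the function name are made up for the example.
 *
 *   static const char *my_cmd_errs[] = {
 *           "Unknown keyword",
 *           "Missing argument",
 *   };
 *
 *   static void report_bad_keyword(struct trace_array *tr,
 *                                  char *cmd, const char *keyword)
 *   {
 *           // type 0 selects "Unknown keyword" from the static array
 *           tracing_log_err(tr, "my_parser", cmd, my_cmd_errs,
 *                           0, err_pos(cmd, keyword));
 *   }
 */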
7275 static void clear_tracing_err_log(struct trace_array *tr)
7277 struct tracing_log_err *err, *next;
7279 mutex_lock(&tracing_err_log_lock);
7280 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7281 list_del(&err->list);
7285 tr->n_err_log_entries = 0;
7286 mutex_unlock(&tracing_err_log_lock);
7289 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7291 struct trace_array *tr = m->private;
7293 mutex_lock(&tracing_err_log_lock);
7295 return seq_list_start(&tr->err_log, *pos);
7298 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7300 struct trace_array *tr = m->private;
7302 return seq_list_next(v, &tr->err_log, pos);
7305 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7307 mutex_unlock(&tracing_err_log_lock);
7310 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7314 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7316 for (i = 0; i < pos; i++)
7321 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7323 struct tracing_log_err *err = v;
7326 const char *err_text = err->info.errs[err->info.type];
7327 u64 sec = err->info.ts;
7330 nsec = do_div(sec, NSEC_PER_SEC);
7331 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7332 err->loc, err_text);
7333 seq_printf(m, "%s", err->cmd);
7334 tracing_err_log_show_pos(m, err->info.pos);
7340 static const struct seq_operations tracing_err_log_seq_ops = {
7341 .start = tracing_err_log_seq_start,
7342 .next = tracing_err_log_seq_next,
7343 .stop = tracing_err_log_seq_stop,
7344 .show = tracing_err_log_seq_show
7347 static int tracing_err_log_open(struct inode *inode, struct file *file)
7349 struct trace_array *tr = inode->i_private;
7352 ret = tracing_check_open_get_tr(tr);
7356 /* If this file was opened for write, then erase contents */
7357 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7358 clear_tracing_err_log(tr);
7360 if (file->f_mode & FMODE_READ) {
7361 ret = seq_open(file, &tracing_err_log_seq_ops);
7363 struct seq_file *m = file->private_data;
7366 trace_array_put(tr);
7372 static ssize_t tracing_err_log_write(struct file *file,
7373 const char __user *buffer,
7374 size_t count, loff_t *ppos)
7379 static int tracing_err_log_release(struct inode *inode, struct file *file)
7381 struct trace_array *tr = inode->i_private;
7383 trace_array_put(tr);
7385 if (file->f_mode & FMODE_READ)
7386 seq_release(inode, file);
7391 static const struct file_operations tracing_err_log_fops = {
7392 .open = tracing_err_log_open,
7393 .write = tracing_err_log_write,
7395 .llseek = seq_lseek,
7396 .release = tracing_err_log_release,
7399 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7401 struct trace_array *tr = inode->i_private;
7402 struct ftrace_buffer_info *info;
7405 ret = tracing_check_open_get_tr(tr);
7409 info = kzalloc(sizeof(*info), GFP_KERNEL);
7411 trace_array_put(tr);
7415 mutex_lock(&trace_types_lock);
7418 info->iter.cpu_file = tracing_get_cpu(inode);
7419 info->iter.trace = tr->current_trace;
7420 info->iter.array_buffer = &tr->array_buffer;
7422 /* Force reading ring buffer for first read */
7423 info->read = (unsigned int)-1;
7425 filp->private_data = info;
7427 tr->current_trace->ref++;
7429 mutex_unlock(&trace_types_lock);
7431 ret = nonseekable_open(inode, filp);
7433 trace_array_put(tr);
7439 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7441 struct ftrace_buffer_info *info = filp->private_data;
7442 struct trace_iterator *iter = &info->iter;
7444 return trace_poll(iter, filp, poll_table);
7448 tracing_buffers_read(struct file *filp, char __user *ubuf,
7449 size_t count, loff_t *ppos)
7451 struct ftrace_buffer_info *info = filp->private_data;
7452 struct trace_iterator *iter = &info->iter;
7459 #ifdef CONFIG_TRACER_MAX_TRACE
7460 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7465 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7467 if (IS_ERR(info->spare)) {
7468 ret = PTR_ERR(info->spare);
7471 info->spare_cpu = iter->cpu_file;
7477 /* Do we have previous read data to read? */
7478 if (info->read < PAGE_SIZE)
7482 trace_access_lock(iter->cpu_file);
7483 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7487 trace_access_unlock(iter->cpu_file);
7490 if (trace_empty(iter)) {
7491 if ((filp->f_flags & O_NONBLOCK))
7494 ret = wait_on_pipe(iter, 0);
7505 size = PAGE_SIZE - info->read;
7509 ret = copy_to_user(ubuf, info->spare + info->read, size);
7521 static int tracing_buffers_release(struct inode *inode, struct file *file)
7523 struct ftrace_buffer_info *info = file->private_data;
7524 struct trace_iterator *iter = &info->iter;
7526 mutex_lock(&trace_types_lock);
7528 iter->tr->current_trace->ref--;
7530 __trace_array_put(iter->tr);
7533 ring_buffer_free_read_page(iter->array_buffer->buffer,
7534 info->spare_cpu, info->spare);
7537 mutex_unlock(&trace_types_lock);
7543 struct trace_buffer *buffer;
7546 refcount_t refcount;
7549 static void buffer_ref_release(struct buffer_ref *ref)
7551 if (!refcount_dec_and_test(&ref->refcount))
7553 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7557 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7558 struct pipe_buffer *buf)
7560 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7562 buffer_ref_release(ref);
7566 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7567 struct pipe_buffer *buf)
7569 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7571 if (refcount_read(&ref->refcount) > INT_MAX/2)
7574 refcount_inc(&ref->refcount);
7578 /* Pipe buffer operations for a buffer. */
7579 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7580 .release = buffer_pipe_buf_release,
7581 .get = buffer_pipe_buf_get,
7585 * Callback from splice_to_pipe(), if we need to release some pages
7586 * at the end of the spd in case we errored out while filling the pipe.
7588 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7590 struct buffer_ref *ref =
7591 (struct buffer_ref *)spd->partial[i].private;
7593 buffer_ref_release(ref);
7594 spd->partial[i].private = 0;
7598 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7599 struct pipe_inode_info *pipe, size_t len,
7602 struct ftrace_buffer_info *info = file->private_data;
7603 struct trace_iterator *iter = &info->iter;
7604 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7605 struct page *pages_def[PIPE_DEF_BUFFERS];
7606 struct splice_pipe_desc spd = {
7608 .partial = partial_def,
7609 .nr_pages_max = PIPE_DEF_BUFFERS,
7610 .ops = &buffer_pipe_buf_ops,
7611 .spd_release = buffer_spd_release,
7613 struct buffer_ref *ref;
7617 #ifdef CONFIG_TRACER_MAX_TRACE
7618 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7622 if (*ppos & (PAGE_SIZE - 1))
7625 if (len & (PAGE_SIZE - 1)) {
7626 if (len < PAGE_SIZE)
7631 if (splice_grow_spd(pipe, &spd))
7635 trace_access_lock(iter->cpu_file);
7636 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7638 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7642 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7648 refcount_set(&ref->refcount, 1);
7649 ref->buffer = iter->array_buffer->buffer;
7650 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7651 if (IS_ERR(ref->page)) {
7652 ret = PTR_ERR(ref->page);
7657 ref->cpu = iter->cpu_file;
7659 r = ring_buffer_read_page(ref->buffer, &ref->page,
7660 len, iter->cpu_file, 1);
7662 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7668 page = virt_to_page(ref->page);
7670 spd.pages[i] = page;
7671 spd.partial[i].len = PAGE_SIZE;
7672 spd.partial[i].offset = 0;
7673 spd.partial[i].private = (unsigned long)ref;
7677 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7680 trace_access_unlock(iter->cpu_file);
7683 /* did we read anything? */
7684 if (!spd.nr_pages) {
7689 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7692 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7699 ret = splice_to_pipe(pipe, &spd);
7701 splice_shrink_spd(&spd);
7706 static const struct file_operations tracing_buffers_fops = {
7707 .open = tracing_buffers_open,
7708 .read = tracing_buffers_read,
7709 .poll = tracing_buffers_poll,
7710 .release = tracing_buffers_release,
7711 .splice_read = tracing_buffers_splice_read,
7712 .llseek = no_llseek,
7716 tracing_stats_read(struct file *filp, char __user *ubuf,
7717 size_t count, loff_t *ppos)
7719 struct inode *inode = file_inode(filp);
7720 struct trace_array *tr = inode->i_private;
7721 struct array_buffer *trace_buf = &tr->array_buffer;
7722 int cpu = tracing_get_cpu(inode);
7723 struct trace_seq *s;
7725 unsigned long long t;
7726 unsigned long usec_rem;
7728 s = kmalloc(sizeof(*s), GFP_KERNEL);
7734 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7735 trace_seq_printf(s, "entries: %ld\n", cnt);
7737 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7738 trace_seq_printf(s, "overrun: %ld\n", cnt);
7740 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7741 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7743 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7744 trace_seq_printf(s, "bytes: %ld\n", cnt);
7746 if (trace_clocks[tr->clock_id].in_ns) {
7747 /* local or global for trace_clock */
7748 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7749 usec_rem = do_div(t, USEC_PER_SEC);
7750 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7753 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7754 usec_rem = do_div(t, USEC_PER_SEC);
7755 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7757 /* counter or tsc mode for trace_clock */
7758 trace_seq_printf(s, "oldest event ts: %llu\n",
7759 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7761 trace_seq_printf(s, "now ts: %llu\n",
7762 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7765 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7766 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7768 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7769 trace_seq_printf(s, "read events: %ld\n", cnt);
7771 count = simple_read_from_buffer(ubuf, count, ppos,
7772 s->buffer, trace_seq_used(s));
7779 static const struct file_operations tracing_stats_fops = {
7780 .open = tracing_open_generic_tr,
7781 .read = tracing_stats_read,
7782 .llseek = generic_file_llseek,
7783 .release = tracing_release_generic_tr,
7786 #ifdef CONFIG_DYNAMIC_FTRACE
7789 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7790 size_t cnt, loff_t *ppos)
7796 /* 256 should be plenty to hold the amount needed */
7797 buf = kmalloc(256, GFP_KERNEL);
7801 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7802 ftrace_update_tot_cnt,
7803 ftrace_number_of_pages,
7804 ftrace_number_of_groups);
7806 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7811 static const struct file_operations tracing_dyn_info_fops = {
7812 .open = tracing_open_generic,
7813 .read = tracing_read_dyn_info,
7814 .llseek = generic_file_llseek,
7816 #endif /* CONFIG_DYNAMIC_FTRACE */
7818 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7820 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7821 struct trace_array *tr, struct ftrace_probe_ops *ops,
7824 tracing_snapshot_instance(tr);
7828 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7829 struct trace_array *tr, struct ftrace_probe_ops *ops,
7832 struct ftrace_func_mapper *mapper = data;
7836 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7846 tracing_snapshot_instance(tr);
7850 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7851 struct ftrace_probe_ops *ops, void *data)
7853 struct ftrace_func_mapper *mapper = data;
7856 seq_printf(m, "%ps:", (void *)ip);
7858 seq_puts(m, "snapshot");
7861 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7864 seq_printf(m, ":count=%ld\n", *count);
7866 seq_puts(m, ":unlimited\n");
7872 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7873 unsigned long ip, void *init_data, void **data)
7875 struct ftrace_func_mapper *mapper = *data;
7878 mapper = allocate_ftrace_func_mapper();
7884 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7888 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7889 unsigned long ip, void *data)
7891 struct ftrace_func_mapper *mapper = data;
7896 free_ftrace_func_mapper(mapper, NULL);
7900 ftrace_func_mapper_remove_ip(mapper, ip);
7903 static struct ftrace_probe_ops snapshot_probe_ops = {
7904 .func = ftrace_snapshot,
7905 .print = ftrace_snapshot_print,
7908 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7909 .func = ftrace_count_snapshot,
7910 .print = ftrace_snapshot_print,
7911 .init = ftrace_snapshot_init,
7912 .free = ftrace_snapshot_free,
7916 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7917 char *glob, char *cmd, char *param, int enable)
7919 struct ftrace_probe_ops *ops;
7920 void *count = (void *)-1;
7927 /* hash funcs only work with set_ftrace_filter */
7931 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7934 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7939 number = strsep(&param, ":");
7941 if (!strlen(number))
7945 * We use the callback data field (which is a pointer) as our counter.
7948 ret = kstrtoul(number, 0, (unsigned long *)&count);
7953 ret = tracing_alloc_snapshot_instance(tr);
7957 ret = register_ftrace_function_probe(glob, tr, ops, count);
7960 return ret < 0 ? ret : 0;
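/*
 * Usage sketch (the function name is only an example, borrowed from
 * the ftrace documentation): arm a snapshot trigger on a function hit
 * via set_ftrace_filter, optionally limited to a count, and disarm it
 * again with the '!' prefix handled by the callback above:
 *
 *	echo 'native_flush_tlb_others:snapshot'   > set_ftrace_filter
 *	echo 'native_flush_tlb_others:snapshot:3' > set_ftrace_filter
 *	echo '!native_flush_tlb_others:snapshot'  > set_ftrace_filter
 */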
7963 static struct ftrace_func_command ftrace_snapshot_cmd = {
7965 .func = ftrace_trace_snapshot_callback,
7968 static __init int register_snapshot_cmd(void)
7970 return register_ftrace_command(&ftrace_snapshot_cmd);
7973 static inline __init int register_snapshot_cmd(void) { return 0; }
7974 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7976 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7978 if (WARN_ON(!tr->dir))
7979 return ERR_PTR(-ENODEV);
7981 /* Top directory uses NULL as the parent */
7982 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7985 /* All sub buffers have a descriptor */
7989 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7991 struct dentry *d_tracer;
7994 return tr->percpu_dir;
7996 d_tracer = tracing_get_dentry(tr);
7997 if (IS_ERR(d_tracer))
8000 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8002 MEM_FAIL(!tr->percpu_dir,
8003 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8005 return tr->percpu_dir;
8008 static struct dentry *
8009 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8010 void *data, long cpu, const struct file_operations *fops)
8012 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8014 if (ret) /* See tracing_get_cpu() */
8015 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8020 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8022 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8023 struct dentry *d_cpu;
8024 char cpu_dir[30]; /* 30 characters should be more than enough */
8029 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8030 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8032 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8036 /* per cpu trace_pipe */
8037 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8038 tr, cpu, &tracing_pipe_fops);
8041 trace_create_cpu_file("trace", 0644, d_cpu,
8042 tr, cpu, &tracing_fops);
8044 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8045 tr, cpu, &tracing_buffers_fops);
8047 trace_create_cpu_file("stats", 0444, d_cpu,
8048 tr, cpu, &tracing_stats_fops);
8050 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8051 tr, cpu, &tracing_entries_fops);
8053 #ifdef CONFIG_TRACER_SNAPSHOT
8054 trace_create_cpu_file("snapshot", 0644, d_cpu,
8055 tr, cpu, &snapshot_fops);
8057 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8058 tr, cpu, &snapshot_raw_fops);
8062 #ifdef CONFIG_FTRACE_SELFTEST
8063 /* Let selftest have access to static functions in this file */
8064 #include "trace_selftest.c"
8068 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8071 struct trace_option_dentry *topt = filp->private_data;
8074 if (topt->flags->val & topt->opt->bit)
8079 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8083 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8086 struct trace_option_dentry *topt = filp->private_data;
8090 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8094 if (val != 0 && val != 1)
8097 if (!!(topt->flags->val & topt->opt->bit) != val) {
8098 mutex_lock(&trace_types_lock);
8099 ret = __set_tracer_option(topt->tr, topt->flags,
8101 mutex_unlock(&trace_types_lock);
8112 static const struct file_operations trace_options_fops = {
8113 .open = tracing_open_generic,
8114 .read = trace_options_read,
8115 .write = trace_options_write,
8116 .llseek = generic_file_llseek,
8120 * In order to pass in both the trace_array descriptor as well as the index
8121 * to the flag that the trace option file represents, the trace_array
8122 * has a character array of trace_flags_index[], which holds the index
8123 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8124 * The address of this character array is passed to the flag option file
8125 * read/write callbacks.
8127 * In order to extract both the index and the trace_array descriptor,
8128 * get_tr_index() uses the following algorithm.
8132 * As the pointer itself contains the address of the index (remember index[1] == 1), dereferencing it gives the index: idx = *ptr.
8135 * Then, to get the trace_array descriptor, we subtract that index
8136 * from the ptr, which gets us to the start of the index array itself:
8138 * ptr - idx == &index[0]
8140 * Then a simple container_of() from that pointer gets us to the
8141 * trace_array descriptor.
8143 static void get_tr_index(void *data, struct trace_array **ptr,
8144 unsigned int *pindex)
8146 *pindex = *(unsigned char *)data;
8148 *ptr = container_of(data - *pindex, struct trace_array,
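/*
 * Worked example (the value 5 is invented for illustration): suppose an
 * options file was created with data == &tr->trace_flags_index[5].
 * Then in get_tr_index():
 *
 *	*pindex = *(unsigned char *)data;	== 5
 *	data - *pindex				== &tr->trace_flags_index[0]
 *	container_of(data - *pindex,
 *		     struct trace_array,
 *		     trace_flags_index)		== tr
 *
 * Which index a given option file gets depends on the order of the
 * trace_options[] strings; 5 is just an example.
 */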
8153 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8156 void *tr_index = filp->private_data;
8157 struct trace_array *tr;
8161 get_tr_index(tr_index, &tr, &index);
8163 if (tr->trace_flags & (1 << index))
8168 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8172 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8175 void *tr_index = filp->private_data;
8176 struct trace_array *tr;
8181 get_tr_index(tr_index, &tr, &index);
8183 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8187 if (val != 0 && val != 1)
8190 mutex_lock(&event_mutex);
8191 mutex_lock(&trace_types_lock);
8192 ret = set_tracer_flag(tr, 1 << index, val);
8193 mutex_unlock(&trace_types_lock);
8194 mutex_unlock(&event_mutex);
8204 static const struct file_operations trace_options_core_fops = {
8205 .open = tracing_open_generic,
8206 .read = trace_options_core_read,
8207 .write = trace_options_core_write,
8208 .llseek = generic_file_llseek,
8211 struct dentry *trace_create_file(const char *name,
8213 struct dentry *parent,
8215 const struct file_operations *fops)
8219 ret = tracefs_create_file(name, mode, parent, data, fops);
8221 pr_warn("Could not create tracefs '%s' entry\n", name);
8227 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8229 struct dentry *d_tracer;
8234 d_tracer = tracing_get_dentry(tr);
8235 if (IS_ERR(d_tracer))
8238 tr->options = tracefs_create_dir("options", d_tracer);
8240 pr_warn("Could not create tracefs directory 'options'\n");
8248 create_trace_option_file(struct trace_array *tr,
8249 struct trace_option_dentry *topt,
8250 struct tracer_flags *flags,
8251 struct tracer_opt *opt)
8253 struct dentry *t_options;
8255 t_options = trace_options_init_dentry(tr);
8259 topt->flags = flags;
8263 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8264 &trace_options_fops);
8269 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8271 struct trace_option_dentry *topts;
8272 struct trace_options *tr_topts;
8273 struct tracer_flags *flags;
8274 struct tracer_opt *opts;
8281 flags = tracer->flags;
8283 if (!flags || !flags->opts)
8287 * If this is an instance, only create flags for tracers
8288 * the instance may have.
8290 if (!trace_ok_for_array(tracer, tr))
8293 for (i = 0; i < tr->nr_topts; i++) {
8294 /* Make sure there are no duplicate flags. */
8295 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8301 for (cnt = 0; opts[cnt].name; cnt++)
8304 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8308 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8315 tr->topts = tr_topts;
8316 tr->topts[tr->nr_topts].tracer = tracer;
8317 tr->topts[tr->nr_topts].topts = topts;
8320 for (cnt = 0; opts[cnt].name; cnt++) {
8321 create_trace_option_file(tr, &topts[cnt], flags,
8323 MEM_FAIL(topts[cnt].entry == NULL,
8324 "Failed to create trace option: %s",
8329 static struct dentry *
8330 create_trace_option_core_file(struct trace_array *tr,
8331 const char *option, long index)
8333 struct dentry *t_options;
8335 t_options = trace_options_init_dentry(tr);
8339 return trace_create_file(option, 0644, t_options,
8340 (void *)&tr->trace_flags_index[index],
8341 &trace_options_core_fops);
8344 static void create_trace_options_dir(struct trace_array *tr)
8346 struct dentry *t_options;
8347 bool top_level = tr == &global_trace;
8350 t_options = trace_options_init_dentry(tr);
8354 for (i = 0; trace_options[i]; i++) {
8356 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8357 create_trace_option_core_file(tr, trace_options[i], i);
8362 rb_simple_read(struct file *filp, char __user *ubuf,
8363 size_t cnt, loff_t *ppos)
8365 struct trace_array *tr = filp->private_data;
8369 r = tracer_tracing_is_on(tr);
8370 r = sprintf(buf, "%d\n", r);
8372 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8376 rb_simple_write(struct file *filp, const char __user *ubuf,
8377 size_t cnt, loff_t *ppos)
8379 struct trace_array *tr = filp->private_data;
8380 struct trace_buffer *buffer = tr->array_buffer.buffer;
8384 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8389 mutex_lock(&trace_types_lock);
8390 if (!!val == tracer_tracing_is_on(tr)) {
8391 val = 0; /* do nothing */
8393 tracer_tracing_on(tr);
8394 if (tr->current_trace->start)
8395 tr->current_trace->start(tr);
8397 tracer_tracing_off(tr);
8398 if (tr->current_trace->stop)
8399 tr->current_trace->stop(tr);
8401 mutex_unlock(&trace_types_lock);
8409 static const struct file_operations rb_simple_fops = {
8410 .open = tracing_open_generic_tr,
8411 .read = rb_simple_read,
8412 .write = rb_simple_write,
8413 .release = tracing_release_generic_tr,
8414 .llseek = default_llseek,
8418 buffer_percent_read(struct file *filp, char __user *ubuf,
8419 size_t cnt, loff_t *ppos)
8421 struct trace_array *tr = filp->private_data;
8425 r = tr->buffer_percent;
8426 r = sprintf(buf, "%d\n", r);
8428 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8432 buffer_percent_write(struct file *filp, const char __user *ubuf,
8433 size_t cnt, loff_t *ppos)
8435 struct trace_array *tr = filp->private_data;
8439 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8449 tr->buffer_percent = val;
8456 static const struct file_operations buffer_percent_fops = {
8457 .open = tracing_open_generic_tr,
8458 .read = buffer_percent_read,
8459 .write = buffer_percent_write,
8460 .release = tracing_release_generic_tr,
8461 .llseek = default_llseek,
8464 static struct dentry *trace_instance_dir;
8467 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8470 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8472 enum ring_buffer_flags rb_flags;
8474 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8478 buf->buffer = ring_buffer_alloc(size, rb_flags);
8482 buf->data = alloc_percpu(struct trace_array_cpu);
8484 ring_buffer_free(buf->buffer);
8489 /* Allocate the first page for all buffers */
8490 set_buffer_entries(&tr->array_buffer,
8491 ring_buffer_size(tr->array_buffer.buffer, 0));
8496 static int allocate_trace_buffers(struct trace_array *tr, int size)
8500 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8504 #ifdef CONFIG_TRACER_MAX_TRACE
8505 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8506 allocate_snapshot ? size : 1);
8507 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8508 ring_buffer_free(tr->array_buffer.buffer);
8509 tr->array_buffer.buffer = NULL;
8510 free_percpu(tr->array_buffer.data);
8511 tr->array_buffer.data = NULL;
8514 tr->allocated_snapshot = allocate_snapshot;
8517 * Only the top level trace array gets its snapshot allocated
8518 * from the kernel command line.
8520 allocate_snapshot = false;
8526 static void free_trace_buffer(struct array_buffer *buf)
8529 ring_buffer_free(buf->buffer);
8531 free_percpu(buf->data);
8536 static void free_trace_buffers(struct trace_array *tr)
8541 free_trace_buffer(&tr->array_buffer);
8543 #ifdef CONFIG_TRACER_MAX_TRACE
8544 free_trace_buffer(&tr->max_buffer);
8548 static void init_trace_flags_index(struct trace_array *tr)
8552 /* Used by the trace options files */
8553 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8554 tr->trace_flags_index[i] = i;
8557 static void __update_tracer_options(struct trace_array *tr)
8561 for (t = trace_types; t; t = t->next)
8562 add_tracer_options(tr, t);
8565 static void update_tracer_options(struct trace_array *tr)
8567 mutex_lock(&trace_types_lock);
8568 __update_tracer_options(tr);
8569 mutex_unlock(&trace_types_lock);
8572 /* Must have trace_types_lock held */
8573 struct trace_array *trace_array_find(const char *instance)
8575 struct trace_array *tr, *found = NULL;
8577 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8578 if (tr->name && strcmp(tr->name, instance) == 0) {
8587 struct trace_array *trace_array_find_get(const char *instance)
8589 struct trace_array *tr;
8591 mutex_lock(&trace_types_lock);
8592 tr = trace_array_find(instance);
8595 mutex_unlock(&trace_types_lock);
8600 static struct trace_array *trace_array_create(const char *name)
8602 struct trace_array *tr;
8606 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8608 return ERR_PTR(ret);
8610 tr->name = kstrdup(name, GFP_KERNEL);
8614 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8617 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8619 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8621 raw_spin_lock_init(&tr->start_lock);
8623 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8625 tr->current_trace = &nop_trace;
8627 INIT_LIST_HEAD(&tr->systems);
8628 INIT_LIST_HEAD(&tr->events);
8629 INIT_LIST_HEAD(&tr->hist_vars);
8630 INIT_LIST_HEAD(&tr->err_log);
8632 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8635 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8639 ret = event_trace_add_tracer(tr->dir, tr);
8641 tracefs_remove(tr->dir);
8645 ftrace_init_trace_array(tr);
8647 init_tracer_tracefs(tr, tr->dir);
8648 init_trace_flags_index(tr);
8649 __update_tracer_options(tr);
8651 list_add(&tr->list, &ftrace_trace_arrays);
8659 free_trace_buffers(tr);
8660 free_cpumask_var(tr->tracing_cpumask);
8664 return ERR_PTR(ret);
8667 static int instance_mkdir(const char *name)
8669 struct trace_array *tr;
8672 mutex_lock(&event_mutex);
8673 mutex_lock(&trace_types_lock);
8676 if (trace_array_find(name))
8679 tr = trace_array_create(name);
8681 ret = PTR_ERR_OR_ZERO(tr);
8684 mutex_unlock(&trace_types_lock);
8685 mutex_unlock(&event_mutex);
8690 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8691 * @name: The name of the trace array to be looked up/created.
8693 * Returns pointer to trace array with given name.
8694 * NULL, if it cannot be created.
8696 * NOTE: This function increments the reference counter associated with the
8697 * trace array returned. This makes sure it cannot be freed while in use.
8698 * Use trace_array_put() once the trace array is no longer needed.
8699 * If the trace_array is to be freed, trace_array_destroy() needs to
8700 * be called after the trace_array_put(), or simply let user space delete
8701 * it from the tracefs instances directory. But until the
8702 * trace_array_put() is called, user space can not delete it.
8705 struct trace_array *trace_array_get_by_name(const char *name)
8707 struct trace_array *tr;
8709 mutex_lock(&event_mutex);
8710 mutex_lock(&trace_types_lock);
8712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8713 if (tr->name && strcmp(tr->name, name) == 0)
8717 tr = trace_array_create(name);
8725 mutex_unlock(&trace_types_lock);
8726 mutex_unlock(&event_mutex);
8729 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
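/*
 * Minimal usage sketch (hypothetical module code; the instance name is
 * made up for this example): look up or create an instance, use it,
 * then drop the reference taken by this function.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *
 *	... emit events into the instance ...
 *
 *	trace_array_put(tr);
 *
 * If the instance should also disappear from tracefs, follow the put
 * with trace_array_destroy(tr), as described in the comment above.
 */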
8731 static int __remove_instance(struct trace_array *tr)
8735 /* Reference counter for a newly created trace array = 1. */
8736 if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
8739 list_del(&tr->list);
8741 /* Disable all the flags that were enabled coming in */
8742 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8743 if ((1 << i) & ZEROED_TRACE_FLAGS)
8744 set_tracer_flag(tr, 1 << i, 0);
8747 tracing_set_nop(tr);
8748 clear_ftrace_function_probes(tr);
8749 event_trace_del_tracer(tr);
8750 ftrace_clear_pids(tr);
8751 ftrace_destroy_function_files(tr);
8752 tracefs_remove(tr->dir);
8753 free_trace_buffers(tr);
8755 for (i = 0; i < tr->nr_topts; i++) {
8756 kfree(tr->topts[i].topts);
8760 free_cpumask_var(tr->tracing_cpumask);
8768 int trace_array_destroy(struct trace_array *this_tr)
8770 struct trace_array *tr;
8776 mutex_lock(&event_mutex);
8777 mutex_lock(&trace_types_lock);
8781 /* Make sure the trace array exists before destroying it. */
8782 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8783 if (tr == this_tr) {
8784 ret = __remove_instance(tr);
8789 mutex_unlock(&trace_types_lock);
8790 mutex_unlock(&event_mutex);
8794 EXPORT_SYMBOL_GPL(trace_array_destroy);
8796 static int instance_rmdir(const char *name)
8798 struct trace_array *tr;
8801 mutex_lock(&event_mutex);
8802 mutex_lock(&trace_types_lock);
8805 tr = trace_array_find(name);
8807 ret = __remove_instance(tr);
8809 mutex_unlock(&trace_types_lock);
8810 mutex_unlock(&event_mutex);
8815 static __init void create_trace_instances(struct dentry *d_tracer)
8817 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8820 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8825 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8827 struct trace_event_file *file;
8830 trace_create_file("available_tracers", 0444, d_tracer,
8831 tr, &show_traces_fops);
8833 trace_create_file("current_tracer", 0644, d_tracer,
8834 tr, &set_tracer_fops);
8836 trace_create_file("tracing_cpumask", 0644, d_tracer,
8837 tr, &tracing_cpumask_fops);
8839 trace_create_file("trace_options", 0644, d_tracer,
8840 tr, &tracing_iter_fops);
8842 trace_create_file("trace", 0644, d_tracer,
8845 trace_create_file("trace_pipe", 0444, d_tracer,
8846 tr, &tracing_pipe_fops);
8848 trace_create_file("buffer_size_kb", 0644, d_tracer,
8849 tr, &tracing_entries_fops);
8851 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8852 tr, &tracing_total_entries_fops);
8854 trace_create_file("free_buffer", 0200, d_tracer,
8855 tr, &tracing_free_buffer_fops);
8857 trace_create_file("trace_marker", 0220, d_tracer,
8858 tr, &tracing_mark_fops);
8860 file = __find_event_file(tr, "ftrace", "print");
8861 if (file && file->dir)
8862 trace_create_file("trigger", 0644, file->dir, file,
8863 &event_trigger_fops);
8864 tr->trace_marker_file = file;
8866 trace_create_file("trace_marker_raw", 0220, d_tracer,
8867 tr, &tracing_mark_raw_fops);
8869 trace_create_file("trace_clock", 0644, d_tracer, tr,
8872 trace_create_file("tracing_on", 0644, d_tracer,
8873 tr, &rb_simple_fops);
8875 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8876 &trace_time_stamp_mode_fops);
8878 tr->buffer_percent = 50;
8880 trace_create_file("buffer_percent", 0444, d_tracer,
8881 tr, &buffer_percent_fops);
8883 create_trace_options_dir(tr);
8885 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8886 trace_create_maxlat_file(tr, d_tracer);
8889 if (ftrace_create_function_files(tr, d_tracer))
8890 MEM_FAIL(1, "Could not allocate function filter files");
8892 #ifdef CONFIG_TRACER_SNAPSHOT
8893 trace_create_file("snapshot", 0644, d_tracer,
8894 tr, &snapshot_fops);
8897 trace_create_file("error_log", 0644, d_tracer,
8898 tr, &tracing_err_log_fops);
8900 for_each_tracing_cpu(cpu)
8901 tracing_init_tracefs_percpu(tr, cpu);
8903 ftrace_init_tracefs(tr, d_tracer);
8906 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8908 struct vfsmount *mnt;
8909 struct file_system_type *type;
8912 * To maintain backward compatibility for tools that mount
8913 * debugfs to get to the tracing facility, tracefs is automatically
8914 * mounted to the debugfs/tracing directory.
8916 type = get_fs_type("tracefs");
8919 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8920 put_filesystem(type);
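/*
 * For context (the paths below are the usual defaults, not dictated by
 * this file): with the automount in place, older tools that open
 * /sys/kernel/debug/tracing keep working, while tracefs can also be
 * mounted directly:
 *
 *	mount -t tracefs nodev /sys/kernel/tracing
 *
 * Both paths then expose the same tracing files.
 */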
8929 * tracing_init_dentry - initialize top level trace array
8931 * This is called when creating files or directories in the tracing
8932 * directory. It is called via fs_initcall() by any of the boot up code
8933 * and expects to return the dentry of the top level tracing directory.
8935 struct dentry *tracing_init_dentry(void)
8937 struct trace_array *tr = &global_trace;
8939 if (security_locked_down(LOCKDOWN_TRACEFS)) {
8940 pr_warn("Tracing disabled due to lockdown\n");
8941 return ERR_PTR(-EPERM);
8944 /* The top level trace array uses NULL as parent */
8948 if (WARN_ON(!tracefs_initialized()) ||
8949 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8950 WARN_ON(!debugfs_initialized())))
8951 return ERR_PTR(-ENODEV);
8954 * As there may still be users that expect the tracing
8955 * files to exist in debugfs/tracing, we must automount
8956 * the tracefs file system there, so older tools still
8957 * work with the newer kernel.
8959 tr->dir = debugfs_create_automount("tracing", NULL,
8960 trace_automount, NULL);
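/*
 * Typical caller pattern (sketch; "example" and example_fops are
 * placeholders, the real callers are the boot-time routines below such
 * as tracer_init_tracefs()):
 *
 *	struct dentry *d = tracing_init_dentry();
 *
 *	if (IS_ERR(d))
 *		return PTR_ERR(d);
 *	trace_create_file("example", 0444, d, NULL, &example_fops);
 */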
8965 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8966 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8968 static void __init trace_eval_init(void)
8972 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8973 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8976 #ifdef CONFIG_MODULES
8977 static void trace_module_add_evals(struct module *mod)
8979 if (!mod->num_trace_evals)
8983 * Modules with bad taint do not have events created, do
8984 * not bother with enums either.
8986 if (trace_module_has_bad_taint(mod))
8989 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8992 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8993 static void trace_module_remove_evals(struct module *mod)
8995 union trace_eval_map_item *map;
8996 union trace_eval_map_item **last = &trace_eval_maps;
8998 if (!mod->num_trace_evals)
9001 mutex_lock(&trace_eval_mutex);
9003 map = trace_eval_maps;
9006 if (map->head.mod == mod)
9008 map = trace_eval_jmp_to_tail(map);
9009 last = &map->tail.next;
9010 map = map->tail.next;
9015 *last = trace_eval_jmp_to_tail(map)->tail.next;
9018 mutex_unlock(&trace_eval_mutex);
9021 static inline void trace_module_remove_evals(struct module *mod) { }
9022 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9024 static int trace_module_notify(struct notifier_block *self,
9025 unsigned long val, void *data)
9027 struct module *mod = data;
9030 case MODULE_STATE_COMING:
9031 trace_module_add_evals(mod);
9033 case MODULE_STATE_GOING:
9034 trace_module_remove_evals(mod);
9041 static struct notifier_block trace_module_nb = {
9042 .notifier_call = trace_module_notify,
9045 #endif /* CONFIG_MODULES */
9047 static __init int tracer_init_tracefs(void)
9049 struct dentry *d_tracer;
9051 trace_access_lock_init();
9053 d_tracer = tracing_init_dentry();
9054 if (IS_ERR(d_tracer))
9059 init_tracer_tracefs(&global_trace, d_tracer);
9060 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
9062 trace_create_file("tracing_thresh", 0644, d_tracer,
9063 &global_trace, &tracing_thresh_fops);
9065 trace_create_file("README", 0444, d_tracer,
9066 NULL, &tracing_readme_fops);
9068 trace_create_file("saved_cmdlines", 0444, d_tracer,
9069 NULL, &tracing_saved_cmdlines_fops);
9071 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9072 NULL, &tracing_saved_cmdlines_size_fops);
9074 trace_create_file("saved_tgids", 0444, d_tracer,
9075 NULL, &tracing_saved_tgids_fops);
9079 trace_create_eval_file(d_tracer);
9081 #ifdef CONFIG_MODULES
9082 register_module_notifier(&trace_module_nb);
9085 #ifdef CONFIG_DYNAMIC_FTRACE
9086 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
9087 NULL, &tracing_dyn_info_fops);
9090 create_trace_instances(d_tracer);
9092 update_tracer_options(&global_trace);
9097 static int trace_panic_handler(struct notifier_block *this,
9098 unsigned long event, void *unused)
9100 if (ftrace_dump_on_oops)
9101 ftrace_dump(ftrace_dump_on_oops);
9105 static struct notifier_block trace_panic_notifier = {
9106 .notifier_call = trace_panic_handler,
9108 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9111 static int trace_die_handler(struct notifier_block *self,
9117 if (ftrace_dump_on_oops)
9118 ftrace_dump(ftrace_dump_on_oops);
9126 static struct notifier_block trace_die_notifier = {
9127 .notifier_call = trace_die_handler,
9132 * printk is set to max of 1024, we really don't need it that big.
9133 * Nothing should be printing 1000 characters anyway.
9135 #define TRACE_MAX_PRINT 1000
9138 * Define here KERN_TRACE so that we have one place to modify
9139 * it if we decide to change what log level the ftrace dump
9142 #define KERN_TRACE KERN_EMERG
9145 trace_printk_seq(struct trace_seq *s)
9147 /* Probably should print a warning here. */
9148 if (s->seq.len >= TRACE_MAX_PRINT)
9149 s->seq.len = TRACE_MAX_PRINT;
9152 * More paranoid code. Although the buffer size is set to
9153 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9154 * an extra layer of protection.
9156 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9157 s->seq.len = s->seq.size - 1;
9159 /* should be NUL-terminated, but we are paranoid. */
9160 s->buffer[s->seq.len] = 0;
9162 printk(KERN_TRACE "%s", s->buffer);
9167 void trace_init_global_iter(struct trace_iterator *iter)
9169 iter->tr = &global_trace;
9170 iter->trace = iter->tr->current_trace;
9171 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9172 iter->array_buffer = &global_trace.array_buffer;
9174 if (iter->trace && iter->trace->open)
9175 iter->trace->open(iter);
9177 /* Annotate start of buffers if we had overruns */
9178 if (ring_buffer_overruns(iter->array_buffer->buffer))
9179 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9181 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9182 if (trace_clocks[iter->tr->clock_id].in_ns)
9183 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9186 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9188 /* use static because iter can be a bit big for the stack */
9189 static struct trace_iterator iter;
9190 static atomic_t dump_running;
9191 struct trace_array *tr = &global_trace;
9192 unsigned int old_userobj;
9193 unsigned long flags;
9196 /* Only allow one dump user at a time. */
9197 if (atomic_inc_return(&dump_running) != 1) {
9198 atomic_dec(&dump_running);
9203 * Always turn off tracing when we dump.
9204 * We don't need to show trace output of what happens
9205 * between multiple crashes.
9207 * If the user does a sysrq-z, then they can re-enable
9208 * tracing with echo 1 > tracing_on.
9212 local_irq_save(flags);
9213 printk_nmi_direct_enter();
9215 /* Simulate the iterator */
9216 trace_init_global_iter(&iter);
9217 /* Can not use kmalloc for iter.temp */
9218 iter.temp = static_temp_buf;
9219 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9221 for_each_tracing_cpu(cpu) {
9222 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9225 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9227 /* don't look at user memory in panic mode */
9228 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9230 switch (oops_dump_mode) {
9232 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9235 iter.cpu_file = raw_smp_processor_id();
9240 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9241 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9244 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9246 /* Did function tracer already get disabled? */
9247 if (ftrace_is_dead()) {
9248 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9249 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9253 * We need to stop all tracing on all CPUs to read
9254 * the next buffer. This is a bit expensive, but is
9255 * not done often. We read everything we can,
9256 * and then release the locks again.
9259 while (!trace_empty(&iter)) {
9262 printk(KERN_TRACE "---------------------------------\n");
9266 trace_iterator_reset(&iter);
9267 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9269 if (trace_find_next_entry_inc(&iter) != NULL) {
9272 ret = print_trace_line(&iter);
9273 if (ret != TRACE_TYPE_NO_CONSUME)
9274 trace_consume(&iter);
9276 touch_nmi_watchdog();
9278 trace_printk_seq(&iter.seq);
9282 printk(KERN_TRACE " (ftrace buffer empty)\n");
9284 printk(KERN_TRACE "---------------------------------\n");
9287 tr->trace_flags |= old_userobj;
9289 for_each_tracing_cpu(cpu) {
9290 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9292 atomic_dec(&dump_running);
9293 printk_nmi_direct_exit();
9294 local_irq_restore(flags);
9296 EXPORT_SYMBOL_GPL(ftrace_dump);
9298 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9305 argv = argv_split(GFP_KERNEL, buf, &argc);
9310 ret = createfn(argc, argv);
9317 #define WRITE_BUFSIZE 4096
9319 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9320 size_t count, loff_t *ppos,
9321 int (*createfn)(int, char **))
9323 char *kbuf, *buf, *tmp;
9328 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9332 while (done < count) {
9333 size = count - done;
9335 if (size >= WRITE_BUFSIZE)
9336 size = WRITE_BUFSIZE - 1;
9338 if (copy_from_user(kbuf, buffer + done, size)) {
9345 tmp = strchr(buf, '\n');
9348 size = tmp - buf + 1;
9351 if (done + size < count) {
9354 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9355 pr_warn("Line length is too long: Should be less than %d\n",
9363 /* Remove comments */
9364 tmp = strchr(buf, '#');
9369 ret = trace_run_command(buf, createfn);
9374 } while (done < count);
9384 __init static int tracer_alloc_buffers(void)
9390 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9391 pr_warn("Tracing disabled due to lockdown\n");
9396 * Make sure we don't accidentally add more trace options
9397 * than we have bits for.
9399 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9401 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9404 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9405 goto out_free_buffer_mask;
9407 /* Only allocate trace_printk buffers if a trace_printk exists */
9408 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9409 /* Must be called before global_trace.buffer is allocated */
9410 trace_printk_init_buffers();
9412 /* To save memory, keep the ring buffer size to its minimum */
9413 if (ring_buffer_expanded)
9414 ring_buf_size = trace_buf_size;
9418 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9419 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9421 raw_spin_lock_init(&global_trace.start_lock);
9424 * The prepare callback allocates some memory for the ring buffer. We
9425 * don't free the buffer if the CPU goes down. If we were to free
9426 * the buffer, then the user would lose any trace that was in the
9427 * buffer. The memory will be removed once the "instance" is removed.
9429 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9430 "trace/RB:preapre", trace_rb_cpu_prepare,
9433 goto out_free_cpumask;
9434 /* Used for event triggers */
9436 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9438 goto out_rm_hp_state;
9440 if (trace_create_savedcmd() < 0)
9441 goto out_free_temp_buffer;
9443 /* TODO: make the number of buffers hot pluggable with CPUS */
9444 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9445 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9446 goto out_free_savedcmd;
9449 if (global_trace.buffer_disabled)
9452 if (trace_boot_clock) {
9453 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9455 pr_warn("Trace clock %s not defined, going back to default\n",
9460 * register_tracer() might reference current_trace, so it
9461 * needs to be set before we register anything. This is
9462 * just a bootstrap of current_trace anyway.
9464 global_trace.current_trace = &nop_trace;
9466 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9468 ftrace_init_global_array_ops(&global_trace);
9470 init_trace_flags_index(&global_trace);
9472 register_tracer(&nop_trace);
9474 /* Function tracing may start here (via kernel command line) */
9475 init_function_trace();
9477 /* All seems OK, enable tracing */
9478 tracing_disabled = 0;
9480 atomic_notifier_chain_register(&panic_notifier_list,
9481 &trace_panic_notifier);
9483 register_die_notifier(&trace_die_notifier);
9485 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9487 INIT_LIST_HEAD(&global_trace.systems);
9488 INIT_LIST_HEAD(&global_trace.events);
9489 INIT_LIST_HEAD(&global_trace.hist_vars);
9490 INIT_LIST_HEAD(&global_trace.err_log);
9491 list_add(&global_trace.list, &ftrace_trace_arrays);
9493 apply_trace_boot_options();
9495 register_snapshot_cmd();
9500 free_saved_cmdlines_buffer(savedcmd);
9501 out_free_temp_buffer:
9502 ring_buffer_free(temp_buffer);
9504 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9506 free_cpumask_var(global_trace.tracing_cpumask);
9507 out_free_buffer_mask:
9508 free_cpumask_var(tracing_buffer_mask);
9513 void __init early_trace_init(void)
9515 if (tracepoint_printk) {
9516 tracepoint_print_iter =
9517 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9518 if (MEM_FAIL(!tracepoint_print_iter,
9519 "Failed to allocate trace iterator\n"))
9520 tracepoint_printk = 0;
9522 static_key_enable(&tracepoint_printk_key.key);
9524 tracer_alloc_buffers();
9527 void __init trace_init(void)
9532 __init static int clear_boot_tracer(void)
9535 * The default bootup tracer name points into an init section.
9536 * This function is called at late init; if we did not
9537 * find the boot tracer, clear the pointer to prevent a
9538 * later registration from accessing the buffer that is
9539 * about to be freed.
9541 if (!default_bootup_tracer)
9544 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9545 default_bootup_tracer);
9546 default_bootup_tracer = NULL;
9551 fs_initcall(tracer_init_tracefs);
9552 late_initcall_sync(clear_boot_tracer);
9554 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9555 __init static int tracing_set_default_clock(void)
9557 /* sched_clock_stable() is determined in late_initcall */
9558 if (!trace_boot_clock && !sched_clock_stable()) {
9559 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9560 pr_warn("Can not set tracing clock due to lockdown\n");
9565 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9566 "If you want to keep using the local clock, then add:\n"
9567 " \"trace_clock=local\"\n"
9568 "on the kernel command line\n");
9569 tracing_set_clock(&global_trace, "global");
9574 late_initcall_sync(tracing_set_default_clock);