/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
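
/*
 * For instance, booting with "ftrace=function" on the kernel command
 * line selects the function tracer at startup (assuming that tracer
 * is built in) and expands the ring buffer early.
 */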

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
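
/*
 * For instance, "ftrace_dump_on_oops" alone dumps every CPU's buffer
 * on an oops, while "ftrace_dump_on_oops=orig_cpu" dumps only the
 * buffer of the CPU that triggered the oops.
 */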

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
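
/*
 * The +500 rounds half up to the nearest microsecond: for example,
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */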

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
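
/*
 * Note: a successful trace_array_get() takes a reference that must be
 * balanced by a later trace_array_put(), or the trace array's refcount
 * will never drop back down.
 */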

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
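
/*
 * These helpers are meant to back the seq_file operations of a
 * pid-filter file. A minimal sketch, where my_pids_start/next/stop are
 * hypothetical wrappers that resolve the pid_list from the seq_file
 * private data before calling trace_pid_start()/trace_pid_next():
 *
 *	static const struct seq_operations pid_list_seq_ops = {
 *		.start	= my_pids_start,
 *		.next	= my_pids_next,
 *		.stop	= my_pids_stop,
 *		.show	= trace_pid_show,
 *	};
 */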

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		/* don't leak the parser buffer on the early error paths */
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
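
/*
 * The input is parsed as whitespace-separated pid numbers, so a filter
 * can be set from user space with e.g. "echo 123 456 > set_event_pid";
 * an empty write clears the list.
 */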

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
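
/*
 * A reader typically brackets its buffer access with these, e.g.:
 *
 *	trace_access_lock(cpu);
 *	... consume events from the cpu's (or every) ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * where cpu is either a CPU id or RING_BUFFER_ALL_CPUS.
 */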

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
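
/*
 * Note: callers normally reach this via the trace_puts() macro in
 * <linux/kernel.h>, which picks __trace_bputs() for compile-time
 * constant strings and falls back to __trace_puts() otherwise.
 */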

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
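
/*
 * A typical pattern (sketch): allocate the snapshot buffer once from a
 * context that may sleep, e.g. during setup:
 *
 *	tracing_alloc_snapshot();
 *
 * and later, possibly from atomic context, capture the buffer at the
 * point of interest:
 *
 *	tracing_snapshot();
 */
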
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
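
/*
 * memparse() accepts K/M/G suffixes, so for example booting with
 * "trace_buf_size=4M" requests a 4 MiB buffer for each CPU.
 */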

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
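
/*
 * The active clock is chosen through the "trace_clock" tracefs file;
 * for example "echo global > trace_clock" selects a clock whose
 * timestamps are comparable across CPUs.
 */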

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
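
/*
 * For example, a write of "123 456\n" is consumed one token per call:
 * the first call fills the parser with "123", the next with "456".
 * A token split across two writes is carried over via parser->cont.
 */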

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1661
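/*
 * An illustrative sketch (not part of this file): the minimal shape of a
 * tracer registration. The names my_tracer/my_tracer_init are hypothetical.
 * Since register_tracer() is __init and not exported, a real tracer must be
 * built into the kernel and register itself from early init code:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "example",
 *	};
 *
 *	static int __init my_tracer_init(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_init);
 */
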
1662 void tracing_reset(struct trace_buffer *buf, int cpu)
1663 {
1664         struct ring_buffer *buffer = buf->buffer;
1665
1666         if (!buffer)
1667                 return;
1668
1669         ring_buffer_record_disable(buffer);
1670
1671         /* Make sure all commits have finished */
1672         synchronize_sched();
1673         ring_buffer_reset_cpu(buffer, cpu);
1674
1675         ring_buffer_record_enable(buffer);
1676 }
1677
1678 void tracing_reset_online_cpus(struct trace_buffer *buf)
1679 {
1680         struct ring_buffer *buffer = buf->buffer;
1681         int cpu;
1682
1683         if (!buffer)
1684                 return;
1685
1686         ring_buffer_record_disable(buffer);
1687
1688         /* Make sure all commits have finished */
1689         synchronize_sched();
1690
1691         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1692
1693         for_each_online_cpu(cpu)
1694                 ring_buffer_reset_cpu(buffer, cpu);
1695
1696         ring_buffer_record_enable(buffer);
1697 }
1698
1699 /* Must have trace_types_lock held */
1700 void tracing_reset_all_online_cpus(void)
1701 {
1702         struct trace_array *tr;
1703
1704         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1705                 if (!tr->clear_trace)
1706                         continue;
1707                 tr->clear_trace = false;
1708                 tracing_reset_online_cpus(&tr->trace_buffer);
1709 #ifdef CONFIG_TRACER_MAX_TRACE
1710                 tracing_reset_online_cpus(&tr->max_buffer);
1711 #endif
1712         }
1713 }
1714
1715 static int *tgid_map;
1716
1717 #define SAVED_CMDLINES_DEFAULT 128
1718 #define NO_CMDLINE_MAP UINT_MAX
1719 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1720 struct saved_cmdlines_buffer {
1721         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1722         unsigned *map_cmdline_to_pid;
1723         unsigned cmdline_num;
1724         int cmdline_idx;
1725         char *saved_cmdlines;
1726 };
1727 static struct saved_cmdlines_buffer *savedcmd;
1728
1729 /* temporary disable recording */
1730 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1731
1732 static inline char *get_saved_cmdlines(int idx)
1733 {
1734         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1735 }
1736
1737 static inline void set_cmdline(int idx, const char *cmdline)
1738 {
1739         memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1740 }
1741
1742 static int allocate_cmdlines_buffer(unsigned int val,
1743                                     struct saved_cmdlines_buffer *s)
1744 {
1745         s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1746                                         GFP_KERNEL);
1747         if (!s->map_cmdline_to_pid)
1748                 return -ENOMEM;
1749
1750         s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1751         if (!s->saved_cmdlines) {
1752                 kfree(s->map_cmdline_to_pid);
1753                 return -ENOMEM;
1754         }
1755
1756         s->cmdline_idx = 0;
1757         s->cmdline_num = val;
1758         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1759                sizeof(s->map_pid_to_cmdline));
1760         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1761                val * sizeof(*s->map_cmdline_to_pid));
1762
1763         return 0;
1764 }
1765
1766 static int trace_create_savedcmd(void)
1767 {
1768         int ret;
1769
1770         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1771         if (!savedcmd)
1772                 return -ENOMEM;
1773
1774         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1775         if (ret < 0) {
1776                 kfree(savedcmd);
1777                 savedcmd = NULL;
1778                 return -ENOMEM;
1779         }
1780
1781         return 0;
1782 }
1783
1784 int is_tracing_stopped(void)
1785 {
1786         return global_trace.stop_count;
1787 }
1788
1789 /**
1790  * tracing_start - quick start of the tracer
1791  *
1792  * If tracing is enabled but was stopped by tracing_stop,
1793  * this will start the tracer back up.
1794  */
1795 void tracing_start(void)
1796 {
1797         struct ring_buffer *buffer;
1798         unsigned long flags;
1799
1800         if (tracing_disabled)
1801                 return;
1802
1803         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1804         if (--global_trace.stop_count) {
1805                 if (global_trace.stop_count < 0) {
1806                         /* Someone screwed up their debugging */
1807                         WARN_ON_ONCE(1);
1808                         global_trace.stop_count = 0;
1809                 }
1810                 goto out;
1811         }
1812
1813         /* Prevent the buffers from switching */
1814         arch_spin_lock(&global_trace.max_lock);
1815
1816         buffer = global_trace.trace_buffer.buffer;
1817         if (buffer)
1818                 ring_buffer_record_enable(buffer);
1819
1820 #ifdef CONFIG_TRACER_MAX_TRACE
1821         buffer = global_trace.max_buffer.buffer;
1822         if (buffer)
1823                 ring_buffer_record_enable(buffer);
1824 #endif
1825
1826         arch_spin_unlock(&global_trace.max_lock);
1827
1828  out:
1829         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1830 }
1831
1832 static void tracing_start_tr(struct trace_array *tr)
1833 {
1834         struct ring_buffer *buffer;
1835         unsigned long flags;
1836
1837         if (tracing_disabled)
1838                 return;
1839
1840         /* If global, we need to also start the max tracer */
1841         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1842                 return tracing_start();
1843
1844         raw_spin_lock_irqsave(&tr->start_lock, flags);
1845
1846         if (--tr->stop_count) {
1847                 if (tr->stop_count < 0) {
1848                         /* Someone screwed up their debugging */
1849                         WARN_ON_ONCE(1);
1850                         tr->stop_count = 0;
1851                 }
1852                 goto out;
1853         }
1854
1855         buffer = tr->trace_buffer.buffer;
1856         if (buffer)
1857                 ring_buffer_record_enable(buffer);
1858
1859  out:
1860         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1861 }
1862
1863 /**
1864  * tracing_stop - quick stop of the tracer
1865  *
1866  * Light weight way to stop tracing. Use in conjunction with
1867  * tracing_start.
1868  */
1869 void tracing_stop(void)
1870 {
1871         struct ring_buffer *buffer;
1872         unsigned long flags;
1873
1874         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1875         if (global_trace.stop_count++)
1876                 goto out;
1877
1878         /* Prevent the buffers from switching */
1879         arch_spin_lock(&global_trace.max_lock);
1880
1881         buffer = global_trace.trace_buffer.buffer;
1882         if (buffer)
1883                 ring_buffer_record_disable(buffer);
1884
1885 #ifdef CONFIG_TRACER_MAX_TRACE
1886         buffer = global_trace.max_buffer.buffer;
1887         if (buffer)
1888                 ring_buffer_record_disable(buffer);
1889 #endif
1890
1891         arch_spin_unlock(&global_trace.max_lock);
1892
1893  out:
1894         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1895 }
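
/*
 * An illustrative sketch (not part of this file): tracing_stop() and
 * tracing_start() nest via stop_count, so a debugging site can freeze the
 * ring buffer around a region of interest. do_debug_dump() is a
 * hypothetical placeholder for whatever inspects the frozen trace.
 *
 *	tracing_stop();
 *	do_debug_dump();	// e.g. read out the preserved buffer
 *	tracing_start();
 */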
1896
1897 static void tracing_stop_tr(struct trace_array *tr)
1898 {
1899         struct ring_buffer *buffer;
1900         unsigned long flags;
1901
1902         /* If global, we need to also stop the max tracer */
1903         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1904                 return tracing_stop();
1905
1906         raw_spin_lock_irqsave(&tr->start_lock, flags);
1907         if (tr->stop_count++)
1908                 goto out;
1909
1910         buffer = tr->trace_buffer.buffer;
1911         if (buffer)
1912                 ring_buffer_record_disable(buffer);
1913
1914  out:
1915         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1916 }
1917
1918 static int trace_save_cmdline(struct task_struct *tsk)
1919 {
1920         unsigned pid, idx;
1921
1922         /* treat recording of idle task as a success */
1923         if (!tsk->pid)
1924                 return 1;
1925
1926         if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1927                 return 0;
1928
1929         /*
1930          * It's not the end of the world if we don't get
1931          * the lock, but we also don't want to spin
1932          * nor do we want to disable interrupts,
1933          * so if we miss here, then better luck next time.
1934          */
1935         if (!arch_spin_trylock(&trace_cmdline_lock))
1936                 return 0;
1937
1938         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1939         if (idx == NO_CMDLINE_MAP) {
1940                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1941
1942                 /*
1943                  * Check whether the cmdline buffer at idx has a pid
1944                  * mapped. We are going to overwrite that entry so we
1945                  * need to clear the map_pid_to_cmdline. Otherwise we
1946                  * would read the new comm for the old pid.
1947                  */
1948                 pid = savedcmd->map_cmdline_to_pid[idx];
1949                 if (pid != NO_CMDLINE_MAP)
1950                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1951
1952                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1953                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1954
1955                 savedcmd->cmdline_idx = idx;
1956         }
1957
1958         set_cmdline(idx, tsk->comm);
1959
1960         arch_spin_unlock(&trace_cmdline_lock);
1961
1962         return 1;
1963 }
1964
1965 static void __trace_find_cmdline(int pid, char comm[])
1966 {
1967         unsigned map;
1968
1969         if (!pid) {
1970                 strcpy(comm, "<idle>");
1971                 return;
1972         }
1973
1974         if (WARN_ON_ONCE(pid < 0)) {
1975                 strcpy(comm, "<XXX>");
1976                 return;
1977         }
1978
1979         if (pid > PID_MAX_DEFAULT) {
1980                 strcpy(comm, "<...>");
1981                 return;
1982         }
1983
1984         map = savedcmd->map_pid_to_cmdline[pid];
1985         if (map != NO_CMDLINE_MAP)
1986                 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1987         else
1988                 strcpy(comm, "<...>");
1989 }
1990
1991 void trace_find_cmdline(int pid, char comm[])
1992 {
1993         preempt_disable();
1994         arch_spin_lock(&trace_cmdline_lock);
1995
1996         __trace_find_cmdline(pid, comm);
1997
1998         arch_spin_unlock(&trace_cmdline_lock);
1999         preempt_enable();
2000 }
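
/*
 * An illustrative sketch (not part of this file): how output code resolves
 * a recorded pid back to a comm. The buffer must be TASK_COMM_LEN bytes;
 * pids that fell out of the cmdline cache come back as "<...>".
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */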
2001
2002 int trace_find_tgid(int pid)
2003 {
2004         if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2005                 return 0;
2006
2007         return tgid_map[pid];
2008 }
2009
2010 static int trace_save_tgid(struct task_struct *tsk)
2011 {
2012         /* treat recording of idle task as a success */
2013         if (!tsk->pid)
2014                 return 1;
2015
2016         if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2017                 return 0;
2018
2019         tgid_map[tsk->pid] = tsk->tgid;
2020         return 1;
2021 }
2022
2023 static bool tracing_record_taskinfo_skip(int flags)
2024 {
2025         if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2026                 return true;
2027         if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2028                 return true;
2029         if (!__this_cpu_read(trace_taskinfo_save))
2030                 return true;
2031         return false;
2032 }
2033
2034 /**
2035  * tracing_record_taskinfo - record the task info of a task
2036  *
2037  * @task: task to record
2038  * @flags: TRACE_RECORD_CMDLINE for recording comm
2039  *         TRACE_RECORD_TGID for recording tgid
2040  */
2041 void tracing_record_taskinfo(struct task_struct *task, int flags)
2042 {
2043         bool done;
2044
2045         if (tracing_record_taskinfo_skip(flags))
2046                 return;
2047
2048         /*
2049          * Record as much task information as possible. If some fail, continue
2050          * to try to record the others.
2051          */
2052         done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2053         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2054
2055         /* If recording any information failed, try again soon. */
2056         if (!done)
2057                 return;
2058
2059         __this_cpu_write(trace_taskinfo_save, false);
2060 }
2061
2062 /**
2063  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2064  *
2065  * @prev: previous task during sched_switch
2066  * @next: next task during sched_switch
2067  * @flags: TRACE_RECORD_CMDLINE for recording comm
2068  *         TRACE_RECORD_TGID for recording tgid
2069  */
2070 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2071                                           struct task_struct *next, int flags)
2072 {
2073         bool done;
2074
2075         if (tracing_record_taskinfo_skip(flags))
2076                 return;
2077
2078         /*
2079          * Record as much task information as possible. If some fail, continue
2080          * to try to record the others.
2081          */
2082         done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2083         done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2084         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2085         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2086
2087         /* If recording any information failed, try again soon. */
2088         if (!done)
2089                 return;
2090
2091         __this_cpu_write(trace_taskinfo_save, false);
2092 }
2093
2094 /* Helpers to record a specific task information */
2095 void tracing_record_cmdline(struct task_struct *task)
2096 {
2097         tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2098 }
2099
2100 void tracing_record_tgid(struct task_struct *task)
2101 {
2102         tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2103 }
2104
2105 /*
2106  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2107  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2108  * simplifies those functions and keeps them in sync.
2109  */
2110 enum print_line_t trace_handle_return(struct trace_seq *s)
2111 {
2112         return trace_seq_has_overflowed(s) ?
2113                 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2114 }
2115 EXPORT_SYMBOL_GPL(trace_handle_return);
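
/*
 * An illustrative sketch (not part of this file): a typical event output
 * callback ends with trace_handle_return(), so an overflowed trace_seq
 * maps to TRACE_TYPE_PARTIAL_LINE. trace_example_print() is hypothetical.
 *
 *	static enum print_line_t
 *	trace_example_print(struct trace_iterator *iter, int flags,
 *			    struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */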
2116
2117 void
2118 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2119                              int pc)
2120 {
2121         struct task_struct *tsk = current;
2122
2123         entry->preempt_count            = pc & 0xff;
2124         entry->pid                      = (tsk) ? tsk->pid : 0;
2125         entry->flags =
2126 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2127                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2128 #else
2129                 TRACE_FLAG_IRQS_NOSUPPORT |
2130 #endif
2131                 ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
2132                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2133                 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2134                 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2135                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2136 }
2137 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
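
/*
 * Roughly how these bits surface to users: the latency format decodes
 * them into the column letters shown by print_lat_help_header() below,
 * e.g. (simplified):
 *
 *	TRACE_FLAG_IRQS_OFF		-> 'd' (irqs-off)
 *	TRACE_FLAG_NEED_RESCHED		-> 'N' (need-resched)
 *	TRACE_FLAG_HARDIRQ/SOFTIRQ	-> 'h'/'s' (hardirq/softirq)
 *	preempt_count (low byte)	-> the preempt-depth digit
 */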
2138
2139 struct ring_buffer_event *
2140 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2141                           int type,
2142                           unsigned long len,
2143                           unsigned long flags, int pc)
2144 {
2145         return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2146 }
2147
2148 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2149 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2150 static int trace_buffered_event_ref;
2151
2152 /**
2153  * trace_buffered_event_enable - enable buffering events
2154  *
2155  * When events are being filtered, it is quicker to write the event
2156  * data into a temporary buffer if there's a likely chance that it
2157  * will not be committed. Discarding an event from the ring buffer
2158  * is not as fast as committing one, and is much slower than copying
2159  * the data and then committing it.
2160  *
2161  * When an event is to be filtered, allocate per cpu buffers to
2162  * write the event data into; if the event is filtered and discarded
2163  * it is simply dropped, otherwise the entire data is committed
2164  * in one shot.
2165  */
2166 void trace_buffered_event_enable(void)
2167 {
2168         struct ring_buffer_event *event;
2169         struct page *page;
2170         int cpu;
2171
2172         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2173
2174         if (trace_buffered_event_ref++)
2175                 return;
2176
2177         for_each_tracing_cpu(cpu) {
2178                 page = alloc_pages_node(cpu_to_node(cpu),
2179                                         GFP_KERNEL | __GFP_NORETRY, 0);
2180                 if (!page)
2181                         goto failed;
2182
2183                 event = page_address(page);
2184                 memset(event, 0, sizeof(*event));
2185
2186                 per_cpu(trace_buffered_event, cpu) = event;
2187
2188                 preempt_disable();
2189                 if (cpu == smp_processor_id() &&
2190                     this_cpu_read(trace_buffered_event) !=
2191                     per_cpu(trace_buffered_event, cpu))
2192                         WARN_ON_ONCE(1);
2193                 preempt_enable();
2194         }
2195
2196         return;
2197  failed:
2198         trace_buffered_event_disable();
2199 }
2200
2201 static void enable_trace_buffered_event(void *data)
2202 {
2203         /* Probably not needed, but do it anyway */
2204         smp_rmb();
2205         this_cpu_dec(trace_buffered_event_cnt);
2206 }
2207
2208 static void disable_trace_buffered_event(void *data)
2209 {
2210         this_cpu_inc(trace_buffered_event_cnt);
2211 }
2212
2213 /**
2214  * trace_buffered_event_disable - disable buffering events
2215  *
2216  * When a filter is removed, it is faster to not use the buffered
2217  * events, and to commit directly into the ring buffer. Free up
2218  * the temp buffers when there are no more users. This requires
2219  * special synchronization with current events.
2220  */
2221 void trace_buffered_event_disable(void)
2222 {
2223         int cpu;
2224
2225         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2226
2227         if (WARN_ON_ONCE(!trace_buffered_event_ref))
2228                 return;
2229
2230         if (--trace_buffered_event_ref)
2231                 return;
2232
2233         preempt_disable();
2234         /* For each CPU, mark the buffer as busy so it will not be used. */
2235         smp_call_function_many(tracing_buffer_mask,
2236                                disable_trace_buffered_event, NULL, 1);
2237         preempt_enable();
2238
2239         /* Wait for all current users to finish */
2240         synchronize_sched();
2241
2242         for_each_tracing_cpu(cpu) {
2243                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2244                 per_cpu(trace_buffered_event, cpu) = NULL;
2245         }
2246         /*
2247          * Make sure trace_buffered_event is NULL before clearing
2248          * trace_buffered_event_cnt.
2249          */
2250         smp_wmb();
2251
2252         preempt_disable();
2253         /* Do the work on each cpu */
2254         smp_call_function_many(tracing_buffer_mask,
2255                                enable_trace_buffered_event, NULL, 1);
2256         preempt_enable();
2257 }
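
/*
 * An illustrative sketch (not part of this file): filter setup code pairs
 * these calls under event_mutex, so the per-cpu scratch events exist
 * exactly while at least one filter is installed.
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	// ... install the event filter ...
 *	mutex_unlock(&event_mutex);
 *
 *	// and on filter removal:
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */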
2258
2259 static struct ring_buffer *temp_buffer;
2260
2261 struct ring_buffer_event *
2262 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2263                           struct trace_event_file *trace_file,
2264                           int type, unsigned long len,
2265                           unsigned long flags, int pc)
2266 {
2267         struct ring_buffer_event *entry;
2268         int val;
2269
2270         *current_rb = trace_file->tr->trace_buffer.buffer;
2271
2272         if ((trace_file->flags &
2273              (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2274             (entry = this_cpu_read(trace_buffered_event))) {
2275                 /* Try to use the per cpu buffer first */
2276                 val = this_cpu_inc_return(trace_buffered_event_cnt);
2277                 if (val == 1) {
2278                         trace_event_setup(entry, type, flags, pc);
2279                         entry->array[0] = len;
2280                         return entry;
2281                 }
2282                 this_cpu_dec(trace_buffered_event_cnt);
2283         }
2284
2285         entry = __trace_buffer_lock_reserve(*current_rb,
2286                                             type, len, flags, pc);
2287         /*
2288          * If tracing is off, but we have triggers enabled
2289          * we still need to look at the event data. Use the temp_buffer
2290          * to store the trace event for the trigger to use. It's recursion
2291          * safe and will not be recorded anywhere.
2292          */
2293         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2294                 *current_rb = temp_buffer;
2295                 entry = __trace_buffer_lock_reserve(*current_rb,
2296                                                     type, len, flags, pc);
2297         }
2298         return entry;
2299 }
2300 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2301
2302 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2303 static DEFINE_MUTEX(tracepoint_printk_mutex);
2304
2305 static void output_printk(struct trace_event_buffer *fbuffer)
2306 {
2307         struct trace_event_call *event_call;
2308         struct trace_event *event;
2309         unsigned long flags;
2310         struct trace_iterator *iter = tracepoint_print_iter;
2311
2312         /* We should never get here if iter is NULL */
2313         if (WARN_ON_ONCE(!iter))
2314                 return;
2315
2316         event_call = fbuffer->trace_file->event_call;
2317         if (!event_call || !event_call->event.funcs ||
2318             !event_call->event.funcs->trace)
2319                 return;
2320
2321         event = &fbuffer->trace_file->event_call->event;
2322
2323         spin_lock_irqsave(&tracepoint_iter_lock, flags);
2324         trace_seq_init(&iter->seq);
2325         iter->ent = fbuffer->entry;
2326         event_call->event.funcs->trace(iter, 0, event);
2327         trace_seq_putc(&iter->seq, 0);
2328         printk("%s", iter->seq.buffer);
2329
2330         spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2331 }
2332
2333 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2334                              void __user *buffer, size_t *lenp,
2335                              loff_t *ppos)
2336 {
2337         int save_tracepoint_printk;
2338         int ret;
2339
2340         mutex_lock(&tracepoint_printk_mutex);
2341         save_tracepoint_printk = tracepoint_printk;
2342
2343         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2344
2345         /*
2346          * This will force exiting early, as tracepoint_printk
2347          * is always zero when tracepoint_print_iter is not allocated
2348          */
2349         if (!tracepoint_print_iter)
2350                 tracepoint_printk = 0;
2351
2352         if (save_tracepoint_printk == tracepoint_printk)
2353                 goto out;
2354
2355         if (tracepoint_printk)
2356                 static_key_enable(&tracepoint_printk_key.key);
2357         else
2358                 static_key_disable(&tracepoint_printk_key.key);
2359
2360  out:
2361         mutex_unlock(&tracepoint_printk_mutex);
2362
2363         return ret;
2364 }
2365
2366 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2367 {
2368         if (static_key_false(&tracepoint_printk_key.key))
2369                 output_printk(fbuffer);
2370
2371         event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2372                                     fbuffer->event, fbuffer->entry,
2373                                     fbuffer->flags, fbuffer->pc);
2374 }
2375 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376
2377 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378                                      struct ring_buffer *buffer,
2379                                      struct ring_buffer_event *event,
2380                                      unsigned long flags, int pc,
2381                                      struct pt_regs *regs)
2382 {
2383         __buffer_unlock_commit(buffer, event);
2384
2385         /*
2386          * If regs is not set, then skip the following callers:
2387          *   trace_buffer_unlock_commit_regs
2388          *   event_trigger_unlock_commit
2389          *   trace_event_buffer_commit
2390          *   trace_event_raw_event_sched_switch
2391          * Note, we can still get here via blktrace, wakeup tracer
2392          * and mmiotrace, but that's ok if they lose a function or
2393          * two. They are not that meaningful.
2394          */
2395         ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2396         ftrace_trace_userstack(buffer, flags, pc);
2397 }
2398
2399 /*
2400  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2401  */
2402 void
2403 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2404                                    struct ring_buffer_event *event)
2405 {
2406         __buffer_unlock_commit(buffer, event);
2407 }
2408
2409 static void
2410 trace_process_export(struct trace_export *export,
2411                struct ring_buffer_event *event)
2412 {
2413         struct trace_entry *entry;
2414         unsigned int size = 0;
2415
2416         entry = ring_buffer_event_data(event);
2417         size = ring_buffer_event_length(event);
2418         export->write(entry, size);
2419 }
2420
2421 static DEFINE_MUTEX(ftrace_export_lock);
2422
2423 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2424
2425 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2426
2427 static inline void ftrace_exports_enable(void)
2428 {
2429         static_branch_enable(&ftrace_exports_enabled);
2430 }
2431
2432 static inline void ftrace_exports_disable(void)
2433 {
2434         static_branch_disable(&ftrace_exports_enabled);
2435 }
2436
2437 void ftrace_exports(struct ring_buffer_event *event)
2438 {
2439         struct trace_export *export;
2440
2441         preempt_disable_notrace();
2442
2443         export = rcu_dereference_raw_notrace(ftrace_exports_list);
2444         while (export) {
2445                 trace_process_export(export, event);
2446                 export = rcu_dereference_raw_notrace(export->next);
2447         }
2448
2449         preempt_enable_notrace();
2450 }
2451
2452 static inline void
2453 add_trace_export(struct trace_export **list, struct trace_export *export)
2454 {
2455         rcu_assign_pointer(export->next, *list);
2456         /*
2457          * We are entering export into the list but another
2458          * CPU might be walking that list. We need to make sure
2459          * the export->next pointer is valid before another CPU sees
2460          * the export pointer inserted into the list.
2461          */
2462         rcu_assign_pointer(*list, export);
2463 }
2464
2465 static inline int
2466 rm_trace_export(struct trace_export **list, struct trace_export *export)
2467 {
2468         struct trace_export **p;
2469
2470         for (p = list; *p != NULL; p = &(*p)->next)
2471                 if (*p == export)
2472                         break;
2473
2474         if (*p != export)
2475                 return -1;
2476
2477         rcu_assign_pointer(*p, (*p)->next);
2478
2479         return 0;
2480 }
2481
2482 static inline void
2483 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2484 {
2485         if (*list == NULL)
2486                 ftrace_exports_enable();
2487
2488         add_trace_export(list, export);
2489 }
2490
2491 static inline int
2492 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2493 {
2494         int ret;
2495
2496         ret = rm_trace_export(list, export);
2497         if (*list == NULL)
2498                 ftrace_exports_disable();
2499
2500         return ret;
2501 }
2502
2503 int register_ftrace_export(struct trace_export *export)
2504 {
2505         if (WARN_ON_ONCE(!export->write))
2506                 return -1;
2507
2508         mutex_lock(&ftrace_export_lock);
2509
2510         add_ftrace_export(&ftrace_exports_list, export);
2511
2512         mutex_unlock(&ftrace_export_lock);
2513
2514         return 0;
2515 }
2516 EXPORT_SYMBOL_GPL(register_ftrace_export);
2517
2518 int unregister_ftrace_export(struct trace_export *export)
2519 {
2520         int ret;
2521
2522         mutex_lock(&ftrace_export_lock);
2523
2524         ret = rm_ftrace_export(&ftrace_exports_list, export);
2525
2526         mutex_unlock(&ftrace_export_lock);
2527
2528         return ret;
2529 }
2530 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
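
/*
 * An illustrative sketch (not part of this file): an exporter mirrors raw
 * function-trace entries to a side channel (the in-tree user is the STM
 * ftrace bridge). my_export/my_export_write() are hypothetical; the
 * callback signature matches the export->write(entry, size) call above.
 *
 *	static void my_export_write(const void *buf, unsigned int len)
 *	{
 *		// push the raw ftrace entry to some transport
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	// ...
 *	unregister_ftrace_export(&my_export);
 */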
2531
2532 void
2533 trace_function(struct trace_array *tr,
2534                unsigned long ip, unsigned long parent_ip, unsigned long flags,
2535                int pc)
2536 {
2537         struct trace_event_call *call = &event_function;
2538         struct ring_buffer *buffer = tr->trace_buffer.buffer;
2539         struct ring_buffer_event *event;
2540         struct ftrace_entry *entry;
2541
2542         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2543                                             flags, pc);
2544         if (!event)
2545                 return;
2546         entry   = ring_buffer_event_data(event);
2547         entry->ip                       = ip;
2548         entry->parent_ip                = parent_ip;
2549
2550         if (!call_filter_check_discard(call, entry, buffer, event)) {
2551                 if (static_branch_unlikely(&ftrace_exports_enabled))
2552                         ftrace_exports(event);
2553                 __buffer_unlock_commit(buffer, event);
2554         }
2555 }
2556
2557 #ifdef CONFIG_STACKTRACE
2558
2559 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2560 struct ftrace_stack {
2561         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
2562 };
2563
2564 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2565 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2566
2567 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2568                                  unsigned long flags,
2569                                  int skip, int pc, struct pt_regs *regs)
2570 {
2571         struct trace_event_call *call = &event_kernel_stack;
2572         struct ring_buffer_event *event;
2573         struct stack_entry *entry;
2574         struct stack_trace trace;
2575         int use_stack;
2576         int size = FTRACE_STACK_ENTRIES;
2577
2578         trace.nr_entries        = 0;
2579         trace.skip              = skip;
2580
2581         /*
2582          * Add two, for this function and the call to save_stack_trace()
2583          * If regs is set, then these functions will not be in the way.
2584          */
2585         if (!regs)
2586                 trace.skip += 2;
2587
2588         /*
2589          * Since events can happen in NMIs there's no safe way to
2590          * use the per cpu ftrace_stacks. We reserve it, and if an interrupt
2591          * or NMI comes in, it will just have to use the default
2592          * FTRACE_STACK_ENTRIES.
2593          */
2594         preempt_disable_notrace();
2595
2596         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2597         /*
2598          * We don't need any atomic variables, just a barrier.
2599          * If an interrupt comes in, we don't care, because it would
2600          * have exited and put the counter back to what we want.
2601          * We just need a barrier to keep gcc from moving things
2602          * around.
2603          */
2604         barrier();
2605         if (use_stack == 1) {
2606                 trace.entries           = this_cpu_ptr(ftrace_stack.calls);
2607                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
2608
2609                 if (regs)
2610                         save_stack_trace_regs(regs, &trace);
2611                 else
2612                         save_stack_trace(&trace);
2613
2614                 if (trace.nr_entries > size)
2615                         size = trace.nr_entries;
2616         } else
2617                 /* From now on, use_stack is a boolean */
2618                 use_stack = 0;
2619
2620         size *= sizeof(unsigned long);
2621
2622         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2623                                             sizeof(*entry) + size, flags, pc);
2624         if (!event)
2625                 goto out;
2626         entry = ring_buffer_event_data(event);
2627
2628         memset(&entry->caller, 0, size);
2629
2630         if (use_stack)
2631                 memcpy(&entry->caller, trace.entries,
2632                        trace.nr_entries * sizeof(unsigned long));
2633         else {
2634                 trace.max_entries       = FTRACE_STACK_ENTRIES;
2635                 trace.entries           = entry->caller;
2636                 if (regs)
2637                         save_stack_trace_regs(regs, &trace);
2638                 else
2639                         save_stack_trace(&trace);
2640         }
2641
2642         entry->size = trace.nr_entries;
2643
2644         if (!call_filter_check_discard(call, entry, buffer, event))
2645                 __buffer_unlock_commit(buffer, event);
2646
2647  out:
2648         /* Again, don't let gcc optimize things here */
2649         barrier();
2650         __this_cpu_dec(ftrace_stack_reserve);
2651         preempt_enable_notrace();
2652
2653 }
2654
2655 static inline void ftrace_trace_stack(struct trace_array *tr,
2656                                       struct ring_buffer *buffer,
2657                                       unsigned long flags,
2658                                       int skip, int pc, struct pt_regs *regs)
2659 {
2660         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2661                 return;
2662
2663         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2664 }
2665
2666 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2667                    int pc)
2668 {
2669         struct ring_buffer *buffer = tr->trace_buffer.buffer;
2670
2671         if (rcu_is_watching()) {
2672                 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2673                 return;
2674         }
2675
2676         /*
2677          * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2678          * but if the above rcu_is_watching() failed, then the NMI
2679          * triggered someplace critical, and rcu_irq_enter() should
2680          * not be called from NMI.
2681          */
2682         if (unlikely(in_nmi()))
2683                 return;
2684
2685         /*
2686          * It is possible that a function is being traced in a
2687          * location that RCU is not watching. A call to
2688          * rcu_irq_enter() will make sure that it is, but there are
2689          * a few internal rcu functions that could be traced
2690          * where that won't work either. In those cases, we just
2691          * do nothing.
2692          */
2693         if (unlikely(rcu_irq_enter_disabled()))
2694                 return;
2695
2696         rcu_irq_enter_irqson();
2697         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698         rcu_irq_exit_irqson();
2699 }
2700
2701 /**
2702  * trace_dump_stack - record a stack back trace in the trace buffer
2703  * @skip: Number of functions to skip (helper handlers)
2704  */
2705 void trace_dump_stack(int skip)
2706 {
2707         unsigned long flags;
2708
2709         if (tracing_disabled || tracing_selftest_running)
2710                 return;
2711
2712         local_save_flags(flags);
2713
2714         /*
2715          * Skip 3 more; this seems to get us to the caller of
2716          * this function.
2717          */
2718         skip += 3;
2719         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720                              flags, skip, preempt_count(), NULL);
2721 }
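
/*
 * An illustrative sketch (not part of this file): dropping
 * trace_dump_stack(0) into a suspect path records the call chain in the
 * trace buffer without the noise of a full stack tracer.
 *
 *	if (suspicious_condition)	// hypothetical predicate
 *		trace_dump_stack(0);
 */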
2722
2723 static DEFINE_PER_CPU(int, user_stack_count);
2724
2725 void
2726 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2727 {
2728         struct trace_event_call *call = &event_user_stack;
2729         struct ring_buffer_event *event;
2730         struct userstack_entry *entry;
2731         struct stack_trace trace;
2732
2733         if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2734                 return;
2735
2736         /*
2737          * NMIs cannot handle page faults, even with fixups.
2738          * Saving the user stack can (and often does) fault.
2739          */
2740         if (unlikely(in_nmi()))
2741                 return;
2742
2743         /*
2744          * prevent recursion, since the user stack tracing may
2745          * trigger other kernel events.
2746          */
2747         preempt_disable();
2748         if (__this_cpu_read(user_stack_count))
2749                 goto out;
2750
2751         __this_cpu_inc(user_stack_count);
2752
2753         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2754                                             sizeof(*entry), flags, pc);
2755         if (!event)
2756                 goto out_drop_count;
2757         entry   = ring_buffer_event_data(event);
2758
2759         entry->tgid             = current->tgid;
2760         memset(&entry->caller, 0, sizeof(entry->caller));
2761
2762         trace.nr_entries        = 0;
2763         trace.max_entries       = FTRACE_STACK_ENTRIES;
2764         trace.skip              = 0;
2765         trace.entries           = entry->caller;
2766
2767         save_stack_trace_user(&trace);
2768         if (!call_filter_check_discard(call, entry, buffer, event))
2769                 __buffer_unlock_commit(buffer, event);
2770
2771  out_drop_count:
2772         __this_cpu_dec(user_stack_count);
2773  out:
2774         preempt_enable();
2775 }
2776
2777 #ifdef UNUSED
2778 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2779 {
2780         ftrace_trace_userstack(tr, flags, preempt_count());
2781 }
2782 #endif /* UNUSED */
2783
2784 #endif /* CONFIG_STACKTRACE */
2785
2786 /* created for use with alloc_percpu */
2787 struct trace_buffer_struct {
2788         int nesting;
2789         char buffer[4][TRACE_BUF_SIZE];
2790 };
2791
2792 static struct trace_buffer_struct *trace_percpu_buffer;
2793
2794 /*
2795  * This allows for lockless recording. If we're nested too deeply, then
2796  * this returns NULL.
2797  */
2798 static char *get_trace_buf(void)
2799 {
2800         struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2801
2802         if (!buffer || buffer->nesting >= 4)
2803                 return NULL;
2804
2805         buffer->nesting++;
2806
2807         /* Interrupts must see nesting incremented before we use the buffer */
2808         barrier();
2809         return &buffer->buffer[buffer->nesting][0];
2810 }
2811
2812 static void put_trace_buf(void)
2813 {
2814         /* Don't let the decrement of nesting leak before this */
2815         barrier();
2816         this_cpu_dec(trace_percpu_buffer->nesting);
2817 }
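
/*
 * An illustrative sketch (not part of this file): callers bracket their
 * use of the scratch buffer with get/put, as trace_vbprintk() below does.
 * The four nesting levels cover normal, softirq, hardirq and NMI context
 * interrupting one another on the same CPU.
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;	// nested more than 4 deep
 *	// ... format the message into tbuffer ...
 *	put_trace_buf();
 */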
2818
2819 static int alloc_percpu_trace_buffer(void)
2820 {
2821         struct trace_buffer_struct *buffers;
2822
2823         buffers = alloc_percpu(struct trace_buffer_struct);
2824         if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2825                 return -ENOMEM;
2826
2827         trace_percpu_buffer = buffers;
2828         return 0;
2829 }
2830
2831 static int buffers_allocated;
2832
2833 void trace_printk_init_buffers(void)
2834 {
2835         if (buffers_allocated)
2836                 return;
2837
2838         if (alloc_percpu_trace_buffer())
2839                 return;
2840
2841         /* trace_printk() is for debug use only. Don't use it in production. */
2842
2843         pr_warn("\n");
2844         pr_warn("**********************************************************\n");
2845         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2846         pr_warn("**                                                      **\n");
2847         pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
2848         pr_warn("**                                                      **\n");
2849         pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
2850         pr_warn("** unsafe for production use.                           **\n");
2851         pr_warn("**                                                      **\n");
2852         pr_warn("** If you see this message and you are not debugging    **\n");
2853         pr_warn("** the kernel, report this immediately to your vendor!  **\n");
2854         pr_warn("**                                                      **\n");
2855         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2856         pr_warn("**********************************************************\n");
2857
2858         /* Expand the buffers to set size */
2859         tracing_update_buffers();
2860
2861         buffers_allocated = 1;
2862
2863         /*
2864          * trace_printk_init_buffers() can be called by modules.
2865          * If that happens, then we need to start cmdline recording
2866          * directly here. If global_trace.trace_buffer.buffer is already
2867          * allocated here, then this was called by module code.
2868          */
2869         if (global_trace.trace_buffer.buffer)
2870                 tracing_start_cmdline_record();
2871 }
2872
2873 void trace_printk_start_comm(void)
2874 {
2875         /* Start tracing comms if trace printk is set */
2876         if (!buffers_allocated)
2877                 return;
2878         tracing_start_cmdline_record();
2879 }
2880
2881 static void trace_printk_start_stop_comm(int enabled)
2882 {
2883         if (!buffers_allocated)
2884                 return;
2885
2886         if (enabled)
2887                 tracing_start_cmdline_record();
2888         else
2889                 tracing_stop_cmdline_record();
2890 }
2891
2892 /**
2893  * trace_vbprintk - write binary msg to tracing buffer
2894  * @ip: address of the caller, @fmt: format string, @args: arguments for @fmt
2895  */
2896 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2897 {
2898         struct trace_event_call *call = &event_bprint;
2899         struct ring_buffer_event *event;
2900         struct ring_buffer *buffer;
2901         struct trace_array *tr = &global_trace;
2902         struct bprint_entry *entry;
2903         unsigned long flags;
2904         char *tbuffer;
2905         int len = 0, size, pc;
2906
2907         if (unlikely(tracing_selftest_running || tracing_disabled))
2908                 return 0;
2909
2910         /* Don't pollute graph traces with trace_vprintk internals */
2911         pause_graph_tracing();
2912
2913         pc = preempt_count();
2914         preempt_disable_notrace();
2915
2916         tbuffer = get_trace_buf();
2917         if (!tbuffer) {
2918                 len = 0;
2919                 goto out_nobuffer;
2920         }
2921
2922         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2923
2924         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2925                 goto out;
2926
2927         local_save_flags(flags);
2928         size = sizeof(*entry) + sizeof(u32) * len;
2929         buffer = tr->trace_buffer.buffer;
2930         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2931                                             flags, pc);
2932         if (!event)
2933                 goto out;
2934         entry = ring_buffer_event_data(event);
2935         entry->ip                       = ip;
2936         entry->fmt                      = fmt;
2937
2938         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2939         if (!call_filter_check_discard(call, entry, buffer, event)) {
2940                 __buffer_unlock_commit(buffer, event);
2941                 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2942         }
2943
2944 out:
2945         put_trace_buf();
2946
2947 out_nobuffer:
2948         preempt_enable_notrace();
2949         unpause_graph_tracing();
2950
2951         return len;
2952 }
2953 EXPORT_SYMBOL_GPL(trace_vbprintk);
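
/*
 * An illustrative note (not from this file): a trace_printk() whose format
 * is a compile-time constant lands here, so only the fmt pointer plus the
 * vbin_printf()-packed arguments are stored; rendering the string is
 * deferred until the buffer is read.
 *
 *	trace_printk("id=%d comm=%s\n", id, comm);
 *		// records &fmt, the integer, and a copy of the string
 */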
2954
2955 static int
2956 __trace_array_vprintk(struct ring_buffer *buffer,
2957                       unsigned long ip, const char *fmt, va_list args)
2958 {
2959         struct trace_event_call *call = &event_print;
2960         struct ring_buffer_event *event;
2961         int len = 0, size, pc;
2962         struct print_entry *entry;
2963         unsigned long flags;
2964         char *tbuffer;
2965
2966         if (tracing_disabled || tracing_selftest_running)
2967                 return 0;
2968
2969         /* Don't pollute graph traces with trace_vprintk internals */
2970         pause_graph_tracing();
2971
2972         pc = preempt_count();
2973         preempt_disable_notrace();
2974
2976         tbuffer = get_trace_buf();
2977         if (!tbuffer) {
2978                 len = 0;
2979                 goto out_nobuffer;
2980         }
2981
2982         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2983
2984         local_save_flags(flags);
2985         size = sizeof(*entry) + len + 1;
2986         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2987                                             flags, pc);
2988         if (!event)
2989                 goto out;
2990         entry = ring_buffer_event_data(event);
2991         entry->ip = ip;
2992
2993         memcpy(&entry->buf, tbuffer, len + 1);
2994         if (!call_filter_check_discard(call, entry, buffer, event)) {
2995                 __buffer_unlock_commit(buffer, event);
2996                 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2997         }
2998
2999 out:
3000         put_trace_buf();
3001
3002 out_nobuffer:
3003         preempt_enable_notrace();
3004         unpause_graph_tracing();
3005
3006         return len;
3007 }
3008
3009 int trace_array_vprintk(struct trace_array *tr,
3010                         unsigned long ip, const char *fmt, va_list args)
3011 {
3012         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3013 }
3014
3015 int trace_array_printk(struct trace_array *tr,
3016                        unsigned long ip, const char *fmt, ...)
3017 {
3018         int ret;
3019         va_list ap;
3020
3021         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3022                 return 0;
3023
3024         va_start(ap, fmt);
3025         ret = trace_array_vprintk(tr, ip, fmt, ap);
3026         va_end(ap);
3027         return ret;
3028 }
3029
3030 int trace_array_printk_buf(struct ring_buffer *buffer,
3031                            unsigned long ip, const char *fmt, ...)
3032 {
3033         int ret;
3034         va_list ap;
3035
3036         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3037                 return 0;
3038
3039         va_start(ap, fmt);
3040         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3041         va_end(ap);
3042         return ret;
3043 }
3044
3045 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3046 {
3047         return trace_array_vprintk(&global_trace, ip, fmt, args);
3048 }
3049 EXPORT_SYMBOL_GPL(trace_vprintk);
3050
3051 static void trace_iterator_increment(struct trace_iterator *iter)
3052 {
3053         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3054
3055         iter->idx++;
3056         if (buf_iter)
3057                 ring_buffer_read(buf_iter, NULL);
3058 }
3059
3060 static struct trace_entry *
3061 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3062                 unsigned long *lost_events)
3063 {
3064         struct ring_buffer_event *event;
3065         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3066
3067         if (buf_iter)
3068                 event = ring_buffer_iter_peek(buf_iter, ts);
3069         else
3070                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3071                                          lost_events);
3072
3073         if (event) {
3074                 iter->ent_size = ring_buffer_event_length(event);
3075                 return ring_buffer_event_data(event);
3076         }
3077         iter->ent_size = 0;
3078         return NULL;
3079 }
3080
3081 static struct trace_entry *
3082 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3083                   unsigned long *missing_events, u64 *ent_ts)
3084 {
3085         struct ring_buffer *buffer = iter->trace_buffer->buffer;
3086         struct trace_entry *ent, *next = NULL;
3087         unsigned long lost_events = 0, next_lost = 0;
3088         int cpu_file = iter->cpu_file;
3089         u64 next_ts = 0, ts;
3090         int next_cpu = -1;
3091         int next_size = 0;
3092         int cpu;
3093
3094         /*
3095          * If we are in a per_cpu trace file, don't bother iterating over
3096          * all the CPUs; peek at that CPU directly.
3097          */
3098         if (cpu_file > RING_BUFFER_ALL_CPUS) {
3099                 if (ring_buffer_empty_cpu(buffer, cpu_file))
3100                         return NULL;
3101                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3102                 if (ent_cpu)
3103                         *ent_cpu = cpu_file;
3104
3105                 return ent;
3106         }
3107
3108         for_each_tracing_cpu(cpu) {
3109
3110                 if (ring_buffer_empty_cpu(buffer, cpu))
3111                         continue;
3112
3113                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3114
3115                 /*
3116                  * Pick the entry with the smallest timestamp:
3117                  */
3118                 if (ent && (!next || ts < next_ts)) {
3119                         next = ent;
3120                         next_cpu = cpu;
3121                         next_ts = ts;
3122                         next_lost = lost_events;
3123                         next_size = iter->ent_size;
3124                 }
3125         }
3126
3127         iter->ent_size = next_size;
3128
3129         if (ent_cpu)
3130                 *ent_cpu = next_cpu;
3131
3132         if (ent_ts)
3133                 *ent_ts = next_ts;
3134
3135         if (missing_events)
3136                 *missing_events = next_lost;
3137
3138         return next;
3139 }
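
/*
 * An illustrative walk-through (not part of this file): with per-cpu
 * timestamps cpu0 = {5, 9} and cpu1 = {3, 7}, successive calls yield the
 * entries stamped 3, 5, 7, 9: a k-way merge that always picks the
 * smallest head timestamp across the CPUs.
 */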
3140
3141 /* Find the next real entry, without updating the iterator itself */
3142 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3143                                           int *ent_cpu, u64 *ent_ts)
3144 {
3145         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3146 }
3147
3148 /* Find the next real entry, and increment the iterator to the next entry */
3149 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3150 {
3151         iter->ent = __find_next_entry(iter, &iter->cpu,
3152                                       &iter->lost_events, &iter->ts);
3153
3154         if (iter->ent)
3155                 trace_iterator_increment(iter);
3156
3157         return iter->ent ? iter : NULL;
3158 }
3159
3160 static void trace_consume(struct trace_iterator *iter)
3161 {
3162         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3163                             &iter->lost_events);
3164 }
3165
3166 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3167 {
3168         struct trace_iterator *iter = m->private;
3169         int i = (int)*pos;
3170         void *ent;
3171
3172         WARN_ON_ONCE(iter->leftover);
3173
3174         (*pos)++;
3175
3176         /* can't go backwards */
3177         if (iter->idx > i)
3178                 return NULL;
3179
3180         if (iter->idx < 0)
3181                 ent = trace_find_next_entry_inc(iter);
3182         else
3183                 ent = iter;
3184
3185         while (ent && iter->idx < i)
3186                 ent = trace_find_next_entry_inc(iter);
3187
3188         iter->pos = *pos;
3189
3190         return ent;
3191 }
3192
3193 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3194 {
3195         struct ring_buffer_event *event;
3196         struct ring_buffer_iter *buf_iter;
3197         unsigned long entries = 0;
3198         u64 ts;
3199
3200         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3201
3202         buf_iter = trace_buffer_iter(iter, cpu);
3203         if (!buf_iter)
3204                 return;
3205
3206         ring_buffer_iter_reset(buf_iter);
3207
3208         /*
3209          * With the max latency tracers, it is possible that a reset
3210          * never took place on a cpu. This is evident by the timestamp
3211          * being before the start of the buffer.
3212          */
3213         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3214                 if (ts >= iter->trace_buffer->time_start)
3215                         break;
3216                 entries++;
3217                 ring_buffer_read(buf_iter, NULL);
3218         }
3219
3220         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3221 }
3222
3223 /*
3224  * The current tracer is copied to avoid taking a global lock
3225  * all around.
3226  */
3227 static void *s_start(struct seq_file *m, loff_t *pos)
3228 {
3229         struct trace_iterator *iter = m->private;
3230         struct trace_array *tr = iter->tr;
3231         int cpu_file = iter->cpu_file;
3232         void *p = NULL;
3233         loff_t l = 0;
3234         int cpu;
3235
3236         /*
3237          * copy the tracer to avoid using a global lock all around.
3238          * iter->trace is a copy of current_trace; the pointer to the
3239          * name may be used instead of a strcmp(), as iter->trace->name
3240          * will point to the same string as current_trace->name.
3241          */
3242         mutex_lock(&trace_types_lock);
3243         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3244                 *iter->trace = *tr->current_trace;
3245         mutex_unlock(&trace_types_lock);
3246
3247 #ifdef CONFIG_TRACER_MAX_TRACE
3248         if (iter->snapshot && iter->trace->use_max_tr)
3249                 return ERR_PTR(-EBUSY);
3250 #endif
3251
3252         if (!iter->snapshot)
3253                 atomic_inc(&trace_record_taskinfo_disabled);
3254
3255         if (*pos != iter->pos) {
3256                 iter->ent = NULL;
3257                 iter->cpu = 0;
3258                 iter->idx = -1;
3259
3260                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3261                         for_each_tracing_cpu(cpu)
3262                                 tracing_iter_reset(iter, cpu);
3263                 } else
3264                         tracing_iter_reset(iter, cpu_file);
3265
3266                 iter->leftover = 0;
3267                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3268                         ;
3269
3270         } else {
3271                 /*
3272                  * If we overflowed the seq_file before, then we want
3273                  * to just reuse the trace_seq buffer again.
3274                  */
3275                 if (iter->leftover)
3276                         p = iter;
3277                 else {
3278                         l = *pos - 1;
3279                         p = s_next(m, p, &l);
3280                 }
3281         }
3282
3283         trace_event_read_lock();
3284         trace_access_lock(cpu_file);
3285         return p;
3286 }
3287
3288 static void s_stop(struct seq_file *m, void *p)
3289 {
3290         struct trace_iterator *iter = m->private;
3291
3292 #ifdef CONFIG_TRACER_MAX_TRACE
3293         if (iter->snapshot && iter->trace->use_max_tr)
3294                 return;
3295 #endif
3296
3297         if (!iter->snapshot)
3298                 atomic_dec(&trace_record_taskinfo_disabled);
3299
3300         trace_access_unlock(iter->cpu_file);
3301         trace_event_read_unlock();
3302 }
3303
3304 static void
3305 get_total_entries(struct trace_buffer *buf,
3306                   unsigned long *total, unsigned long *entries)
3307 {
3308         unsigned long count;
3309         int cpu;
3310
3311         *total = 0;
3312         *entries = 0;
3313
3314         for_each_tracing_cpu(cpu) {
3315                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3316                 /*
3317                  * If this buffer has skipped entries, then we hold all
3318                  * entries for the trace and we need to ignore the
3319                  * ones before the time stamp.
3320                  */
3321                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3322                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3323                         /* total is the same as the entries */
3324                         *total += count;
3325                 } else
3326                         *total += count +
3327                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
3328                 *entries += count;
3329         }
3330 }
3331
3332 static void print_lat_help_header(struct seq_file *m)
3333 {
3334         seq_puts(m, "#                  _------=> CPU#            \n"
3335                     "#                 / _-----=> irqs-off        \n"
3336                     "#                | / _----=> need-resched    \n"
3337                     "#                || / _---=> hardirq/softirq \n"
3338                     "#                ||| / _--=> preempt-depth   \n"
3339                     "#                |||| /     delay            \n"
3340                     "#  cmd     pid   ||||| time  |   caller      \n"
3341                     "#     \\   /      |||||  \\    |   /         \n");
3342 }
3343
3344 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3345 {
3346         unsigned long total;
3347         unsigned long entries;
3348
3349         get_total_entries(buf, &total, &entries);
3350         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
3351                    entries, total, num_online_cpus());
3352         seq_puts(m, "#\n");
3353 }
3354
3355 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3356                                    unsigned int flags)
3357 {
3358         bool tgid = flags & TRACE_ITER_RECORD_TGID;
3359
3360         print_event_info(buf, m);
3361
3362         seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
3363         seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
3364 }
3365
3366 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3367                                        unsigned int flags)
3368 {
3369         bool tgid = flags & TRACE_ITER_RECORD_TGID;
3370         const char tgid_space[] = "          ";
3371         const char space[] = "  ";
3372
3373         seq_printf(m, "#                          %s  _-----=> irqs-off\n",
3374                    tgid ? tgid_space : space);
3375         seq_printf(m, "#                          %s / _----=> need-resched\n",
3376                    tgid ? tgid_space : space);
3377         seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",
3378                    tgid ? tgid_space : space);
3379         seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",
3380                    tgid ? tgid_space : space);
3381         seq_printf(m, "#                          %s||| /     delay\n",
3382                    tgid ? tgid_space : space);
3383         seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
3384                    tgid ? "   TGID   " : space);
3385         seq_printf(m, "#              | |       | %s||||       |         |\n",
3386                    tgid ? "     |    " : space);
3387 }
3388
3389 void
3390 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3391 {
3392         unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3393         struct trace_buffer *buf = iter->trace_buffer;
3394         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3395         struct tracer *type = iter->trace;
3396         unsigned long entries;
3397         unsigned long total;
3398         const char *name;
3399
3400         name = type->name;
3401
3402         get_total_entries(buf, &total, &entries);
3403
3404         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3405                    name, UTS_RELEASE);
3406         seq_puts(m, "# -----------------------------------"
3407                  "---------------------------------\n");
3408         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3409                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3410                    nsecs_to_usecs(data->saved_latency),
3411                    entries,
3412                    total,
3413                    buf->cpu,
3414 #if defined(CONFIG_PREEMPT_NONE)
3415                    "server",
3416 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3417                    "desktop",
3418 #elif defined(CONFIG_PREEMPT)
3419                    "preempt",
3420 #else
3421                    "unknown",
3422 #endif
3423                    /* These are reserved for later use */
3424                    0, 0, 0, 0);
3425 #ifdef CONFIG_SMP
3426         seq_printf(m, " #P:%d)\n", num_online_cpus());
3427 #else
3428         seq_puts(m, ")\n");
3429 #endif
3430         seq_puts(m, "#    -----------------\n");
3431         seq_printf(m, "#    | task: %.16s-%d "
3432                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3433                    data->comm, data->pid,
3434                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3435                    data->policy, data->rt_priority);
3436         seq_puts(m, "#    -----------------\n");
3437
3438         if (data->critical_start) {
3439                 seq_puts(m, "#  => started at: ");
3440                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3441                 trace_print_seq(m, &iter->seq);
3442                 seq_puts(m, "\n#  => ended at:   ");
3443                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3444                 trace_print_seq(m, &iter->seq);
3445                 seq_puts(m, "\n#\n");
3446         }
3447
3448         seq_puts(m, "#\n");
3449 }
3450
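/*
 * With the annotate option set, and after the buffer had overruns,
 * emit a one-time marker when output first switches to a given CPU's
 * buffer, so readers know earlier events from that CPU may be missing.
 */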
3451 static void test_cpu_buff_start(struct trace_iterator *iter)
3452 {
3453         struct trace_seq *s = &iter->seq;
3454         struct trace_array *tr = iter->tr;
3455
3456         if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3457                 return;
3458
3459         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3460                 return;
3461
3462         if (cpumask_available(iter->started) &&
3463             cpumask_test_cpu(iter->cpu, iter->started))
3464                 return;
3465
3466         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3467                 return;
3468
3469         if (cpumask_available(iter->started))
3470                 cpumask_set_cpu(iter->cpu, iter->started);
3471
3472         /* Don't print the started-cpu annotation for the first entry of the trace */
3473         if (iter->idx > 1)
3474                 trace_seq_printf(s, "##### CPU %u buffer started #####\n",
3475                                 iter->cpu);
3476 }
3477
3478 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3479 {
3480         struct trace_array *tr = iter->tr;
3481         struct trace_seq *s = &iter->seq;
3482         unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3483         struct trace_entry *entry;
3484         struct trace_event *event;
3485
3486         entry = iter->ent;
3487
3488         test_cpu_buff_start(iter);
3489
3490         event = ftrace_find_event(entry->type);
3491
3492         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3493                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3494                         trace_print_lat_context(iter);
3495                 else
3496                         trace_print_context(iter);
3497         }
3498
3499         if (trace_seq_has_overflowed(s))
3500                 return TRACE_TYPE_PARTIAL_LINE;
3501
3502         if (event)
3503                 return event->funcs->trace(iter, sym_flags, event);
3504
3505         trace_seq_printf(s, "Unknown type %d\n", entry->type);
3506
3507         return trace_handle_return(s);
3508 }
3509
3510 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3511 {
3512         struct trace_array *tr = iter->tr;
3513         struct trace_seq *s = &iter->seq;
3514         struct trace_entry *entry;
3515         struct trace_event *event;
3516
3517         entry = iter->ent;
3518
3519         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3520                 trace_seq_printf(s, "%d %d %llu ",
3521                                  entry->pid, iter->cpu, iter->ts);
3522
3523         if (trace_seq_has_overflowed(s))
3524                 return TRACE_TYPE_PARTIAL_LINE;
3525
3526         event = ftrace_find_event(entry->type);
3527         if (event)
3528                 return event->funcs->raw(iter, 0, event);
3529
3530         trace_seq_printf(s, "%d ?\n", entry->type);
3531
3532         return trace_handle_return(s);
3533 }
3534
3535 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3536 {
3537         struct trace_array *tr = iter->tr;
3538         struct trace_seq *s = &iter->seq;
3539         unsigned char newline = '\n';
3540         struct trace_entry *entry;
3541         struct trace_event *event;
3542
3543         entry = iter->ent;
3544
3545         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3546                 SEQ_PUT_HEX_FIELD(s, entry->pid);
3547                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3548                 SEQ_PUT_HEX_FIELD(s, iter->ts);
3549                 if (trace_seq_has_overflowed(s))
3550                         return TRACE_TYPE_PARTIAL_LINE;
3551         }
3552
3553         event = ftrace_find_event(entry->type);
3554         if (event) {
3555                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3556                 if (ret != TRACE_TYPE_HANDLED)
3557                         return ret;
3558         }
3559
3560         SEQ_PUT_FIELD(s, newline);
3561
3562         return trace_handle_return(s);
3563 }
3564
3565 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3566 {
3567         struct trace_array *tr = iter->tr;
3568         struct trace_seq *s = &iter->seq;
3569         struct trace_entry *entry;
3570         struct trace_event *event;
3571
3572         entry = iter->ent;
3573
3574         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3575                 SEQ_PUT_FIELD(s, entry->pid);
3576                 SEQ_PUT_FIELD(s, iter->cpu);
3577                 SEQ_PUT_FIELD(s, iter->ts);
3578                 if (trace_seq_has_overflowed(s))
3579                         return TRACE_TYPE_PARTIAL_LINE;
3580         }
3581
3582         event = ftrace_find_event(entry->type);
3583         return event ? event->funcs->binary(iter, 0, event) :
3584                 TRACE_TYPE_HANDLED;
3585 }
3586
3587 int trace_empty(struct trace_iterator *iter)
3588 {
3589         struct ring_buffer_iter *buf_iter;
3590         int cpu;
3591
3592         /* If we are looking at one CPU buffer, only check that one */
3593         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3594                 cpu = iter->cpu_file;
3595                 buf_iter = trace_buffer_iter(iter, cpu);
3596                 if (buf_iter) {
3597                         if (!ring_buffer_iter_empty(buf_iter))
3598                                 return 0;
3599                 } else {
3600                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3601                                 return 0;
3602                 }
3603                 return 1;
3604         }
3605
3606         for_each_tracing_cpu(cpu) {
3607                 buf_iter = trace_buffer_iter(iter, cpu);
3608                 if (buf_iter) {
3609                         if (!ring_buffer_iter_empty(buf_iter))
3610                                 return 0;
3611                 } else {
3612                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3613                                 return 0;
3614                 }
3615         }
3616
3617         return 1;
3618 }
3619
3620 /*  Called with trace_event_read_lock() held. */
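/*  Output precedence: tracer->print_line, then the msg-only printk
 *  variants, then the bin/hex/raw options, then the default format. */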
3621 enum print_line_t print_trace_line(struct trace_iterator *iter)
3622 {
3623         struct trace_array *tr = iter->tr;
3624         unsigned long trace_flags = tr->trace_flags;
3625         enum print_line_t ret;
3626
3627         if (iter->lost_events) {
3628                 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3629                                  iter->cpu, iter->lost_events);
3630                 if (trace_seq_has_overflowed(&iter->seq))
3631                         return TRACE_TYPE_PARTIAL_LINE;
3632         }
3633
3634         if (iter->trace && iter->trace->print_line) {
3635                 ret = iter->trace->print_line(iter);
3636                 if (ret != TRACE_TYPE_UNHANDLED)
3637                         return ret;
3638         }
3639
3640         if (iter->ent->type == TRACE_BPUTS &&
3641                         trace_flags & TRACE_ITER_PRINTK &&
3642                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3643                 return trace_print_bputs_msg_only(iter);
3644
3645         if (iter->ent->type == TRACE_BPRINT &&
3646                         trace_flags & TRACE_ITER_PRINTK &&
3647                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3648                 return trace_print_bprintk_msg_only(iter);
3649
3650         if (iter->ent->type == TRACE_PRINT &&
3651                         trace_flags & TRACE_ITER_PRINTK &&
3652                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3653                 return trace_print_printk_msg_only(iter);
3654
3655         if (trace_flags & TRACE_ITER_BIN)
3656                 return print_bin_fmt(iter);
3657
3658         if (trace_flags & TRACE_ITER_HEX)
3659                 return print_hex_fmt(iter);
3660
3661         if (trace_flags & TRACE_ITER_RAW)
3662                 return print_raw_fmt(iter);
3663
3664         return print_trace_fmt(iter);
3665 }
3666
3667 void trace_latency_header(struct seq_file *m)
3668 {
3669         struct trace_iterator *iter = m->private;
3670         struct trace_array *tr = iter->tr;
3671
3672         /* print nothing if the buffers are empty */
3673         if (trace_empty(iter))
3674                 return;
3675
3676         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677                 print_trace_header(m, iter);
3678
3679         if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3680                 print_lat_help_header(m);
3681 }
3682
3683 void trace_default_header(struct seq_file *m)
3684 {
3685         struct trace_iterator *iter = m->private;
3686         struct trace_array *tr = iter->tr;
3687         unsigned long trace_flags = tr->trace_flags;
3688
3689         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3690                 return;
3691
3692         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3693                 /* print nothing if the buffers are empty */
3694                 if (trace_empty(iter))
3695                         return;
3696                 print_trace_header(m, iter);
3697                 if (!(trace_flags & TRACE_ITER_VERBOSE))
3698                         print_lat_help_header(m);
3699         } else {
3700                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3701                         if (trace_flags & TRACE_ITER_IRQ_INFO)
3702                                 print_func_help_header_irq(iter->trace_buffer,
3703                                                            m, trace_flags);
3704                         else
3705                                 print_func_help_header(iter->trace_buffer, m,
3706                                                        trace_flags);
3707                 }
3708         }
3709 }
3710
3711 static void test_ftrace_alive(struct seq_file *m)
3712 {
3713         if (!ftrace_is_dead())
3714                 return;
3715         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3716                     "#          MAY BE MISSING FUNCTION EVENTS\n");
3717 }
3718
3719 #ifdef CONFIG_TRACER_MAX_TRACE
3720 static void show_snapshot_main_help(struct seq_file *m)
3721 {
3722         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3723                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3724                     "#                      Takes a snapshot of the main buffer.\n"
3725                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3726                     "#                      (Doesn't have to be '2'; works with any number that\n"
3727                     "#                       is not a '0' or '1')\n");
3728 }
3729
3730 static void show_snapshot_percpu_help(struct seq_file *m)
3731 {
3732         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3733 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3734         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3735                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
3736 #else
3737         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3738                     "#                     Must use main snapshot file to allocate.\n");
3739 #endif
3740         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3741                     "#                      (Doesn't have to be '2'; works with any number that\n"
3742                     "#                       is not a '0' or '1')\n");
3743 }
3744
3745 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3746 {
3747         if (iter->tr->allocated_snapshot)
3748                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3749         else
3750                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3751
3752         seq_puts(m, "# Snapshot commands:\n");
3753         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3754                 show_snapshot_main_help(m);
3755         else
3756                 show_snapshot_percpu_help(m);
3757 }
3758 #else
3759 /* Should never be called */
3760 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3761 #endif
3762
3763 static int s_show(struct seq_file *m, void *v)
3764 {
3765         struct trace_iterator *iter = v;
3766         int ret;
3767
3768         if (iter->ent == NULL) {
3769                 if (iter->tr) {
3770                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
3771                         seq_puts(m, "#\n");
3772                         test_ftrace_alive(m);
3773                 }
3774                 if (iter->snapshot && trace_empty(iter))
3775                         print_snapshot_help(m, iter);
3776                 else if (iter->trace && iter->trace->print_header)
3777                         iter->trace->print_header(m);
3778                 else
3779                         trace_default_header(m);
3780
3781         } else if (iter->leftover) {
3782                 /*
3783                  * If we filled the seq_file buffer earlier, we
3784                  * want to just show it now.
3785                  */
3786                 ret = trace_print_seq(m, &iter->seq);
3787
3788                 /* ret should this time be zero, but you never know */
3789                 iter->leftover = ret;
3790
3791         } else {
3792                 print_trace_line(iter);
3793                 ret = trace_print_seq(m, &iter->seq);
3794                 /*
3795                  * If we overflow the seq_file buffer, then it will
3796                  * ask us for this data again at start up.
3797                  * Use that instead.
3798                  *  ret is 0 if seq_file write succeeded.
3799                  *        -1 otherwise.
3800                  */
3801                 iter->leftover = ret;
3802         }
3803
3804         return 0;
3805 }
3806
3807 /*
3808  * Should be used after trace_array_get(), trace_types_lock
3809  * ensures that i_cdev was already initialized.
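 *
 * trace_create_cpu_file() stores cpu + 1 in i_cdev, so a NULL i_cdev
 * (a file that is not per-cpu) maps to RING_BUFFER_ALL_CPUS here.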
3810  */
3811 static inline int tracing_get_cpu(struct inode *inode)
3812 {
3813         if (inode->i_cdev) /* See trace_create_cpu_file() */
3814                 return (long)inode->i_cdev - 1;
3815         return RING_BUFFER_ALL_CPUS;
3816 }
3817
3818 static const struct seq_operations tracer_seq_ops = {
3819         .start          = s_start,
3820         .next           = s_next,
3821         .stop           = s_stop,
3822         .show           = s_show,
3823 };
3824
3825 static struct trace_iterator *
3826 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3827 {
3828         struct trace_array *tr = inode->i_private;
3829         struct trace_iterator *iter;
3830         int cpu;
3831
3832         if (tracing_disabled)
3833                 return ERR_PTR(-ENODEV);
3834
3835         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3836         if (!iter)
3837                 return ERR_PTR(-ENOMEM);
3838
3839         iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3840                                     GFP_KERNEL);
3841         if (!iter->buffer_iter)
3842                 goto release;
3843
3844         /*
3845          * We make a copy of the current tracer to avoid concurrent
3846          * changes on it while we are reading.
3847          */
3848         mutex_lock(&trace_types_lock);
3849         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3850         if (!iter->trace)
3851                 goto fail;
3852
3853         *iter->trace = *tr->current_trace;
3854
3855         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3856                 goto fail;
3857
3858         iter->tr = tr;
3859
3860 #ifdef CONFIG_TRACER_MAX_TRACE
3861         /* Currently only the top directory has a snapshot */
3862         if (tr->current_trace->print_max || snapshot)
3863                 iter->trace_buffer = &tr->max_buffer;
3864         else
3865 #endif
3866                 iter->trace_buffer = &tr->trace_buffer;
3867         iter->snapshot = snapshot;
3868         iter->pos = -1;
3869         iter->cpu_file = tracing_get_cpu(inode);
3870         mutex_init(&iter->mutex);
3871
3872         /* Notify the tracer early, before we stop tracing. */
3873         if (iter->trace && iter->trace->open)
3874                 iter->trace->open(iter);
3875
3876         /* Annotate start of buffers if we had overruns */
3877         if (ring_buffer_overruns(iter->trace_buffer->buffer))
3878                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3879
3880         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3881         if (trace_clocks[tr->clock_id].in_ns)
3882                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3883
3884         /* stop the trace while dumping if we are not opening "snapshot" */
3885         if (!iter->snapshot)
3886                 tracing_stop_tr(tr);
3887
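        /*
         * Prepare all cpu iterators first so that a single
         * ring_buffer_read_prepare_sync() covers every cpu, instead of
         * paying the synchronization cost once per cpu.
         */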
3888         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3889                 for_each_tracing_cpu(cpu) {
3890                         iter->buffer_iter[cpu] =
3891                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3892                 }
3893                 ring_buffer_read_prepare_sync();
3894                 for_each_tracing_cpu(cpu) {
3895                         ring_buffer_read_start(iter->buffer_iter[cpu]);
3896                         tracing_iter_reset(iter, cpu);
3897                 }
3898         } else {
3899                 cpu = iter->cpu_file;
3900                 iter->buffer_iter[cpu] =
3901                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3902                 ring_buffer_read_prepare_sync();
3903                 ring_buffer_read_start(iter->buffer_iter[cpu]);
3904                 tracing_iter_reset(iter, cpu);
3905         }
3906
3907         mutex_unlock(&trace_types_lock);
3908
3909         return iter;
3910
3911  fail:
3912         mutex_unlock(&trace_types_lock);
3913         kfree(iter->trace);
3914         kfree(iter->buffer_iter);
3915 release:
3916         seq_release_private(inode, file);
3917         return ERR_PTR(-ENOMEM);
3918 }
3919
3920 int tracing_open_generic(struct inode *inode, struct file *filp)
3921 {
3922         if (tracing_disabled)
3923                 return -ENODEV;
3924
3925         filp->private_data = inode->i_private;
3926         return 0;
3927 }
3928
3929 bool tracing_is_disabled(void)
3930 {
3931         return tracing_disabled ? true : false;
3932 }
3933
3934 /*
3935  * Open and update trace_array ref count.
3936  * Must have the current trace_array passed to it.
3937  */
3938 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3939 {
3940         struct trace_array *tr = inode->i_private;
3941
3942         if (tracing_disabled)
3943                 return -ENODEV;
3944
3945         if (trace_array_get(tr) < 0)
3946                 return -ENODEV;
3947
3948         filp->private_data = inode->i_private;
3949
3950         return 0;
3951 }
3952
3953 static int tracing_release(struct inode *inode, struct file *file)
3954 {
3955         struct trace_array *tr = inode->i_private;
3956         struct seq_file *m = file->private_data;
3957         struct trace_iterator *iter;
3958         int cpu;
3959
3960         if (!(file->f_mode & FMODE_READ)) {
3961                 trace_array_put(tr);
3962                 return 0;
3963         }
3964
3965         /* Writes do not use seq_file */
3966         iter = m->private;
3967         mutex_lock(&trace_types_lock);
3968
3969         for_each_tracing_cpu(cpu) {
3970                 if (iter->buffer_iter[cpu])
3971                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3972         }
3973
3974         if (iter->trace && iter->trace->close)
3975                 iter->trace->close(iter);
3976
3977         if (!iter->snapshot)
3978                 /* reenable tracing if it was previously enabled */
3979                 tracing_start_tr(tr);
3980
3981         __trace_array_put(tr);
3982
3983         mutex_unlock(&trace_types_lock);
3984
3985         mutex_destroy(&iter->mutex);
3986         free_cpumask_var(iter->started);
3987         kfree(iter->trace);
3988         kfree(iter->buffer_iter);
3989         seq_release_private(inode, file);
3990
3991         return 0;
3992 }
3993
3994 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3995 {
3996         struct trace_array *tr = inode->i_private;
3997
3998         trace_array_put(tr);
3999         return 0;
4000 }
4001
4002 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4003 {
4004         struct trace_array *tr = inode->i_private;
4005
4006         trace_array_put(tr);
4007
4008         return single_release(inode, file);
4009 }
4010
4011 static int tracing_open(struct inode *inode, struct file *file)
4012 {
4013         struct trace_array *tr = inode->i_private;
4014         struct trace_iterator *iter;
4015         int ret = 0;
4016
4017         if (trace_array_get(tr) < 0)
4018                 return -ENODEV;
4019
4020         /* If this file was open for write, then erase contents */
4021         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4022                 int cpu = tracing_get_cpu(inode);
4023
4024                 if (cpu == RING_BUFFER_ALL_CPUS)
4025                         tracing_reset_online_cpus(&tr->trace_buffer);
4026                 else
4027                         tracing_reset(&tr->trace_buffer, cpu);
4028         }
4029
4030         if (file->f_mode & FMODE_READ) {
4031                 iter = __tracing_open(inode, file, false);
4032                 if (IS_ERR(iter))
4033                         ret = PTR_ERR(iter);
4034                 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4035                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
4036         }
4037
4038         if (ret < 0)
4039                 trace_array_put(tr);
4040
4041         return ret;
4042 }
4043
4044 /*
4045  * Some tracers are not suitable for instance buffers.
4046  * A tracer is always available for the global (toplevel) array,
4047  * and for an instance only if it explicitly allows it.
4048  */
4049 static bool
4050 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4051 {
4052         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4053 }
4054
4055 /* Find the next tracer that this trace array may use */
4056 static struct tracer *
4057 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4058 {
4059         while (t && !trace_ok_for_array(t, tr))
4060                 t = t->next;
4061
4062         return t;
4063 }
4064
4065 static void *
4066 t_next(struct seq_file *m, void *v, loff_t *pos)
4067 {
4068         struct trace_array *tr = m->private;
4069         struct tracer *t = v;
4070
4071         (*pos)++;
4072
4073         if (t)
4074                 t = get_tracer_for_array(tr, t->next);
4075
4076         return t;
4077 }
4078
4079 static void *t_start(struct seq_file *m, loff_t *pos)
4080 {
4081         struct trace_array *tr = m->private;
4082         struct tracer *t;
4083         loff_t l = 0;
4084
4085         mutex_lock(&trace_types_lock);
4086
4087         t = get_tracer_for_array(tr, trace_types);
4088         for (; t && l < *pos; t = t_next(m, t, &l))
4089                 ;
4090
4091         return t;
4092 }
4093
4094 static void t_stop(struct seq_file *m, void *p)
4095 {
4096         mutex_unlock(&trace_types_lock);
4097 }
4098
4099 static int t_show(struct seq_file *m, void *v)
4100 {
4101         struct tracer *t = v;
4102
4103         if (!t)
4104                 return 0;
4105
4106         seq_puts(m, t->name);
4107         if (t->next)
4108                 seq_putc(m, ' ');
4109         else
4110                 seq_putc(m, '\n');
4111
4112         return 0;
4113 }
4114
4115 static const struct seq_operations show_traces_seq_ops = {
4116         .start          = t_start,
4117         .next           = t_next,
4118         .stop           = t_stop,
4119         .show           = t_show,
4120 };
4121
4122 static int show_traces_open(struct inode *inode, struct file *file)
4123 {
4124         struct trace_array *tr = inode->i_private;
4125         struct seq_file *m;
4126         int ret;
4127
4128         if (tracing_disabled)
4129                 return -ENODEV;
4130
4131         ret = seq_open(file, &show_traces_seq_ops);
4132         if (ret)
4133                 return ret;
4134
4135         m = file->private_data;
4136         m->private = tr;
4137
4138         return 0;
4139 }
4140
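/*
 * The "trace" file is opened for write only so that O_TRUNC can clear
 * the buffer; the written data itself is discarded, so this stub just
 * claims the whole count.
 */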
4141 static ssize_t
4142 tracing_write_stub(struct file *filp, const char __user *ubuf,
4143                    size_t count, loff_t *ppos)
4144 {
4145         return count;
4146 }
4147
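/*
 * Only readers go through seq_file and get real lseek() semantics;
 * a write-only open simply has its position reset to zero.
 */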
4148 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4149 {
4150         int ret;
4151
4152         if (file->f_mode & FMODE_READ)
4153                 ret = seq_lseek(file, offset, whence);
4154         else
4155                 file->f_pos = ret = 0;
4156
4157         return ret;
4158 }
4159
4160 static const struct file_operations tracing_fops = {
4161         .open           = tracing_open,
4162         .read           = seq_read,
4163         .write          = tracing_write_stub,
4164         .llseek         = tracing_lseek,
4165         .release        = tracing_release,
4166 };
4167
4168 static const struct file_operations show_traces_fops = {
4169         .open           = show_traces_open,
4170         .read           = seq_read,
4171         .release        = seq_release,
4172         .llseek         = seq_lseek,
4173 };
4174
4175 /*
4176  * The tracer itself will not take this lock, but still we want
4177  * to provide a consistent cpumask to user-space:
4178  */
4179 static DEFINE_MUTEX(tracing_cpumask_update_lock);
4180
4181 /*
4182  * Temporary storage for the character representation of the
4183  * CPU bitmask (and one more byte for the newline):
4184  */
4185 static char mask_str[NR_CPUS + 1];
4186
4187 static ssize_t
4188 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4189                      size_t count, loff_t *ppos)
4190 {
4191         struct trace_array *tr = file_inode(filp)->i_private;
4192         int len;
4193
4194         mutex_lock(&tracing_cpumask_update_lock);
4195
4196         len = snprintf(mask_str, sizeof(mask_str), "%*pb\n",
4197                        cpumask_pr_args(tr->tracing_cpumask));
4198         if (len >= count) {
4199                 count = -EINVAL;
4200                 goto out_err;
4201         }
4202         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4203
4204 out_err:
4205         mutex_unlock(&tracing_cpumask_update_lock);
4206
4207         return count;
4208 }
4209
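/*
 * Takes the standard hex cpumask format from user-space; for example,
 * "echo 3 > tracing_cpumask" limits tracing to CPUs 0 and 1.
 */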
4210 static ssize_t
4211 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4212                       size_t count, loff_t *ppos)
4213 {
4214         struct trace_array *tr = file_inode(filp)->i_private;
4215         cpumask_var_t tracing_cpumask_new;
4216         int err, cpu;
4217
4218         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4219                 return -ENOMEM;
4220
4221         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4222         if (err)
4223                 goto err_unlock;
4224
4225         mutex_lock(&tracing_cpumask_update_lock);
4226
4227         local_irq_disable();
4228         arch_spin_lock(&tr->max_lock);
4229         for_each_tracing_cpu(cpu) {
4230                 /*
4231                  * Increase/decrease the disabled counter if we are
4232                  * about to flip a bit in the cpumask:
4233                  */
4234                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4235                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4236                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4237                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4238                 }
4239                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4240                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4241                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4242                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4243                 }
4244         }
4245         arch_spin_unlock(&tr->max_lock);
4246         local_irq_enable();
4247
4248         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4249
4250         mutex_unlock(&tracing_cpumask_update_lock);
4251         free_cpumask_var(tracing_cpumask_new);
4252
4253         return count;
4254
4255 err_unlock:
4256         free_cpumask_var(tracing_cpumask_new);
4257
4258         return err;
4259 }
4260
4261 static const struct file_operations tracing_cpumask_fops = {
4262         .open           = tracing_open_generic_tr,
4263         .read           = tracing_cpumask_read,
4264         .write          = tracing_cpumask_write,
4265         .release        = tracing_release_generic_tr,
4266         .llseek         = generic_file_llseek,
4267 };
4268
4269 static int tracing_trace_options_show(struct seq_file *m, void *v)
4270 {
4271         struct tracer_opt *trace_opts;
4272         struct trace_array *tr = m->private;
4273         u32 tracer_flags;
4274         int i;
4275
4276         mutex_lock(&trace_types_lock);
4277         tracer_flags = tr->current_trace->flags->val;
4278         trace_opts = tr->current_trace->flags->opts;
4279
4280         for (i = 0; trace_options[i]; i++) {
4281                 if (tr->trace_flags & (1 << i))
4282                         seq_printf(m, "%s\n", trace_options[i]);
4283                 else
4284                         seq_printf(m, "no%s\n", trace_options[i]);
4285         }
4286
4287         for (i = 0; trace_opts[i].name; i++) {
4288                 if (tracer_flags & trace_opts[i].bit)
4289                         seq_printf(m, "%s\n", trace_opts[i].name);
4290                 else
4291                         seq_printf(m, "no%s\n", trace_opts[i].name);
4292         }
4293         mutex_unlock(&trace_types_lock);
4294
4295         return 0;
4296 }
4297
4298 static int __set_tracer_option(struct trace_array *tr,
4299                                struct tracer_flags *tracer_flags,
4300                                struct tracer_opt *opts, int neg)
4301 {
4302         struct tracer *trace = tracer_flags->trace;
4303         int ret;
4304
4305         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4306         if (ret)
4307                 return ret;
4308
4309         if (neg)
4310                 tracer_flags->val &= ~opts->bit;
4311         else
4312                 tracer_flags->val |= opts->bit;
4313         return 0;
4314 }
4315
4316 /* Try to assign a tracer specific option */
4317 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4318 {
4319         struct tracer *trace = tr->current_trace;
4320         struct tracer_flags *tracer_flags = trace->flags;
4321         struct tracer_opt *opts = NULL;
4322         int i;
4323
4324         for (i = 0; tracer_flags->opts[i].name; i++) {
4325                 opts = &tracer_flags->opts[i];
4326
4327                 if (strcmp(cmp, opts->name) == 0)
4328                         return __set_tracer_option(tr, trace->flags, opts, neg);
4329         }
4330
4331         return -EINVAL;
4332 }
4333
4334 /* Some tracers require overwrite to stay enabled */
4335 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4336 {
4337         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4338                 return -1;
4339
4340         return 0;
4341 }
4342
4343 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4344 {
4345         /* do nothing if flag is already set */
4346         if (!!(tr->trace_flags & mask) == !!enabled)
4347                 return 0;
4348
4349         /* Give the tracer a chance to approve the change */
4350         if (tr->current_trace->flag_changed)
4351                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4352                         return -EINVAL;
4353
4354         if (enabled)
4355                 tr->trace_flags |= mask;
4356         else
4357                 tr->trace_flags &= ~mask;
4358
4359         if (mask == TRACE_ITER_RECORD_CMD)
4360                 trace_event_enable_cmd_record(enabled);
4361
4362         if (mask == TRACE_ITER_RECORD_TGID) {
4363                 if (!tgid_map)
4364                         tgid_map = kcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map),
4365                                            GFP_KERNEL);
4366                 if (!tgid_map) {
4367                         tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4368                         return -ENOMEM;
4369                 }
4370
4371                 trace_event_enable_tgid_record(enabled);
4372         }
4373
4374         if (mask == TRACE_ITER_EVENT_FORK)
4375                 trace_event_follow_fork(tr, enabled);
4376
4377         if (mask == TRACE_ITER_FUNC_FORK)
4378                 ftrace_pid_follow_fork(tr, enabled);
4379
4380         if (mask == TRACE_ITER_OVERWRITE) {
4381                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4382 #ifdef CONFIG_TRACER_MAX_TRACE
4383                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4384 #endif
4385         }
4386
4387         if (mask == TRACE_ITER_PRINTK) {
4388                 trace_printk_start_stop_comm(enabled);
4389                 trace_printk_control(enabled);
4390         }
4391
4392         return 0;
4393 }
4394
4395 static int trace_set_options(struct trace_array *tr, char *option)
4396 {
4397         char *cmp;
4398         int neg = 0;
4399         int ret = -ENODEV;
4400         int i;
4401         size_t orig_len = strlen(option);
4402
4403         cmp = strstrip(option);
4404
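        /* a "no" prefix negates: "print-parent" sets, "noprint-parent" clears */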
4405         if (strncmp(cmp, "no", 2) == 0) {
4406                 neg = 1;
4407                 cmp += 2;
4408         }
4409
4410         mutex_lock(&trace_types_lock);
4411
4412         for (i = 0; trace_options[i]; i++) {
4413                 if (strcmp(cmp, trace_options[i]) == 0) {
4414                         ret = set_tracer_flag(tr, 1 << i, !neg);
4415                         break;
4416                 }
4417         }
4418
4419         /* If no option could be set, test the specific tracer options */
4420         if (!trace_options[i])
4421                 ret = set_tracer_option(tr, cmp, neg);
4422
4423         mutex_unlock(&trace_types_lock);
4424
4425         /*
4426          * If the first trailing whitespace is replaced with '\0' by strstrip,
4427          * turn it back into a space.
4428          */
4429         if (orig_len > strlen(option))
4430                 option[strlen(option)] = ' ';
4431
4432         return ret;
4433 }
4434
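/*
 * Apply the comma-separated list saved from the "trace_options="
 * boot parameter, as if each entry had been written to the
 * trace_options file.
 */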
4435 static void __init apply_trace_boot_options(void)
4436 {
4437         char *buf = trace_boot_options_buf;
4438         char *option;
4439
4440         while (true) {
4441                 option = strsep(&buf, ",");
4442
4443                 if (!option)
4444                         break;
4445
4446                 if (*option)
4447                         trace_set_options(&global_trace, option);
4448
4449                 /* Put back the comma to allow this to be called again */
4450                 if (buf)
4451                         *(buf - 1) = ',';
4452         }
4453 }
4454
4455 static ssize_t
4456 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4457                         size_t cnt, loff_t *ppos)
4458 {
4459         struct seq_file *m = filp->private_data;
4460         struct trace_array *tr = m->private;
4461         char buf[64];
4462         int ret;
4463
4464         if (cnt >= sizeof(buf))
4465                 return -EINVAL;
4466
4467         if (copy_from_user(buf, ubuf, cnt))
4468                 return -EFAULT;
4469
4470         buf[cnt] = 0;
4471
4472         ret = trace_set_options(tr, buf);
4473         if (ret < 0)
4474                 return ret;
4475
4476         *ppos += cnt;
4477
4478         return cnt;
4479 }
4480
4481 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4482 {
4483         struct trace_array *tr = inode->i_private;
4484         int ret;
4485
4486         if (tracing_disabled)
4487                 return -ENODEV;
4488
4489         if (trace_array_get(tr) < 0)
4490                 return -ENODEV;
4491
4492         ret = single_open(file, tracing_trace_options_show, inode->i_private);
4493         if (ret < 0)
4494                 trace_array_put(tr);
4495
4496         return ret;
4497 }
4498
4499 static const struct file_operations tracing_iter_fops = {
4500         .open           = tracing_trace_options_open,
4501         .read           = seq_read,
4502         .llseek         = seq_lseek,
4503         .release        = tracing_single_release_tr,
4504         .write          = tracing_trace_options_write,
4505 };
4506
4507 static const char readme_msg[] =
4508         "tracing mini-HOWTO:\n\n"
4509         "# echo 0 > tracing_on : quick way to disable tracing\n"
4510         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4511         " Important files:\n"
4512         "  trace\t\t\t- The static contents of the buffer\n"
4513         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
4514         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4515         "  current_tracer\t- function and latency tracers\n"
4516         "  available_tracers\t- list of configured tracers for current_tracer\n"
4517         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
4518         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
4519         "  trace_clock\t\t- change the clock used to order events\n"
4520         "       local:   Per cpu clock but may not be synced across CPUs\n"
4521         "      global:   Synced across CPUs but slows tracing down.\n"
4522         "     counter:   Not a clock, but just an increment\n"
4523         "      uptime:   Jiffy counter from time of boot\n"
4524         "        perf:   Same clock that perf events use\n"
4525 #ifdef CONFIG_X86_64
4526         "     x86-tsc:   TSC cycle counter\n"
4527 #endif
4528         "\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
4529         "\n  trace_marker_raw\t\t- Writes into this file insert binary data into the kernel buffer\n"
4530         "  tracing_cpumask\t- Limit which CPUs to trace\n"
4531         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4532         "\t\t\t  Remove sub-buffer with rmdir\n"
4533         "  trace_options\t\t- Set format or modify how tracing happens\n"
4534         "\t\t\t  Disable an option by adding the prefix 'no' to the\n"
4535         "\t\t\t  option name\n"
4536         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4537 #ifdef CONFIG_DYNAMIC_FTRACE
4538         "\n  available_filter_functions - list of functions that can be filtered on\n"
4539         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
4540         "\t\t\t  functions\n"
4541         "\t     accepts: func_full_name or glob-matching-pattern\n"
4542         "\t     modules: Can select a group via module\n"
4543         "\t      Format: :mod:<module-name>\n"
4544         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
4545         "\t    triggers: a command to perform when function is hit\n"
4546         "\t      Format: <function>:<trigger>[:count]\n"
4547         "\t     trigger: traceon, traceoff\n"
4548         "\t\t      enable_event:<system>:<event>\n"
4549         "\t\t      disable_event:<system>:<event>\n"
4550 #ifdef CONFIG_STACKTRACE
4551         "\t\t      stacktrace\n"
4552 #endif
4553 #ifdef CONFIG_TRACER_SNAPSHOT
4554         "\t\t      snapshot\n"
4555 #endif
4556         "\t\t      dump\n"
4557         "\t\t      cpudump\n"
4558         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
4559         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
4560         "\t     The first one will disable tracing every time do_fault is hit\n"
4561         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
4562         "\t       The first time do_trap is hit and it disables tracing, the\n"
4563         "\t       counter will decrement to 2. If tracing is already disabled,\n"
4564         "\t       the counter will not decrement. It only decrements when the\n"
4565         "\t       trigger did work\n"
4566         "\t     To remove trigger without count:\n"
4567         "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
4568         "\t     To remove trigger with a count:\n"
4569         "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
4570         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
4571         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4572         "\t    modules: Can select a group via module command :mod:\n"
4573         "\t    Does not accept triggers\n"
4574 #endif /* CONFIG_DYNAMIC_FTRACE */
4575 #ifdef CONFIG_FUNCTION_TRACER
4576         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4577         "\t\t    (function)\n"
4578 #endif
4579 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4580         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4581         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4582         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4583 #endif
4584 #ifdef CONFIG_TRACER_SNAPSHOT
4585         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
4586         "\t\t\t  snapshot buffer. Read the contents for more\n"
4587         "\t\t\t  information\n"
4588 #endif
4589 #ifdef CONFIG_STACK_TRACER
4590         "  stack_trace\t\t- Shows the max stack trace when active\n"
4591         "  stack_max_size\t- Shows current max stack size that was traced\n"
4592         "\t\t\t  Write into this file to reset the max size (trigger a\n"
4593         "\t\t\t  new trace)\n"
4594 #ifdef CONFIG_DYNAMIC_FTRACE
4595         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4596         "\t\t\t  traces\n"
4597 #endif
4598 #endif /* CONFIG_STACK_TRACER */
4599 #ifdef CONFIG_KPROBE_EVENTS
4600         "  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4601         "\t\t\t  Write into this file to define/undefine new trace events.\n"
4602 #endif
4603 #ifdef CONFIG_UPROBE_EVENTS
4604         "  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4605         "\t\t\t  Write into this file to define/undefine new trace events.\n"
4606 #endif
4607 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4608         "\t  accepts: event-definitions (one definition per line)\n"
4609         "\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
4610         "\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4611         "\t           -:[<group>/]<event>\n"
4612 #ifdef CONFIG_KPROBE_EVENTS
4613         "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4614         "\t    place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4615 #endif
4616 #ifdef CONFIG_UPROBE_EVENTS
4617         "\t    place: <path>:<offset>\n"
4618 #endif
4619         "\t     args: <name>=fetcharg[:type]\n"
4620         "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4621         "\t           $stack<index>, $stack, $retval, $comm\n"
4622         "\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4623         "\t           b<bit-width>@<bit-offset>/<container-size>\n"
4624 #endif
4625         "  events/\t\t- Directory containing all trace event subsystems:\n"
4626         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4627         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
4628         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4629         "\t\t\t  events\n"
4630         "      filter\t\t- If set, only events passing filter are traced\n"
4631         "  events/<system>/<event>/\t- Directory containing control files for\n"
4632         "\t\t\t  <event>:\n"
4633         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4634         "      filter\t\t- If set, only events passing filter are traced\n"
4635         "      trigger\t\t- If set, a command to perform when event is hit\n"
4636         "\t    Format: <trigger>[:count][if <filter>]\n"
4637         "\t   trigger: traceon, traceoff\n"
4638         "\t            enable_event:<system>:<event>\n"
4639         "\t            disable_event:<system>:<event>\n"
4640 #ifdef CONFIG_HIST_TRIGGERS
4641         "\t            enable_hist:<system>:<event>\n"
4642         "\t            disable_hist:<system>:<event>\n"
4643 #endif
4644 #ifdef CONFIG_STACKTRACE
4645         "\t\t    stacktrace\n"
4646 #endif
4647 #ifdef CONFIG_TRACER_SNAPSHOT
4648         "\t\t    snapshot\n"
4649 #endif
4650 #ifdef CONFIG_HIST_TRIGGERS
4651         "\t\t    hist (see below)\n"
4652 #endif
4653         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
4654         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
4655         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4656         "\t                  events/block/block_unplug/trigger\n"
4657         "\t   The first disables tracing every time block_unplug is hit.\n"
4658         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
4659         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
4660         "\t     is hit and the 'nr_rq' event field has a value greater than 1.\n"
4661         "\t   Like function triggers, the counter is only decremented if it\n"
4662         "\t    enabled or disabled tracing.\n"
4663         "\t   To remove a trigger without a count:\n"
4664         "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
4665         "\t   To remove a trigger with a count:\n"
4666         "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
4667         "\t   Filters can be ignored when removing a trigger.\n"
4668 #ifdef CONFIG_HIST_TRIGGERS
4669         "      hist trigger\t- If set, event hits are aggregated into a hash table\n"
4670         "\t    Format: hist:keys=<field1[,field2,...]>\n"
4671         "\t            [:values=<field1[,field2,...]>]\n"
4672         "\t            [:sort=<field1[,field2,...]>]\n"
4673         "\t            [:size=#entries]\n"
4674         "\t            [:pause][:continue][:clear]\n"
4675         "\t            [:name=histname1]\n"
4676         "\t            [if <filter>]\n\n"
4677         "\t    When a matching event is hit, an entry is added to a hash\n"
4678         "\t    table using the key(s) and value(s) named, and the value of a\n"
4679         "\t    sum called 'hitcount' is incremented.  Keys and values\n"
4680         "\t    correspond to fields in the event's format description.  Keys\n"
4681         "\t    can be any field, or the special string 'stacktrace'.\n"
4682         "\t    Compound keys consisting of up to two fields can be specified\n"
4683         "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
4684         "\t    fields.  Sort keys consisting of up to two fields can be\n"
4685         "\t    specified using the 'sort' keyword.  The sort direction can\n"
4686         "\t    be modified by appending '.descending' or '.ascending' to a\n"
4687         "\t    sort field.  The 'size' parameter can be used to specify more\n"
4688         "\t    or fewer than the default 2048 entries for the hashtable size.\n"
4689         "\t    If a hist trigger is given a name using the 'name' parameter,\n"
4690         "\t    its histogram data will be shared with other triggers of the\n"
4691         "\t    same name, and trigger hits will update this common data.\n\n"
4692         "\t    Reading the 'hist' file for the event will dump the hash\n"
4693         "\t    table in its entirety to stdout.  If there are multiple hist\n"
4694         "\t    triggers attached to an event, there will be a table for each\n"
4695         "\t    trigger in the output.  The table displayed for a named\n"
4696         "\t    trigger will be the same as any other instance having the\n"
4697         "\t    same name.  The default format used to display a given field\n"
4698         "\t    can be modified by appending any of the following modifiers\n"
4699         "\t    to the field name, as applicable:\n\n"
4700         "\t            .hex        display a number as a hex value\n"
4701         "\t            .sym        display an address as a symbol\n"
4702         "\t            .sym-offset display an address as a symbol and offset\n"
4703         "\t            .execname   display a common_pid as a program name\n"
4704         "\t            .syscall    display a syscall id as a syscall name\n"
4705         "\t            .log2       display log2 value rather than raw number\n\n"
4706         "\t    The 'pause' parameter can be used to pause an existing hist\n"
4707         "\t    trigger or to start a hist trigger but not log any events\n"
4708         "\t    until told to do so.  'continue' can be used to start or\n"
4709         "\t    restart a paused hist trigger.\n\n"
4710         "\t    The 'clear' parameter will clear the contents of a running\n"
4711         "\t    hist trigger and leave its current paused/active state\n"
4712         "\t    unchanged.\n\n"
4713         "\t    The enable_hist and disable_hist triggers can be used to\n"
4714         "\t    have one event conditionally start and stop another event's\n"
4715         "\t    already-attached hist trigger.  The syntax is analogous to\n"
4716         "\t    the enable_event and disable_event triggers.\n"
4717 #endif
4718 ;
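/*
 * Illustrative hist-trigger session (an editorial sketch, not part of
 * the help text above; the 'call_site' and 'bytes_req' fields are
 * taken from the kmem:kmalloc event's format file, and the paths
 * assume tracefs is mounted, e.g. at /sys/kernel/debug/tracing):
 *
 *   echo 'hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending' \
 *           > events/kmem/kmalloc/trigger
 *   cat events/kmem/kmalloc/hist
 */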
4719
4720 static ssize_t
4721 tracing_readme_read(struct file *filp, char __user *ubuf,
4722                        size_t cnt, loff_t *ppos)
4723 {
4724         return simple_read_from_buffer(ubuf, cnt, ppos,
4725                                         readme_msg, strlen(readme_msg));
4726 }
4727
4728 static const struct file_operations tracing_readme_fops = {
4729         .open           = tracing_open_generic,
4730         .read           = tracing_readme_read,
4731         .llseek         = generic_file_llseek,
4732 };
4733
4734 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4735 {
4736         int *ptr = v;
4737
4738         if (*pos || m->count)
4739                 ptr++;
4740
4741         (*pos)++;
4742
4743         for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4744                 if (trace_find_tgid(*ptr))
4745                         return ptr;
4746         }
4747
4748         return NULL;
4749 }
4750
4751 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4752 {
4753         void *v;
4754         loff_t l = 0;
4755
4756         if (!tgid_map)
4757                 return NULL;
4758
4759         v = &tgid_map[0];
4760         while (l <= *pos) {
4761                 v = saved_tgids_next(m, v, &l);
4762                 if (!v)
4763                         return NULL;
4764         }
4765
4766         return v;
4767 }
4768
4769 static void saved_tgids_stop(struct seq_file *m, void *v)
4770 {
4771 }
4772
4773 static int saved_tgids_show(struct seq_file *m, void *v)
4774 {
4775         int pid = (int *)v - tgid_map;
4776
4777         seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4778         return 0;
4779 }
4780
4781 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4782         .start          = saved_tgids_start,
4783         .stop           = saved_tgids_stop,
4784         .next           = saved_tgids_next,
4785         .show           = saved_tgids_show,
4786 };
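/*
 * These callbacks follow the standard seq_file contract: the core calls
 * ->start() to position the iterator, alternates ->show() and ->next()
 * until ->next() returns NULL (or the output buffer fills), and then
 * calls ->stop(); ->start() may be invoked again with the saved
 * position to resume a partially consumed read.
 */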
4787
4788 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4789 {
4790         if (tracing_disabled)
4791                 return -ENODEV;
4792
4793         return seq_open(filp, &tracing_saved_tgids_seq_ops);
4794 }
4795
4796
4797 static const struct file_operations tracing_saved_tgids_fops = {
4798         .open           = tracing_saved_tgids_open,
4799         .read           = seq_read,
4800         .llseek         = seq_lseek,
4801         .release        = seq_release,
4802 };
4803
4804 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4805 {
4806         unsigned int *ptr = v;
4807
4808         if (*pos || m->count)
4809                 ptr++;
4810
4811         (*pos)++;
4812
4813         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4814              ptr++) {
4815                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4816                         continue;
4817
4818                 return ptr;
4819         }
4820
4821         return NULL;
4822 }
4823
4824 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4825 {
4826         void *v;
4827         loff_t l = 0;
4828
4829         preempt_disable();
4830         arch_spin_lock(&trace_cmdline_lock);
4831
4832         v = &savedcmd->map_cmdline_to_pid[0];
4833         while (l <= *pos) {
4834                 v = saved_cmdlines_next(m, v, &l);
4835                 if (!v)
4836                         return NULL;
4837         }
4838
4839         return v;
4840 }
4841
4842 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4843 {
4844         arch_spin_unlock(&trace_cmdline_lock);
4845         preempt_enable();
4846 }
4847
4848 static int saved_cmdlines_show(struct seq_file *m, void *v)
4849 {
4850         char buf[TASK_COMM_LEN];
4851         unsigned int *pid = v;
4852
4853         __trace_find_cmdline(*pid, buf);
4854         seq_printf(m, "%d %s\n", *pid, buf);
4855         return 0;
4856 }
4857
4858 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4859         .start          = saved_cmdlines_start,
4860         .next           = saved_cmdlines_next,
4861         .stop           = saved_cmdlines_stop,
4862         .show           = saved_cmdlines_show,
4863 };
4864
4865 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4866 {
4867         if (tracing_disabled)
4868                 return -ENODEV;
4869
4870         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4871 }
4872
4873 static const struct file_operations tracing_saved_cmdlines_fops = {
4874         .open           = tracing_saved_cmdlines_open,
4875         .read           = seq_read,
4876         .llseek         = seq_lseek,
4877         .release        = seq_release,
4878 };
4879
4880 static ssize_t
4881 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4882                                  size_t cnt, loff_t *ppos)
4883 {
4884         char buf[64];
4885         int r;
4886
4887         arch_spin_lock(&trace_cmdline_lock);
4888         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4889         arch_spin_unlock(&trace_cmdline_lock);
4890
4891         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4892 }
4893
4894 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4895 {
4896         kfree(s->saved_cmdlines);
4897         kfree(s->map_cmdline_to_pid);
4898         kfree(s);
4899 }
4900
4901 static int tracing_resize_saved_cmdlines(unsigned int val)
4902 {
4903         struct saved_cmdlines_buffer *s, *savedcmd_temp;
4904
4905         s = kmalloc(sizeof(*s), GFP_KERNEL);
4906         if (!s)
4907                 return -ENOMEM;
4908
4909         if (allocate_cmdlines_buffer(val, s) < 0) {
4910                 kfree(s);
4911                 return -ENOMEM;
4912         }
4913
4914         arch_spin_lock(&trace_cmdline_lock);
4915         savedcmd_temp = savedcmd;
4916         savedcmd = s;
4917         arch_spin_unlock(&trace_cmdline_lock);
4918         free_saved_cmdlines_buffer(savedcmd_temp);
4919
4920         return 0;
4921 }
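/*
 * Note the ordering above: the replacement buffer is fully allocated
 * before the swap, the pointer swap itself is published under
 * trace_cmdline_lock, and the old buffer is freed only after the lock
 * is dropped, so concurrent cmdline lookups never see a half-built or
 * freed map.
 */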
4922
4923 static ssize_t
4924 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4925                                   size_t cnt, loff_t *ppos)
4926 {
4927         unsigned long val;
4928         int ret;
4929
4930         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4931         if (ret)
4932                 return ret;
4933
4934         /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4935         if (!val || val > PID_MAX_DEFAULT)
4936                 return -EINVAL;
4937
4938         ret = tracing_resize_saved_cmdlines((unsigned int)val);
4939         if (ret < 0)
4940                 return ret;
4941
4942         *ppos += cnt;
4943
4944         return cnt;
4945 }
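/*
 * Example (illustrative): grow the cmdline cache beyond its boot-time
 * default so that long-running systems keep more pid->comm mappings:
 *
 *   echo 1024 > saved_cmdlines_size
 *   cat saved_cmdlines_size
 */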
4946
4947 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4948         .open           = tracing_open_generic,
4949         .read           = tracing_saved_cmdlines_size_read,
4950         .write          = tracing_saved_cmdlines_size_write,
4951 };
4952
4953 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4954 static union trace_eval_map_item *
4955 update_eval_map(union trace_eval_map_item *ptr)
4956 {
4957         if (!ptr->map.eval_string) {
4958                 if (ptr->tail.next) {
4959                         ptr = ptr->tail.next;
4960                         /* Set ptr to the next real item (skip head) */
4961                         ptr++;
4962                 } else
4963                         return NULL;
4964         }
4965         return ptr;
4966 }
4967
4968 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4969 {
4970         union trace_eval_map_item *ptr = v;
4971
4972         /*
4973          * Paranoid! If ptr points to end, we don't want to increment past it.
4974          * This really should never happen.
4975          */
4976         ptr = update_eval_map(ptr);
4977         if (WARN_ON_ONCE(!ptr))
4978                 return NULL;
4979
4980         ptr++;
4981
4982         (*pos)++;
4983
4984         ptr = update_eval_map(ptr);
4985
4986         return ptr;
4987 }
4988
4989 static void *eval_map_start(struct seq_file *m, loff_t *pos)
4990 {
4991         union trace_eval_map_item *v;
4992         loff_t l = 0;
4993
4994         mutex_lock(&trace_eval_mutex);
4995
4996         v = trace_eval_maps;
4997         if (v)
4998                 v++;
4999
5000         while (v && l < *pos) {
5001                 v = eval_map_next(m, v, &l);
5002         }
5003
5004         return v;
5005 }
5006
5007 static void eval_map_stop(struct seq_file *m, void *v)
5008 {
5009         mutex_unlock(&trace_eval_mutex);
5010 }
5011
5012 static int eval_map_show(struct seq_file *m, void *v)
5013 {
5014         union trace_eval_map_item *ptr = v;
5015
5016         seq_printf(m, "%s %ld (%s)\n",
5017                    ptr->map.eval_string, ptr->map.eval_value,
5018                    ptr->map.system);
5019
5020         return 0;
5021 }
5022
5023 static const struct seq_operations tracing_eval_map_seq_ops = {
5024         .start          = eval_map_start,
5025         .next           = eval_map_next,
5026         .stop           = eval_map_stop,
5027         .show           = eval_map_show,
5028 };
5029
5030 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5031 {
5032         if (tracing_disabled)
5033                 return -ENODEV;
5034
5035         return seq_open(filp, &tracing_eval_map_seq_ops);
5036 }
5037
5038 static const struct file_operations tracing_eval_map_fops = {
5039         .open           = tracing_eval_map_open,
5040         .read           = seq_read,
5041         .llseek         = seq_lseek,
5042         .release        = seq_release,
5043 };
5044
5045 static inline union trace_eval_map_item *
5046 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5047 {
5048         /* Return tail of array given the head */
5049         return ptr + ptr->head.length + 1;
5050 }
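/*
 * Layout assumed by the arithmetic above, for a module contributing
 * N maps:
 *
 *   ptr[0]        head  (module pointer, length = N)
 *   ptr[1..N]     the N trace_eval_map entries
 *   ptr[N + 1]    tail  (next pointer, or a zeroed terminator)
 *
 * so the tail of the block at @ptr is ptr + length + 1.
 */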
5051
5052 static void
5053 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5054                            int len)
5055 {
5056         struct trace_eval_map **stop;
5057         struct trace_eval_map **map;
5058         union trace_eval_map_item *map_array;
5059         union trace_eval_map_item *ptr;
5060
5061         stop = start + len;
5062
5063         /*
5064          * The trace_eval_maps contains the map plus a head and tail item,
5065          * where the head holds the module and length of array, and the
5066          * tail holds a pointer to the next list.
5067          */
5068         map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5069         if (!map_array) {
5070                 pr_warn("Unable to allocate trace eval mapping\n");
5071                 return;
5072         }
5073
5074         mutex_lock(&trace_eval_mutex);
5075
5076         if (!trace_eval_maps)
5077                 trace_eval_maps = map_array;
5078         else {
5079                 ptr = trace_eval_maps;
5080                 for (;;) {
5081                         ptr = trace_eval_jmp_to_tail(ptr);
5082                         if (!ptr->tail.next)
5083                                 break;
5084                         ptr = ptr->tail.next;
5085
5086                 }
5087                 ptr->tail.next = map_array;
5088         }
5089         map_array->head.mod = mod;
5090         map_array->head.length = len;
5091         map_array++;
5092
5093         for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5094                 map_array->map = **map;
5095                 map_array++;
5096         }
5097         memset(map_array, 0, sizeof(*map_array));
5098
5099         mutex_unlock(&trace_eval_mutex);
5100 }
5101
5102 static void trace_create_eval_file(struct dentry *d_tracer)
5103 {
5104         trace_create_file("eval_map", 0444, d_tracer,
5105                           NULL, &tracing_eval_map_fops);
5106 }
5107
5108 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5109 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5110 static inline void trace_insert_eval_map_file(struct module *mod,
5111                               struct trace_eval_map **start, int len) { }
5112 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5113
5114 static void trace_insert_eval_map(struct module *mod,
5115                                   struct trace_eval_map **start, int len)
5116 {
5117         struct trace_eval_map **map;
5118
5119         if (len <= 0)
5120                 return;
5121
5122         map = start;
5123
5124         trace_event_eval_update(map, len);
5125
5126         trace_insert_eval_map_file(mod, start, len);
5127 }
5128
5129 static ssize_t
5130 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5131                        size_t cnt, loff_t *ppos)
5132 {
5133         struct trace_array *tr = filp->private_data;
5134         char buf[MAX_TRACER_SIZE+2];
5135         int r;
5136
5137         mutex_lock(&trace_types_lock);
5138         r = sprintf(buf, "%s\n", tr->current_trace->name);
5139         mutex_unlock(&trace_types_lock);
5140
5141         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5142 }
5143
5144 int tracer_init(struct tracer *t, struct trace_array *tr)
5145 {
5146         tracing_reset_online_cpus(&tr->trace_buffer);
5147         return t->init(tr);
5148 }
5149
5150 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5151 {
5152         int cpu;
5153
5154         for_each_tracing_cpu(cpu)
5155                 per_cpu_ptr(buf->data, cpu)->entries = val;
5156 }
5157
5158 #ifdef CONFIG_TRACER_MAX_TRACE
5159 /* resize @trace_buf's per-cpu entries to match @size_buf's entries */
5160 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5161                                         struct trace_buffer *size_buf, int cpu_id)
5162 {
5163         int cpu, ret = 0;
5164
5165         if (cpu_id == RING_BUFFER_ALL_CPUS) {
5166                 for_each_tracing_cpu(cpu) {
5167                         ret = ring_buffer_resize(trace_buf->buffer,
5168                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5169                         if (ret < 0)
5170                                 break;
5171                         per_cpu_ptr(trace_buf->data, cpu)->entries =
5172                                 per_cpu_ptr(size_buf->data, cpu)->entries;
5173                 }
5174         } else {
5175                 ret = ring_buffer_resize(trace_buf->buffer,
5176                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5177                 if (ret == 0)
5178                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5179                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5180         }
5181
5182         return ret;
5183 }
5184 #endif /* CONFIG_TRACER_MAX_TRACE */
5185
5186 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5187                                         unsigned long size, int cpu)
5188 {
5189         int ret;
5190
5191         /*
5192          * If kernel or user changes the size of the ring buffer
5193          * we use the size that was given, and we can forget about
5194          * expanding it later.
5195          */
5196         ring_buffer_expanded = true;
5197
5198         /* May be called before buffers are initialized */
5199         if (!tr->trace_buffer.buffer)
5200                 return 0;
5201
5202         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5203         if (ret < 0)
5204                 return ret;
5205
5206 #ifdef CONFIG_TRACER_MAX_TRACE
5207         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5208             !tr->current_trace->use_max_tr)
5209                 goto out;
5210
5211         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5212         if (ret < 0) {
5213                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5214                                                      &tr->trace_buffer, cpu);
5215                 if (r < 0) {
5216                         /*
5217                          * AARGH! We are left with a differently
5218                          * sized max buffer!!!!
5219                          * The max buffer is our "snapshot" buffer.
5220                          * When a tracer needs a snapshot (one of the
5221                          * latency tracers), it swaps the max buffer
5222                          * with the saved snapshot. We succeeded in
5223                          * updating the size of the main buffer, but failed
5224                          * to update the size of the max buffer. When we then
5225                          * tried to reset the main buffer to its original
5226                          * size, we failed there too. This is very unlikely
5227                          * to happen, but if it does, warn and kill all
5228                          * tracing.
5229                          */
5230                         WARN_ON(1);
5231                         tracing_disabled = 1;
5232                 }
5233                 return ret;
5234         }
5235
5236         if (cpu == RING_BUFFER_ALL_CPUS)
5237                 set_buffer_entries(&tr->max_buffer, size);
5238         else
5239                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5240
5241  out:
5242 #endif /* CONFIG_TRACER_MAX_TRACE */
5243
5244         if (cpu == RING_BUFFER_ALL_CPUS)
5245                 set_buffer_entries(&tr->trace_buffer, size);
5246         else
5247                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5248
5249         return ret;
5250 }
5251
5252 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5253                                           unsigned long size, int cpu_id)
5254 {
5255         int ret = size;
5256
5257         mutex_lock(&trace_types_lock);
5258
5259         if (cpu_id != RING_BUFFER_ALL_CPUS) {
5260                 /* make sure this cpu is enabled in the mask */
5261                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5262                         ret = -EINVAL;
5263                         goto out;
5264                 }
5265         }
5266
5267         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5268         if (ret < 0)
5269                 ret = -ENOMEM;
5270
5271 out:
5272         mutex_unlock(&trace_types_lock);
5273
5274         return ret;
5275 }
5276
5277
5278 /**
5279  * tracing_update_buffers - used by tracing facility to expand ring buffers
5280  *
5281  * To save memory when tracing is never used on a system that has it
5282  * configured in, the ring buffers start at a minimum size. Once a
5283  * user starts to use the tracing facility, they need to grow to
5284  * their default size.
5285  *
5286  * This function is to be called when a tracer is about to be used.
5287  */
5288 int tracing_update_buffers(void)
5289 {
5290         int ret = 0;
5291
5292         mutex_lock(&trace_types_lock);
5293         if (!ring_buffer_expanded)
5294                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5295                                                 RING_BUFFER_ALL_CPUS);
5296         mutex_unlock(&trace_types_lock);
5297
5298         return ret;
5299 }
5300
5301 struct trace_option_dentry;
5302
5303 static void
5304 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5305
5306 /*
5307  * Used to clear out the tracer before deletion of an instance.
5308  * Must have trace_types_lock held.
5309  */
5310 static void tracing_set_nop(struct trace_array *tr)
5311 {
5312         if (tr->current_trace == &nop_trace)
5313                 return;
5314
5315         tr->current_trace->enabled--;
5316
5317         if (tr->current_trace->reset)
5318                 tr->current_trace->reset(tr);
5319
5320         tr->current_trace = &nop_trace;
5321 }
5322
5323 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5324 {
5325         /* Only enable if the directory has been created already. */
5326         if (!tr->dir)
5327                 return;
5328
5329         create_trace_option_files(tr, t);
5330 }
5331
5332 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5333 {
5334         struct tracer *t;
5335 #ifdef CONFIG_TRACER_MAX_TRACE
5336         bool had_max_tr;
5337 #endif
5338         int ret = 0;
5339
5340         mutex_lock(&trace_types_lock);
5341
5342         if (!ring_buffer_expanded) {
5343                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5344                                                 RING_BUFFER_ALL_CPUS);
5345                 if (ret < 0)
5346                         goto out;
5347                 ret = 0;
5348         }
5349
5350         for (t = trace_types; t; t = t->next) {
5351                 if (strcmp(t->name, buf) == 0)
5352                         break;
5353         }
5354         if (!t) {
5355                 ret = -EINVAL;
5356                 goto out;
5357         }
5358         if (t == tr->current_trace)
5359                 goto out;
5360
5361         /* Some tracers are only allowed for the top level buffer */
5362         if (!trace_ok_for_array(t, tr)) {
5363                 ret = -EINVAL;
5364                 goto out;
5365         }
5366
5367         /* If trace pipe files are being read, we can't change the tracer */
5368         if (tr->current_trace->ref) {
5369                 ret = -EBUSY;
5370                 goto out;
5371         }
5372
5373         trace_branch_disable();
5374
5375         tr->current_trace->enabled--;
5376
5377         if (tr->current_trace->reset)
5378                 tr->current_trace->reset(tr);
5379
5380         /* Current trace needs to be nop_trace before synchronize_sched */
5381         tr->current_trace = &nop_trace;
5382
5383 #ifdef CONFIG_TRACER_MAX_TRACE
5384         had_max_tr = tr->allocated_snapshot;
5385
5386         if (had_max_tr && !t->use_max_tr) {
5387                 /*
5388                  * We need to make sure that update_max_tr sees that
5389                  * current_trace changed to nop_trace to keep it from
5390                  * swapping the buffers after we resize them.
5391                  * update_max_tr is called with interrupts disabled,
5392                  * so a synchronize_sched() is sufficient.
5393                  */
5394                 synchronize_sched();
5395                 free_snapshot(tr);
5396         }
5397 #endif
5398
5399 #ifdef CONFIG_TRACER_MAX_TRACE
5400         if (t->use_max_tr && !had_max_tr) {
5401                 ret = alloc_snapshot(tr);
5402                 if (ret < 0)
5403                         goto out;
5404         }
5405 #endif
5406
5407         if (t->init) {
5408                 ret = tracer_init(t, tr);
5409                 if (ret)
5410                         goto out;
5411         }
5412
5413         tr->current_trace = t;
5414         tr->current_trace->enabled++;
5415         trace_branch_enable(tr);
5416  out:
5417         mutex_unlock(&trace_types_lock);
5418
5419         return ret;
5420 }
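/*
 * An illustrative round trip through the file backed by this handler
 * ("current_tracer", with a tracefs mount assumed):
 *
 *   echo function > current_tracer
 *   cat current_tracer
 *   echo nop > current_tracer
 */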
5421
5422 static ssize_t
5423 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5424                         size_t cnt, loff_t *ppos)
5425 {
5426         struct trace_array *tr = filp->private_data;
5427         char buf[MAX_TRACER_SIZE+1];
5428         int i;
5429         size_t ret;
5430         int err;
5431
5432         ret = cnt;
5433
5434         if (cnt > MAX_TRACER_SIZE)
5435                 cnt = MAX_TRACER_SIZE;
5436
5437         if (copy_from_user(buf, ubuf, cnt))
5438                 return -EFAULT;
5439
5440         buf[cnt] = 0;
5441
5442         /* strip trailing whitespace */
5443         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5444                 buf[i] = 0;
5445
5446         err = tracing_set_tracer(tr, buf);
5447         if (err)
5448                 return err;
5449
5450         *ppos += ret;
5451
5452         return ret;
5453 }
5454
5455 static ssize_t
5456 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5457                    size_t cnt, loff_t *ppos)
5458 {
5459         char buf[64];
5460         int r;
5461
5462         r = snprintf(buf, sizeof(buf), "%ld\n",
5463                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5464         if (r > sizeof(buf))
5465                 r = sizeof(buf);
5466         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5467 }
5468
5469 static ssize_t
5470 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5471                     size_t cnt, loff_t *ppos)
5472 {
5473         unsigned long val;
5474         int ret;
5475
5476         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5477         if (ret)
5478                 return ret;
5479
5480         *ptr = val * 1000;
5481
5482         return cnt;
5483 }
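/*
 * Values pass through this pair in microseconds but are stored in
 * nanoseconds; e.g. "echo 100 > tracing_thresh" stores 100000, and
 * reading the file prints 100 again.
 */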
5484
5485 static ssize_t
5486 tracing_thresh_read(struct file *filp, char __user *ubuf,
5487                     size_t cnt, loff_t *ppos)
5488 {
5489         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5490 }
5491
5492 static ssize_t
5493 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5494                      size_t cnt, loff_t *ppos)
5495 {
5496         struct trace_array *tr = filp->private_data;
5497         int ret;
5498
5499         mutex_lock(&trace_types_lock);
5500         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5501         if (ret < 0)
5502                 goto out;
5503
5504         if (tr->current_trace->update_thresh) {
5505                 ret = tr->current_trace->update_thresh(tr);
5506                 if (ret < 0)
5507                         goto out;
5508         }
5509
5510         ret = cnt;
5511 out:
5512         mutex_unlock(&trace_types_lock);
5513
5514         return ret;
5515 }
5516
5517 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5518
5519 static ssize_t
5520 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5521                      size_t cnt, loff_t *ppos)
5522 {
5523         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5524 }
5525
5526 static ssize_t
5527 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5528                       size_t cnt, loff_t *ppos)
5529 {
5530         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5531 }
5532
5533 #endif
5534
5535 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5536 {
5537         struct trace_array *tr = inode->i_private;
5538         struct trace_iterator *iter;
5539         int ret = 0;
5540
5541         if (tracing_disabled)
5542                 return -ENODEV;
5543
5544         if (trace_array_get(tr) < 0)
5545                 return -ENODEV;
5546
5547         mutex_lock(&trace_types_lock);
5548
5549         /* create a buffer to store the information to pass to userspace */
5550         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5551         if (!iter) {
5552                 ret = -ENOMEM;
5553                 __trace_array_put(tr);
5554                 goto out;
5555         }
5556
5557         trace_seq_init(&iter->seq);
5558         iter->trace = tr->current_trace;
5559
5560         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5561                 ret = -ENOMEM;
5562                 goto fail;
5563         }
5564
5565         /* trace pipe does not show start of buffer */
5566         cpumask_setall(iter->started);
5567
5568         if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5569                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5570
5571         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5572         if (trace_clocks[tr->clock_id].in_ns)
5573                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5574
5575         iter->tr = tr;
5576         iter->trace_buffer = &tr->trace_buffer;
5577         iter->cpu_file = tracing_get_cpu(inode);
5578         mutex_init(&iter->mutex);
5579         filp->private_data = iter;
5580
5581         if (iter->trace->pipe_open)
5582                 iter->trace->pipe_open(iter);
5583
5584         nonseekable_open(inode, filp);
5585
5586         tr->current_trace->ref++;
5587 out:
5588         mutex_unlock(&trace_types_lock);
5589         return ret;
5590
5591 fail:
5592         /* iter->trace points at the live tracer; it is not a copy to free */
5593         kfree(iter);
5594         __trace_array_put(tr);
5595         mutex_unlock(&trace_types_lock);
5596         return ret;
5597 }
5598
5599 static int tracing_release_pipe(struct inode *inode, struct file *file)
5600 {
5601         struct trace_iterator *iter = file->private_data;
5602         struct trace_array *tr = inode->i_private;
5603
5604         mutex_lock(&trace_types_lock);
5605
5606         tr->current_trace->ref--;
5607
5608         if (iter->trace->pipe_close)
5609                 iter->trace->pipe_close(iter);
5610
5611         mutex_unlock(&trace_types_lock);
5612
5613         free_cpumask_var(iter->started);
5614         mutex_destroy(&iter->mutex);
5615         kfree(iter);
5616
5617         trace_array_put(tr);
5618
5619         return 0;
5620 }
5621
5622 static unsigned int
5623 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5624 {
5625         struct trace_array *tr = iter->tr;
5626
5627         /* Iterators are static, they should be filled or empty */
5628         if (trace_buffer_iter(iter, iter->cpu_file))
5629                 return POLLIN | POLLRDNORM;
5630
5631         if (tr->trace_flags & TRACE_ITER_BLOCK)
5632                 /*
5633                  * Always select as readable when in blocking mode
5634                  */
5635                 return POLLIN | POLLRDNORM;
5636         else
5637                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5638                                              filp, poll_table);
5639 }
5640
5641 static unsigned int
5642 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5643 {
5644         struct trace_iterator *iter = filp->private_data;
5645
5646         return trace_poll(iter, filp, poll_table);
5647 }
5648
5649 /* Must be called with iter->mutex held. */
5650 static int tracing_wait_pipe(struct file *filp)
5651 {
5652         struct trace_iterator *iter = filp->private_data;
5653         int ret;
5654
5655         while (trace_empty(iter)) {
5656
5657                 if ((filp->f_flags & O_NONBLOCK)) {
5658                         return -EAGAIN;
5659                 }
5660
5661                 /*
5662                  * We block until we read something, or until tracing is
5663                  * disabled after something has been read. Blocking while
5664                  * tracing is off lets a user cat this file and then enable
5665                  * tracing; once something has been read, we give an EOF
5666                  * when tracing is disabled again.
5667                  *
5668                  * iter->pos will be 0 if we haven't read anything.
5669                  */
5670                 if (!tracing_is_on() && iter->pos)
5671                         break;
5672
5673                 mutex_unlock(&iter->mutex);
5674
5675                 ret = wait_on_pipe(iter, false);
5676
5677                 mutex_lock(&iter->mutex);
5678
5679                 if (ret)
5680                         return ret;
5681         }
5682
5683         return 1;
5684 }
5685
5686 /*
5687  * Consumer reader.
5688  */
5689 static ssize_t
5690 tracing_read_pipe(struct file *filp, char __user *ubuf,
5691                   size_t cnt, loff_t *ppos)
5692 {
5693         struct trace_iterator *iter = filp->private_data;
5694         ssize_t sret;
5695
5696         /*
5697          * Avoid more than one consumer on a single file descriptor.
5698          * This is just a matter of trace coherency; the ring buffer itself
5699          * is protected.
5700          */
5701         mutex_lock(&iter->mutex);
5702
5703         /* return any leftover data */
5704         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5705         if (sret != -EBUSY)
5706                 goto out;
5707
5708         trace_seq_init(&iter->seq);
5709
5710         if (iter->trace->read) {
5711                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5712                 if (sret)
5713                         goto out;
5714         }
5715
5716 waitagain:
5717         sret = tracing_wait_pipe(filp);
5718         if (sret <= 0)
5719                 goto out;
5720
5721         /* stop when tracing is finished */
5722         if (trace_empty(iter)) {
5723                 sret = 0;
5724                 goto out;
5725         }
5726
5727         if (cnt >= PAGE_SIZE)
5728                 cnt = PAGE_SIZE - 1;
5729
5730         /* reset all but tr, trace, and overruns */
5731         memset(&iter->seq, 0,
5732                sizeof(struct trace_iterator) -
5733                offsetof(struct trace_iterator, seq));
5734         cpumask_clear(iter->started);
5735         iter->pos = -1;
5736
5737         trace_event_read_lock();
5738         trace_access_lock(iter->cpu_file);
5739         while (trace_find_next_entry_inc(iter) != NULL) {
5740                 enum print_line_t ret;
5741                 int save_len = iter->seq.seq.len;
5742
5743                 ret = print_trace_line(iter);
5744                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5745                         /* don't print partial lines */
5746                         iter->seq.seq.len = save_len;
5747                         break;
5748                 }
5749                 if (ret != TRACE_TYPE_NO_CONSUME)
5750                         trace_consume(iter);
5751
5752                 if (trace_seq_used(&iter->seq) >= cnt)
5753                         break;
5754
5755                 /*
5756                  * The full flag being set means we hit the trace_seq buffer
5757                  * size and should have left via the partial-line condition
5758                  * above; if we get here, a trace_seq_* function was misused.
5759                  */
5760                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5761                           iter->ent->type);
5762         }
5763         trace_access_unlock(iter->cpu_file);
5764         trace_event_read_unlock();
5765
5766         /* Now copy what we have to the user */
5767         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5768         if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5769                 trace_seq_init(&iter->seq);
5770
5771         /*
5772          * If there was nothing to send to user, in spite of consuming trace
5773          * entries, go back to wait for more entries.
5774          */
5775         if (sret == -EBUSY)
5776                 goto waitagain;
5777
5778 out:
5779         mutex_unlock(&iter->mutex);
5780
5781         return sret;
5782 }
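/*
 * Usage note: trace_pipe is a consuming reader -- entries returned here
 * are removed from the ring buffer, so e.g. "cat trace_pipe" blocks
 * until events arrive and drains them as they are printed, unlike the
 * non-consuming "trace" file.
 */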
5783
5784 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5785                                      unsigned int idx)
5786 {
5787         __free_page(spd->pages[idx]);
5788 }
5789
5790 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5791         .can_merge              = 0,
5792         .confirm                = generic_pipe_buf_confirm,
5793         .release                = generic_pipe_buf_release,
5794         .steal                  = generic_pipe_buf_steal,
5795         .get                    = generic_pipe_buf_get,
5796 };
5797
5798 static size_t
5799 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5800 {
5801         size_t count;
5802         int save_len;
5803         int ret;
5804
5805         /* Seq buffer is page-sized, exactly what we need. */
5806         for (;;) {
5807                 save_len = iter->seq.seq.len;
5808                 ret = print_trace_line(iter);
5809
5810                 if (trace_seq_has_overflowed(&iter->seq)) {
5811                         iter->seq.seq.len = save_len;
5812                         break;
5813                 }
5814
5815                 /*
5816                  * This should not be hit, because it should only
5817                  * be set if the iter->seq overflowed. But check it
5818                  * anyway to be safe.
5819                  */
5820                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5821                         iter->seq.seq.len = save_len;
5822                         break;
5823                 }
5824
5825                 count = trace_seq_used(&iter->seq) - save_len;
5826                 if (rem < count) {
5827                         rem = 0;
5828                         iter->seq.seq.len = save_len;
5829                         break;
5830                 }
5831
5832                 if (ret != TRACE_TYPE_NO_CONSUME)
5833                         trace_consume(iter);
5834                 rem -= count;
5835                 if (!trace_find_next_entry_inc(iter))   {
5836                         rem = 0;
5837                         iter->ent = NULL;
5838                         break;
5839                 }
5840         }
5841
5842         return rem;
5843 }
5844
5845 static ssize_t tracing_splice_read_pipe(struct file *filp,
5846                                         loff_t *ppos,
5847                                         struct pipe_inode_info *pipe,
5848                                         size_t len,
5849                                         unsigned int flags)
5850 {
5851         struct page *pages_def[PIPE_DEF_BUFFERS];
5852         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5853         struct trace_iterator *iter = filp->private_data;
5854         struct splice_pipe_desc spd = {
5855                 .pages          = pages_def,
5856                 .partial        = partial_def,
5857                 .nr_pages       = 0, /* This gets updated below. */
5858                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5859                 .ops            = &tracing_pipe_buf_ops,
5860                 .spd_release    = tracing_spd_release_pipe,
5861         };
5862         ssize_t ret;
5863         size_t rem;
5864         unsigned int i;
5865
5866         if (splice_grow_spd(pipe, &spd))
5867                 return -ENOMEM;
5868
5869         mutex_lock(&iter->mutex);
5870
5871         if (iter->trace->splice_read) {
5872                 ret = iter->trace->splice_read(iter, filp,
5873                                                ppos, pipe, len, flags);
5874                 if (ret)
5875                         goto out_err;
5876         }
5877
5878         ret = tracing_wait_pipe(filp);
5879         if (ret <= 0)
5880                 goto out_err;
5881
5882         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5883                 ret = -EFAULT;
5884                 goto out_err;
5885         }
5886
5887         trace_event_read_lock();
5888         trace_access_lock(iter->cpu_file);
5889
5890         /* Fill as many pages as possible. */
5891         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5892                 spd.pages[i] = alloc_page(GFP_KERNEL);
5893                 if (!spd.pages[i])
5894                         break;
5895
5896                 rem = tracing_fill_pipe_page(rem, iter);
5897
5898                 /* Copy the data into the page, so we can start over. */
5899                 ret = trace_seq_to_buffer(&iter->seq,
5900                                           page_address(spd.pages[i]),
5901                                           trace_seq_used(&iter->seq));
5902                 if (ret < 0) {
5903                         __free_page(spd.pages[i]);
5904                         break;
5905                 }
5906                 spd.partial[i].offset = 0;
5907                 spd.partial[i].len = trace_seq_used(&iter->seq);
5908
5909                 trace_seq_init(&iter->seq);
5910         }
5911
5912         trace_access_unlock(iter->cpu_file);
5913         trace_event_read_unlock();
5914         mutex_unlock(&iter->mutex);
5915
5916         spd.nr_pages = i;
5917
5918         if (i)
5919                 ret = splice_to_pipe(pipe, &spd);
5920         else
5921                 ret = 0;
5922 out:
5923         splice_shrink_spd(&spd);
5924         return ret;
5925
5926 out_err:
5927         mutex_unlock(&iter->mutex);
5928         goto out;
5929 }
5930
5931 static ssize_t
5932 tracing_entries_read(struct file *filp, char __user *ubuf,
5933                      size_t cnt, loff_t *ppos)
5934 {
5935         struct inode *inode = file_inode(filp);
5936         struct trace_array *tr = inode->i_private;
5937         int cpu = tracing_get_cpu(inode);
5938         char buf[64];
5939         int r = 0;
5940         ssize_t ret;
5941
5942         mutex_lock(&trace_types_lock);
5943
5944         if (cpu == RING_BUFFER_ALL_CPUS) {
5945                 int cpu, buf_size_same;
5946                 unsigned long size;
5947
5948                 size = 0;
5949                 buf_size_same = 1;
5950                 /* check if all cpu sizes are same */
5951                 for_each_tracing_cpu(cpu) {
5952                         /* fill in the size from first enabled cpu */
5953                         if (size == 0)
5954                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5955                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5956                                 buf_size_same = 0;
5957                                 break;
5958                         }
5959                 }
5960
5961                 if (buf_size_same) {
5962                         if (!ring_buffer_expanded)
5963                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
5964                                             size >> 10,
5965                                             trace_buf_size >> 10);
5966                         else
5967                                 r = sprintf(buf, "%lu\n", size >> 10);
5968                 } else
5969                         r = sprintf(buf, "X\n");
5970         } else
5971                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5972
5973         mutex_unlock(&trace_types_lock);
5974
5975         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5976         return ret;
5977 }
5978
5979 static ssize_t
5980 tracing_entries_write(struct file *filp, const char __user *ubuf,
5981                       size_t cnt, loff_t *ppos)
5982 {
5983         struct inode *inode = file_inode(filp);
5984         struct trace_array *tr = inode->i_private;
5985         unsigned long val;
5986         int ret;
5987
5988         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5989         if (ret)
5990                 return ret;
5991
5992         /* must have at least 1 entry */
5993         if (!val)
5994                 return -EINVAL;
5995
5996         /* value is in KB */
5997         val <<= 10;
5998         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5999         if (ret < 0)
6000                 return ret;
6001
6002         *ppos += cnt;
6003
6004         return cnt;
6005 }
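/*
 * Example (illustrative): the value is per-cpu and in kilobytes, so
 *
 *   echo 4096 > buffer_size_kb
 *
 * requests 4 MB for each cpu's buffer, while the same write to
 * per_cpu/cpu0/buffer_size_kb resizes only cpu0.
 */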
6006
6007 static ssize_t
6008 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6009                                 size_t cnt, loff_t *ppos)
6010 {
6011         struct trace_array *tr = filp->private_data;
6012         char buf[64];
6013         int r, cpu;
6014         unsigned long size = 0, expanded_size = 0;
6015
6016         mutex_lock(&trace_types_lock);
6017         for_each_tracing_cpu(cpu) {
6018                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6019                 if (!ring_buffer_expanded)
6020                         expanded_size += trace_buf_size >> 10;
6021         }
6022         if (ring_buffer_expanded)
6023                 r = sprintf(buf, "%lu\n", size);
6024         else
6025                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6026         mutex_unlock(&trace_types_lock);
6027
6028         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6029 }
6030
6031 static ssize_t
6032 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6033                           size_t cnt, loff_t *ppos)
6034 {
6035         /*
6036          * There is no need to read what the user has written; this function
6037          * only exists so that "echo" to this file does not return an error.
6038          */
6039
6040         *ppos += cnt;
6041
6042         return cnt;
6043 }
6044
6045 static int
6046 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6047 {
6048         struct trace_array *tr = inode->i_private;
6049
6050         /* disable tracing ? */
6051         if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6052                 tracer_tracing_off(tr);
6053         /* resize the ring buffer to 0 */
6054         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6055
6056         trace_array_put(tr);
6057
6058         return 0;
6059 }
6060
6061 static ssize_t
6062 tracing_mark_write(struct file *filp, const char __user *ubuf,
6063                                         size_t cnt, loff_t *fpos)
6064 {
6065         struct trace_array *tr = filp->private_data;
6066         struct ring_buffer_event *event;
6067         struct ring_buffer *buffer;
6068         struct print_entry *entry;
6069         unsigned long irq_flags;
6070         const char faulted[] = "<faulted>";
6071         ssize_t written;
6072         int size;
6073         int len;
6074
6075 /* Used in tracing_mark_raw_write() as well */
6076 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6077
6078         if (tracing_disabled)
6079                 return -EINVAL;
6080
6081         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6082                 return -EINVAL;
6083
6084         if (cnt > TRACE_BUF_SIZE)
6085                 cnt = TRACE_BUF_SIZE;
6086
6087         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6088
6089         local_save_flags(irq_flags);
6090         size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6091
6092         /* If less than "<faulted>", then make sure we can still add that */
6093         if (cnt < FAULTED_SIZE)
6094                 size += FAULTED_SIZE - cnt;
6095
6096         buffer = tr->trace_buffer.buffer;
6097         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6098                                             irq_flags, preempt_count());
6099         if (unlikely(!event))
6100                 /* Ring buffer disabled, return as if not open for write */
6101                 return -EBADF;
6102
6103         entry = ring_buffer_event_data(event);
6104         entry->ip = _THIS_IP_;
6105
6106         len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6107         if (len) {
6108                 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6109                 cnt = FAULTED_SIZE;
6110                 written = -EFAULT;
6111         } else
6112                 written = cnt;
6113         len = cnt;
6114
6115         if (entry->buf[cnt - 1] != '\n') {
6116                 entry->buf[cnt] = '\n';
6117                 entry->buf[cnt + 1] = '\0';
6118         } else
6119                 entry->buf[cnt] = '\0';
6120
6121         __buffer_unlock_commit(buffer, event);
6122
6123         if (written > 0)
6124                 *fpos += written;
6125
6126         return written;
6127 }
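/*
 * Usage (illustrative): anything written to trace_marker is injected
 * into the trace as a print event, e.g.
 *
 *   echo "hello from user space" > trace_marker
 *
 * shows up as a "tracing_mark_write: hello from user space" line.
 */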
6128
6129 /* Limit it for now to 3K (including tag) */
6130 #define RAW_DATA_MAX_SIZE (1024*3)
6131
6132 static ssize_t
6133 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6134                                         size_t cnt, loff_t *fpos)
6135 {
6136         struct trace_array *tr = filp->private_data;
6137         struct ring_buffer_event *event;
6138         struct ring_buffer *buffer;
6139         struct raw_data_entry *entry;
6140         const char faulted[] = "<faulted>";
6141         unsigned long irq_flags;
6142         ssize_t written;
6143         int size;
6144         int len;
6145
6146 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6147
6148         if (tracing_disabled)
6149                 return -EINVAL;
6150
6151         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6152                 return -EINVAL;
6153
6154         /* The marker must at least have a tag id */
6155         if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6156                 return -EINVAL;
6157
6158         if (cnt > TRACE_BUF_SIZE)
6159                 cnt = TRACE_BUF_SIZE;
6160
6161         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6162
6163         local_save_flags(irq_flags);
6164         size = sizeof(*entry) + cnt;
6165         if (cnt < FAULT_SIZE_ID)
6166                 size += FAULT_SIZE_ID - cnt;
6167
6168         buffer = tr->trace_buffer.buffer;
6169         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6170                                             irq_flags, preempt_count());
6171         if (!event)
6172                 /* Ring buffer disabled, return as if not open for write */
6173                 return -EBADF;
6174
6175         entry = ring_buffer_event_data(event);
6176
6177         len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6178         if (len) {
6179                 entry->id = -1;
6180                 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6181                 written = -EFAULT;
6182         } else
6183                 written = cnt;
6184
6185         __buffer_unlock_commit(buffer, event);
6186
6187         if (written > 0)
6188                 *fpos += written;
6189
6190         return written;
6191 }
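/*
 * A minimal user-space sketch for trace_marker_raw (illustrative only;
 * includes and error handling elided, tracefs mount path assumed).
 * The payload must begin with an int tag, matching the
 * sizeof(unsigned int) check above:
 *
 *   struct {
 *           int  id;        // consumed as entry->id
 *           char data[8];   // opaque payload
 *   } raw = { .id = 42, .data = "payload" };
 *
 *   int fd = open("/sys/kernel/debug/tracing/trace_marker_raw", O_WRONLY);
 *   write(fd, &raw, sizeof(raw));
 *   close(fd);
 */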
6192
6193 static int tracing_clock_show(struct seq_file *m, void *v)
6194 {
6195         struct trace_array *tr = m->private;
6196         int i;
6197
6198         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6199                 seq_printf(m,
6200                         "%s%s%s%s", i ? " " : "",
6201                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6202                         i == tr->clock_id ? "]" : "");
6203         seq_putc(m, '\n');
6204
6205         return 0;
6206 }
6207
6208 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6209 {
6210         int i;
6211
6212         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6213                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6214                         break;
6215         }
6216         if (i == ARRAY_SIZE(trace_clocks))
6217                 return -EINVAL;
6218
6219         mutex_lock(&trace_types_lock);
6220
6221         tr->clock_id = i;
6222
6223         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6224
6225         /*
6226          * New clock may not be consistent with the previous clock.
6227          * Reset the buffer so that it doesn't have incomparable timestamps.
6228          */
6229         tracing_reset_online_cpus(&tr->trace_buffer);
6230
6231 #ifdef CONFIG_TRACER_MAX_TRACE
6232         if (tr->max_buffer.buffer)
6233                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6234         tracing_reset_online_cpus(&tr->max_buffer);
6235 #endif
6236
6237         mutex_unlock(&trace_types_lock);
6238
6239         return 0;
6240 }
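/*
 * Example (illustrative): reading trace_clock lists the clocks with
 * the active one bracketed, e.g. "[local] global counter ...", and
 *
 *   echo global > trace_clock
 *
 * switches clocks, resetting the buffers as noted above.
 */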
6241
6242 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6243                                    size_t cnt, loff_t *fpos)
6244 {
6245         struct seq_file *m = filp->private_data;
6246         struct trace_array *tr = m->private;
6247         char buf[64];
6248         const char *clockstr;
6249         int ret;
6250
6251         if (cnt >= sizeof(buf))
6252                 return -EINVAL;
6253
6254         if (copy_from_user(buf, ubuf, cnt))
6255                 return -EFAULT;
6256
6257         buf[cnt] = 0;
6258
6259         clockstr = strstrip(buf);
6260
6261         ret = tracing_set_clock(tr, clockstr);
6262         if (ret)
6263                 return ret;
6264
6265         *fpos += cnt;
6266
6267         return cnt;
6268 }
6269
6270 static int tracing_clock_open(struct inode *inode, struct file *file)
6271 {
6272         struct trace_array *tr = inode->i_private;
6273         int ret;
6274
6275         if (tracing_disabled)
6276                 return -ENODEV;
6277
6278         if (trace_array_get(tr))
6279                 return -ENODEV;
6280
6281         ret = single_open(file, tracing_clock_show, inode->i_private);
6282         if (ret < 0)
6283                 trace_array_put(tr);
6284
6285         return ret;
6286 }
6287
6288 struct ftrace_buffer_info {
6289         struct trace_iterator   iter;
6290         void                    *spare;
6291         unsigned int            spare_cpu;
6292         unsigned int            read;
6293 };
6294
6295 #ifdef CONFIG_TRACER_SNAPSHOT
6296 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6297 {
6298         struct trace_array *tr = inode->i_private;
6299         struct trace_iterator *iter;
6300         struct seq_file *m;
6301         int ret = 0;
6302
6303         if (trace_array_get(tr) < 0)
6304                 return -ENODEV;
6305
6306         if (file->f_mode & FMODE_READ) {
6307                 iter = __tracing_open(inode, file, true);
6308                 if (IS_ERR(iter))
6309                         ret = PTR_ERR(iter);
6310         } else {
6311                 /* Writes still need the seq_file to hold the private data */
6312                 ret = -ENOMEM;
6313                 m = kzalloc(sizeof(*m), GFP_KERNEL);
6314                 if (!m)
6315                         goto out;
6316                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6317                 if (!iter) {
6318                         kfree(m);
6319                         goto out;
6320                 }
6321                 ret = 0;
6322
6323                 iter->tr = tr;
6324                 iter->trace_buffer = &tr->max_buffer;
6325                 iter->cpu_file = tracing_get_cpu(inode);
6326                 m->private = iter;
6327                 file->private_data = m;
6328         }
6329 out:
6330         if (ret < 0)
6331                 trace_array_put(tr);
6332
6333         return ret;
6334 }
6335
6336 static ssize_t
6337 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6338                        loff_t *ppos)
6339 {
6340         struct seq_file *m = filp->private_data;
6341         struct trace_iterator *iter = m->private;
6342         struct trace_array *tr = iter->tr;
6343         unsigned long val;
6344         int ret;
6345
6346         ret = tracing_update_buffers();
6347         if (ret < 0)
6348                 return ret;
6349
6350         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6351         if (ret)
6352                 return ret;
6353
6354         mutex_lock(&trace_types_lock);
6355
6356         if (tr->current_trace->use_max_tr) {
6357                 ret = -EBUSY;
6358                 goto out;
6359         }
6360
6361         switch (val) {
6362         case 0:
6363                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6364                         ret = -EINVAL;
6365                         break;
6366                 }
6367                 if (tr->allocated_snapshot)
6368                         free_snapshot(tr);
6369                 break;
6370         case 1:
6371 /* Only allow per-cpu swap if the ring buffer supports it */
6372 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6373                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6374                         ret = -EINVAL;
6375                         break;
6376                 }
6377 #endif
6378                 if (!tr->allocated_snapshot) {
6379                         ret = alloc_snapshot(tr);
6380                         if (ret < 0)
6381                                 break;
6382                 }
6383                 local_irq_disable();
6384                 /* Now, we're going to swap */
6385                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6386                         update_max_tr(tr, current, smp_processor_id());
6387                 else
6388                         update_max_tr_single(tr, current, iter->cpu_file);
6389                 local_irq_enable();
6390                 break;
6391         default:
6392                 if (tr->allocated_snapshot) {
6393                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6394                                 tracing_reset_online_cpus(&tr->max_buffer);
6395                         else
6396                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
6397                 }
6398                 break;
6399         }
6400
6401         if (ret >= 0) {
6402                 *ppos += cnt;
6403                 ret = cnt;
6404         }
6405 out:
6406         mutex_unlock(&trace_types_lock);
6407         return ret;
6408 }
6409
6410 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6411 {
6412         struct seq_file *m = file->private_data;
6413         int ret;
6414
6415         ret = tracing_release(inode, file);
6416
6417         if (file->f_mode & FMODE_READ)
6418                 return ret;
6419
6420         /* If write only, the seq_file is just a stub */
6421         if (m)
6422                 kfree(m->private);
6423         kfree(m);
6424
6425         return 0;
6426 }
6427
6428 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6429 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6430                                     size_t count, loff_t *ppos);
6431 static int tracing_buffers_release(struct inode *inode, struct file *file);
6432 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6433                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6434
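     /* Like "trace_pipe_raw", but reads from the snapshot (max) buffer. */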
6435 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6436 {
6437         struct ftrace_buffer_info *info;
6438         int ret;
6439
6440         ret = tracing_buffers_open(inode, filp);
6441         if (ret < 0)
6442                 return ret;
6443
6444         info = filp->private_data;
6445
6446         if (info->iter.trace->use_max_tr) {
6447                 tracing_buffers_release(inode, filp);
6448                 return -EBUSY;
6449         }
6450
6451         info->iter.snapshot = true;
6452         info->iter.trace_buffer = &info->iter.tr->max_buffer;
6453
6454         return ret;
6455 }
6456
6457 #endif /* CONFIG_TRACER_SNAPSHOT */
6458
6459
6460 static const struct file_operations tracing_thresh_fops = {
6461         .open           = tracing_open_generic,
6462         .read           = tracing_thresh_read,
6463         .write          = tracing_thresh_write,
6464         .llseek         = generic_file_llseek,
6465 };
6466
6467 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6468 static const struct file_operations tracing_max_lat_fops = {
6469         .open           = tracing_open_generic,
6470         .read           = tracing_max_lat_read,
6471         .write          = tracing_max_lat_write,
6472         .llseek         = generic_file_llseek,
6473 };
6474 #endif
6475
6476 static const struct file_operations set_tracer_fops = {
6477         .open           = tracing_open_generic,
6478         .read           = tracing_set_trace_read,
6479         .write          = tracing_set_trace_write,
6480         .llseek         = generic_file_llseek,
6481 };
6482
6483 static const struct file_operations tracing_pipe_fops = {
6484         .open           = tracing_open_pipe,
6485         .poll           = tracing_poll_pipe,
6486         .read           = tracing_read_pipe,
6487         .splice_read    = tracing_splice_read_pipe,
6488         .release        = tracing_release_pipe,
6489         .llseek         = no_llseek,
6490 };
6491
6492 static const struct file_operations tracing_entries_fops = {
6493         .open           = tracing_open_generic_tr,
6494         .read           = tracing_entries_read,
6495         .write          = tracing_entries_write,
6496         .llseek         = generic_file_llseek,
6497         .release        = tracing_release_generic_tr,
6498 };
6499
6500 static const struct file_operations tracing_total_entries_fops = {
6501         .open           = tracing_open_generic_tr,
6502         .read           = tracing_total_entries_read,
6503         .llseek         = generic_file_llseek,
6504         .release        = tracing_release_generic_tr,
6505 };
6506
6507 static const struct file_operations tracing_free_buffer_fops = {
6508         .open           = tracing_open_generic_tr,
6509         .write          = tracing_free_buffer_write,
6510         .release        = tracing_free_buffer_release,
6511 };
6512
6513 static const struct file_operations tracing_mark_fops = {
6514         .open           = tracing_open_generic_tr,
6515         .write          = tracing_mark_write,
6516         .llseek         = generic_file_llseek,
6517         .release        = tracing_release_generic_tr,
6518 };
6519
6520 static const struct file_operations tracing_mark_raw_fops = {
6521         .open           = tracing_open_generic_tr,
6522         .write          = tracing_mark_raw_write,
6523         .llseek         = generic_file_llseek,
6524         .release        = tracing_release_generic_tr,
6525 };
6526
6527 static const struct file_operations trace_clock_fops = {
6528         .open           = tracing_clock_open,
6529         .read           = seq_read,
6530         .llseek         = seq_lseek,
6531         .release        = tracing_single_release_tr,
6532         .write          = tracing_clock_write,
6533 };
6534
6535 #ifdef CONFIG_TRACER_SNAPSHOT
6536 static const struct file_operations snapshot_fops = {
6537         .open           = tracing_snapshot_open,
6538         .read           = seq_read,
6539         .write          = tracing_snapshot_write,
6540         .llseek         = tracing_lseek,
6541         .release        = tracing_snapshot_release,
6542 };
6543
6544 static const struct file_operations snapshot_raw_fops = {
6545         .open           = snapshot_raw_open,
6546         .read           = tracing_buffers_read,
6547         .release        = tracing_buffers_release,
6548         .splice_read    = tracing_buffers_splice_read,
6549         .llseek         = no_llseek,
6550 };
6551
6552 #endif /* CONFIG_TRACER_SNAPSHOT */
6553
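     /*
      * Open a "trace_pipe_raw" file: pins both the trace array and the
      * current tracer (via its ref count) so neither can go away while
      * the raw buffer is being read.
      */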
6554 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6555 {
6556         struct trace_array *tr = inode->i_private;
6557         struct ftrace_buffer_info *info;
6558         int ret;
6559
6560         if (tracing_disabled)
6561                 return -ENODEV;
6562
6563         if (trace_array_get(tr) < 0)
6564                 return -ENODEV;
6565
6566         info = kzalloc(sizeof(*info), GFP_KERNEL);
6567         if (!info) {
6568                 trace_array_put(tr);
6569                 return -ENOMEM;
6570         }
6571
6572         mutex_lock(&trace_types_lock);
6573
6574         info->iter.tr           = tr;
6575         info->iter.cpu_file     = tracing_get_cpu(inode);
6576         info->iter.trace        = tr->current_trace;
6577         info->iter.trace_buffer = &tr->trace_buffer;
6578         info->spare             = NULL;
6579         /* Force reading ring buffer for first read */
6580         info->read              = (unsigned int)-1;
6581
6582         filp->private_data = info;
6583
6584         tr->current_trace->ref++;
6585
6586         mutex_unlock(&trace_types_lock);
6587
6588         ret = nonseekable_open(inode, filp);
6589         if (ret < 0)
6590                 trace_array_put(tr);
6591
6592         return ret;
6593 }
6594
6595 static unsigned int
6596 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6597 {
6598         struct ftrace_buffer_info *info = filp->private_data;
6599         struct trace_iterator *iter = &info->iter;
6600
6601         return trace_poll(iter, filp, poll_table);
6602 }
6603
6604 static ssize_t
6605 tracing_buffers_read(struct file *filp, char __user *ubuf,
6606                      size_t count, loff_t *ppos)
6607 {
6608         struct ftrace_buffer_info *info = filp->private_data;
6609         struct trace_iterator *iter = &info->iter;
6610         ssize_t ret = 0;
6611         ssize_t size;
6612
6613         if (!count)
6614                 return 0;
6615
6616 #ifdef CONFIG_TRACER_MAX_TRACE
6617         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6618                 return -EBUSY;
6619 #endif
6620
6621         if (!info->spare) {
6622                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6623                                                           iter->cpu_file);
6624                 if (IS_ERR(info->spare)) {
6625                         ret = PTR_ERR(info->spare);
6626                         info->spare = NULL;
6627                 } else {
6628                         info->spare_cpu = iter->cpu_file;
6629                 }
6630         }
6631         if (!info->spare)
6632                 return ret;
6633
6634         /* Do we have previous read data to read? */
6635         if (info->read < PAGE_SIZE)
6636                 goto read;
6637
6638  again:
6639         trace_access_lock(iter->cpu_file);
6640         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6641                                     &info->spare,
6642                                     count,
6643                                     iter->cpu_file, 0);
6644         trace_access_unlock(iter->cpu_file);
6645
6646         if (ret < 0) {
6647                 if (trace_empty(iter)) {
6648                         if ((filp->f_flags & O_NONBLOCK))
6649                                 return -EAGAIN;
6650
6651                         ret = wait_on_pipe(iter, false);
6652                         if (ret)
6653                                 return ret;
6654
6655                         goto again;
6656                 }
6657                 return 0;
6658         }
6659
6660         info->read = 0;
6661  read:
6662         size = PAGE_SIZE - info->read;
6663         if (size > count)
6664                 size = count;
6665
6666         ret = copy_to_user(ubuf, info->spare + info->read, size);
6667         if (ret == size)
6668                 return -EFAULT;
6669
6670         size -= ret;
6671
6672         *ppos += size;
6673         info->read += size;
6674
6675         return size;
6676 }
6677
6678 static int tracing_buffers_release(struct inode *inode, struct file *file)
6679 {
6680         struct ftrace_buffer_info *info = file->private_data;
6681         struct trace_iterator *iter = &info->iter;
6682
6683         mutex_lock(&trace_types_lock);
6684
6685         iter->tr->current_trace->ref--;
6686
6687         __trace_array_put(iter->tr);
6688
6689         if (info->spare)
6690                 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6691                                            info->spare_cpu, info->spare);
6692         kfree(info);
6693
6694         mutex_unlock(&trace_types_lock);
6695
6696         return 0;
6697 }
6698
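     /*
      * A reference-counted ring buffer page handed over to the pipe by
      * splice; the page goes back to the ring buffer when the last
      * reference is dropped.
      */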
6699 struct buffer_ref {
6700         struct ring_buffer      *buffer;
6701         void                    *page;
6702         int                     cpu;
6703         int                     ref;
6704 };
6705
6706 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6707                                     struct pipe_buffer *buf)
6708 {
6709         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6710
6711         if (--ref->ref)
6712                 return;
6713
6714         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6715         kfree(ref);
6716         buf->private = 0;
6717 }
6718
6719 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6720                                 struct pipe_buffer *buf)
6721 {
6722         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6723
6724         ref->ref++;
6725 }
6726
6727 /* Pipe buffer operations for a buffer. */
6728 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6729         .can_merge              = 0,
6730         .confirm                = generic_pipe_buf_confirm,
6731         .release                = buffer_pipe_buf_release,
6732         .steal                  = generic_pipe_buf_steal,
6733         .get                    = buffer_pipe_buf_get,
6734 };
6735
6736 /*
6737  * Callback from splice_to_pipe(); releases the remaining pages of
6738  * the spd in case we errored out while filling the pipe.
6739  */
6740 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6741 {
6742         struct buffer_ref *ref =
6743                 (struct buffer_ref *)spd->partial[i].private;
6744
6745         if (--ref->ref)
6746                 return;
6747
6748         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6749         kfree(ref);
6750         spd->partial[i].private = 0;
6751 }
6752
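     /*
      * Splice ring buffer pages into a pipe without copying: both *ppos
      * and len must be page aligned, and each page is wrapped in a
      * buffer_ref so it can be recycled once the pipe is done with it.
      */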
6753 static ssize_t
6754 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6755                             struct pipe_inode_info *pipe, size_t len,
6756                             unsigned int flags)
6757 {
6758         struct ftrace_buffer_info *info = file->private_data;
6759         struct trace_iterator *iter = &info->iter;
6760         struct partial_page partial_def[PIPE_DEF_BUFFERS];
6761         struct page *pages_def[PIPE_DEF_BUFFERS];
6762         struct splice_pipe_desc spd = {
6763                 .pages          = pages_def,
6764                 .partial        = partial_def,
6765                 .nr_pages_max   = PIPE_DEF_BUFFERS,
6766                 .ops            = &buffer_pipe_buf_ops,
6767                 .spd_release    = buffer_spd_release,
6768         };
6769         struct buffer_ref *ref;
6770         int entries, size, i;
6771         ssize_t ret = 0;
6772
6773 #ifdef CONFIG_TRACER_MAX_TRACE
6774         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6775                 return -EBUSY;
6776 #endif
6777
6778         if (*ppos & (PAGE_SIZE - 1))
6779                 return -EINVAL;
6780
6781         if (len & (PAGE_SIZE - 1)) {
6782                 if (len < PAGE_SIZE)
6783                         return -EINVAL;
6784                 len &= PAGE_MASK;
6785         }
6786
6787         if (splice_grow_spd(pipe, &spd))
6788                 return -ENOMEM;
6789
6790  again:
6791         trace_access_lock(iter->cpu_file);
6792         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6793
6794         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6795                 struct page *page;
6796                 int r;
6797
6798                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6799                 if (!ref) {
6800                         ret = -ENOMEM;
6801                         break;
6802                 }
6803
6804                 ref->ref = 1;
6805                 ref->buffer = iter->trace_buffer->buffer;
6806                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6807                 if (IS_ERR(ref->page)) {
6808                         ret = PTR_ERR(ref->page);
6809                         ref->page = NULL;
6810                         kfree(ref);
6811                         break;
6812                 }
6813                 ref->cpu = iter->cpu_file;
6814
6815                 r = ring_buffer_read_page(ref->buffer, &ref->page,
6816                                           len, iter->cpu_file, 1);
6817                 if (r < 0) {
6818                         ring_buffer_free_read_page(ref->buffer, ref->cpu,
6819                                                    ref->page);
6820                         kfree(ref);
6821                         break;
6822                 }
6823
6824                 /*
6825                  * Zero out any leftover data, as this page is
6826                  * going to user land.
6827                  */
6828                 size = ring_buffer_page_len(ref->page);
6829                 if (size < PAGE_SIZE)
6830                         memset(ref->page + size, 0, PAGE_SIZE - size);
6831
6832                 page = virt_to_page(ref->page);
6833
6834                 spd.pages[i] = page;
6835                 spd.partial[i].len = PAGE_SIZE;
6836                 spd.partial[i].offset = 0;
6837                 spd.partial[i].private = (unsigned long)ref;
6838                 spd.nr_pages++;
6839                 *ppos += PAGE_SIZE;
6840
6841                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6842         }
6843
6844         trace_access_unlock(iter->cpu_file);
6845         spd.nr_pages = i;
6846
6847         /* did we read anything? */
6848         if (!spd.nr_pages) {
6849                 if (ret)
6850                         goto out;
6851
6852                 ret = -EAGAIN;
6853                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6854                         goto out;
6855
6856                 ret = wait_on_pipe(iter, true);
6857                 if (ret)
6858                         goto out;
6859
6860                 goto again;
6861         }
6862
6863         ret = splice_to_pipe(pipe, &spd);
6864 out:
6865         splice_shrink_spd(&spd);
6866
6867         return ret;
6868 }
6869
6870 static const struct file_operations tracing_buffers_fops = {
6871         .open           = tracing_buffers_open,
6872         .read           = tracing_buffers_read,
6873         .poll           = tracing_buffers_poll,
6874         .release        = tracing_buffers_release,
6875         .splice_read    = tracing_buffers_splice_read,
6876         .llseek         = no_llseek,
6877 };
6878
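     /* Per-CPU "stats" file: print the ring buffer counters of one CPU. */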
6879 static ssize_t
6880 tracing_stats_read(struct file *filp, char __user *ubuf,
6881                    size_t count, loff_t *ppos)
6882 {
6883         struct inode *inode = file_inode(filp);
6884         struct trace_array *tr = inode->i_private;
6885         struct trace_buffer *trace_buf = &tr->trace_buffer;
6886         int cpu = tracing_get_cpu(inode);
6887         struct trace_seq *s;
6888         unsigned long cnt;
6889         unsigned long long t;
6890         unsigned long usec_rem;
6891
6892         s = kmalloc(sizeof(*s), GFP_KERNEL);
6893         if (!s)
6894                 return -ENOMEM;
6895
6896         trace_seq_init(s);
6897
6898         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6899         trace_seq_printf(s, "entries: %ld\n", cnt);
6900
6901         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6902         trace_seq_printf(s, "overrun: %ld\n", cnt);
6903
6904         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6905         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6906
6907         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6908         trace_seq_printf(s, "bytes: %ld\n", cnt);
6909
6910         if (trace_clocks[tr->clock_id].in_ns) {
6911                 /* local or global for trace_clock */
6912                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6913                 usec_rem = do_div(t, USEC_PER_SEC);
6914                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6915                                                                 t, usec_rem);
6916
6917                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6918                 usec_rem = do_div(t, USEC_PER_SEC);
6919                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6920         } else {
6921                 /* counter or tsc mode for trace_clock */
6922                 trace_seq_printf(s, "oldest event ts: %llu\n",
6923                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6924
6925                 trace_seq_printf(s, "now ts: %llu\n",
6926                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6927         }
6928
6929         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6930         trace_seq_printf(s, "dropped events: %ld\n", cnt);
6931
6932         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6933         trace_seq_printf(s, "read events: %ld\n", cnt);
6934
6935         count = simple_read_from_buffer(ubuf, count, ppos,
6936                                         s->buffer, trace_seq_used(s));
6937
6938         kfree(s);
6939
6940         return count;
6941 }
6942
6943 static const struct file_operations tracing_stats_fops = {
6944         .open           = tracing_open_generic_tr,
6945         .read           = tracing_stats_read,
6946         .llseek         = generic_file_llseek,
6947         .release        = tracing_release_generic_tr,
6948 };
6949
6950 #ifdef CONFIG_DYNAMIC_FTRACE
6951
6952 static ssize_t
6953 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6954                   size_t cnt, loff_t *ppos)
6955 {
6956         unsigned long *p = filp->private_data;
6957         char buf[64]; /* Not too big for a shallow stack */
6958         int r;
6959
6960         r = scnprintf(buf, 63, "%ld", *p);
6961         buf[r++] = '\n';
6962
6963         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6964 }
6965
6966 static const struct file_operations tracing_dyn_info_fops = {
6967         .open           = tracing_open_generic,
6968         .read           = tracing_read_dyn_info,
6969         .llseek         = generic_file_llseek,
6970 };
6971 #endif /* CONFIG_DYNAMIC_FTRACE */
6972
6973 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6974 static void
6975 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
6976                 struct trace_array *tr, struct ftrace_probe_ops *ops,
6977                 void *data)
6978 {
6979         tracing_snapshot_instance(tr);
6980 }
6981
6982 static void
6983 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
6984                       struct trace_array *tr, struct ftrace_probe_ops *ops,
6985                       void *data)
6986 {
6987         struct ftrace_func_mapper *mapper = data;
6988         long *count = NULL;
6989
6990         if (mapper)
6991                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6992
6993         if (count) {
6994
6995                 if (*count <= 0)
6996                         return;
6997
6998                 (*count)--;
6999         }
7000
7001         tracing_snapshot_instance(tr);
7002 }
7003
7004 static int
7005 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7006                       struct ftrace_probe_ops *ops, void *data)
7007 {
7008         struct ftrace_func_mapper *mapper = data;
7009         long *count = NULL;
7010
7011         seq_printf(m, "%ps:", (void *)ip);
7012
7013         seq_puts(m, "snapshot");
7014
7015         if (mapper)
7016                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7017
7018         if (count)
7019                 seq_printf(m, ":count=%ld\n", *count);
7020         else
7021                 seq_puts(m, ":unlimited\n");
7022
7023         return 0;
7024 }
7025
7026 static int
7027 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7028                      unsigned long ip, void *init_data, void **data)
7029 {
7030         struct ftrace_func_mapper *mapper = *data;
7031
7032         if (!mapper) {
7033                 mapper = allocate_ftrace_func_mapper();
7034                 if (!mapper)
7035                         return -ENOMEM;
7036                 *data = mapper;
7037         }
7038
7039         return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7040 }
7041
7042 static void
7043 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7044                      unsigned long ip, void *data)
7045 {
7046         struct ftrace_func_mapper *mapper = data;
7047
7048         if (!ip) {
7049                 if (!mapper)
7050                         return;
7051                 free_ftrace_func_mapper(mapper, NULL);
7052                 return;
7053         }
7054
7055         ftrace_func_mapper_remove_ip(mapper, ip);
7056 }
7057
7058 static struct ftrace_probe_ops snapshot_probe_ops = {
7059         .func                   = ftrace_snapshot,
7060         .print                  = ftrace_snapshot_print,
7061 };
7062
7063 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7064         .func                   = ftrace_count_snapshot,
7065         .print                  = ftrace_snapshot_print,
7066         .init                   = ftrace_snapshot_init,
7067         .free                   = ftrace_snapshot_free,
7068 };
7069
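     /*
      * Handle the "snapshot" command in set_ftrace_filter, e.g.
      *
      *   echo 'schedule:snapshot:5' > set_ftrace_filter
      *
      * which arms a probe that takes a snapshot whenever the function is
      * hit, at most <count> times if a count is given. A leading '!'
      * removes the probe again.
      */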
7070 static int
7071 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7072                                char *glob, char *cmd, char *param, int enable)
7073 {
7074         struct ftrace_probe_ops *ops;
7075         void *count = (void *)-1;
7076         char *number;
7077         int ret;
7078
7079         if (!tr)
7080                 return -ENODEV;
7081
7082         /* hash funcs only work with set_ftrace_filter */
7083         if (!enable)
7084                 return -EINVAL;
7085
7086         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7087
7088         if (glob[0] == '!')
7089                 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7090
7091         if (!param)
7092                 goto out_reg;
7093
7094         number = strsep(&param, ":");
7095
7096         if (!strlen(number))
7097                 goto out_reg;
7098
7099         /*
7100          * We use the callback data field (which is a pointer)
7101          * as our counter.
7102          */
7103         ret = kstrtoul(number, 0, (unsigned long *)&count);
7104         if (ret)
7105                 return ret;
7106
7107  out_reg:
7108         ret = alloc_snapshot(tr);
7109         if (ret < 0)
7110                 goto out;
7111
7112         ret = register_ftrace_function_probe(glob, tr, ops, count);
7113
7114  out:
7115         return ret < 0 ? ret : 0;
7116 }
7117
7118 static struct ftrace_func_command ftrace_snapshot_cmd = {
7119         .name                   = "snapshot",
7120         .func                   = ftrace_trace_snapshot_callback,
7121 };
7122
7123 static __init int register_snapshot_cmd(void)
7124 {
7125         return register_ftrace_command(&ftrace_snapshot_cmd);
7126 }
7127 #else
7128 static inline __init int register_snapshot_cmd(void) { return 0; }
7129 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7130
7131 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7132 {
7133         if (WARN_ON(!tr->dir))
7134                 return ERR_PTR(-ENODEV);
7135
7136         /* Top directory uses NULL as the parent */
7137         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7138                 return NULL;
7139
7140         /* All sub buffers have a descriptor */
7141         return tr->dir;
7142 }
7143
7144 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7145 {
7146         struct dentry *d_tracer;
7147
7148         if (tr->percpu_dir)
7149                 return tr->percpu_dir;
7150
7151         d_tracer = tracing_get_dentry(tr);
7152         if (IS_ERR(d_tracer))
7153                 return NULL;
7154
7155         tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7156
7157         WARN_ONCE(!tr->percpu_dir,
7158                   "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7159
7160         return tr->percpu_dir;
7161 }
7162
7163 static struct dentry *
7164 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7165                       void *data, long cpu, const struct file_operations *fops)
7166 {
7167         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7168
7169         if (ret) /* See tracing_get_cpu() */
7170                 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7171         return ret;
7172 }
7173
7174 static void
7175 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7176 {
7177         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7178         struct dentry *d_cpu;
7179         char cpu_dir[30]; /* 30 characters should be more than enough */
7180
7181         if (!d_percpu)
7182                 return;
7183
7184         snprintf(cpu_dir, 30, "cpu%ld", cpu);
7185         d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7186         if (!d_cpu) {
7187                 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7188                 return;
7189         }
7190
7191         /* per cpu trace_pipe */
7192         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7193                                 tr, cpu, &tracing_pipe_fops);
7194
7195         /* per cpu trace */
7196         trace_create_cpu_file("trace", 0644, d_cpu,
7197                                 tr, cpu, &tracing_fops);
7198
7199         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7200                                 tr, cpu, &tracing_buffers_fops);
7201
7202         trace_create_cpu_file("stats", 0444, d_cpu,
7203                                 tr, cpu, &tracing_stats_fops);
7204
7205         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7206                                 tr, cpu, &tracing_entries_fops);
7207
7208 #ifdef CONFIG_TRACER_SNAPSHOT
7209         trace_create_cpu_file("snapshot", 0644, d_cpu,
7210                                 tr, cpu, &snapshot_fops);
7211
7212         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7213                                 tr, cpu, &snapshot_raw_fops);
7214 #endif
7215 }
7216
7217 #ifdef CONFIG_FTRACE_SELFTEST
7218 /* Let selftest have access to static functions in this file */
7219 #include "trace_selftest.c"
7220 #endif
7221
7222 static ssize_t
7223 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7224                         loff_t *ppos)
7225 {
7226         struct trace_option_dentry *topt = filp->private_data;
7227         char *buf;
7228
7229         if (topt->flags->val & topt->opt->bit)
7230                 buf = "1\n";
7231         else
7232                 buf = "0\n";
7233
7234         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7235 }
7236
7237 static ssize_t
7238 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7239                          loff_t *ppos)
7240 {
7241         struct trace_option_dentry *topt = filp->private_data;
7242         unsigned long val;
7243         int ret;
7244
7245         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7246         if (ret)
7247                 return ret;
7248
7249         if (val != 0 && val != 1)
7250                 return -EINVAL;
7251
7252         if (!!(topt->flags->val & topt->opt->bit) != val) {
7253                 mutex_lock(&trace_types_lock);
7254                 ret = __set_tracer_option(topt->tr, topt->flags,
7255                                           topt->opt, !val);
7256                 mutex_unlock(&trace_types_lock);
7257                 if (ret)
7258                         return ret;
7259         }
7260
7261         *ppos += cnt;
7262
7263         return cnt;
7264 }
7265
7266
7267 static const struct file_operations trace_options_fops = {
7268         .open = tracing_open_generic,
7269         .read = trace_options_read,
7270         .write = trace_options_write,
7271         .llseek = generic_file_llseek,
7272 };
7273
7274 /*
7275  * In order to pass in both the trace_array descriptor as well as the index
7276  * to the flag that the trace option file represents, the trace_array
7277  * has a character array of trace_flags_index[], which holds the index
7278  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7279  * The address of this character array is passed to the flag option file
7280  * read/write callbacks.
7281  *
7282  * In order to extract both the index and the trace_array descriptor,
7283  * get_tr_index() uses the following algorithm.
7284  *
7285  *   idx = *ptr;
7286  *
7287  * This works because the pointer holds the address of an index array
7288  * element whose value equals its own position (index[1] == 1).
7289  *
7290  * To get the trace_array descriptor, subtract that index from the
7291  * pointer to reach the start of the index array itself:
7292  *
7293  *   ptr - idx == &index[0]
7294  *
7295  * Then a simple container_of() from that pointer gets us to the
7296  * trace_array descriptor.
7297  */
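     /*
      * Example: the option file for flag bit 5 is handed data == &index[5],
      * so *data == 5 and data - 5 == &index[0], from which container_of()
      * recovers the enclosing trace_array.
      */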
7298 static void get_tr_index(void *data, struct trace_array **ptr,
7299                          unsigned int *pindex)
7300 {
7301         *pindex = *(unsigned char *)data;
7302
7303         *ptr = container_of(data - *pindex, struct trace_array,
7304                             trace_flags_index);
7305 }
7306
7307 static ssize_t
7308 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7309                         loff_t *ppos)
7310 {
7311         void *tr_index = filp->private_data;
7312         struct trace_array *tr;
7313         unsigned int index;
7314         char *buf;
7315
7316         get_tr_index(tr_index, &tr, &index);
7317
7318         if (tr->trace_flags & (1 << index))
7319                 buf = "1\n";
7320         else
7321                 buf = "0\n";
7322
7323         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7324 }
7325
7326 static ssize_t
7327 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7328                          loff_t *ppos)
7329 {
7330         void *tr_index = filp->private_data;
7331         struct trace_array *tr;
7332         unsigned int index;
7333         unsigned long val;
7334         int ret;
7335
7336         get_tr_index(tr_index, &tr, &index);
7337
7338         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7339         if (ret)
7340                 return ret;
7341
7342         if (val != 0 && val != 1)
7343                 return -EINVAL;
7344
7345         mutex_lock(&trace_types_lock);
7346         ret = set_tracer_flag(tr, 1 << index, val);
7347         mutex_unlock(&trace_types_lock);
7348
7349         if (ret < 0)
7350                 return ret;
7351
7352         *ppos += cnt;
7353
7354         return cnt;
7355 }
7356
7357 static const struct file_operations trace_options_core_fops = {
7358         .open = tracing_open_generic,
7359         .read = trace_options_core_read,
7360         .write = trace_options_core_write,
7361         .llseek = generic_file_llseek,
7362 };
7363
7364 struct dentry *trace_create_file(const char *name,
7365                                  umode_t mode,
7366                                  struct dentry *parent,
7367                                  void *data,
7368                                  const struct file_operations *fops)
7369 {
7370         struct dentry *ret;
7371
7372         ret = tracefs_create_file(name, mode, parent, data, fops);
7373         if (!ret)
7374                 pr_warn("Could not create tracefs '%s' entry\n", name);
7375
7376         return ret;
7377 }
7378
7379
7380 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7381 {
7382         struct dentry *d_tracer;
7383
7384         if (tr->options)
7385                 return tr->options;
7386
7387         d_tracer = tracing_get_dentry(tr);
7388         if (IS_ERR(d_tracer))
7389                 return NULL;
7390
7391         tr->options = tracefs_create_dir("options", d_tracer);
7392         if (!tr->options) {
7393                 pr_warn("Could not create tracefs directory 'options'\n");
7394                 return NULL;
7395         }
7396
7397         return tr->options;
7398 }
7399
7400 static void
7401 create_trace_option_file(struct trace_array *tr,
7402                          struct trace_option_dentry *topt,
7403                          struct tracer_flags *flags,
7404                          struct tracer_opt *opt)
7405 {
7406         struct dentry *t_options;
7407
7408         t_options = trace_options_init_dentry(tr);
7409         if (!t_options)
7410                 return;
7411
7412         topt->flags = flags;
7413         topt->opt = opt;
7414         topt->tr = tr;
7415
7416         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7417                                     &trace_options_fops);
7418
7419 }
7420
7421 static void
7422 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7423 {
7424         struct trace_option_dentry *topts;
7425         struct trace_options *tr_topts;
7426         struct tracer_flags *flags;
7427         struct tracer_opt *opts;
7428         int cnt;
7429         int i;
7430
7431         if (!tracer)
7432                 return;
7433
7434         flags = tracer->flags;
7435
7436         if (!flags || !flags->opts)
7437                 return;
7438
7439         /*
7440          * If this is an instance, only create flags for tracers
7441          * the instance may have.
7442          */
7443         if (!trace_ok_for_array(tracer, tr))
7444                 return;
7445
7446         for (i = 0; i < tr->nr_topts; i++) {
7447                 /* Make sure there are no duplicate flags. */
7448                 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7449                         return;
7450         }
7451
7452         opts = flags->opts;
7453
7454         for (cnt = 0; opts[cnt].name; cnt++)
7455                 ;
7456
7457         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7458         if (!topts)
7459                 return;
7460
7461         tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7462                             GFP_KERNEL);
7463         if (!tr_topts) {
7464                 kfree(topts);
7465                 return;
7466         }
7467
7468         tr->topts = tr_topts;
7469         tr->topts[tr->nr_topts].tracer = tracer;
7470         tr->topts[tr->nr_topts].topts = topts;
7471         tr->nr_topts++;
7472
7473         for (cnt = 0; opts[cnt].name; cnt++) {
7474                 create_trace_option_file(tr, &topts[cnt], flags,
7475                                          &opts[cnt]);
7476                 WARN_ONCE(topts[cnt].entry == NULL,
7477                           "Failed to create trace option: %s",
7478                           opts[cnt].name);
7479         }
7480 }
7481
7482 static struct dentry *
7483 create_trace_option_core_file(struct trace_array *tr,
7484                               const char *option, long index)
7485 {
7486         struct dentry *t_options;
7487
7488         t_options = trace_options_init_dentry(tr);
7489         if (!t_options)
7490                 return NULL;
7491
7492         return trace_create_file(option, 0644, t_options,
7493                                  (void *)&tr->trace_flags_index[index],
7494                                  &trace_options_core_fops);
7495 }
7496
7497 static void create_trace_options_dir(struct trace_array *tr)
7498 {
7499         struct dentry *t_options;
7500         bool top_level = tr == &global_trace;
7501         int i;
7502
7503         t_options = trace_options_init_dentry(tr);
7504         if (!t_options)
7505                 return;
7506
7507         for (i = 0; trace_options[i]; i++) {
7508                 if (top_level ||
7509                     !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7510                         create_trace_option_core_file(tr, trace_options[i], i);
7511         }
7512 }
7513
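     /*
      * "tracing_on" file: reads report whether the ring buffer is
      * recording; writing 0 or 1 turns recording off or on and calls the
      * current tracer's stop()/start() callbacks.
      */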
7514 static ssize_t
7515 rb_simple_read(struct file *filp, char __user *ubuf,
7516                size_t cnt, loff_t *ppos)
7517 {
7518         struct trace_array *tr = filp->private_data;
7519         char buf[64];
7520         int r;
7521
7522         r = tracer_tracing_is_on(tr);
7523         r = sprintf(buf, "%d\n", r);
7524
7525         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7526 }
7527
7528 static ssize_t
7529 rb_simple_write(struct file *filp, const char __user *ubuf,
7530                 size_t cnt, loff_t *ppos)
7531 {
7532         struct trace_array *tr = filp->private_data;
7533         struct ring_buffer *buffer = tr->trace_buffer.buffer;
7534         unsigned long val;
7535         int ret;
7536
7537         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7538         if (ret)
7539                 return ret;
7540
7541         if (buffer) {
7542                 mutex_lock(&trace_types_lock);
7543                 if (val) {
7544                         tracer_tracing_on(tr);
7545                         if (tr->current_trace->start)
7546                                 tr->current_trace->start(tr);
7547                 } else {
7548                         tracer_tracing_off(tr);
7549                         if (tr->current_trace->stop)
7550                                 tr->current_trace->stop(tr);
7551                 }
7552                 mutex_unlock(&trace_types_lock);
7553         }
7554
7555         (*ppos)++;
7556
7557         return cnt;
7558 }
7559
7560 static const struct file_operations rb_simple_fops = {
7561         .open           = tracing_open_generic_tr,
7562         .read           = rb_simple_read,
7563         .write          = rb_simple_write,
7564         .release        = tracing_release_generic_tr,
7565         .llseek         = default_llseek,
7566 };
7567
7568 struct dentry *trace_instance_dir;
7569
7570 static void
7571 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7572
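     /* Allocate one trace buffer: a ring buffer plus its per-CPU data. */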
7573 static int
7574 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7575 {
7576         enum ring_buffer_flags rb_flags;
7577
7578         rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7579
7580         buf->tr = tr;
7581
7582         buf->buffer = ring_buffer_alloc(size, rb_flags);
7583         if (!buf->buffer)
7584                 return -ENOMEM;
7585
7586         buf->data = alloc_percpu(struct trace_array_cpu);
7587         if (!buf->data) {
7588                 ring_buffer_free(buf->buffer);
                     buf->buffer = NULL;     /* don't leave a stale pointer behind */
7589                 return -ENOMEM;
7590         }
7591
7592         /* Allocate the first page for all buffers */
7593         set_buffer_entries(&tr->trace_buffer,
7594                            ring_buffer_size(tr->trace_buffer.buffer, 0));
7595
7596         return 0;
7597 }
7598
7599 static int allocate_trace_buffers(struct trace_array *tr, int size)
7600 {
7601         int ret;
7602
7603         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7604         if (ret)
7605                 return ret;
7606
7607 #ifdef CONFIG_TRACER_MAX_TRACE
7608         ret = allocate_trace_buffer(tr, &tr->max_buffer,
7609                                     allocate_snapshot ? size : 1);
7610         if (WARN_ON(ret)) {
                     /* Clear the pointers so the buffers are not freed a second time */
7611                 ring_buffer_free(tr->trace_buffer.buffer);
                     tr->trace_buffer.buffer = NULL;
7612                 free_percpu(tr->trace_buffer.data);
                     tr->trace_buffer.data = NULL;
7613                 return -ENOMEM;
7614         }
7615         tr->allocated_snapshot = allocate_snapshot;
7616
7617         /*
7618          * Only the top level trace array gets its snapshot allocated
7619          * from the kernel command line.
7620          */
7621         allocate_snapshot = false;
7622 #endif
7623         return 0;
7624 }
7625
7626 static void free_trace_buffer(struct trace_buffer *buf)
7627 {
7628         if (buf->buffer) {
7629                 ring_buffer_free(buf->buffer);
7630                 buf->buffer = NULL;
7631                 free_percpu(buf->data);
7632                 buf->data = NULL;
7633         }
7634 }
7635
7636 static void free_trace_buffers(struct trace_array *tr)
7637 {
7638         if (!tr)
7639                 return;
7640
7641         free_trace_buffer(&tr->trace_buffer);
7642
7643 #ifdef CONFIG_TRACER_MAX_TRACE
7644         free_trace_buffer(&tr->max_buffer);
7645 #endif
7646 }
7647
7648 static void init_trace_flags_index(struct trace_array *tr)
7649 {
7650         int i;
7651
7652         /* Used by the trace options files */
7653         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7654                 tr->trace_flags_index[i] = i;
7655 }
7656
7657 static void __update_tracer_options(struct trace_array *tr)
7658 {
7659         struct tracer *t;
7660
7661         for (t = trace_types; t; t = t->next)
7662                 add_tracer_options(tr, t);
7663 }
7664
7665 static void update_tracer_options(struct trace_array *tr)
7666 {
7667         mutex_lock(&trace_types_lock);
7668         __update_tracer_options(tr);
7669         mutex_unlock(&trace_types_lock);
7670 }
7671
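     /*
      * Called when a directory is created below instances/, e.g.
      *
      *   mkdir /sys/kernel/tracing/instances/foo
      *
      * and builds a new trace_array with its own buffers, flags and
      * tracefs files.
      */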
7672 static int instance_mkdir(const char *name)
7673 {
7674         struct trace_array *tr;
7675         int ret;
7676
7677         mutex_lock(&trace_types_lock);
7678
7679         ret = -EEXIST;
7680         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7681                 if (tr->name && strcmp(tr->name, name) == 0)
7682                         goto out_unlock;
7683         }
7684
7685         ret = -ENOMEM;
7686         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7687         if (!tr)
7688                 goto out_unlock;
7689
7690         tr->name = kstrdup(name, GFP_KERNEL);
7691         if (!tr->name)
7692                 goto out_free_tr;
7693
7694         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7695                 goto out_free_tr;
7696
7697         tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7698
7699         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7700
7701         raw_spin_lock_init(&tr->start_lock);
7702
7703         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7704
7705         tr->current_trace = &nop_trace;
7706
7707         INIT_LIST_HEAD(&tr->systems);
7708         INIT_LIST_HEAD(&tr->events);
7709
7710         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7711                 goto out_free_tr;
7712
7713         tr->dir = tracefs_create_dir(name, trace_instance_dir);
7714         if (!tr->dir)
7715                 goto out_free_tr;
7716
7717         ret = event_trace_add_tracer(tr->dir, tr);
7718         if (ret) {
7719                 tracefs_remove_recursive(tr->dir);
7720                 goto out_free_tr;
7721         }
7722
7723         ftrace_init_trace_array(tr);
7724
7725         init_tracer_tracefs(tr, tr->dir);
7726         init_trace_flags_index(tr);
7727         __update_tracer_options(tr);
7728
7729         list_add(&tr->list, &ftrace_trace_arrays);
7730
7731         mutex_unlock(&trace_types_lock);
7732
7733         return 0;
7734
7735  out_free_tr:
7736         free_trace_buffers(tr);
7737         free_cpumask_var(tr->tracing_cpumask);
7738         kfree(tr->name);
7739         kfree(tr);
7740
7741  out_unlock:
7742         mutex_unlock(&trace_types_lock);
7743
7744         return ret;
7745
7746 }
7747
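     /*
      * Called when an instance directory is removed: fails with -EBUSY
      * while the instance still has users, otherwise tears down its
      * events, probes, tracefs files and buffers.
      */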
7748 static int instance_rmdir(const char *name)
7749 {
7750         struct trace_array *tr;
7751         int found = 0;
7752         int ret;
7753         int i;
7754
7755         mutex_lock(&trace_types_lock);
7756
7757         ret = -ENODEV;
7758         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7759                 if (tr->name && strcmp(tr->name, name) == 0) {
7760                         found = 1;
7761                         break;
7762                 }
7763         }
7764         if (!found)
7765                 goto out_unlock;
7766
7767         ret = -EBUSY;
7768         if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7769                 goto out_unlock;
7770
7771         list_del(&tr->list);
7772
7773         /* Disable all the flags that were enabled coming in */
7774         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7775                 if ((1 << i) & ZEROED_TRACE_FLAGS)
7776                         set_tracer_flag(tr, 1 << i, 0);
7777         }
7778
7779         tracing_set_nop(tr);
7780         clear_ftrace_function_probes(tr);
7781         event_trace_del_tracer(tr);
7782         ftrace_clear_pids(tr);
7783         ftrace_destroy_function_files(tr);
7784         tracefs_remove_recursive(tr->dir);
7785         free_trace_buffers(tr);
7786
7787         for (i = 0; i < tr->nr_topts; i++) {
7788                 kfree(tr->topts[i].topts);
7789         }
7790         kfree(tr->topts);
7791
7792         free_cpumask_var(tr->tracing_cpumask);
7793         kfree(tr->name);
7794         kfree(tr);
7795
7796         ret = 0;
7797
7798  out_unlock:
7799         mutex_unlock(&trace_types_lock);
7800
7801         return ret;
7802 }
7803
7804 static __init void create_trace_instances(struct dentry *d_tracer)
7805 {
7806         trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7807                                                          instance_mkdir,
7808                                                          instance_rmdir);
7809         if (WARN_ON(!trace_instance_dir))
7810                 return;
7811 }
7812
7813 static void
7814 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7815 {
7816         int cpu;
7817
7818         trace_create_file("available_tracers", 0444, d_tracer,
7819                         tr, &show_traces_fops);
7820
7821         trace_create_file("current_tracer", 0644, d_tracer,
7822                         tr, &set_tracer_fops);
7823
7824         trace_create_file("tracing_cpumask", 0644, d_tracer,
7825                           tr, &tracing_cpumask_fops);
7826
7827         trace_create_file("trace_options", 0644, d_tracer,
7828                           tr, &tracing_iter_fops);
7829
7830         trace_create_file("trace", 0644, d_tracer,
7831                           tr, &tracing_fops);
7832
7833         trace_create_file("trace_pipe", 0444, d_tracer,
7834                           tr, &tracing_pipe_fops);
7835
7836         trace_create_file("buffer_size_kb", 0644, d_tracer,
7837                           tr, &tracing_entries_fops);
7838
7839         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7840                           tr, &tracing_total_entries_fops);
7841
7842         trace_create_file("free_buffer", 0200, d_tracer,
7843                           tr, &tracing_free_buffer_fops);
7844
7845         trace_create_file("trace_marker", 0220, d_tracer,
7846                           tr, &tracing_mark_fops);
7847
7848         trace_create_file("trace_marker_raw", 0220, d_tracer,
7849                           tr, &tracing_mark_raw_fops);
7850
7851         trace_create_file("trace_clock", 0644, d_tracer, tr,
7852                           &trace_clock_fops);
7853
7854         trace_create_file("tracing_on", 0644, d_tracer,
7855                           tr, &rb_simple_fops);
7856
7857         create_trace_options_dir(tr);
7858
7859 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7860         trace_create_file("tracing_max_latency", 0644, d_tracer,
7861                         &tr->max_latency, &tracing_max_lat_fops);
7862 #endif
7863
7864         if (ftrace_create_function_files(tr, d_tracer))
7865                 WARN(1, "Could not allocate function filter files");
7866
7867 #ifdef CONFIG_TRACER_SNAPSHOT
7868         trace_create_file("snapshot", 0644, d_tracer,
7869                           tr, &snapshot_fops);
7870 #endif
7871
7872         for_each_tracing_cpu(cpu)
7873                 tracing_init_tracefs_percpu(tr, cpu);
7874
7875         ftrace_init_tracefs(tr, d_tracer);
7876 }
7877
7878 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
7879 {
7880         struct vfsmount *mnt;
7881         struct file_system_type *type;
7882
7883         /*
7884          * To maintain backward compatibility for tools that mount
7885          * debugfs to get to the tracing facility, tracefs is automatically
7886          * mounted to the debugfs/tracing directory.
7887          */
7888         type = get_fs_type("tracefs");
7889         if (!type)
7890                 return NULL;
7891         mnt = vfs_submount(mntpt, type, "tracefs", NULL);
7892         put_filesystem(type);
7893         if (IS_ERR(mnt))
7894                 return NULL;
7895         mntget(mnt);
7896
7897         return mnt;
7898 }
7899
7900 /**
7901  * tracing_init_dentry - initialize top level trace array
7902  *
7903  * This is called when creating files or directories in the tracing
7904  * directory. It is called via fs_initcall() by the boot-up code and
7905  * returns the dentry of the top level tracing directory.
7906  */
7907 struct dentry *tracing_init_dentry(void)
7908 {
7909         struct trace_array *tr = &global_trace;
7910
7911         /* The top level trace array uses NULL as parent */
7912         if (tr->dir)
7913                 return NULL;
7914
7915         if (WARN_ON(!tracefs_initialized()) ||
7916                 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7917                  WARN_ON(!debugfs_initialized())))
7918                 return ERR_PTR(-ENODEV);
7919
7920         /*
7921          * As there may still be users that expect the tracing
7922          * files to exist in debugfs/tracing, we must automount
7923          * the tracefs file system there, so older tools still
7924          * work with the newer kernel.
7925          */
7926         tr->dir = debugfs_create_automount("tracing", NULL,
7927                                            trace_automount, NULL);
7928         if (!tr->dir) {
7929                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7930                 return ERR_PTR(-ENOMEM);
7931         }
7932
7933         return NULL;
7934 }
7935
7936 extern struct trace_eval_map *__start_ftrace_eval_maps[];
7937 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
7938
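     /* Register the eval maps built into the kernel image itself. */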
7939 static void __init trace_eval_init(void)
7940 {
7941         int len;
7942
7943         len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
7944         trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
7945 }
7946
7947 #ifdef CONFIG_MODULES
7948 static void trace_module_add_evals(struct module *mod)
7949 {
7950         if (!mod->num_trace_evals)
7951                 return;
7952
7953         /*
7954          * Modules with bad taint do not have events created; do
7955          * not bother with their eval maps either.
7956          */
7957         if (trace_module_has_bad_taint(mod))
7958                 return;
7959
7960         trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
7961 }
7962
7963 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
7964 static void trace_module_remove_evals(struct module *mod)
7965 {
7966         union trace_eval_map_item *map;
7967         union trace_eval_map_item **last = &trace_eval_maps;
7968
7969         if (!mod->num_trace_evals)
7970                 return;
7971
7972         mutex_lock(&trace_eval_mutex);
7973
7974         map = trace_eval_maps;
7975
7976         while (map) {
7977                 if (map->head.mod == mod)
7978                         break;
7979                 map = trace_eval_jmp_to_tail(map);
7980                 last = &map->tail.next;
7981                 map = map->tail.next;
7982         }
7983         if (!map)
7984                 goto out;
7985
7986         *last = trace_eval_jmp_to_tail(map)->tail.next;
7987         kfree(map);
7988  out:
7989         mutex_unlock(&trace_eval_mutex);
7990 }
7991 #else
7992 static inline void trace_module_remove_evals(struct module *mod) { }
7993 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
7994
7995 static int trace_module_notify(struct notifier_block *self,
7996                                unsigned long val, void *data)
7997 {
7998         struct module *mod = data;
7999
8000         switch (val) {
8001         case MODULE_STATE_COMING:
8002                 trace_module_add_evals(mod);
8003                 break;
8004         case MODULE_STATE_GOING:
8005                 trace_module_remove_evals(mod);
8006                 break;
8007         }
8008
8009         return 0;
8010 }
8011
8012 static struct notifier_block trace_module_nb = {
8013         .notifier_call = trace_module_notify,
8014         .priority = 0,
8015 };
8016 #endif /* CONFIG_MODULES */
8017
8018 static __init int tracer_init_tracefs(void)
8019 {
8020         struct dentry *d_tracer;
8021
8022         trace_access_lock_init();
8023
8024         d_tracer = tracing_init_dentry();
8025         if (IS_ERR(d_tracer))
8026                 return 0;
8027
8028         init_tracer_tracefs(&global_trace, d_tracer);
8029         ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8030
8031         trace_create_file("tracing_thresh", 0644, d_tracer,
8032                         &global_trace, &tracing_thresh_fops);
8033
8034         trace_create_file("README", 0444, d_tracer,
8035                         NULL, &tracing_readme_fops);
8036
8037         trace_create_file("saved_cmdlines", 0444, d_tracer,
8038                         NULL, &tracing_saved_cmdlines_fops);
8039
8040         trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8041                           NULL, &tracing_saved_cmdlines_size_fops);
8042
8043         trace_create_file("saved_tgids", 0444, d_tracer,
8044                         NULL, &tracing_saved_tgids_fops);
8045
8046         trace_eval_init();
8047
8048         trace_create_eval_file(d_tracer);
8049
8050 #ifdef CONFIG_MODULES
8051         register_module_notifier(&trace_module_nb);
8052 #endif
8053
8054 #ifdef CONFIG_DYNAMIC_FTRACE
8055         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8056                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8057 #endif
8058
8059         create_trace_instances(d_tracer);
8060
8061         update_tracer_options(&global_trace);
8062
8063         return 0;
8064 }
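
/*
 * Editor's note: a hedged sketch of adding one more read-only file with
 * the same trace_create_file() helper used above. The name, fops, and
 * value are hypothetical.
 */
static ssize_t example_read(struct file *filp, char __user *ubuf,
                            size_t cnt, loff_t *ppos)
{
        char buf[16];
        int len = scnprintf(buf, sizeof(buf), "%d\n", 42);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static const struct file_operations example_fops = {
        .open = tracing_open_generic,
        .read = example_read,
};

/* In tracer_init_tracefs():
 *      trace_create_file("example", 0444, d_tracer, NULL, &example_fops);
 */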
8065
8066 static int trace_panic_handler(struct notifier_block *this,
8067                                unsigned long event, void *unused)
8068 {
8069         if (ftrace_dump_on_oops)
8070                 ftrace_dump(ftrace_dump_on_oops);
8071         return NOTIFY_OK;
8072 }
8073
8074 static struct notifier_block trace_panic_notifier = {
8075         .notifier_call  = trace_panic_handler,
8076         .next           = NULL,
8077         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
8078 };
8079
8080 static int trace_die_handler(struct notifier_block *self,
8081                              unsigned long val,
8082                              void *data)
8083 {
8084         switch (val) {
8085         case DIE_OOPS:
8086                 if (ftrace_dump_on_oops)
8087                         ftrace_dump(ftrace_dump_on_oops);
8088                 break;
8089         default:
8090                 break;
8091         }
8092         return NOTIFY_OK;
8093 }
8094
8095 static struct notifier_block trace_die_notifier = {
8096         .notifier_call = trace_die_handler,
8097         .priority = 200
8098 };
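
/*
 * Editor's note: higher .priority callbacks on a notifier chain run
 * first, so the die handler above (priority 200) dumps the trace buffer
 * before most other DIE_OOPS consumers. A hedged sketch of hooking the
 * same chain with a hypothetical handler:
 */
static int example_die_handler(struct notifier_block *self,
                               unsigned long val, void *data)
{
        if (val == DIE_OOPS)
                pr_emerg("example: oops observed\n");
        return NOTIFY_OK;
}

static struct notifier_block example_die_nb = {
        .notifier_call  = example_die_handler,
        .priority       = 0,    /* run after the trace dump above */
};

/* Registration: register_die_notifier(&example_die_nb); */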
8099
8100 /*
8101  * printk is capped at 1024 characters; we really don't need it that big.
8102  * Nothing should be printing 1000 characters anyway.
8103  */
8104 #define TRACE_MAX_PRINT         1000
8105
8106 /*
8107  * Define here KERN_TRACE so that we have one place to modify
8108  * it if we decide to change what log level the ftrace dump
8109  * should be at.
8110  */
8111 #define KERN_TRACE              KERN_EMERG
8112
8113 void
8114 trace_printk_seq(struct trace_seq *s)
8115 {
8116         /* Probably should print a warning here. */
8117         if (s->seq.len >= TRACE_MAX_PRINT)
8118                 s->seq.len = TRACE_MAX_PRINT;
8119
8120         /*
8121          * More paranoid code. Although the buffer size is set to
8122          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8123          * an extra layer of protection.
8124          */
8125         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8126                 s->seq.len = s->seq.size - 1;
8127
8128         /* Should already be NUL terminated, but we are paranoid. */
8129         s->buffer[s->seq.len] = 0;
8130
8131         printk(KERN_TRACE "%s", s->buffer);
8132
8133         trace_seq_init(s);
8134 }
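
/*
 * Editor's note: an illustrative (hypothetical) caller. A trace_seq
 * accumulates formatted text in a page-sized buffer; trace_printk_seq()
 * flushes it to the console at KERN_TRACE level and re-initializes it.
 */
static void example_print_status(int cpu, int count)
{
        /* static: struct trace_seq embeds a PAGE_SIZE buffer */
        static struct trace_seq s;

        trace_seq_init(&s);
        trace_seq_printf(&s, "cpu=%d count=%d\n", cpu, count);
        trace_printk_seq(&s);
}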
8135
8136 void trace_init_global_iter(struct trace_iterator *iter)
8137 {
8138         iter->tr = &global_trace;
8139         iter->trace = iter->tr->current_trace;
8140         iter->cpu_file = RING_BUFFER_ALL_CPUS;
8141         iter->trace_buffer = &global_trace.trace_buffer;
8142
8143         if (iter->trace && iter->trace->open)
8144                 iter->trace->open(iter);
8145
8146         /* Annotate start of buffers if we had overruns */
8147         if (ring_buffer_overruns(iter->trace_buffer->buffer))
8148                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8149
8150         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8151         if (trace_clocks[iter->tr->clock_id].in_ns)
8152                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8153 }
8154
8155 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8156 {
8157         /* use static because iter can be a bit big for the stack */
8158         static struct trace_iterator iter;
8159         static atomic_t dump_running;
8160         struct trace_array *tr = &global_trace;
8161         unsigned int old_userobj;
8162         unsigned long flags;
8163         int cnt = 0, cpu;
8164
8165         /* Only allow one dump user at a time. */
8166         if (atomic_inc_return(&dump_running) != 1) {
8167                 atomic_dec(&dump_running);
8168                 return;
8169         }
8170
8171         /*
8172          * Always turn off tracing when we dump.
8173          * We don't need to show trace output of what happens
8174          * between multiple crashes.
8175          *
8176          * If the user does a sysrq-z, then they can re-enable
8177          * tracing with echo 1 > tracing_on.
8178          */
8179         tracing_off();
8180
8181         local_irq_save(flags);
8182
8183         /* Simulate the iterator */
8184         trace_init_global_iter(&iter);
8185
8186         for_each_tracing_cpu(cpu) {
8187                 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8188         }
8189
8190         old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8191
8192         /* don't look at user memory in panic mode */
8193         tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8194
8195         switch (oops_dump_mode) {
8196         case DUMP_ALL:
8197                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8198                 break;
8199         case DUMP_ORIG:
8200                 iter.cpu_file = raw_smp_processor_id();
8201                 break;
8202         case DUMP_NONE:
8203                 goto out_enable;
8204         default:
8205                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8206                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8207         }
8208
8209         printk(KERN_TRACE "Dumping ftrace buffer:\n");
8210
8211         /* Did function tracer already get disabled? */
8212         if (ftrace_is_dead()) {
8213                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8214                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
8215         }
8216
8217         /*
8218          * We need to stop all tracing on all CPUS to read
8219          * the next buffer. This is a bit expensive, but is
8220          * not done often. We read everything we can,
8221          * and then release the locks again.
8222          */
8223
8224         while (!trace_empty(&iter)) {
8225
8226                 if (!cnt)
8227                         printk(KERN_TRACE "---------------------------------\n");
8228
8229                 cnt++;
8230
8231                 /* reset all but tr, trace, and overruns */
8232                 memset(&iter.seq, 0,
8233                        sizeof(struct trace_iterator) -
8234                        offsetof(struct trace_iterator, seq));
8235                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8236                 iter.pos = -1;
8237
8238                 if (trace_find_next_entry_inc(&iter) != NULL) {
8239                         int ret;
8240
8241                         ret = print_trace_line(&iter);
8242                         if (ret != TRACE_TYPE_NO_CONSUME)
8243                                 trace_consume(&iter);
8244                 }
8245                 touch_nmi_watchdog();
8246
8247                 trace_printk_seq(&iter.seq);
8248         }
8249
8250         if (!cnt)
8251                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
8252         else
8253                 printk(KERN_TRACE "---------------------------------\n");
8254
8255  out_enable:
8256         tr->trace_flags |= old_userobj;
8257
8258         for_each_tracing_cpu(cpu) {
8259                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8260         }
8261         atomic_dec(&dump_running);
8262         local_irq_restore(flags);
8263 }
8264 EXPORT_SYMBOL_GPL(ftrace_dump);
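
/*
 * Editor's note: a hedged usage sketch. Since ftrace_dump() is exported,
 * a debugging patch or module can dump the ring buffer at a point of
 * interest; it turns tracing off as a side effect, so it suits
 * post-mortem debugging, not routine logging. The predicate below is
 * hypothetical.
 */
static void example_checkpoint(bool something_went_wrong)
{
        if (something_went_wrong)
                ftrace_dump(DUMP_ALL);  /* dump every CPU's buffer */
}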
8265
8266 __init static int tracer_alloc_buffers(void)
8267 {
8268         int ring_buf_size;
8269         int ret = -ENOMEM;
8270
8271         /*
8272          * Make sure we don't accidentally add more trace options
8273          * than we have bits for.
8274          */
8275         BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8276
8277         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8278                 goto out;
8279
8280         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8281                 goto out_free_buffer_mask;
8282
8283         /* Only allocate trace_printk buffers if a trace_printk exists */
8284         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8285                 /* Must be called before global_trace.buffer is allocated */
8286                 trace_printk_init_buffers();
8287
8288         /* To save memory, keep the ring buffer size to its minimum */
8289         if (ring_buffer_expanded)
8290                 ring_buf_size = trace_buf_size;
8291         else
8292                 ring_buf_size = 1;
8293
8294         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8295         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8296
8297         raw_spin_lock_init(&global_trace.start_lock);
8298
8299         /*
8300          * The prepare callback allocates some memory for the ring buffer. We
8301          * don't free the buffer if the CPU goes down. If we were to free
8302          * the buffer, then the user would lose any trace that was in the
8303          * buffer. The memory will be removed once the "instance" is removed.
8304          */
8305         ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8306                                       "trace/RB:prepare", trace_rb_cpu_prepare,
8307                                       NULL);
8308         if (ret < 0)
8309                 goto out_free_cpumask;
8310         /* Used for event triggers */
8311         ret = -ENOMEM;
8312         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8313         if (!temp_buffer)
8314                 goto out_rm_hp_state;
8315
8316         if (trace_create_savedcmd() < 0)
8317                 goto out_free_temp_buffer;
8318
8319         /* TODO: make the number of buffers hot pluggable with CPUS */
8320         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8321                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8322                 WARN_ON(1);
8323                 goto out_free_savedcmd;
8324         }
8325
8326         if (global_trace.buffer_disabled)
8327                 tracing_off();
8328
8329         if (trace_boot_clock) {
8330                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8331                 if (ret < 0)
8332                         pr_warn("Trace clock %s not defined, going back to default\n",
8333                                 trace_boot_clock);
8334         }
8335
8336         /*
8337          * register_tracer() might reference current_trace, so it
8338          * needs to be set before we register anything. This is
8339          * just a bootstrap of current_trace anyway.
8340          */
8341         global_trace.current_trace = &nop_trace;
8342
8343         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8344
8345         ftrace_init_global_array_ops(&global_trace);
8346
8347         init_trace_flags_index(&global_trace);
8348
8349         register_tracer(&nop_trace);
8350
8351         /* Function tracing may start here (via kernel command line) */
8352         init_function_trace();
8353
8354         /* All seems OK, enable tracing */
8355         tracing_disabled = 0;
8356
8357         atomic_notifier_chain_register(&panic_notifier_list,
8358                                        &trace_panic_notifier);
8359
8360         register_die_notifier(&trace_die_notifier);
8361
8362         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8363
8364         INIT_LIST_HEAD(&global_trace.systems);
8365         INIT_LIST_HEAD(&global_trace.events);
8366         list_add(&global_trace.list, &ftrace_trace_arrays);
8367
8368         apply_trace_boot_options();
8369
8370         register_snapshot_cmd();
8371
8372         return 0;
8373
8374 out_free_savedcmd:
8375         free_saved_cmdlines_buffer(savedcmd);
8376 out_free_temp_buffer:
8377         ring_buffer_free(temp_buffer);
8378 out_rm_hp_state:
8379         cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8380 out_free_cpumask:
8381         free_cpumask_var(global_trace.tracing_cpumask);
8382 out_free_buffer_mask:
8383         free_cpumask_var(tracing_buffer_mask);
8384 out:
8385         return ret;
8386 }
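
/*
 * Editor's note: tracer_alloc_buffers() follows the kernel's standard
 * error-unwinding style: each failure jumps to a label that releases, in
 * reverse order, exactly what was acquired before the failure. A minimal
 * sketch of the idiom (hypothetical resources):
 */
static int example_setup(void)
{
        int ret = -ENOMEM;
        void *a, *b;

        a = kmalloc(32, GFP_KERNEL);
        if (!a)
                goto out;
        b = kmalloc(64, GFP_KERNEL);
        if (!b)
                goto out_free_a;

        return 0;       /* success: both resources stay allocated */

out_free_a:
        kfree(a);
out:
        return ret;
}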
8387
8388 void __init early_trace_init(void)
8389 {
8390         if (tracepoint_printk) {
8391                 tracepoint_print_iter =
8392                         kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8393                 if (WARN_ON(!tracepoint_print_iter))
8394                         tracepoint_printk = 0;
8395                 else
8396                         static_key_enable(&tracepoint_printk_key.key);
8397         }
8398         tracer_alloc_buffers();
8399 }
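
/*
 * Editor's note: tracepoint_printk_key (defined near the top of this file
 * with DEFINE_STATIC_KEY_FALSE) is a jump-label static key: a guarded
 * branch costs a patched no-op until the key is enabled, as done above
 * via static_key_enable(). A sketch of the pattern, hypothetical names:
 */
static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_fast_path(void)
{
        /* Compiles to a no-op branch until example_key is switched on */
        if (static_branch_unlikely(&example_key))
                pr_info("example: slow path active\n");
}

/* Elsewhere in setup code: static_branch_enable(&example_key); */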
8400
8401 void __init trace_init(void)
8402 {
8403         trace_event_init();
8404 }
8405
8406 __init static int clear_boot_tracer(void)
8407 {
8408         /*
8409          * The default bootup tracer name lives in an init section
8410          * that is freed after boot. This function runs at late_initcall
8411          * time; if the boot tracer was never registered, clear the
8412          * pointer so that a later registration does not access memory
8413          * that is about to be freed.
8414          */
8415         if (!default_bootup_tracer)
8416                 return 0;
8417
8418         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8419                default_bootup_tracer);
8420         default_bootup_tracer = NULL;
8421
8422         return 0;
8423 }
8424
8425 fs_initcall(tracer_init_tracefs);
8426 late_initcall_sync(clear_boot_tracer);