linux.git / kernel/trace/trace.c (blob 1e5f80cda39a9b63675de914e1ccd7b46cb17e97)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <[email protected]>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
44 #include <linux/fs.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
51
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
53
54 #include "trace.h"
55 #include "trace_output.h"
56
57 #ifdef CONFIG_FTRACE_STARTUP_TEST
58 /*
59  * We need to change this state when a selftest is running.
60  * A selftest will look into the ring buffer to count the
61  * entries inserted during the selftest, although concurrent
62  * insertions into the ring buffer, such as trace_printk(), could occur
63  * at the same time, giving false positive or negative results.
64  */
65 static bool __read_mostly tracing_selftest_running;
66
67 /*
68  * If boot-time tracing including tracers/events via kernel cmdline
69  * is running, we do not want to run SELFTEST.
70  */
71 bool __read_mostly tracing_selftest_disabled;
72
73 void __init disable_tracing_selftest(const char *reason)
74 {
75         if (!tracing_selftest_disabled) {
76                 tracing_selftest_disabled = true;
77                 pr_info("Ftrace startup test is disabled due to %s\n", reason);
78         }
79 }
80 #else
81 #define tracing_selftest_running        0
82 #define tracing_selftest_disabled       0
83 #endif
84
85 /* Pipe tracepoints to printk */
86 static struct trace_iterator *tracepoint_print_iter;
87 int tracepoint_printk;
88 static bool tracepoint_printk_stop_on_boot __initdata;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
90
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
93         { }
94 };
95
96 static int
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
98 {
99         return 0;
100 }
101
102 /*
103  * To prevent the comm cache from being overwritten when no
104  * tracing is active, only save the comm when a trace event
105  * occurs.
106  */
107 DEFINE_PER_CPU(bool, trace_taskinfo_save);
108
109 /*
110  * Kill all tracing for good (never come back).
111  * It is initialized to 1 and is cleared to zero only when the
112  * initialization of the tracer succeeds; that is the only place
113  * that sets it back to zero.
114  */
115 static int tracing_disabled = 1;
116
117 cpumask_var_t __read_mostly     tracing_buffer_mask;
118
119 /*
120  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
121  *
122  * If there is an oops (or kernel panic) and ftrace_dump_on_oops
123  * is set, then ftrace_dump is called. This will output the contents
124  * of the ftrace buffers to the console.  This is very useful for
125  * capturing traces that lead to crashes and outputting them to a
126  * serial console.
127  *
128  * It is off by default, but you can enable it either by specifying
129  * "ftrace_dump_on_oops" on the kernel command line, or by setting
130  * /proc/sys/kernel/ftrace_dump_on_oops.
131  * Set it to 1 to dump the buffers of all CPUs.
132  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
133  */
134
135 enum ftrace_dump_mode ftrace_dump_on_oops;
136
137 /* When set, tracing will stop when a WARN*() is hit */
138 int __disable_trace_on_warning;
139
140 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
141 /* Map of enums to their values, for "eval_map" file */
142 struct trace_eval_map_head {
143         struct module                   *mod;
144         unsigned long                   length;
145 };
146
147 union trace_eval_map_item;
148
149 struct trace_eval_map_tail {
150         /*
151          * "end" is first and points to NULL as it must be different
152          * from "mod" or "eval_string"
153          */
154         union trace_eval_map_item       *next;
155         const char                      *end;   /* points to NULL */
156 };
157
158 static DEFINE_MUTEX(trace_eval_mutex);
159
160 /*
161  * The trace_eval_maps are saved in an array with two extra elements,
162  * one at the beginning, and one at the end. The beginning item contains
163  * the count of the saved maps (head.length), and the module they
164  * belong to if not built in (head.mod). The ending item contains a
165  * pointer to the next array of saved eval_map items.
166  */
167 union trace_eval_map_item {
168         struct trace_eval_map           map;
169         struct trace_eval_map_head      head;
170         struct trace_eval_map_tail      tail;
171 };
172
173 static union trace_eval_map_item *trace_eval_maps;
174 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
175
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
178                                    struct trace_buffer *buffer,
179                                    unsigned int trace_ctx);
180
181 #define MAX_TRACER_SIZE         100
182 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183 static char *default_bootup_tracer;
184
185 static bool allocate_snapshot;
186 static bool snapshot_at_boot;
187
188 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
189 static int boot_instance_index;
190
191 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_snapshot_index;
193
194 static int __init set_cmdline_ftrace(char *str)
195 {
196         strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
197         default_bootup_tracer = bootup_tracer_buf;
198         /* We are using ftrace early, expand it */
199         trace_set_ring_buffer_expanded(NULL);
200         return 1;
201 }
202 __setup("ftrace=", set_cmdline_ftrace);
203
204 static int __init set_ftrace_dump_on_oops(char *str)
205 {
206         if (*str++ != '=' || !*str || !strcmp("1", str)) {
207                 ftrace_dump_on_oops = DUMP_ALL;
208                 return 1;
209         }
210
211         if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
212                 ftrace_dump_on_oops = DUMP_ORIG;
213                 return 1;
214         }
215
216         return 0;
217 }
218 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
219
220 static int __init stop_trace_on_warning(char *str)
221 {
222         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
223                 __disable_trace_on_warning = 1;
224         return 1;
225 }
226 __setup("traceoff_on_warning", stop_trace_on_warning);
227
228 static int __init boot_alloc_snapshot(char *str)
229 {
230         char *slot = boot_snapshot_info + boot_snapshot_index;
231         int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
232         int ret;
233
234         if (str[0] == '=') {
235                 str++;
236                 if (strlen(str) >= left)
237                         return -1;
238
239                 ret = snprintf(slot, left, "%s\t", str);
240                 boot_snapshot_index += ret;
241         } else {
242                 allocate_snapshot = true;
243                 /* We also need the main ring buffer expanded */
244                 trace_set_ring_buffer_expanded(NULL);
245         }
246         return 1;
247 }
248 __setup("alloc_snapshot", boot_alloc_snapshot);
249
250
251 static int __init boot_snapshot(char *str)
252 {
253         snapshot_at_boot = true;
254         boot_alloc_snapshot(str);
255         return 1;
256 }
257 __setup("ftrace_boot_snapshot", boot_snapshot);
258
259
260 static int __init boot_instance(char *str)
261 {
262         char *slot = boot_instance_info + boot_instance_index;
263         int left = sizeof(boot_instance_info) - boot_instance_index;
264         int ret;
265
266         if (strlen(str) >= left)
267                 return -1;
268
269         ret = snprintf(slot, left, "%s\t", str);
270         boot_instance_index += ret;
271
272         return 1;
273 }
274 __setup("trace_instance=", boot_instance);
275
276
277 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
278
279 static int __init set_trace_boot_options(char *str)
280 {
281         strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
282         return 1;
283 }
284 __setup("trace_options=", set_trace_boot_options);
285
286 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
287 static char *trace_boot_clock __initdata;
288
289 static int __init set_trace_boot_clock(char *str)
290 {
291         strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
292         trace_boot_clock = trace_boot_clock_buf;
293         return 1;
294 }
295 __setup("trace_clock=", set_trace_boot_clock);
296
297 static int __init set_tracepoint_printk(char *str)
298 {
299         /* Ignore the "tp_printk_stop_on_boot" param */
300         if (*str == '_')
301                 return 0;
302
303         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
304                 tracepoint_printk = 1;
305         return 1;
306 }
307 __setup("tp_printk", set_tracepoint_printk);
308
309 static int __init set_tracepoint_printk_stop(char *str)
310 {
311         tracepoint_printk_stop_on_boot = true;
312         return 1;
313 }
314 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
315
316 unsigned long long ns2usecs(u64 nsec)
317 {
318         nsec += 500;
319         do_div(nsec, 1000);
320         return nsec;
321 }
322
323 static void
324 trace_process_export(struct trace_export *export,
325                struct ring_buffer_event *event, int flag)
326 {
327         struct trace_entry *entry;
328         unsigned int size = 0;
329
330         if (export->flags & flag) {
331                 entry = ring_buffer_event_data(event);
332                 size = ring_buffer_event_length(event);
333                 export->write(export, entry, size);
334         }
335 }
336
337 static DEFINE_MUTEX(ftrace_export_lock);
338
339 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
340
341 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
342 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
343 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
344
345 static inline void ftrace_exports_enable(struct trace_export *export)
346 {
347         if (export->flags & TRACE_EXPORT_FUNCTION)
348                 static_branch_inc(&trace_function_exports_enabled);
349
350         if (export->flags & TRACE_EXPORT_EVENT)
351                 static_branch_inc(&trace_event_exports_enabled);
352
353         if (export->flags & TRACE_EXPORT_MARKER)
354                 static_branch_inc(&trace_marker_exports_enabled);
355 }
356
357 static inline void ftrace_exports_disable(struct trace_export *export)
358 {
359         if (export->flags & TRACE_EXPORT_FUNCTION)
360                 static_branch_dec(&trace_function_exports_enabled);
361
362         if (export->flags & TRACE_EXPORT_EVENT)
363                 static_branch_dec(&trace_event_exports_enabled);
364
365         if (export->flags & TRACE_EXPORT_MARKER)
366                 static_branch_dec(&trace_marker_exports_enabled);
367 }
368
369 static void ftrace_exports(struct ring_buffer_event *event, int flag)
370 {
371         struct trace_export *export;
372
373         preempt_disable_notrace();
374
375         export = rcu_dereference_raw_check(ftrace_exports_list);
376         while (export) {
377                 trace_process_export(export, event, flag);
378                 export = rcu_dereference_raw_check(export->next);
379         }
380
381         preempt_enable_notrace();
382 }
383
384 static inline void
385 add_trace_export(struct trace_export **list, struct trace_export *export)
386 {
387         rcu_assign_pointer(export->next, *list);
388         /*
389          * We are inserting the export into the list, but another
390          * CPU might be walking that list. We need to make sure
391          * the export->next pointer is valid before another CPU sees
392          * the export pointer added to the list.
393          */
394         rcu_assign_pointer(*list, export);
395 }
396
397 static inline int
398 rm_trace_export(struct trace_export **list, struct trace_export *export)
399 {
400         struct trace_export **p;
401
402         for (p = list; *p != NULL; p = &(*p)->next)
403                 if (*p == export)
404                         break;
405
406         if (*p != export)
407                 return -1;
408
409         rcu_assign_pointer(*p, (*p)->next);
410
411         return 0;
412 }
413
414 static inline void
415 add_ftrace_export(struct trace_export **list, struct trace_export *export)
416 {
417         ftrace_exports_enable(export);
418
419         add_trace_export(list, export);
420 }
421
422 static inline int
423 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
424 {
425         int ret;
426
427         ret = rm_trace_export(list, export);
428         ftrace_exports_disable(export);
429
430         return ret;
431 }
432
433 int register_ftrace_export(struct trace_export *export)
434 {
435         if (WARN_ON_ONCE(!export->write))
436                 return -1;
437
438         mutex_lock(&ftrace_export_lock);
439
440         add_ftrace_export(&ftrace_exports_list, export);
441
442         mutex_unlock(&ftrace_export_lock);
443
444         return 0;
445 }
446 EXPORT_SYMBOL_GPL(register_ftrace_export);
447
448 int unregister_ftrace_export(struct trace_export *export)
449 {
450         int ret;
451
452         mutex_lock(&ftrace_export_lock);
453
454         ret = rm_ftrace_export(&ftrace_exports_list, export);
455
456         mutex_unlock(&ftrace_export_lock);
457
458         return ret;
459 }
460 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
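
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an external consumer can mirror trace data by registering a trace_export.
 * Only register_ftrace_export(), unregister_ftrace_export(), struct
 * trace_export and the TRACE_EXPORT_* flags are real; the my_export_*
 * names below are hypothetical.
 */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	/* Forward the raw binary trace entry (@size bytes) to another sink. */
}

static struct trace_export my_export = {
	.write	= my_export_write,
	.flags	= TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
};

/*
 * Typically done from module init/exit:
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */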
461
462 /* trace_flags holds trace_options default values */
463 #define TRACE_DEFAULT_FLAGS                                             \
464         (FUNCTION_DEFAULT_FLAGS |                                       \
465          TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
466          TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
467          TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
468          TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \
469          TRACE_ITER_HASH_PTR)
470
471 /* trace_options that are only supported by global_trace */
472 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
473                TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
474
475 /* trace_flags that are default zero for instances */
476 #define ZEROED_TRACE_FLAGS \
477         (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
478
479 /*
480  * The global_trace is the descriptor that holds the top-level tracing
481  * buffers for the live tracing.
482  */
483 static struct trace_array global_trace = {
484         .trace_flags = TRACE_DEFAULT_FLAGS,
485 };
486
487 void trace_set_ring_buffer_expanded(struct trace_array *tr)
488 {
489         if (!tr)
490                 tr = &global_trace;
491         tr->ring_buffer_expanded = true;
492 }
493
494 LIST_HEAD(ftrace_trace_arrays);
495
496 int trace_array_get(struct trace_array *this_tr)
497 {
498         struct trace_array *tr;
499         int ret = -ENODEV;
500
501         mutex_lock(&trace_types_lock);
502         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
503                 if (tr == this_tr) {
504                         tr->ref++;
505                         ret = 0;
506                         break;
507                 }
508         }
509         mutex_unlock(&trace_types_lock);
510
511         return ret;
512 }
513
514 static void __trace_array_put(struct trace_array *this_tr)
515 {
516         WARN_ON(!this_tr->ref);
517         this_tr->ref--;
518 }
519
520 /**
521  * trace_array_put - Decrement the reference counter for this trace array.
522  * @this_tr: pointer to the trace array
523  *
524  * NOTE: Use this when we no longer need the trace array returned by
525  * trace_array_get_by_name(). This ensures the trace array can later
526  * be destroyed.
527  *
528  */
529 void trace_array_put(struct trace_array *this_tr)
530 {
531         if (!this_tr)
532                 return;
533
534         mutex_lock(&trace_types_lock);
535         __trace_array_put(this_tr);
536         mutex_unlock(&trace_types_lock);
537 }
538 EXPORT_SYMBOL_GPL(trace_array_put);
539
540 int tracing_check_open_get_tr(struct trace_array *tr)
541 {
542         int ret;
543
544         ret = security_locked_down(LOCKDOWN_TRACEFS);
545         if (ret)
546                 return ret;
547
548         if (tracing_disabled)
549                 return -ENODEV;
550
551         if (tr && trace_array_get(tr) < 0)
552                 return -ENODEV;
553
554         return 0;
555 }
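
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pattern for a tracefs file tied to a trace_array is to take a
 * reference in ->open() and drop it in ->release().  The example_* names
 * are hypothetical; inode->i_private holding the trace_array matches how
 * such files are created in this subsystem.
 */
static int example_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);	/* lockdown check + tr->ref++ */
	if (ret)
		return ret;

	filp->private_data = tr;
	return 0;
}

static int example_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);	/* allow the instance to be removed later */
	return 0;
}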
556
557 int call_filter_check_discard(struct trace_event_call *call, void *rec,
558                               struct trace_buffer *buffer,
559                               struct ring_buffer_event *event)
560 {
561         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
562             !filter_match_preds(call->filter, rec)) {
563                 __trace_event_discard_commit(buffer, event);
564                 return 1;
565         }
566
567         return 0;
568 }
569
570 /**
571  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
572  * @filtered_pids: The list of pids to check
573  * @search_pid: The PID to find in @filtered_pids
574  *
575  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
576  */
577 bool
578 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 {
580         return trace_pid_list_is_set(filtered_pids, search_pid);
581 }
582
583 /**
584  * trace_ignore_this_task - should a task be ignored for tracing
585  * @filtered_pids: The list of pids to check
586  * @filtered_no_pids: The list of pids not to be traced
587  * @task: The task that should be ignored if not filtered
588  *
589  * Checks if @task should be traced or not from @filtered_pids.
590  * Returns true if @task should *NOT* be traced.
591  * Returns false if @task should be traced.
592  */
593 bool
594 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
595                        struct trace_pid_list *filtered_no_pids,
596                        struct task_struct *task)
597 {
598         /*
599          * If filtered_no_pids is not empty, and the task's pid is listed
600          * in filtered_no_pids, then return true.
601          * Otherwise, if filtered_pids is empty, that means we can
602          * trace all tasks. If it has content, then only trace pids
603          * within filtered_pids.
604          */
605
606         return (filtered_pids &&
607                 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608                 (filtered_no_pids &&
609                  trace_find_filtered_pid(filtered_no_pids, task->pid));
610 }
611
612 /**
613  * trace_filter_add_remove_task - Add or remove a task from a pid_list
614  * @pid_list: The list to modify
615  * @self: The current task for fork or NULL for exit
616  * @task: The task to add or remove
617  *
618  * If adding a task, if @self is defined, the task is only added if @self
619  * is also included in @pid_list. This happens on fork and tasks should
620  * only be added when the parent is listed. If @self is NULL, then the
621  * @task pid will be removed from the list, which would happen on exit
622  * of a task.
623  */
624 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
625                                   struct task_struct *self,
626                                   struct task_struct *task)
627 {
628         if (!pid_list)
629                 return;
630
631         /* For forks, we only add if the forking task is listed */
632         if (self) {
633                 if (!trace_find_filtered_pid(pid_list, self->pid))
634                         return;
635         }
636
637         /* "self" is set for forks, and NULL for exits */
638         if (self)
639                 trace_pid_list_set(pid_list, task->pid);
640         else
641                 trace_pid_list_clear(pid_list, task->pid);
642 }
643
644 /**
645  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
646  * @pid_list: The pid list to show
647  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
648  * @pos: The position of the file
649  *
650  * This is used by the seq_file "next" operation to iterate the pids
651  * listed in a trace_pid_list structure.
652  *
653  * Returns the pid+1 as we want to display pid of zero, but NULL would
654  * stop the iteration.
655  */
656 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 {
658         long pid = (unsigned long)v;
659         unsigned int next;
660
661         (*pos)++;
662
663         /* pid already is +1 of the actual previous bit */
664         if (trace_pid_list_next(pid_list, pid, &next) < 0)
665                 return NULL;
666
667         pid = next;
668
669         /* Return pid + 1 to allow zero to be represented */
670         return (void *)(pid + 1);
671 }
672
673 /**
674  * trace_pid_start - Used for seq_file to start reading pid lists
675  * @pid_list: The pid list to show
676  * @pos: The position of the file
677  *
678  * This is used by seq_file "start" operation to start the iteration
679  * of listing pids.
680  *
681  * Returns the pid+1 as we want to display pid of zero, but NULL would
682  * stop the iteration.
683  */
684 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
685 {
686         unsigned long pid;
687         unsigned int first;
688         loff_t l = 0;
689
690         if (trace_pid_list_first(pid_list, &first) < 0)
691                 return NULL;
692
693         pid = first;
694
695         /* Return pid + 1 so that zero can be the exit value */
696         for (pid++; pid && l < *pos;
697              pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
698                 ;
699         return (void *)pid;
700 }
701
702 /**
703  * trace_pid_show - show the current pid in seq_file processing
704  * @m: The seq_file structure to write into
705  * @v: A void pointer of the pid (+1) value to display
706  *
707  * Can be directly used by seq_file operations to display the current
708  * pid value.
709  */
710 int trace_pid_show(struct seq_file *m, void *v)
711 {
712         unsigned long pid = (unsigned long)v - 1;
713
714         seq_printf(m, "%lu\n", pid);
715         return 0;
716 }
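
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * trace_pid_start/next/show are designed to slot straight into
 * seq_operations for a "set_*_pid" style file.  The example_* names are
 * hypothetical, and the RCU/locking needed to pin the pid_list for the
 * lifetime of the read is omitted for brevity.
 */
static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* set at open time */

	return trace_pid_start(pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};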
717
718 /* 128 should be much more than enough */
719 #define PID_BUF_SIZE            127
720
721 int trace_pid_write(struct trace_pid_list *filtered_pids,
722                     struct trace_pid_list **new_pid_list,
723                     const char __user *ubuf, size_t cnt)
724 {
725         struct trace_pid_list *pid_list;
726         struct trace_parser parser;
727         unsigned long val;
728         int nr_pids = 0;
729         ssize_t read = 0;
730         ssize_t ret;
731         loff_t pos;
732         pid_t pid;
733
734         if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
735                 return -ENOMEM;
736
737         /*
738          * Always create a new array. The write is an all-or-nothing
739          * operation: when the user adds new pids, they go into a fresh
740          * list, and if the operation fails, the current list is left
741          * unmodified.
742          */
743         pid_list = trace_pid_list_alloc();
744         if (!pid_list) {
745                 trace_parser_put(&parser);
746                 return -ENOMEM;
747         }
748
749         if (filtered_pids) {
750                 /* copy the current bits to the new max */
751                 ret = trace_pid_list_first(filtered_pids, &pid);
752                 while (!ret) {
753                         trace_pid_list_set(pid_list, pid);
754                         ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
755                         nr_pids++;
756                 }
757         }
758
759         ret = 0;
760         while (cnt > 0) {
761
762                 pos = 0;
763
764                 ret = trace_get_user(&parser, ubuf, cnt, &pos);
765                 if (ret < 0)
766                         break;
767
768                 read += ret;
769                 ubuf += ret;
770                 cnt -= ret;
771
772                 if (!trace_parser_loaded(&parser))
773                         break;
774
775                 ret = -EINVAL;
776                 if (kstrtoul(parser.buffer, 0, &val))
777                         break;
778
779                 pid = (pid_t)val;
780
781                 if (trace_pid_list_set(pid_list, pid) < 0) {
782                         ret = -1;
783                         break;
784                 }
785                 nr_pids++;
786
787                 trace_parser_clear(&parser);
788                 ret = 0;
789         }
790         trace_parser_put(&parser);
791
792         if (ret < 0) {
793                 trace_pid_list_free(pid_list);
794                 return ret;
795         }
796
797         if (!nr_pids) {
798                 /* Cleared the list of pids */
799                 trace_pid_list_free(pid_list);
800                 pid_list = NULL;
801         }
802
803         *new_pid_list = pid_list;
804
805         return read;
806 }
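
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a file ->write() handler would feed user input to trace_pid_write() and
 * then publish the resulting list with RCU, freeing the old one afterward.
 * The example_lock/example_filtered_pids names are hypothetical and the
 * error/teardown handling is simplified.
 */
static DEFINE_MUTEX(example_lock);
static struct trace_pid_list __rcu *example_filtered_pids;

static ssize_t example_pids_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *filtered_pids, *pid_list;
	ssize_t ret;

	mutex_lock(&example_lock);
	filtered_pids = rcu_dereference_protected(example_filtered_pids,
					lockdep_is_held(&example_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	/* Publish the new list, then wait for readers before freeing the old. */
	rcu_assign_pointer(example_filtered_pids, pid_list);
	if (filtered_pids) {
		synchronize_rcu();
		trace_pid_list_free(filtered_pids);
	}

	*ppos += ret;
out:
	mutex_unlock(&example_lock);
	return ret;
}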
807
808 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
809 {
810         u64 ts;
811
812         /* Early boot up does not have a buffer yet */
813         if (!buf->buffer)
814                 return trace_clock_local();
815
816         ts = ring_buffer_time_stamp(buf->buffer);
817         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
818
819         return ts;
820 }
821
822 u64 ftrace_now(int cpu)
823 {
824         return buffer_ftrace_now(&global_trace.array_buffer, cpu);
825 }
826
827 /**
828  * tracing_is_enabled - Show if global_trace has been enabled
829  *
830  * Shows if the global trace has been enabled or not. It uses the
831  * mirror flag "buffer_disabled", which can be checked in fast paths
832  * such as by the irqsoff tracer. But it may be inaccurate due to races. If you
833  * need to know the accurate state, use tracing_is_on() which is a little
834  * slower, but accurate.
835  */
836 int tracing_is_enabled(void)
837 {
838         /*
839          * For quick access (irqsoff uses this in fast path), just
840          * return the mirror variable of the state of the ring buffer.
841          * It's a little racy, but we don't really care.
842          */
843         smp_rmb();
844         return !global_trace.buffer_disabled;
845 }
846
847 /*
848  * trace_buf_size is the size in bytes that is allocated
849  * for a buffer. Note, the number of bytes is always rounded
850  * to page size.
851  *
852  * This number is purposely set to a low value of 16384 entries.
853  * If a dump on oops happens, it is much appreciated not to have
854  * to wait for all that output. In any case, this is configurable
855  * at both boot time and run time.
856  */
857 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
858
859 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
860
861 /* trace_types holds a link list of available tracers. */
862 static struct tracer            *trace_types __read_mostly;
863
864 /*
865  * trace_types_lock is used to protect the trace_types list.
866  */
867 DEFINE_MUTEX(trace_types_lock);
868
869 /*
870  * Serialize access to the ring buffer.
871  *
872  * The ring buffer serializes readers, but that is only low-level protection.
873  * The validity of the events (returned by ring_buffer_peek(), etc.)
874  * is not protected by the ring buffer.
875  *
876  * The content of events may become garbage if we allow other processes to
877  * consume these events concurrently:
878  *   A) the page of the consumed events may become a normal page
879  *      (not a reader page) in the ring buffer, and this page will be
880  *      rewritten by the events producer.
881  *   B) the page of the consumed events may become a page for splice_read,
882  *      and this page will be returned to the system.
883  *
884  * These primitives allow multiple processes to access different CPU ring
885  * buffers concurrently.
886  *
887  * These primitives don't distinguish read-only and read-consume access.
888  * Multiple read-only accesses are also serialized.
889  */
890
891 #ifdef CONFIG_SMP
892 static DECLARE_RWSEM(all_cpu_access_lock);
893 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894
895 static inline void trace_access_lock(int cpu)
896 {
897         if (cpu == RING_BUFFER_ALL_CPUS) {
898                 /* gain it for accessing the whole ring buffer. */
899                 down_write(&all_cpu_access_lock);
900         } else {
901                 /* gain it for accessing a cpu ring buffer. */
902
903                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
904                 down_read(&all_cpu_access_lock);
905
906                 /* Secondly block other access to this @cpu ring buffer. */
907                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
908         }
909 }
910
911 static inline void trace_access_unlock(int cpu)
912 {
913         if (cpu == RING_BUFFER_ALL_CPUS) {
914                 up_write(&all_cpu_access_lock);
915         } else {
916                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
917                 up_read(&all_cpu_access_lock);
918         }
919 }
920
921 static inline void trace_access_lock_init(void)
922 {
923         int cpu;
924
925         for_each_possible_cpu(cpu)
926                 mutex_init(&per_cpu(cpu_access_lock, cpu));
927 }
928
929 #else
930
931 static DEFINE_MUTEX(access_lock);
932
933 static inline void trace_access_lock(int cpu)
934 {
935         (void)cpu;
936         mutex_lock(&access_lock);
937 }
938
939 static inline void trace_access_unlock(int cpu)
940 {
941         (void)cpu;
942         mutex_unlock(&access_lock);
943 }
944
945 static inline void trace_access_lock_init(void)
946 {
947 }
948
949 #endif
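
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a consuming reader brackets its ring buffer accesses with the primitives
 * above.  example_consume() is a hypothetical name.
 */
static void example_consume(struct trace_array *tr, int cpu)
{
	trace_access_lock(cpu);		/* @cpu or RING_BUFFER_ALL_CPUS */

	/* ... peek at or consume events from tr->array_buffer.buffer ... */

	trace_access_unlock(cpu);
}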
950
951 #ifdef CONFIG_STACKTRACE
952 static void __ftrace_trace_stack(struct trace_buffer *buffer,
953                                  unsigned int trace_ctx,
954                                  int skip, struct pt_regs *regs);
955 static inline void ftrace_trace_stack(struct trace_array *tr,
956                                       struct trace_buffer *buffer,
957                                       unsigned int trace_ctx,
958                                       int skip, struct pt_regs *regs);
959
960 #else
961 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
962                                         unsigned int trace_ctx,
963                                         int skip, struct pt_regs *regs)
964 {
965 }
966 static inline void ftrace_trace_stack(struct trace_array *tr,
967                                       struct trace_buffer *buffer,
968                                       unsigned long trace_ctx,
969                                       int skip, struct pt_regs *regs)
970 {
971 }
972
973 #endif
974
975 static __always_inline void
976 trace_event_setup(struct ring_buffer_event *event,
977                   int type, unsigned int trace_ctx)
978 {
979         struct trace_entry *ent = ring_buffer_event_data(event);
980
981         tracing_generic_entry_update(ent, type, trace_ctx);
982 }
983
984 static __always_inline struct ring_buffer_event *
985 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
986                           int type,
987                           unsigned long len,
988                           unsigned int trace_ctx)
989 {
990         struct ring_buffer_event *event;
991
992         event = ring_buffer_lock_reserve(buffer, len);
993         if (event != NULL)
994                 trace_event_setup(event, type, trace_ctx);
995
996         return event;
997 }
998
999 void tracer_tracing_on(struct trace_array *tr)
1000 {
1001         if (tr->array_buffer.buffer)
1002                 ring_buffer_record_on(tr->array_buffer.buffer);
1003         /*
1004          * This flag is looked at when buffers haven't been allocated
1005          * yet, or by some tracers (like irqsoff) that just want to
1006          * know if the ring buffer has been disabled, but it can handle
1007          * races where it gets disabled but we still do a record.
1008          * As the check is in the fast path of the tracers, it is more
1009          * important to be fast than accurate.
1010          */
1011         tr->buffer_disabled = 0;
1012         /* Make the flag seen by readers */
1013         smp_wmb();
1014 }
1015
1016 /**
1017  * tracing_on - enable tracing buffers
1018  *
1019  * This function enables tracing buffers that may have been
1020  * disabled with tracing_off.
1021  */
1022 void tracing_on(void)
1023 {
1024         tracer_tracing_on(&global_trace);
1025 }
1026 EXPORT_SYMBOL_GPL(tracing_on);
1027
1028
1029 static __always_inline void
1030 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 {
1032         __this_cpu_write(trace_taskinfo_save, true);
1033
1034         /* If this is the temp buffer, we need to commit fully */
1035         if (this_cpu_read(trace_buffered_event) == event) {
1036                 /* Length is in event->array[0] */
1037                 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1038                 /* Release the temp buffer */
1039                 this_cpu_dec(trace_buffered_event_cnt);
1040                 /* ring_buffer_unlock_commit() enables preemption */
1041                 preempt_enable_notrace();
1042         } else
1043                 ring_buffer_unlock_commit(buffer);
1044 }
1045
1046 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1047                        const char *str, int size)
1048 {
1049         struct ring_buffer_event *event;
1050         struct trace_buffer *buffer;
1051         struct print_entry *entry;
1052         unsigned int trace_ctx;
1053         int alloc;
1054
1055         if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1056                 return 0;
1057
1058         if (unlikely(tracing_selftest_running && tr == &global_trace))
1059                 return 0;
1060
1061         if (unlikely(tracing_disabled))
1062                 return 0;
1063
1064         alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065
1066         trace_ctx = tracing_gen_ctx();
1067         buffer = tr->array_buffer.buffer;
1068         ring_buffer_nest_start(buffer);
1069         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1070                                             trace_ctx);
1071         if (!event) {
1072                 size = 0;
1073                 goto out;
1074         }
1075
1076         entry = ring_buffer_event_data(event);
1077         entry->ip = ip;
1078
1079         memcpy(&entry->buf, str, size);
1080
1081         /* Add a newline if necessary */
1082         if (entry->buf[size - 1] != '\n') {
1083                 entry->buf[size] = '\n';
1084                 entry->buf[size + 1] = '\0';
1085         } else
1086                 entry->buf[size] = '\0';
1087
1088         __buffer_unlock_commit(buffer, event);
1089         ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090  out:
1091         ring_buffer_nest_end(buffer);
1092         return size;
1093 }
1094 EXPORT_SYMBOL_GPL(__trace_array_puts);
1095
1096 /**
1097  * __trace_puts - write a constant string into the trace buffer.
1098  * @ip:    The address of the caller
1099  * @str:   The constant string to write
1100  * @size:  The size of the string.
1101  */
1102 int __trace_puts(unsigned long ip, const char *str, int size)
1103 {
1104         return __trace_array_puts(&global_trace, ip, str, size);
1105 }
1106 EXPORT_SYMBOL_GPL(__trace_puts);
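
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * callers normally use the trace_puts() macro rather than __trace_puts()
 * directly; the macro picks __trace_bputs() for string literals.  The
 * example_annotate() name is hypothetical.
 */
static void example_annotate(void)
{
	/* Drop a cheap, unformatted marker into the top-level ring buffer. */
	trace_puts("example: reached the interesting spot\n");
}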
1107
1108 /**
1109  * __trace_bputs - write the pointer to a constant string into trace buffer
1110  * @ip:    The address of the caller
1111  * @str:   The constant string to write to the buffer to
1112  */
1113 int __trace_bputs(unsigned long ip, const char *str)
1114 {
1115         struct ring_buffer_event *event;
1116         struct trace_buffer *buffer;
1117         struct bputs_entry *entry;
1118         unsigned int trace_ctx;
1119         int size = sizeof(struct bputs_entry);
1120         int ret = 0;
1121
1122         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1123                 return 0;
1124
1125         if (unlikely(tracing_selftest_running || tracing_disabled))
1126                 return 0;
1127
1128         trace_ctx = tracing_gen_ctx();
1129         buffer = global_trace.array_buffer.buffer;
1130
1131         ring_buffer_nest_start(buffer);
1132         event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1133                                             trace_ctx);
1134         if (!event)
1135                 goto out;
1136
1137         entry = ring_buffer_event_data(event);
1138         entry->ip                       = ip;
1139         entry->str                      = str;
1140
1141         __buffer_unlock_commit(buffer, event);
1142         ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1143
1144         ret = 1;
1145  out:
1146         ring_buffer_nest_end(buffer);
1147         return ret;
1148 }
1149 EXPORT_SYMBOL_GPL(__trace_bputs);
1150
1151 #ifdef CONFIG_TRACER_SNAPSHOT
1152 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1153                                            void *cond_data)
1154 {
1155         struct tracer *tracer = tr->current_trace;
1156         unsigned long flags;
1157
1158         if (in_nmi()) {
1159                 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1160                 trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1161                 return;
1162         }
1163
1164         if (!tr->allocated_snapshot) {
1165                 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1166                 trace_array_puts(tr, "*** stopping trace here!   ***\n");
1167                 tracer_tracing_off(tr);
1168                 return;
1169         }
1170
1171         /* Note, snapshot can not be used when the tracer uses it */
1172         if (tracer->use_max_tr) {
1173                 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1174                 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1175                 return;
1176         }
1177
1178         local_irq_save(flags);
1179         update_max_tr(tr, current, smp_processor_id(), cond_data);
1180         local_irq_restore(flags);
1181 }
1182
1183 void tracing_snapshot_instance(struct trace_array *tr)
1184 {
1185         tracing_snapshot_instance_cond(tr, NULL);
1186 }
1187
1188 /**
1189  * tracing_snapshot - take a snapshot of the current buffer.
1190  *
1191  * This causes a swap between the snapshot buffer and the current live
1192  * tracing buffer. You can use this to take snapshots of the live
1193  * trace when some condition is triggered, but continue to trace.
1194  *
1195  * Note, make sure to allocate the snapshot either with
1196  * tracing_snapshot_alloc(), or manually with:
1197  *     echo 1 > /sys/kernel/tracing/snapshot
1198  *
1199  * If the snapshot buffer is not allocated, this will stop tracing,
1200  * basically making a permanent snapshot.
1201  */
1202 void tracing_snapshot(void)
1203 {
1204         struct trace_array *tr = &global_trace;
1205
1206         tracing_snapshot_instance(tr);
1207 }
1208 EXPORT_SYMBOL_GPL(tracing_snapshot);
1209
1210 /**
1211  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1212  * @tr:         The tracing instance to snapshot
1213  * @cond_data:  The data to be tested conditionally, and possibly saved
1214  *
1215  * This is the same as tracing_snapshot() except that the snapshot is
1216  * conditional - the snapshot will only happen if the
1217  * cond_snapshot.update() implementation receiving the cond_data
1218  * returns true, which means that the trace array's cond_snapshot
1219  * update() operation used the cond_data to determine whether the
1220  * snapshot should be taken, and if it was, presumably saved it along
1221  * with the snapshot.
1222  */
1223 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 {
1225         tracing_snapshot_instance_cond(tr, cond_data);
1226 }
1227 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1228
1229 /**
1230  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1231  * @tr:         The tracing instance
1232  *
1233  * When the user enables a conditional snapshot using
1234  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1235  * with the snapshot.  This accessor is used to retrieve it.
1236  *
1237  * Should not be called from cond_snapshot.update(), since it takes
1238  * the tr->max_lock lock, which the code calling
1239  * cond_snapshot.update() has already done.
1240  *
1241  * Returns the cond_data associated with the trace array's snapshot.
1242  */
1243 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 {
1245         void *cond_data = NULL;
1246
1247         local_irq_disable();
1248         arch_spin_lock(&tr->max_lock);
1249
1250         if (tr->cond_snapshot)
1251                 cond_data = tr->cond_snapshot->cond_data;
1252
1253         arch_spin_unlock(&tr->max_lock);
1254         local_irq_enable();
1255
1256         return cond_data;
1257 }
1258 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259
1260 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1261                                         struct array_buffer *size_buf, int cpu_id);
1262 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263
1264 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1265 {
1266         int order;
1267         int ret;
1268
1269         if (!tr->allocated_snapshot) {
1270
1271                 /* Make the snapshot buffer have the same order as main buffer */
1272                 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1273                 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1274                 if (ret < 0)
1275                         return ret;
1276
1277                 /* allocate spare buffer */
1278                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1279                                    &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1280                 if (ret < 0)
1281                         return ret;
1282
1283                 tr->allocated_snapshot = true;
1284         }
1285
1286         return 0;
1287 }
1288
1289 static void free_snapshot(struct trace_array *tr)
1290 {
1291         /*
1292          * We don't free the ring buffer. Instead, we resize it because
1293          * the max_tr ring buffer has some state (e.g. ring->clock) and
1294          * we want to preserve it.
1295          */
1296         ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1297         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1298         set_buffer_entries(&tr->max_buffer, 1);
1299         tracing_reset_online_cpus(&tr->max_buffer);
1300         tr->allocated_snapshot = false;
1301 }
1302
1303 /**
1304  * tracing_alloc_snapshot - allocate snapshot buffer.
1305  *
1306  * This only allocates the snapshot buffer if it isn't already
1307  * allocated - it doesn't also take a snapshot.
1308  *
1309  * This is meant to be used in cases where the snapshot buffer needs
1310  * to be set up for events that can't sleep but need to be able to
1311  * trigger a snapshot.
1312  */
1313 int tracing_alloc_snapshot(void)
1314 {
1315         struct trace_array *tr = &global_trace;
1316         int ret;
1317
1318         ret = tracing_alloc_snapshot_instance(tr);
1319         WARN_ON(ret < 0);
1320
1321         return ret;
1322 }
1323 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1324
1325 /**
1326  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1327  *
1328  * This is similar to tracing_snapshot(), but it will allocate the
1329  * snapshot buffer if it isn't already allocated. Use this only
1330  * where it is safe to sleep, as the allocation may sleep.
1331  *
1332  * This causes a swap between the snapshot buffer and the current live
1333  * tracing buffer. You can use this to take snapshots of the live
1334  * trace when some condition is triggered, but continue to trace.
1335  */
1336 void tracing_snapshot_alloc(void)
1337 {
1338         int ret;
1339
1340         ret = tracing_alloc_snapshot();
1341         if (ret < 0)
1342                 return;
1343
1344         tracing_snapshot();
1345 }
1346 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
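
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * allocate the spare buffer once from sleepable context, then the actual
 * snapshot can be triggered later from atomic context.  The example_*
 * names are hypothetical.
 */
static int example_setup(void)
{
	/* May sleep: makes sure the snapshot (spare) buffer exists. */
	return tracing_alloc_snapshot();
}

static void example_trigger(void)
{
	/* Safe outside of NMI context once the buffer is allocated. */
	tracing_snapshot();
}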
1347
1348 /**
1349  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1350  * @tr:         The tracing instance
1351  * @cond_data:  User data to associate with the snapshot
1352  * @update:     Implementation of the cond_snapshot update function
1353  *
1354  * Check whether the conditional snapshot for the given instance has
1355  * already been enabled, or if the current tracer is already using a
1356  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1357  * save the cond_data and update function inside.
1358  *
1359  * Returns 0 if successful, error otherwise.
1360  */
1361 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1362                                  cond_update_fn_t update)
1363 {
1364         struct cond_snapshot *cond_snapshot;
1365         int ret = 0;
1366
1367         cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1368         if (!cond_snapshot)
1369                 return -ENOMEM;
1370
1371         cond_snapshot->cond_data = cond_data;
1372         cond_snapshot->update = update;
1373
1374         mutex_lock(&trace_types_lock);
1375
1376         ret = tracing_alloc_snapshot_instance(tr);
1377         if (ret)
1378                 goto fail_unlock;
1379
1380         if (tr->current_trace->use_max_tr) {
1381                 ret = -EBUSY;
1382                 goto fail_unlock;
1383         }
1384
1385         /*
1386          * The cond_snapshot can only change to NULL without the
1387          * trace_types_lock. We don't care if we race with it going
1388          * to NULL, but we want to make sure that it's not set to
1389          * something other than NULL when we get here, which we can
1390          * do safely with only holding the trace_types_lock and not
1391          * having to take the max_lock.
1392          */
1393         if (tr->cond_snapshot) {
1394                 ret = -EBUSY;
1395                 goto fail_unlock;
1396         }
1397
1398         local_irq_disable();
1399         arch_spin_lock(&tr->max_lock);
1400         tr->cond_snapshot = cond_snapshot;
1401         arch_spin_unlock(&tr->max_lock);
1402         local_irq_enable();
1403
1404         mutex_unlock(&trace_types_lock);
1405
1406         return ret;
1407
1408  fail_unlock:
1409         mutex_unlock(&trace_types_lock);
1410         kfree(cond_snapshot);
1411         return ret;
1412 }
1413 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
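
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a conditional snapshot pairs an update() callback with
 * tracing_snapshot_cond().  The example_update() name is hypothetical; the
 * cond_data handed to tracing_snapshot_cond() is what the callback sees.
 */
static bool example_update(struct trace_array *tr, void *cond_data)
{
	unsigned long *value = cond_data;

	/* Only let the snapshot (buffer swap) happen past a threshold. */
	return *value > 100;
}

/*
 * Setup (sleepable):	tracing_snapshot_cond_enable(tr, NULL, example_update);
 * Trigger point:	tracing_snapshot_cond(tr, &some_value);
 * Teardown:		tracing_snapshot_cond_disable(tr);
 */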
1414
1415 /**
1416  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1417  * @tr:         The tracing instance
1418  *
1419  * Check whether the conditional snapshot for the given instance is
1420  * enabled; if so, free the cond_snapshot associated with it,
1421  * otherwise return -EINVAL.
1422  *
1423  * Returns 0 if successful, error otherwise.
1424  */
1425 int tracing_snapshot_cond_disable(struct trace_array *tr)
1426 {
1427         int ret = 0;
1428
1429         local_irq_disable();
1430         arch_spin_lock(&tr->max_lock);
1431
1432         if (!tr->cond_snapshot)
1433                 ret = -EINVAL;
1434         else {
1435                 kfree(tr->cond_snapshot);
1436                 tr->cond_snapshot = NULL;
1437         }
1438
1439         arch_spin_unlock(&tr->max_lock);
1440         local_irq_enable();
1441
1442         return ret;
1443 }
1444 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1445 #else
1446 void tracing_snapshot(void)
1447 {
1448         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1449 }
1450 EXPORT_SYMBOL_GPL(tracing_snapshot);
1451 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1452 {
1453         WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1454 }
1455 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1456 int tracing_alloc_snapshot(void)
1457 {
1458         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1459         return -ENODEV;
1460 }
1461 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1462 void tracing_snapshot_alloc(void)
1463 {
1464         /* Give warning */
1465         tracing_snapshot();
1466 }
1467 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1468 void *tracing_cond_snapshot_data(struct trace_array *tr)
1469 {
1470         return NULL;
1471 }
1472 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1473 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1474 {
1475         return -ENODEV;
1476 }
1477 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1478 int tracing_snapshot_cond_disable(struct trace_array *tr)
1479 {
1480         return false;
1481 }
1482 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1483 #define free_snapshot(tr)       do { } while (0)
1484 #endif /* CONFIG_TRACER_SNAPSHOT */
1485
1486 void tracer_tracing_off(struct trace_array *tr)
1487 {
1488         if (tr->array_buffer.buffer)
1489                 ring_buffer_record_off(tr->array_buffer.buffer);
1490         /*
1491          * This flag is looked at when buffers haven't been allocated
1492          * yet, or by some tracers (like irqsoff) that just want to
1493          * know if the ring buffer has been disabled, but it can handle
1494          * races where it gets disabled but we still do a record.
1495          * As the check is in the fast path of the tracers, it is more
1496          * important to be fast than accurate.
1497          */
1498         tr->buffer_disabled = 1;
1499         /* Make the flag seen by readers */
1500         smp_wmb();
1501 }
1502
1503 /**
1504  * tracing_off - turn off tracing buffers
1505  *
1506  * This function stops the tracing buffers from recording data.
1507  * It does not disable any overhead the tracers themselves may
1508  * be causing. This function simply causes all recording to
1509  * the ring buffers to fail.
1510  */
1511 void tracing_off(void)
1512 {
1513         tracer_tracing_off(&global_trace);
1514 }
1515 EXPORT_SYMBOL_GPL(tracing_off);
1516
1517 void disable_trace_on_warning(void)
1518 {
1519         if (__disable_trace_on_warning) {
1520                 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1521                         "Disabling tracing due to warning\n");
1522                 tracing_off();
1523         }
1524 }
1525
1526 /**
1527  * tracer_tracing_is_on - show real state of ring buffer enabled
1528  * @tr : the trace array to know if ring buffer is enabled
1529  *
1530  * Shows real state of the ring buffer if it is enabled or not.
1531  */
1532 bool tracer_tracing_is_on(struct trace_array *tr)
1533 {
1534         if (tr->array_buffer.buffer)
1535                 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1536         return !tr->buffer_disabled;
1537 }
1538
1539 /**
1540  * tracing_is_on - show state of ring buffers enabled
1541  */
1542 int tracing_is_on(void)
1543 {
1544         return tracer_tracing_is_on(&global_trace);
1545 }
1546 EXPORT_SYMBOL_GPL(tracing_is_on);
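
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a classic debugging pattern is to freeze the ring buffer as soon as the
 * interesting condition is seen, so the end of the trace lines up with the
 * event.  example_stop_trace_here() is a hypothetical name.
 */
static void example_stop_trace_here(void)
{
	if (tracing_is_on()) {
		trace_puts("example: stopping trace here\n");
		tracing_off();		/* buffers keep what they recorded */
	}
}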
1547
1548 static int __init set_buf_size(char *str)
1549 {
1550         unsigned long buf_size;
1551
1552         if (!str)
1553                 return 0;
1554         buf_size = memparse(str, &str);
1555         /*
1556          * nr_entries can not be zero and the startup
1557          * tests require some buffer space. Therefore
1558          * ensure we have at least 4096 bytes of buffer.
1559          */
1560         trace_buf_size = max(4096UL, buf_size);
1561         return 1;
1562 }
1563 __setup("trace_buf_size=", set_buf_size);
1564
1565 static int __init set_tracing_thresh(char *str)
1566 {
1567         unsigned long threshold;
1568         int ret;
1569
1570         if (!str)
1571                 return 0;
1572         ret = kstrtoul(str, 0, &threshold);
1573         if (ret < 0)
1574                 return 0;
1575         tracing_thresh = threshold * 1000;
1576         return 1;
1577 }
1578 __setup("tracing_thresh=", set_tracing_thresh);
1579
1580 unsigned long nsecs_to_usecs(unsigned long nsecs)
1581 {
1582         return nsecs / 1000;
1583 }
1584
1585 /*
1586  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1587  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1588  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1589  * of strings in the order that the evals (enum) were defined.
1590  */
1591 #undef C
1592 #define C(a, b) b
1593
1594 /* These must match the bit positions in trace_iterator_flags */
1595 static const char *trace_options[] = {
1596         TRACE_FLAGS
1597         NULL
1598 };
1599
1600 static struct {
1601         u64 (*func)(void);
1602         const char *name;
1603         int in_ns;              /* is this clock in nanoseconds? */
1604 } trace_clocks[] = {
1605         { trace_clock_local,            "local",        1 },
1606         { trace_clock_global,           "global",       1 },
1607         { trace_clock_counter,          "counter",      0 },
1608         { trace_clock_jiffies,          "uptime",       0 },
1609         { trace_clock,                  "perf",         1 },
1610         { ktime_get_mono_fast_ns,       "mono",         1 },
1611         { ktime_get_raw_fast_ns,        "mono_raw",     1 },
1612         { ktime_get_boot_fast_ns,       "boot",         1 },
1613         { ktime_get_tai_fast_ns,        "tai",          1 },
1614         ARCH_TRACE_CLOCKS
1615 };
1616
1617 bool trace_clock_in_ns(struct trace_array *tr)
1618 {
1619         if (trace_clocks[tr->clock_id].in_ns)
1620                 return true;
1621
1622         return false;
1623 }
1624
1625 /*
1626  * trace_parser_get_init - gets the buffer for trace parser
1627  */
1628 int trace_parser_get_init(struct trace_parser *parser, int size)
1629 {
1630         memset(parser, 0, sizeof(*parser));
1631
1632         parser->buffer = kmalloc(size, GFP_KERNEL);
1633         if (!parser->buffer)
1634                 return 1;
1635
1636         parser->size = size;
1637         return 0;
1638 }
1639
1640 /*
1641  * trace_parser_put - frees the buffer for trace parser
1642  */
1643 void trace_parser_put(struct trace_parser *parser)
1644 {
1645         kfree(parser->buffer);
1646         parser->buffer = NULL;
1647 }
1648
1649 /*
1650  * trace_get_user - reads the user input string separated by space
1651  * (matched by isspace(ch))
1652  *
1653  * For each string found, the 'struct trace_parser' is updated,
1654  * and the function returns.
1655  *
1656  * Returns number of bytes read.
1657  *
1658  * See kernel/trace/trace.h for 'struct trace_parser' details.
1659  */
1660 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1661         size_t cnt, loff_t *ppos)
1662 {
1663         char ch;
1664         size_t read = 0;
1665         ssize_t ret;
1666
1667         if (!*ppos)
1668                 trace_parser_clear(parser);
1669
1670         ret = get_user(ch, ubuf++);
1671         if (ret)
1672                 goto out;
1673
1674         read++;
1675         cnt--;
1676
1677         /*
1678          * If the parser is not finished with the last write, continue
1679          * reading the user input without skipping spaces.
1680          */
1681         if (!parser->cont) {
1682                 /* skip white space */
1683                 while (cnt && isspace(ch)) {
1684                         ret = get_user(ch, ubuf++);
1685                         if (ret)
1686                                 goto out;
1687                         read++;
1688                         cnt--;
1689                 }
1690
1691                 parser->idx = 0;
1692
1693                 /* only spaces were written */
1694                 if (isspace(ch) || !ch) {
1695                         *ppos += read;
1696                         ret = read;
1697                         goto out;
1698                 }
1699         }
1700
1701         /* read the non-space input */
1702         while (cnt && !isspace(ch) && ch) {
1703                 if (parser->idx < parser->size - 1)
1704                         parser->buffer[parser->idx++] = ch;
1705                 else {
1706                         ret = -EINVAL;
1707                         goto out;
1708                 }
1709                 ret = get_user(ch, ubuf++);
1710                 if (ret)
1711                         goto out;
1712                 read++;
1713                 cnt--;
1714         }
1715
1716         /* We either got finished input or we have to wait for another call. */
1717         if (isspace(ch) || !ch) {
1718                 parser->buffer[parser->idx] = 0;
1719                 parser->cont = false;
1720         } else if (parser->idx < parser->size - 1) {
1721                 parser->cont = true;
1722                 parser->buffer[parser->idx++] = ch;
1723                 /* Make sure the parsed string always terminates with '\0'. */
1724                 parser->buffer[parser->idx] = 0;
1725         } else {
1726                 ret = -EINVAL;
1727                 goto out;
1728         }
1729
1730         *ppos += read;
1731         ret = read;
1732
1733 out:
1734         return ret;
1735 }
1736
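/*
 * A rough sketch of how a write handler is expected to drive the parser
 * above (the set_ftrace_filter write path follows this shape); "size" is
 * whatever maximum word length the caller wants and handle_word() stands
 * in for the caller's own handling:
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, size))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_word(parser.buffer);
 *	trace_parser_put(&parser);
 *	return read;
 */
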
1737 /* TODO add a seq_buf_to_buffer() */
1738 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1739 {
1740         int len;
1741
1742         if (trace_seq_used(s) <= s->readpos)
1743                 return -EBUSY;
1744
1745         len = trace_seq_used(s) - s->readpos;
1746         if (cnt > len)
1747                 cnt = len;
1748         memcpy(buf, s->buffer + s->readpos, cnt);
1749
1750         s->readpos += cnt;
1751         return cnt;
1752 }
1753
1754 unsigned long __read_mostly     tracing_thresh;
1755
1756 #ifdef CONFIG_TRACER_MAX_TRACE
1757 static const struct file_operations tracing_max_lat_fops;
1758
1759 #ifdef LATENCY_FS_NOTIFY
1760
1761 static struct workqueue_struct *fsnotify_wq;
1762
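/*
 * Notifying fsnotify of a tracing_max_latency update is done in two
 * steps: latency_fsnotify() queues an irq_work, and its handler then
 * queues real work on fsnotify_wq, so that fsnotify_inode() is never
 * called from the context that detected the new max latency.
 */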
1763 static void latency_fsnotify_workfn(struct work_struct *work)
1764 {
1765         struct trace_array *tr = container_of(work, struct trace_array,
1766                                               fsnotify_work);
1767         fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1768 }
1769
1770 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1771 {
1772         struct trace_array *tr = container_of(iwork, struct trace_array,
1773                                               fsnotify_irqwork);
1774         queue_work(fsnotify_wq, &tr->fsnotify_work);
1775 }
1776
1777 static void trace_create_maxlat_file(struct trace_array *tr,
1778                                      struct dentry *d_tracer)
1779 {
1780         INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1781         init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1782         tr->d_max_latency = trace_create_file("tracing_max_latency",
1783                                               TRACE_MODE_WRITE,
1784                                               d_tracer, tr,
1785                                               &tracing_max_lat_fops);
1786 }
1787
1788 __init static int latency_fsnotify_init(void)
1789 {
1790         fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1791                                       WQ_UNBOUND | WQ_HIGHPRI, 0);
1792         if (!fsnotify_wq) {
1793                 pr_err("Unable to allocate tr_max_lat_wq\n");
1794                 return -ENOMEM;
1795         }
1796         return 0;
1797 }
1798
1799 late_initcall_sync(latency_fsnotify_init);
1800
1801 void latency_fsnotify(struct trace_array *tr)
1802 {
1803         if (!fsnotify_wq)
1804                 return;
1805         /*
1806          * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1807          * possible that we are called from __schedule() or do_idle(), which
1808          * could cause a deadlock.
1809          */
1810         irq_work_queue(&tr->fsnotify_irqwork);
1811 }
1812
1813 #else /* !LATENCY_FS_NOTIFY */
1814
1815 #define trace_create_maxlat_file(tr, d_tracer)                          \
1816         trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,      \
1817                           d_tracer, tr, &tracing_max_lat_fops)
1818
1819 #endif
1820
1821 /*
1822  * Copy the new maximum trace into the separate maximum-trace
1823  * structure. (this way the maximum trace is permanently saved,
1824  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1825  */
1826 static void
1827 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1828 {
1829         struct array_buffer *trace_buf = &tr->array_buffer;
1830         struct array_buffer *max_buf = &tr->max_buffer;
1831         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1832         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1833
1834         max_buf->cpu = cpu;
1835         max_buf->time_start = data->preempt_timestamp;
1836
1837         max_data->saved_latency = tr->max_latency;
1838         max_data->critical_start = data->critical_start;
1839         max_data->critical_end = data->critical_end;
1840
1841         strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1842         max_data->pid = tsk->pid;
1843         /*
1844          * If tsk == current, then use current_uid(), as that does not use
1845          * RCU. The irq tracer can be called out of RCU scope.
1846          */
1847         if (tsk == current)
1848                 max_data->uid = current_uid();
1849         else
1850                 max_data->uid = task_uid(tsk);
1851
1852         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1853         max_data->policy = tsk->policy;
1854         max_data->rt_priority = tsk->rt_priority;
1855
1856         /* record this task's comm */
1857         tracing_record_cmdline(tsk);
1858         latency_fsnotify(tr);
1859 }
1860
1861 /**
1862  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1863  * @tr: tracer
1864  * @tsk: the task with the latency
1865  * @cpu: The cpu that initiated the trace.
1866  * @cond_data: User data associated with a conditional snapshot
1867  *
1868  * Flip the buffers between the @tr and the max_tr and record information
1869  * about which task was the cause of this latency.
1870  */
1871 void
1872 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1873               void *cond_data)
1874 {
1875         if (tr->stop_count)
1876                 return;
1877
1878         WARN_ON_ONCE(!irqs_disabled());
1879
1880         if (!tr->allocated_snapshot) {
1881                 /* Only the nop tracer should hit this when disabling */
1882                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1883                 return;
1884         }
1885
1886         arch_spin_lock(&tr->max_lock);
1887
1888         /* Inherit the recordable setting from array_buffer */
1889         if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1890                 ring_buffer_record_on(tr->max_buffer.buffer);
1891         else
1892                 ring_buffer_record_off(tr->max_buffer.buffer);
1893
1894 #ifdef CONFIG_TRACER_SNAPSHOT
1895         if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1896                 arch_spin_unlock(&tr->max_lock);
1897                 return;
1898         }
1899 #endif
1900         swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1901
1902         __update_max_tr(tr, tsk, cpu);
1903
1904         arch_spin_unlock(&tr->max_lock);
1905
1906         /* Any waiters on the old snapshot buffer need to wake up */
1907         ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1908 }
1909
1910 /**
1911  * update_max_tr_single - only copy one trace over, and reset the rest
1912  * @tr: tracer
1913  * @tsk: task with the latency
1914  * @cpu: the cpu of the buffer to copy.
1915  *
1916  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1917  */
1918 void
1919 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1920 {
1921         int ret;
1922
1923         if (tr->stop_count)
1924                 return;
1925
1926         WARN_ON_ONCE(!irqs_disabled());
1927         if (!tr->allocated_snapshot) {
1928                 /* Only the nop tracer should hit this when disabling */
1929                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1930                 return;
1931         }
1932
1933         arch_spin_lock(&tr->max_lock);
1934
1935         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1936
1937         if (ret == -EBUSY) {
1938                 /*
1939                  * We failed to swap the buffer due to a commit taking
1940                  * place on this CPU. We fail to record, but we reset
1941                  * the max trace buffer (no one writes directly to it)
1942                  * and flag that it failed.
1943                  * Another reason is that a resize is in progress.
1944                  */
1945                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1946                         "Failed to swap buffers due to commit or resize in progress\n");
1947         }
1948
1949         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1950
1951         __update_max_tr(tr, tsk, cpu);
1952         arch_spin_unlock(&tr->max_lock);
1953 }
1954
1955 #endif /* CONFIG_TRACER_MAX_TRACE */
1956
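/*
 * Context passed by wait_on_pipe() to wait_pipe_cond(): the wait on the
 * ring buffer is broken out of early if the iterator's wait_index has
 * been bumped (a waker wants us to re-check) or if the file has been
 * closed.
 */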
1957 struct pipe_wait {
1958         struct trace_iterator           *iter;
1959         int                             wait_index;
1960 };
1961
1962 static bool wait_pipe_cond(void *data)
1963 {
1964         struct pipe_wait *pwait = data;
1965         struct trace_iterator *iter = pwait->iter;
1966
1967         if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
1968                 return true;
1969
1970         return iter->closed;
1971 }
1972
1973 static int wait_on_pipe(struct trace_iterator *iter, int full)
1974 {
1975         struct pipe_wait pwait;
1976         int ret;
1977
1978         /* Iterators are static, they should be filled or empty */
1979         if (trace_buffer_iter(iter, iter->cpu_file))
1980                 return 0;
1981
1982         pwait.wait_index = atomic_read_acquire(&iter->wait_index);
1983         pwait.iter = iter;
1984
1985         ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
1986                                wait_pipe_cond, &pwait);
1987
1988 #ifdef CONFIG_TRACER_MAX_TRACE
1989         /*
1990          * Make sure this is still the snapshot buffer, as if a snapshot were
1991          * to happen, this would now be the main buffer.
1992          */
1993         if (iter->snapshot)
1994                 iter->array_buffer = &iter->tr->max_buffer;
1995 #endif
1996         return ret;
1997 }
1998
1999 #ifdef CONFIG_FTRACE_STARTUP_TEST
2000 static bool selftests_can_run;
2001
2002 struct trace_selftests {
2003         struct list_head                list;
2004         struct tracer                   *type;
2005 };
2006
2007 static LIST_HEAD(postponed_selftests);
2008
2009 static int save_selftest(struct tracer *type)
2010 {
2011         struct trace_selftests *selftest;
2012
2013         selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2014         if (!selftest)
2015                 return -ENOMEM;
2016
2017         selftest->type = type;
2018         list_add(&selftest->list, &postponed_selftests);
2019         return 0;
2020 }
2021
2022 static int run_tracer_selftest(struct tracer *type)
2023 {
2024         struct trace_array *tr = &global_trace;
2025         struct tracer *saved_tracer = tr->current_trace;
2026         int ret;
2027
2028         if (!type->selftest || tracing_selftest_disabled)
2029                 return 0;
2030
2031         /*
2032          * If a tracer registers early in boot up (before scheduling is
2033          * initialized and such), then do not run its selftests yet.
2034          * Instead, run it a little later in the boot process.
2035          */
2036         if (!selftests_can_run)
2037                 return save_selftest(type);
2038
2039         if (!tracing_is_on()) {
2040                 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2041                         type->name);
2042                 return 0;
2043         }
2044
2045         /*
2046          * Run a selftest on this tracer.
2047          * Here we reset the trace buffer, and set the current
2048          * tracer to be this tracer. The tracer can then run some
2049          * internal tracing to verify that everything is in order.
2050          * If we fail, we do not register this tracer.
2051          */
2052         tracing_reset_online_cpus(&tr->array_buffer);
2053
2054         tr->current_trace = type;
2055
2056 #ifdef CONFIG_TRACER_MAX_TRACE
2057         if (type->use_max_tr) {
2058                 /* If we expanded the buffers, make sure the max is expanded too */
2059                 if (tr->ring_buffer_expanded)
2060                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2061                                            RING_BUFFER_ALL_CPUS);
2062                 tr->allocated_snapshot = true;
2063         }
2064 #endif
2065
2066         /* the test is responsible for initializing and enabling */
2067         pr_info("Testing tracer %s: ", type->name);
2068         ret = type->selftest(type, tr);
2069         /* the test is responsible for resetting too */
2070         tr->current_trace = saved_tracer;
2071         if (ret) {
2072                 printk(KERN_CONT "FAILED!\n");
2073                 /* Add the warning after printing 'FAILED' */
2074                 WARN_ON(1);
2075                 return -1;
2076         }
2077         /* Only reset on passing, to avoid touching corrupted buffers */
2078         tracing_reset_online_cpus(&tr->array_buffer);
2079
2080 #ifdef CONFIG_TRACER_MAX_TRACE
2081         if (type->use_max_tr) {
2082                 tr->allocated_snapshot = false;
2083
2084                 /* Shrink the max buffer again */
2085                 if (tr->ring_buffer_expanded)
2086                         ring_buffer_resize(tr->max_buffer.buffer, 1,
2087                                            RING_BUFFER_ALL_CPUS);
2088         }
2089 #endif
2090
2091         printk(KERN_CONT "PASSED\n");
2092         return 0;
2093 }
2094
2095 static int do_run_tracer_selftest(struct tracer *type)
2096 {
2097         int ret;
2098
2099         /*
2100          * Tests can take a long time, especially if they are run one after the
2101          * other, as does happen during bootup when all the tracers are
2102          * registered. This could cause the soft lockup watchdog to trigger.
2103          */
2104         cond_resched();
2105
2106         tracing_selftest_running = true;
2107         ret = run_tracer_selftest(type);
2108         tracing_selftest_running = false;
2109
2110         return ret;
2111 }
2112
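/*
 * Once booting has progressed far enough for selftests to run, go back
 * and run the ones that save_selftest() postponed. A tracer whose
 * postponed selftest fails is unlinked from trace_types, removing it
 * from available_tracers.
 */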
2113 static __init int init_trace_selftests(void)
2114 {
2115         struct trace_selftests *p, *n;
2116         struct tracer *t, **last;
2117         int ret;
2118
2119         selftests_can_run = true;
2120
2121         mutex_lock(&trace_types_lock);
2122
2123         if (list_empty(&postponed_selftests))
2124                 goto out;
2125
2126         pr_info("Running postponed tracer tests:\n");
2127
2128         tracing_selftest_running = true;
2129         list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2130                 /* This loop can take minutes when sanitizers are enabled, so
2131                  * let's make sure we allow RCU processing.
2132                  */
2133                 cond_resched();
2134                 ret = run_tracer_selftest(p->type);
2135                 /* If the test fails, then warn and remove from available_tracers */
2136                 if (ret < 0) {
2137                         WARN(1, "tracer: %s failed selftest, disabling\n",
2138                              p->type->name);
2139                         last = &trace_types;
2140                         for (t = trace_types; t; t = t->next) {
2141                                 if (t == p->type) {
2142                                         *last = t->next;
2143                                         break;
2144                                 }
2145                                 last = &t->next;
2146                         }
2147                 }
2148                 list_del(&p->list);
2149                 kfree(p);
2150         }
2151         tracing_selftest_running = false;
2152
2153  out:
2154         mutex_unlock(&trace_types_lock);
2155
2156         return 0;
2157 }
2158 core_initcall(init_trace_selftests);
2159 #else
2160 static inline int run_tracer_selftest(struct tracer *type)
2161 {
2162         return 0;
2163 }
2164 static inline int do_run_tracer_selftest(struct tracer *type)
2165 {
2166         return 0;
2167 }
2168 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2169
2170 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2171
2172 static void __init apply_trace_boot_options(void);
2173
2174 /**
2175  * register_tracer - register a tracer with the ftrace system.
2176  * @type: the plugin for the tracer
2177  *
2178  * Register a new plugin tracer.
2179  */
2180 int __init register_tracer(struct tracer *type)
2181 {
2182         struct tracer *t;
2183         int ret = 0;
2184
2185         if (!type->name) {
2186                 pr_info("Tracer must have a name\n");
2187                 return -1;
2188         }
2189
2190         if (strlen(type->name) >= MAX_TRACER_SIZE) {
2191                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2192                 return -1;
2193         }
2194
2195         if (security_locked_down(LOCKDOWN_TRACEFS)) {
2196                 pr_warn("Can not register tracer %s due to lockdown\n",
2197                            type->name);
2198                 return -EPERM;
2199         }
2200
2201         mutex_lock(&trace_types_lock);
2202
2203         for (t = trace_types; t; t = t->next) {
2204                 if (strcmp(type->name, t->name) == 0) {
2205                         /* already found */
2206                         pr_info("Tracer %s already registered\n",
2207                                 type->name);
2208                         ret = -1;
2209                         goto out;
2210                 }
2211         }
2212
2213         if (!type->set_flag)
2214                 type->set_flag = &dummy_set_flag;
2215         if (!type->flags) {
2216                 /* Allocate a dummy tracer_flags */
2217                 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2218                 if (!type->flags) {
2219                         ret = -ENOMEM;
2220                         goto out;
2221                 }
2222                 type->flags->val = 0;
2223                 type->flags->opts = dummy_tracer_opt;
2224         } else
2225                 if (!type->flags->opts)
2226                         type->flags->opts = dummy_tracer_opt;
2227
2228         /* store the tracer for __set_tracer_option */
2229         type->flags->trace = type;
2230
2231         ret = do_run_tracer_selftest(type);
2232         if (ret < 0)
2233                 goto out;
2234
2235         type->next = trace_types;
2236         trace_types = type;
2237         add_tracer_options(&global_trace, type);
2238
2239  out:
2240         mutex_unlock(&trace_types_lock);
2241
2242         if (ret || !default_bootup_tracer)
2243                 goto out_unlock;
2244
2245         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2246                 goto out_unlock;
2247
2248         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2249         /* Do we want this tracer to start on bootup? */
2250         tracing_set_tracer(&global_trace, type->name);
2251         default_bootup_tracer = NULL;
2252
2253         apply_trace_boot_options();
2254
2255         /* Disable other selftests, since running this tracer would break them. */
2256         disable_tracing_selftest("running a tracer");
2257
2258  out_unlock:
2259         return ret;
2260 }
2261
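/*
 * Reset a single CPU's buffer. Recording is disabled around the reset,
 * and synchronize_rcu() lets any commit that is still in progress finish
 * before the buffer is cleared.
 */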
2262 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2263 {
2264         struct trace_buffer *buffer = buf->buffer;
2265
2266         if (!buffer)
2267                 return;
2268
2269         ring_buffer_record_disable(buffer);
2270
2271         /* Make sure all commits have finished */
2272         synchronize_rcu();
2273         ring_buffer_reset_cpu(buffer, cpu);
2274
2275         ring_buffer_record_enable(buffer);
2276 }
2277
2278 void tracing_reset_online_cpus(struct array_buffer *buf)
2279 {
2280         struct trace_buffer *buffer = buf->buffer;
2281
2282         if (!buffer)
2283                 return;
2284
2285         ring_buffer_record_disable(buffer);
2286
2287         /* Make sure all commits have finished */
2288         synchronize_rcu();
2289
2290         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2291
2292         ring_buffer_reset_online_cpus(buffer);
2293
2294         ring_buffer_record_enable(buffer);
2295 }
2296
2297 /* Must have trace_types_lock held */
2298 void tracing_reset_all_online_cpus_unlocked(void)
2299 {
2300         struct trace_array *tr;
2301
2302         lockdep_assert_held(&trace_types_lock);
2303
2304         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2305                 if (!tr->clear_trace)
2306                         continue;
2307                 tr->clear_trace = false;
2308                 tracing_reset_online_cpus(&tr->array_buffer);
2309 #ifdef CONFIG_TRACER_MAX_TRACE
2310                 tracing_reset_online_cpus(&tr->max_buffer);
2311 #endif
2312         }
2313 }
2314
2315 void tracing_reset_all_online_cpus(void)
2316 {
2317         mutex_lock(&trace_types_lock);
2318         tracing_reset_all_online_cpus_unlocked();
2319         mutex_unlock(&trace_types_lock);
2320 }
2321
2322 int is_tracing_stopped(void)
2323 {
2324         return global_trace.stop_count;
2325 }
2326
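/*
 * tracing_start_tr()/tracing_stop_tr() nest via tr->stop_count: recording
 * is disabled on the first stop and only re-enabled again when every stop
 * has been matched by a start.
 */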
2327 static void tracing_start_tr(struct trace_array *tr)
2328 {
2329         struct trace_buffer *buffer;
2330         unsigned long flags;
2331
2332         if (tracing_disabled)
2333                 return;
2334
2335         raw_spin_lock_irqsave(&tr->start_lock, flags);
2336         if (--tr->stop_count) {
2337                 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2338                         /* Someone screwed up their debugging */
2339                         tr->stop_count = 0;
2340                 }
2341                 goto out;
2342         }
2343
2344         /* Prevent the buffers from switching */
2345         arch_spin_lock(&tr->max_lock);
2346
2347         buffer = tr->array_buffer.buffer;
2348         if (buffer)
2349                 ring_buffer_record_enable(buffer);
2350
2351 #ifdef CONFIG_TRACER_MAX_TRACE
2352         buffer = tr->max_buffer.buffer;
2353         if (buffer)
2354                 ring_buffer_record_enable(buffer);
2355 #endif
2356
2357         arch_spin_unlock(&tr->max_lock);
2358
2359  out:
2360         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2361 }
2362
2363 /**
2364  * tracing_start - quick start of the tracer
2365  *
2366  * If tracing is enabled but was stopped by tracing_stop,
2367  * this will start the tracer back up.
2368  */
2369 void tracing_start(void)
2370
2371 {
2372         return tracing_start_tr(&global_trace);
2373 }
2374
2375 static void tracing_stop_tr(struct trace_array *tr)
2376 {
2377         struct trace_buffer *buffer;
2378         unsigned long flags;
2379
2380         raw_spin_lock_irqsave(&tr->start_lock, flags);
2381         if (tr->stop_count++)
2382                 goto out;
2383
2384         /* Prevent the buffers from switching */
2385         arch_spin_lock(&tr->max_lock);
2386
2387         buffer = tr->array_buffer.buffer;
2388         if (buffer)
2389                 ring_buffer_record_disable(buffer);
2390
2391 #ifdef CONFIG_TRACER_MAX_TRACE
2392         buffer = tr->max_buffer.buffer;
2393         if (buffer)
2394                 ring_buffer_record_disable(buffer);
2395 #endif
2396
2397         arch_spin_unlock(&tr->max_lock);
2398
2399  out:
2400         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2401 }
2402
2403 /**
2404  * tracing_stop - quick stop of the tracer
2405  *
2406  * Light weight way to stop tracing. Use in conjunction with
2407  * tracing_start.
2408  */
2409 void tracing_stop(void)
2410 {
2411         return tracing_stop_tr(&global_trace);
2412 }
2413
2414 /*
2415  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2416  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2417  * simplifies those functions and keeps them in sync.
2418  */
2419 enum print_line_t trace_handle_return(struct trace_seq *s)
2420 {
2421         return trace_seq_has_overflowed(s) ?
2422                 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2423 }
2424 EXPORT_SYMBOL_GPL(trace_handle_return);
2425
2426 static unsigned short migration_disable_value(void)
2427 {
2428 #if defined(CONFIG_SMP)
2429         return current->migration_disabled;
2430 #else
2431         return 0;
2432 #endif
2433 }
2434
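/*
 * Pack the tracing context into a single word: the irq/NMI/softirq/
 * resched flags gathered below go in the upper 16 bits, the preemption
 * depth (capped at 15) in bits 0-3, and the migration-disable depth
 * (also capped at 15) in bits 4-7.
 */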
2435 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2436 {
2437         unsigned int trace_flags = irqs_status;
2438         unsigned int pc;
2439
2440         pc = preempt_count();
2441
2442         if (pc & NMI_MASK)
2443                 trace_flags |= TRACE_FLAG_NMI;
2444         if (pc & HARDIRQ_MASK)
2445                 trace_flags |= TRACE_FLAG_HARDIRQ;
2446         if (in_serving_softirq())
2447                 trace_flags |= TRACE_FLAG_SOFTIRQ;
2448         if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2449                 trace_flags |= TRACE_FLAG_BH_OFF;
2450
2451         if (tif_need_resched())
2452                 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2453         if (test_preempt_need_resched())
2454                 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2455         return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2456                 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2457 }
2458
2459 struct ring_buffer_event *
2460 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2461                           int type,
2462                           unsigned long len,
2463                           unsigned int trace_ctx)
2464 {
2465         return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2466 }
2467
2468 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2469 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2470 static int trace_buffered_event_ref;
2471
2472 /**
2473  * trace_buffered_event_enable - enable buffering events
2474  *
2475  * When events are being filtered, it is quicker to use a temporary
2476  * buffer to write the event data into if there's a likely chance
2477  * that it will not be committed. Discarding an event from the ring
2478  * buffer is not as fast as committing one, and is much slower than
2479  * copying the data over on a commit.
2480  *
2481  * When an event is to be filtered, allocate per cpu buffers to
2482  * write the event data into, and if the event is filtered and discarded
2483  * it is simply dropped, otherwise, the entire data is to be committed
2484  * in one shot.
2485  */
2486 void trace_buffered_event_enable(void)
2487 {
2488         struct ring_buffer_event *event;
2489         struct page *page;
2490         int cpu;
2491
2492         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2493
2494         if (trace_buffered_event_ref++)
2495                 return;
2496
2497         for_each_tracing_cpu(cpu) {
2498                 page = alloc_pages_node(cpu_to_node(cpu),
2499                                         GFP_KERNEL | __GFP_NORETRY, 0);
2500                 /* This is just an optimization and can handle failures */
2501                 if (!page) {
2502                         pr_err("Failed to allocate event buffer\n");
2503                         break;
2504                 }
2505
2506                 event = page_address(page);
2507                 memset(event, 0, sizeof(*event));
2508
2509                 per_cpu(trace_buffered_event, cpu) = event;
2510
2511                 preempt_disable();
2512                 if (cpu == smp_processor_id() &&
2513                     __this_cpu_read(trace_buffered_event) !=
2514                     per_cpu(trace_buffered_event, cpu))
2515                         WARN_ON_ONCE(1);
2516                 preempt_enable();
2517         }
2518 }
2519
2520 static void enable_trace_buffered_event(void *data)
2521 {
2522         /* Probably not needed, but do it anyway */
2523         smp_rmb();
2524         this_cpu_dec(trace_buffered_event_cnt);
2525 }
2526
2527 static void disable_trace_buffered_event(void *data)
2528 {
2529         this_cpu_inc(trace_buffered_event_cnt);
2530 }
2531
2532 /**
2533  * trace_buffered_event_disable - disable buffering events
2534  *
2535  * When a filter is removed, it is faster to not use the buffered
2536  * events, and to commit directly into the ring buffer. Free up
2537  * the temp buffers when there are no more users. This requires
2538  * special synchronization with current events.
2539  */
2540 void trace_buffered_event_disable(void)
2541 {
2542         int cpu;
2543
2544         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2545
2546         if (WARN_ON_ONCE(!trace_buffered_event_ref))
2547                 return;
2548
2549         if (--trace_buffered_event_ref)
2550                 return;
2551
2552         /* For each CPU, set the buffer as used. */
2553         on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2554                          NULL, true);
2555
2556         /* Wait for all current users to finish */
2557         synchronize_rcu();
2558
2559         for_each_tracing_cpu(cpu) {
2560                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2561                 per_cpu(trace_buffered_event, cpu) = NULL;
2562         }
2563
2564         /*
2565          * Wait for all CPUs that potentially started checking if they can use
2566          * their event buffer only after the previous synchronize_rcu() call and
2567          * they still read a valid pointer from trace_buffered_event. It must be
2568          * ensured they don't see cleared trace_buffered_event_cnt else they
2569          * could wrongly decide to use the pointed-to buffer which is now freed.
2570          */
2571         synchronize_rcu();
2572
2573         /* For each CPU, relinquish the buffer */
2574         on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2575                          true);
2576 }
2577
2578 static struct trace_buffer *temp_buffer;
2579
2580 struct ring_buffer_event *
2581 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2582                           struct trace_event_file *trace_file,
2583                           int type, unsigned long len,
2584                           unsigned int trace_ctx)
2585 {
2586         struct ring_buffer_event *entry;
2587         struct trace_array *tr = trace_file->tr;
2588         int val;
2589
2590         *current_rb = tr->array_buffer.buffer;
2591
2592         if (!tr->no_filter_buffering_ref &&
2593             (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2594                 preempt_disable_notrace();
2595                 /*
2596                  * Filtering is on, so try to use the per cpu buffer first.
2597                  * This buffer will simulate a ring_buffer_event,
2598                  * where the type_len is zero and the array[0] will
2599                  * hold the full length.
2600                  * (see include/linux/ring_buffer.h for details on
2601                  *  how the ring_buffer_event is structured).
2602                  *
2603                  * Using a temp buffer during filtering and copying it
2604                  * on a matched filter is quicker than writing directly
2605                  * into the ring buffer and then discarding it when
2606                  * it doesn't match. That is because the discard
2607                  * requires several atomic operations to get right.
2608                  * Copying on a match and doing nothing on a failed match
2609                  * is still quicker than skipping the copy on a match but
2610                  * having to discard out of the ring buffer on a failed match.
2611                  */
2612                 if ((entry = __this_cpu_read(trace_buffered_event))) {
2613                         int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2614
2615                         val = this_cpu_inc_return(trace_buffered_event_cnt);
2616
2617                         /*
2618                          * Preemption is disabled, but interrupts and NMIs
2619                          * can still come in now. If that happens after
2620                          * the above increment, then it will have to go
2621                          * back to the old method of allocating the event
2622                          * on the ring buffer, and if the filter fails, it
2623                          * will have to call ring_buffer_discard_commit()
2624                          * to remove it.
2625                          *
2626                          * Need to also check the unlikely case that the
2627                          * length is bigger than the temp buffer size.
2628                          * If that happens, then the reserve is pretty much
2629                          * guaranteed to fail, as the ring buffer currently
2630                          * only allows events less than a page. But that may
2631                          * change in the future, so let the ring buffer reserve
2632                          * handle the failure in that case.
2633                          */
2634                         if (val == 1 && likely(len <= max_len)) {
2635                                 trace_event_setup(entry, type, trace_ctx);
2636                                 entry->array[0] = len;
2637                                 /* Return with preemption disabled */
2638                                 return entry;
2639                         }
2640                         this_cpu_dec(trace_buffered_event_cnt);
2641                 }
2642                 /* __trace_buffer_lock_reserve() disables preemption */
2643                 preempt_enable_notrace();
2644         }
2645
2646         entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2647                                             trace_ctx);
2648         /*
2649          * If tracing is off, but we have triggers enabled
2650          * we still need to look at the event data. Use the temp_buffer
2651          * to store the trace event for the trigger to use. It's recursion
2652          * safe and will not be recorded anywhere.
2653          */
2654         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2655                 *current_rb = temp_buffer;
2656                 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2657                                                     trace_ctx);
2658         }
2659         return entry;
2660 }
2661 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2662
2663 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2664 static DEFINE_MUTEX(tracepoint_printk_mutex);
2665
2666 static void output_printk(struct trace_event_buffer *fbuffer)
2667 {
2668         struct trace_event_call *event_call;
2669         struct trace_event_file *file;
2670         struct trace_event *event;
2671         unsigned long flags;
2672         struct trace_iterator *iter = tracepoint_print_iter;
2673
2674         /* We should never get here if iter is NULL */
2675         if (WARN_ON_ONCE(!iter))
2676                 return;
2677
2678         event_call = fbuffer->trace_file->event_call;
2679         if (!event_call || !event_call->event.funcs ||
2680             !event_call->event.funcs->trace)
2681                 return;
2682
2683         file = fbuffer->trace_file;
2684         if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2685             (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2686              !filter_match_preds(file->filter, fbuffer->entry)))
2687                 return;
2688
2689         event = &fbuffer->trace_file->event_call->event;
2690
2691         raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2692         trace_seq_init(&iter->seq);
2693         iter->ent = fbuffer->entry;
2694         event_call->event.funcs->trace(iter, 0, event);
2695         trace_seq_putc(&iter->seq, 0);
2696         printk("%s", iter->seq.buffer);
2697
2698         raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2699 }
2700
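/*
 * Handler for the kernel.tracepoint_printk sysctl: when the value really
 * changes, flip tracepoint_printk_key so that trace_event_buffer_commit()
 * starts (or stops) mirroring events to printk via output_printk().
 */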
2701 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2702                              void *buffer, size_t *lenp,
2703                              loff_t *ppos)
2704 {
2705         int save_tracepoint_printk;
2706         int ret;
2707
2708         mutex_lock(&tracepoint_printk_mutex);
2709         save_tracepoint_printk = tracepoint_printk;
2710
2711         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2712
2713         /*
2714          * This will force exiting early, as tracepoint_printk
2715          * is always zero when tracepoint_print_iter is not allocated.
2716          */
2717         if (!tracepoint_print_iter)
2718                 tracepoint_printk = 0;
2719
2720         if (save_tracepoint_printk == tracepoint_printk)
2721                 goto out;
2722
2723         if (tracepoint_printk)
2724                 static_key_enable(&tracepoint_printk_key.key);
2725         else
2726                 static_key_disable(&tracepoint_printk_key.key);
2727
2728  out:
2729         mutex_unlock(&tracepoint_printk_mutex);
2730
2731         return ret;
2732 }
2733
2734 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2735 {
2736         enum event_trigger_type tt = ETT_NONE;
2737         struct trace_event_file *file = fbuffer->trace_file;
2738
2739         if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2740                         fbuffer->entry, &tt))
2741                 goto discard;
2742
2743         if (static_key_false(&tracepoint_printk_key.key))
2744                 output_printk(fbuffer);
2745
2746         if (static_branch_unlikely(&trace_event_exports_enabled))
2747                 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2748
2749         trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2750                         fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2751
2752 discard:
2753         if (tt)
2754                 event_triggers_post_call(file, tt);
2755
2756 }
2757 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2758
2759 /*
2760  * Skip 3:
2761  *
2762  *   trace_buffer_unlock_commit_regs()
2763  *   trace_event_buffer_commit()
2764  *   trace_event_raw_event_xxx()
2765  */
2766 # define STACK_SKIP 3
2767
2768 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2769                                      struct trace_buffer *buffer,
2770                                      struct ring_buffer_event *event,
2771                                      unsigned int trace_ctx,
2772                                      struct pt_regs *regs)
2773 {
2774         __buffer_unlock_commit(buffer, event);
2775
2776         /*
2777          * If regs is not set, then skip the necessary functions.
2778          * Note, we can still get here via blktrace, wakeup tracer
2779          * and mmiotrace, but that's ok if they lose a function or
2780          * two. They are not that meaningful.
2781          */
2782         ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2783         ftrace_trace_userstack(tr, buffer, trace_ctx);
2784 }
2785
2786 /*
2787  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2788  */
2789 void
2790 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2791                                    struct ring_buffer_event *event)
2792 {
2793         __buffer_unlock_commit(buffer, event);
2794 }
2795
2796 void
2797 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2798                parent_ip, unsigned int trace_ctx)
2799 {
2800         struct trace_event_call *call = &event_function;
2801         struct trace_buffer *buffer = tr->array_buffer.buffer;
2802         struct ring_buffer_event *event;
2803         struct ftrace_entry *entry;
2804
2805         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2806                                             trace_ctx);
2807         if (!event)
2808                 return;
2809         entry   = ring_buffer_event_data(event);
2810         entry->ip                       = ip;
2811         entry->parent_ip                = parent_ip;
2812
2813         if (!call_filter_check_discard(call, entry, buffer, event)) {
2814                 if (static_branch_unlikely(&trace_function_exports_enabled))
2815                         ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2816                 __buffer_unlock_commit(buffer, event);
2817         }
2818 }
2819
2820 #ifdef CONFIG_STACKTRACE
2821
2822 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2823 #define FTRACE_KSTACK_NESTING   4
2824
2825 #define FTRACE_KSTACK_ENTRIES   (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2826
2827 struct ftrace_stack {
2828         unsigned long           calls[FTRACE_KSTACK_ENTRIES];
2829 };
2830
2831
2832 struct ftrace_stacks {
2833         struct ftrace_stack     stacks[FTRACE_KSTACK_NESTING];
2834 };
2835
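/*
 * One set of stacks per CPU, with a slot per nesting level, so a stack
 * trace taken from an interrupt or NMI does not overwrite one still being
 * filled in the interrupted context. ftrace_stack_reserve tracks how deep
 * the current CPU is nested and therefore which slot to use.
 */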
2836 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2837 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2838
2839 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2840                                  unsigned int trace_ctx,
2841                                  int skip, struct pt_regs *regs)
2842 {
2843         struct trace_event_call *call = &event_kernel_stack;
2844         struct ring_buffer_event *event;
2845         unsigned int size, nr_entries;
2846         struct ftrace_stack *fstack;
2847         struct stack_entry *entry;
2848         int stackidx;
2849
2850         /*
2851          * Add one, for this function and the call to stack_trace_save().
2852          * If regs is set, then these functions will not be in the way.
2853          */
2854 #ifndef CONFIG_UNWINDER_ORC
2855         if (!regs)
2856                 skip++;
2857 #endif
2858
2859         preempt_disable_notrace();
2860
2861         stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2862
2863         /* This should never happen. If it does, yell once and skip */
2864         if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2865                 goto out;
2866
2867         /*
2868          * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2869          * interrupt will either see the value pre-increment or
2870          * post-increment. If the interrupt happens pre-increment it will
2871          * have restored the counter when it returns. We just need a barrier to
2872          * keep gcc from moving things around.
2873          */
2874         barrier();
2875
2876         fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2877         size = ARRAY_SIZE(fstack->calls);
2878
2879         if (regs) {
2880                 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2881                                                    size, skip);
2882         } else {
2883                 nr_entries = stack_trace_save(fstack->calls, size, skip);
2884         }
2885
2886         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2887                                     struct_size(entry, caller, nr_entries),
2888                                     trace_ctx);
2889         if (!event)
2890                 goto out;
2891         entry = ring_buffer_event_data(event);
2892
2893         entry->size = nr_entries;
2894         memcpy(&entry->caller, fstack->calls,
2895                flex_array_size(entry, caller, nr_entries));
2896
2897         if (!call_filter_check_discard(call, entry, buffer, event))
2898                 __buffer_unlock_commit(buffer, event);
2899
2900  out:
2901         /* Again, don't let gcc optimize things here */
2902         barrier();
2903         __this_cpu_dec(ftrace_stack_reserve);
2904         preempt_enable_notrace();
2905
2906 }
2907
2908 static inline void ftrace_trace_stack(struct trace_array *tr,
2909                                       struct trace_buffer *buffer,
2910                                       unsigned int trace_ctx,
2911                                       int skip, struct pt_regs *regs)
2912 {
2913         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2914                 return;
2915
2916         __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
2917 }
2918
2919 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
2920                    int skip)
2921 {
2922         struct trace_buffer *buffer = tr->array_buffer.buffer;
2923
2924         if (rcu_is_watching()) {
2925                 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
2926                 return;
2927         }
2928
2929         if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
2930                 return;
2931
2932         /*
2933          * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
2934          * but if the above rcu_is_watching() failed, then the NMI
2935          * triggered someplace critical, and ct_irq_enter() should
2936          * not be called from NMI.
2937          */
2938         if (unlikely(in_nmi()))
2939                 return;
2940
2941         ct_irq_enter_irqson();
2942         __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
2943         ct_irq_exit_irqson();
2944 }
2945
2946 /**
2947  * trace_dump_stack - record a stack back trace in the trace buffer
2948  * @skip: Number of functions to skip (helper handlers)
2949  */
2950 void trace_dump_stack(int skip)
2951 {
2952         if (tracing_disabled || tracing_selftest_running)
2953                 return;
2954
2955 #ifndef CONFIG_UNWINDER_ORC
2956         /* Skip 1 to skip this function. */
2957         skip++;
2958 #endif
2959         __ftrace_trace_stack(global_trace.array_buffer.buffer,
2960                              tracing_gen_ctx(), skip, NULL);
2961 }
2962 EXPORT_SYMBOL_GPL(trace_dump_stack);
2963
2964 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2965 static DEFINE_PER_CPU(int, user_stack_count);
2966
2967 static void
2968 ftrace_trace_userstack(struct trace_array *tr,
2969                        struct trace_buffer *buffer, unsigned int trace_ctx)
2970 {
2971         struct trace_event_call *call = &event_user_stack;
2972         struct ring_buffer_event *event;
2973         struct userstack_entry *entry;
2974
2975         if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
2976                 return;
2977
2978         /*
2979          * NMIs cannot handle page faults, even with fixups.
2980          * Saving the user stack can (and often does) fault.
2981          */
2982         if (unlikely(in_nmi()))
2983                 return;
2984
2985         /*
2986          * prevent recursion, since the user stack tracing may
2987          * trigger other kernel events.
2988          */
2989         preempt_disable();
2990         if (__this_cpu_read(user_stack_count))
2991                 goto out;
2992
2993         __this_cpu_inc(user_stack_count);
2994
2995         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2996                                             sizeof(*entry), trace_ctx);
2997         if (!event)
2998                 goto out_drop_count;
2999         entry   = ring_buffer_event_data(event);
3000
3001         entry->tgid             = current->tgid;
3002         memset(&entry->caller, 0, sizeof(entry->caller));
3003
3004         stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3005         if (!call_filter_check_discard(call, entry, buffer, event))
3006                 __buffer_unlock_commit(buffer, event);
3007
3008  out_drop_count:
3009         __this_cpu_dec(user_stack_count);
3010  out:
3011         preempt_enable();
3012 }
3013 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3014 static void ftrace_trace_userstack(struct trace_array *tr,
3015                                    struct trace_buffer *buffer,
3016                                    unsigned int trace_ctx)
3017 {
3018 }
3019 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3020
3021 #endif /* CONFIG_STACKTRACE */
3022
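/*
 * The timestamp delta of a func_repeats entry is stored as two 32-bit
 * halves: the low bits in bottom_delta_ts and the high bits in
 * top_delta_ts.
 */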
3023 static inline void
3024 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3025                           unsigned long long delta)
3026 {
3027         entry->bottom_delta_ts = delta & U32_MAX;
3028         entry->top_delta_ts = (delta >> 32);
3029 }
3030
3031 void trace_last_func_repeats(struct trace_array *tr,
3032                              struct trace_func_repeats *last_info,
3033                              unsigned int trace_ctx)
3034 {
3035         struct trace_buffer *buffer = tr->array_buffer.buffer;
3036         struct func_repeats_entry *entry;
3037         struct ring_buffer_event *event;
3038         u64 delta;
3039
3040         event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3041                                             sizeof(*entry), trace_ctx);
3042         if (!event)
3043                 return;
3044
3045         delta = ring_buffer_event_time_stamp(buffer, event) -
3046                 last_info->ts_last_call;
3047
3048         entry = ring_buffer_event_data(event);
3049         entry->ip = last_info->ip;
3050         entry->parent_ip = last_info->parent_ip;
3051         entry->count = last_info->count;
3052         func_repeats_set_delta_ts(entry, delta);
3053
3054         __buffer_unlock_commit(buffer, event);
3055 }
3056
3057 /* Per-CPU buffers for the trace_printk() family, created for use with alloc_percpu */
3058 struct trace_buffer_struct {
3059         int nesting;
3060         char buffer[4][TRACE_BUF_SIZE];
3061 };
3062
3063 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3064
3065 /*
3066  * This allows for lockless recording.  If we're nested too deeply, then
3067  * this returns NULL.
3068  */
3069 static char *get_trace_buf(void)
3070 {
3071         struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3072
3073         if (!trace_percpu_buffer || buffer->nesting >= 4)
3074                 return NULL;
3075
3076         buffer->nesting++;
3077
3078         /* Interrupts must see nesting incremented before we use the buffer */
3079         barrier();
3080         return &buffer->buffer[buffer->nesting - 1][0];
3081 }
3082
3083 static void put_trace_buf(void)
3084 {
3085         /* Don't let the decrement of nesting leak before this */
3086         barrier();
3087         this_cpu_dec(trace_percpu_buffer->nesting);
3088 }
3089
3090 static int alloc_percpu_trace_buffer(void)
3091 {
3092         struct trace_buffer_struct __percpu *buffers;
3093
3094         if (trace_percpu_buffer)
3095                 return 0;
3096
3097         buffers = alloc_percpu(struct trace_buffer_struct);
3098         if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3099                 return -ENOMEM;
3100
3101         trace_percpu_buffer = buffers;
3102         return 0;
3103 }
3104
3105 static int buffers_allocated;
3106
3107 void trace_printk_init_buffers(void)
3108 {
3109         if (buffers_allocated)
3110                 return;
3111
3112         if (alloc_percpu_trace_buffer())
3113                 return;
3114
3115         /* trace_printk() is for debug use only. Don't use it in production. */
3116
3117         pr_warn("\n");
3118         pr_warn("**********************************************************\n");
3119         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3120         pr_warn("**                                                      **\n");
3121         pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3122         pr_warn("**                                                      **\n");
3123         pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3124         pr_warn("** unsafe for production use.                           **\n");
3125         pr_warn("**                                                      **\n");
3126         pr_warn("** If you see this message and you are not debugging    **\n");
3127         pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3128         pr_warn("**                                                      **\n");
3129         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3130         pr_warn("**********************************************************\n");
3131
3132         /* Expand the buffers to set size */
3133         tracing_update_buffers(&global_trace);
3134
3135         buffers_allocated = 1;
3136
3137         /*
3138          * trace_printk_init_buffers() can be called by modules.
3139          * If that happens, then we need to start cmdline recording
3140          * directly here. If global_trace.array_buffer.buffer is already
3141          * allocated, then this was called by module code.
3142          */
3143         if (global_trace.array_buffer.buffer)
3144                 tracing_start_cmdline_record();
3145 }
3146 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3147
3148 void trace_printk_start_comm(void)
3149 {
3150         /* Start tracing comms if trace printk is set */
3151         if (!buffers_allocated)
3152                 return;
3153         tracing_start_cmdline_record();
3154 }
3155
3156 static void trace_printk_start_stop_comm(int enabled)
3157 {
3158         if (!buffers_allocated)
3159                 return;
3160
3161         if (enabled)
3162                 tracing_start_cmdline_record();
3163         else
3164                 tracing_stop_cmdline_record();
3165 }
3166
3167 /**
3168  * trace_vbprintk - write binary msg to tracing buffer
3169  * @ip:    The address of the caller
3170  * @fmt:   The string format to write to the buffer
3171  * @args:  Arguments for @fmt
3172  */
3173 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3174 {
3175         struct trace_event_call *call = &event_bprint;
3176         struct ring_buffer_event *event;
3177         struct trace_buffer *buffer;
3178         struct trace_array *tr = &global_trace;
3179         struct bprint_entry *entry;
3180         unsigned int trace_ctx;
3181         char *tbuffer;
3182         int len = 0, size;
3183
3184         if (unlikely(tracing_selftest_running || tracing_disabled))
3185                 return 0;
3186
3187         /* Don't pollute graph traces with trace_vprintk internals */
3188         pause_graph_tracing();
3189
3190         trace_ctx = tracing_gen_ctx();
3191         preempt_disable_notrace();
3192
3193         tbuffer = get_trace_buf();
3194         if (!tbuffer) {
3195                 len = 0;
3196                 goto out_nobuffer;
3197         }
3198
3199         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3200
3201         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3202                 goto out_put;
3203
3204         size = sizeof(*entry) + sizeof(u32) * len;
3205         buffer = tr->array_buffer.buffer;
3206         ring_buffer_nest_start(buffer);
3207         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3208                                             trace_ctx);
3209         if (!event)
3210                 goto out;
3211         entry = ring_buffer_event_data(event);
3212         entry->ip                       = ip;
3213         entry->fmt                      = fmt;
3214
3215         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3216         if (!call_filter_check_discard(call, entry, buffer, event)) {
3217                 __buffer_unlock_commit(buffer, event);
3218                 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3219         }
3220
3221 out:
3222         ring_buffer_nest_end(buffer);
3223 out_put:
3224         put_trace_buf();
3225
3226 out_nobuffer:
3227         preempt_enable_notrace();
3228         unpause_graph_tracing();
3229
3230         return len;
3231 }
3232 EXPORT_SYMBOL_GPL(trace_vbprintk);
3233
3234 __printf(3, 0)
3235 static int
3236 __trace_array_vprintk(struct trace_buffer *buffer,
3237                       unsigned long ip, const char *fmt, va_list args)
3238 {
3239         struct trace_event_call *call = &event_print;
3240         struct ring_buffer_event *event;
3241         int len = 0, size;
3242         struct print_entry *entry;
3243         unsigned int trace_ctx;
3244         char *tbuffer;
3245
3246         if (tracing_disabled)
3247                 return 0;
3248
3249         /* Don't pollute graph traces with trace_vprintk internals */
3250         pause_graph_tracing();
3251
3252         trace_ctx = tracing_gen_ctx();
3253         preempt_disable_notrace();
3254
3255
3256         tbuffer = get_trace_buf();
3257         if (!tbuffer) {
3258                 len = 0;
3259                 goto out_nobuffer;
3260         }
3261
3262         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3263
3264         size = sizeof(*entry) + len + 1;
3265         ring_buffer_nest_start(buffer);
3266         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3267                                             trace_ctx);
3268         if (!event)
3269                 goto out;
3270         entry = ring_buffer_event_data(event);
3271         entry->ip = ip;
3272
3273         memcpy(&entry->buf, tbuffer, len + 1);
3274         if (!call_filter_check_discard(call, entry, buffer, event)) {
3275                 __buffer_unlock_commit(buffer, event);
3276                 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3277         }
3278
3279 out:
3280         ring_buffer_nest_end(buffer);
3281         put_trace_buf();
3282
3283 out_nobuffer:
3284         preempt_enable_notrace();
3285         unpause_graph_tracing();
3286
3287         return len;
3288 }
3289
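     /**
      * trace_array_vprintk - write a formatted message to a specific instance
      * @tr: The instance trace_array descriptor
      * @ip: The instruction pointer that this is called from
      * @fmt: The format to print (printf format)
      * @args: Arguments for @fmt
      *
      * va_list counterpart of trace_array_printk(). Note that, unlike
      * trace_array_printk(), it does not reject the top level buffer or
      * check the TRACE_ITER_PRINTK flag.
      */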
3290 __printf(3, 0)
3291 int trace_array_vprintk(struct trace_array *tr,
3292                         unsigned long ip, const char *fmt, va_list args)
3293 {
3294         if (tracing_selftest_running && tr == &global_trace)
3295                 return 0;
3296
3297         return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3298 }
3299
3300 /**
3301  * trace_array_printk - Print a message to a specific instance
3302  * @tr: The instance trace_array descriptor
3303  * @ip: The instruction pointer that this is called from.
3304  * @fmt: The format to print (printf format)
3305  *
3306  * If a subsystem sets up its own instance, it may printk strings
3307  * into that instance's buffer using this function. Note, this
3308  * function will not write into the top level buffer (use
3309  * trace_printk() for that), as the top level buffer should only
3310  * contain events that can be individually disabled.
3311  * trace_printk() is only for debugging a kernel, and should never
3312  * be incorporated into normal use.
3313  *
3314  * trace_array_printk() can be used, as it will not add noise to the
3315  * top level tracing buffer.
3316  *
3317  * Note, trace_array_init_printk() must be called on @tr before this
3318  * can be used.
3319  */
3320 __printf(3, 0)
3321 int trace_array_printk(struct trace_array *tr,
3322                        unsigned long ip, const char *fmt, ...)
3323 {
3324         int ret;
3325         va_list ap;
3326
3327         if (!tr)
3328                 return -ENOENT;
3329
3330         /* This is only allowed for created instances */
3331         if (tr == &global_trace)
3332                 return 0;
3333
3334         if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3335                 return 0;
3336
3337         va_start(ap, fmt);
3338         ret = trace_array_vprintk(tr, ip, fmt, ap);
3339         va_end(ap);
3340         return ret;
3341 }
3342 EXPORT_SYMBOL_GPL(trace_array_printk);
3343
3344 /**
3345  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3346  * @tr: The trace array to initialize the buffers for
3347  *
3348  * As trace_array_printk() only writes into instances, they are OK to
3349  * have in the kernel (unlike trace_printk()). This needs to be called
3350  * before trace_array_printk() can be used on a trace_array.
3351  */
3352 int trace_array_init_printk(struct trace_array *tr)
3353 {
3354         if (!tr)
3355                 return -ENOENT;
3356
3357         /* This is only allowed for created instances */
3358         if (tr == &global_trace)
3359                 return -EINVAL;
3360
3361         return alloc_percpu_trace_buffer();
3362 }
3363 EXPORT_SYMBOL_GPL(trace_array_init_printk);
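     /*
      * Illustrative sketch (not part of this file) of pairing
      * trace_array_init_printk() with trace_array_printk(): a subsystem
      * that already owns an instance @tr (e.g. obtained via
      * trace_array_get_by_name()) could do:
      *
      *     if (trace_array_init_printk(tr))
      *             return;
      *     trace_array_printk(tr, _THIS_IP_, "hello %d\n", value);
      *
      * where _THIS_IP_ supplies the caller's instruction pointer.
      */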
3364
3365 __printf(3, 4)
3366 int trace_array_printk_buf(struct trace_buffer *buffer,
3367                            unsigned long ip, const char *fmt, ...)
3368 {
3369         int ret;
3370         va_list ap;
3371
3372         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3373                 return 0;
3374
3375         va_start(ap, fmt);
3376         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3377         va_end(ap);
3378         return ret;
3379 }
3380
3381 __printf(2, 0)
3382 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3383 {
3384         return trace_array_vprintk(&global_trace, ip, fmt, args);
3385 }
3386 EXPORT_SYMBOL_GPL(trace_vprintk);
3387
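     /*
      * Advance the iterator: bump the entry index and, if a per-CPU ring
      * buffer iterator exists for the current CPU, step it forward too.
      */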
3388 static void trace_iterator_increment(struct trace_iterator *iter)
3389 {
3390         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3391
3392         iter->idx++;
3393         if (buf_iter)
3394                 ring_buffer_iter_advance(buf_iter);
3395 }
3396
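     /*
      * Peek at the next entry for @cpu without consuming it, using the
      * per-CPU ring buffer iterator when one exists and the live buffer
      * otherwise. Updates iter->ent_size as a side effect.
      */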
3397 static struct trace_entry *
3398 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3399                 unsigned long *lost_events)
3400 {
3401         struct ring_buffer_event *event;
3402         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3403
3404         if (buf_iter) {
3405                 event = ring_buffer_iter_peek(buf_iter, ts);
3406                 if (lost_events)
3407                         *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3408                                 (unsigned long)-1 : 0;
3409         } else {
3410                 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3411                                          lost_events);
3412         }
3413
3414         if (event) {
3415                 iter->ent_size = ring_buffer_event_length(event);
3416                 return ring_buffer_event_data(event);
3417         }
3418         iter->ent_size = 0;
3419         return NULL;
3420 }
3421
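     /*
      * Find the oldest pending entry, either on the single CPU selected
      * by iter->cpu_file or across all tracing CPUs by comparing
      * timestamps. Also reports the CPU it came from, its timestamp and
      * how many events were lost before it.
      */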
3422 static struct trace_entry *
3423 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3424                   unsigned long *missing_events, u64 *ent_ts)
3425 {
3426         struct trace_buffer *buffer = iter->array_buffer->buffer;
3427         struct trace_entry *ent, *next = NULL;
3428         unsigned long lost_events = 0, next_lost = 0;
3429         int cpu_file = iter->cpu_file;
3430         u64 next_ts = 0, ts;
3431         int next_cpu = -1;
3432         int next_size = 0;
3433         int cpu;
3434
3435         /*
3436          * If we are in a per_cpu trace file, don't bother iterating over
3437          * all CPUs; peek directly at that CPU.
3438          */
3439         if (cpu_file > RING_BUFFER_ALL_CPUS) {
3440                 if (ring_buffer_empty_cpu(buffer, cpu_file))
3441                         return NULL;
3442                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3443                 if (ent_cpu)
3444                         *ent_cpu = cpu_file;
3445
3446                 return ent;
3447         }
3448
3449         for_each_tracing_cpu(cpu) {
3450
3451                 if (ring_buffer_empty_cpu(buffer, cpu))
3452                         continue;
3453
3454                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3455
3456                 /*
3457                  * Pick the entry with the smallest timestamp:
3458                  */
3459                 if (ent && (!next || ts < next_ts)) {
3460                         next = ent;
3461                         next_cpu = cpu;
3462                         next_ts = ts;
3463                         next_lost = lost_events;
3464                         next_size = iter->ent_size;
3465                 }
3466         }
3467
3468         iter->ent_size = next_size;
3469
3470         if (ent_cpu)
3471                 *ent_cpu = next_cpu;
3472
3473         if (ent_ts)
3474                 *ent_ts = next_ts;
3475
3476         if (missing_events)
3477                 *missing_events = next_lost;
3478
3479         return next;
3480 }
3481
3482 #define STATIC_FMT_BUF_SIZE     128
3483 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3484
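     /*
      * Grow iter->fmt by STATIC_FMT_BUF_SIZE bytes. Returns NULL for the
      * tp_printk and ftrace_dump() cases, where krealloc() must not be
      * used, or if the reallocation fails.
      */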
3485 char *trace_iter_expand_format(struct trace_iterator *iter)
3486 {
3487         char *tmp;
3488
3489         /*
3490          * iter->tr is NULL when used with tp_printk, which makes
3491          * this get called where it is not safe to call krealloc().
3492          */
3493         if (!iter->tr || iter->fmt == static_fmt_buf)
3494                 return NULL;
3495
3496         tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3497                        GFP_KERNEL);
3498         if (tmp) {
3499                 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3500                 iter->fmt = tmp;
3501         }
3502
3503         return tmp;
3504 }
3505
3506 /* Returns true if the string is safe to dereference from an event */
3507 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3508                            bool star, int len)
3509 {
3510         unsigned long addr = (unsigned long)str;
3511         struct trace_event *trace_event;
3512         struct trace_event_call *event;
3513
3514         /* Ignore strings with no length */
3515         if (star && !len)
3516                 return true;
3517
3518         /* OK if part of the event data */
3519         if ((addr >= (unsigned long)iter->ent) &&
3520             (addr < (unsigned long)iter->ent + iter->ent_size))
3521                 return true;
3522
3523         /* OK if part of the temp seq buffer */
3524         if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3525             (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3526                 return true;
3527
3528         /* Core rodata can not be freed */
3529         if (is_kernel_rodata(addr))
3530                 return true;
3531
3532         if (trace_is_tracepoint_string(str))
3533                 return true;
3534
3535         /*
3536          * Now this could be a module event, referencing core module
3537          * data, which is OK.
3538          */
3539         if (!iter->ent)
3540                 return false;
3541
3542         trace_event = ftrace_find_event(iter->ent->type);
3543         if (!trace_event)
3544                 return false;
3545
3546         event = container_of(trace_event, struct trace_event_call, event);
3547         if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3548                 return false;
3549
3550         /* Would rather have rodata, but this will suffice */
3551         if (within_module_core(addr, event->module))
3552                 return true;
3553
3554         return false;
3555 }
3556
3557 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3558
3559 static int test_can_verify_check(const char *fmt, ...)
3560 {
3561         char buf[16];
3562         va_list ap;
3563         int ret;
3564
3565         /*
3566          * The verifier depends on vsnprintf() modifying the va_list passed
3567          * to it, i.e. on the va_list being passed by reference. Some
3568          * architectures (like x86_32) pass it by value, which means that
3569          * vsnprintf() does not modify the caller's va_list, and the verifier
3570          * would then need to be able to understand all the values that
3571          * vsnprintf() can consume. If it is passed by value, the verifier
3572          * is disabled.
3573          */
3574         va_start(ap, fmt);
3575         vsnprintf(buf, 16, "%d", ap);
3576         ret = va_arg(ap, int);
3577         va_end(ap);
3578
3579         return ret;
3580 }
3581
3582 static void test_can_verify(void)
3583 {
3584         if (!test_can_verify_check("%d %d", 0, 1)) {
3585                 pr_info("trace event string verifier disabled\n");
3586                 static_branch_inc(&trace_no_verify);
3587         }
3588 }
3589
3590 /**
3591  * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3592  * @iter: The iterator that holds the seq buffer and the event being printed
3593  * @fmt: The format used to print the event
3594  * @ap: The va_list holding the data to print from @fmt.
3595  *
3596  * This writes the data into the @iter->seq buffer using the data from
3597  * @fmt and @ap. If the format has a %s, then the source of the string
3598  * is examined to make sure it is safe to print, otherwise it will
3599  * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3600  * pointer.
3601  */
3602 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3603                          va_list ap)
3604 {
3605         const char *p = fmt;
3606         const char *str;
3607         int i, j;
3608
3609         if (WARN_ON_ONCE(!fmt))
3610                 return;
3611
3612         if (static_branch_unlikely(&trace_no_verify))
3613                 goto print;
3614
3615         /* Don't bother checking when doing a ftrace_dump() */
3616         if (iter->fmt == static_fmt_buf)
3617                 goto print;
3618
3619         while (*p) {
3620                 bool star = false;
3621                 int len = 0;
3622
3623                 j = 0;
3624
3625                 /* We only care about %s and variants */
3626                 for (i = 0; p[i]; i++) {
3627                         if (i + 1 >= iter->fmt_size) {
3628                                 /*
3629                                  * If we can't expand the copy buffer,
3630                                  * just print it.
3631                                  */
3632                                 if (!trace_iter_expand_format(iter))
3633                                         goto print;
3634                         }
3635
3636                         if (p[i] == '\\' && p[i+1]) {
3637                                 i++;
3638                                 continue;
3639                         }
3640                         if (p[i] == '%') {
3641                                 /* Need to test cases like %08.*s */
3642                                 for (j = 1; p[i+j]; j++) {
3643                                         if (isdigit(p[i+j]) ||
3644                                             p[i+j] == '.')
3645                                                 continue;
3646                                         if (p[i+j] == '*') {
3647                                                 star = true;
3648                                                 continue;
3649                                         }
3650                                         break;
3651                                 }
3652                                 if (p[i+j] == 's')
3653                                         break;
3654                                 star = false;
3655                         }
3656                         j = 0;
3657                 }
3658                 /* If no %s found then just print normally */
3659                 if (!p[i])
3660                         break;
3661
3662                 /* Copy up to the %s, and print that */
3663                 strncpy(iter->fmt, p, i);
3664                 iter->fmt[i] = '\0';
3665                 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3666
3667                 /*
3668                  * If iter->seq is full, the above call no longer guarantees
3669                  * that ap is in sync with fmt processing, and further calls
3670                  * to va_arg() can return wrong positional arguments.
3671                  *
3672                  * Ensure that ap is no longer used in this case.
3673                  */
3674                 if (iter->seq.full) {
3675                         p = "";
3676                         break;
3677                 }
3678
3679                 if (star)
3680                         len = va_arg(ap, int);
3681
3682                 /* The ap now points to the string data of the %s */
3683                 str = va_arg(ap, const char *);
3684
3685                 /*
3686                  * If you hit this warning, it is likely that the
3687                  * trace event in question used %s on a string that
3688                  * was saved at the time of the event, but may not be
3689                  * around when the trace is read. Use __string(),
3690                  * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3691                  * instead. See samples/trace_events/trace-events-sample.h
3692                  * for reference.
3693                  */
3694                 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3695                               "fmt: '%s' current_buffer: '%s'",
3696                               fmt, seq_buf_str(&iter->seq.seq))) {
3697                         int ret;
3698
3699                         /* Try to safely read the string */
3700                         if (star) {
3701                                 if (len + 1 > iter->fmt_size)
3702                                         len = iter->fmt_size - 1;
3703                                 if (len < 0)
3704                                         len = 0;
3705                                 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3706                                 iter->fmt[len] = 0;
3707                                 star = false;
3708                         } else {
3709                                 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3710                                                                   iter->fmt_size);
3711                         }
3712                         if (ret < 0)
3713                                 trace_seq_printf(&iter->seq, "(0x%px)", str);
3714                         else
3715                                 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3716                                                  str, iter->fmt);
3717                         str = "[UNSAFE-MEMORY]";
3718                         strcpy(iter->fmt, "%s");
3719                 } else {
3720                         strncpy(iter->fmt, p + i, j + 1);
3721                         iter->fmt[j+1] = '\0';
3722                 }
3723                 if (star)
3724                         trace_seq_printf(&iter->seq, iter->fmt, len, str);
3725                 else
3726                         trace_seq_printf(&iter->seq, iter->fmt, str);
3727
3728                 p += i + j + 1;
3729         }
3730  print:
3731         if (*p)
3732                 trace_seq_vprintf(&iter->seq, p, ap);
3733 }
3734
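     /*
      * Return the format string to use for an event. If the instance has
      * pointer hashing disabled (TRACE_ITER_HASH_PTR cleared), copy @fmt
      * into iter->fmt with every bare %p rewritten to %px so that real
      * addresses are printed; otherwise return @fmt unchanged.
      */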
3735 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3736 {
3737         const char *p, *new_fmt;
3738         char *q;
3739
3740         if (WARN_ON_ONCE(!fmt))
3741                 return fmt;
3742
3743         if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3744                 return fmt;
3745
3746         p = fmt;
3747         new_fmt = q = iter->fmt;
3748         while (*p) {
3749                 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3750                         if (!trace_iter_expand_format(iter))
3751                                 return fmt;
3752
3753                         q += iter->fmt - new_fmt;
3754                         new_fmt = iter->fmt;
3755                 }
3756
3757                 *q++ = *p++;
3758
3759                 /* Replace %p with %px */
3760                 if (p[-1] == '%') {
3761                         if (p[0] == '%') {
3762                                 *q++ = *p++;
3763                         } else if (p[0] == 'p' && !isalnum(p[1])) {
3764                                 *q++ = *p++;
3765                                 *q++ = 'x';
3766                         }
3767                 }
3768         }
3769         *q = '\0';
3770
3771         return new_fmt;
3772 }
3773
3774 #define STATIC_TEMP_BUF_SIZE    128
3775 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3776
3777 /* Find the next real entry, without updating the iterator itself */
3778 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3779                                           int *ent_cpu, u64 *ent_ts)
3780 {
3781         /* __find_next_entry will reset ent_size */
3782         int ent_size = iter->ent_size;
3783         struct trace_entry *entry;
3784
3785         /*
3786          * If called from ftrace_dump(), then the iter->temp buffer
3787          * will be the static_temp_buf and not created from kmalloc.
3788          * If the entry size is greater than the buffer, we cannot
3789          * save it. Just return NULL in that case. This is only
3790          * used to add markers when two consecutive events' time
3791          * stamps have a large delta. See trace_print_lat_context().
3792          */
3793         if (iter->temp == static_temp_buf &&
3794             STATIC_TEMP_BUF_SIZE < ent_size)
3795                 return NULL;
3796
3797         /*
3798          * __find_next_entry() may call peek_next_entry(), which may call
3799          * ring_buffer_peek(), and that can leave the contents of iter->ent
3800          * undefined. Copy iter->ent now.
3801          */
3802         if (iter->ent && iter->ent != iter->temp) {
3803                 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3804                     !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3805                         void *temp;
3806                         temp = kmalloc(iter->ent_size, GFP_KERNEL);
3807                         if (!temp)
3808                                 return NULL;
3809                         kfree(iter->temp);
3810                         iter->temp = temp;
3811                         iter->temp_size = iter->ent_size;
3812                 }
3813                 memcpy(iter->temp, iter->ent, iter->ent_size);
3814                 iter->ent = iter->temp;
3815         }
3816         entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3817         /* Put back the original ent_size */
3818         iter->ent_size = ent_size;
3819
3820         return entry;
3821 }
3822
3823 /* Find the next real entry, and increment the iterator to the next entry */
3824 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3825 {
3826         iter->ent = __find_next_entry(iter, &iter->cpu,
3827                                       &iter->lost_events, &iter->ts);
3828
3829         if (iter->ent)
3830                 trace_iterator_increment(iter);
3831
3832         return iter->ent ? iter : NULL;
3833 }
3834
3835 static void trace_consume(struct trace_iterator *iter)
3836 {
3837         ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3838                             &iter->lost_events);
3839 }
3840
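     /* seq_file ->next(): advance the iterator to the entry at position *pos. */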
3841 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3842 {
3843         struct trace_iterator *iter = m->private;
3844         int i = (int)*pos;
3845         void *ent;
3846
3847         WARN_ON_ONCE(iter->leftover);
3848
3849         (*pos)++;
3850
3851         /* can't go backwards */
3852         if (iter->idx > i)
3853                 return NULL;
3854
3855         if (iter->idx < 0)
3856                 ent = trace_find_next_entry_inc(iter);
3857         else
3858                 ent = iter;
3859
3860         while (ent && iter->idx < i)
3861                 ent = trace_find_next_entry_inc(iter);
3862
3863         iter->pos = *pos;
3864
3865         return ent;
3866 }
3867
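     /*
      * Reset the per-CPU ring buffer iterator and skip any entries whose
      * timestamps predate the buffer's time_start, recording how many
      * were skipped for this CPU.
      */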
3868 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3869 {
3870         struct ring_buffer_iter *buf_iter;
3871         unsigned long entries = 0;
3872         u64 ts;
3873
3874         per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3875
3876         buf_iter = trace_buffer_iter(iter, cpu);
3877         if (!buf_iter)
3878                 return;
3879
3880         ring_buffer_iter_reset(buf_iter);
3881
3882         /*
3883          * With the max latency tracers, a reset may never have taken
3884          * place on a CPU. This is evident when the timestamp is before
3885          * the start of the buffer.
3886          */
3887         while (ring_buffer_iter_peek(buf_iter, &ts)) {
3888                 if (ts >= iter->array_buffer->time_start)
3889                         break;
3890                 entries++;
3891                 ring_buffer_iter_advance(buf_iter);
3892         }
3893
3894         per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3895 }
3896
3897 /*
3898  * The current tracer is copied to avoid taking a global lock
3899  * all around.
3900  */
3901 static void *s_start(struct seq_file *m, loff_t *pos)
3902 {
3903         struct trace_iterator *iter = m->private;
3904         struct trace_array *tr = iter->tr;
3905         int cpu_file = iter->cpu_file;
3906         void *p = NULL;
3907         loff_t l = 0;
3908         int cpu;
3909
3910         mutex_lock(&trace_types_lock);
3911         if (unlikely(tr->current_trace != iter->trace)) {
3912                 /* Close iter->trace before switching to the new current tracer */
3913                 if (iter->trace->close)
3914                         iter->trace->close(iter);
3915                 iter->trace = tr->current_trace;
3916                 /* Reopen the new current tracer */
3917                 if (iter->trace->open)
3918                         iter->trace->open(iter);
3919         }
3920         mutex_unlock(&trace_types_lock);
3921
3922 #ifdef CONFIG_TRACER_MAX_TRACE
3923         if (iter->snapshot && iter->trace->use_max_tr)
3924                 return ERR_PTR(-EBUSY);
3925 #endif
3926
3927         if (*pos != iter->pos) {
3928                 iter->ent = NULL;
3929                 iter->cpu = 0;
3930                 iter->idx = -1;
3931
3932                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3933                         for_each_tracing_cpu(cpu)
3934                                 tracing_iter_reset(iter, cpu);
3935                 } else
3936                         tracing_iter_reset(iter, cpu_file);
3937
3938                 iter->leftover = 0;
3939                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3940                         ;
3941
3942         } else {
3943                 /*
3944                  * If we overflowed the seq_file before, then we want
3945                  * to just reuse the trace_seq buffer again.
3946                  */
3947                 if (iter->leftover)
3948                         p = iter;
3949                 else {
3950                         l = *pos - 1;
3951                         p = s_next(m, p, &l);
3952                 }
3953         }
3954
3955         trace_event_read_lock();
3956         trace_access_lock(cpu_file);
3957         return p;
3958 }
3959
3960 static void s_stop(struct seq_file *m, void *p)
3961 {
3962         struct trace_iterator *iter = m->private;
3963
3964 #ifdef CONFIG_TRACER_MAX_TRACE
3965         if (iter->snapshot && iter->trace->use_max_tr)
3966                 return;
3967 #endif
3968
3969         trace_access_unlock(iter->cpu_file);
3970         trace_event_read_unlock();
3971 }
3972
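     /*
      * For a single CPU, report the entries currently held in the buffer
      * (minus any skipped at reset time) and the total, which also
      * counts ring buffer overruns when no entries were skipped.
      */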
3973 static void
3974 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3975                       unsigned long *entries, int cpu)
3976 {
3977         unsigned long count;
3978
3979         count = ring_buffer_entries_cpu(buf->buffer, cpu);
3980         /*
3981          * If this buffer has skipped entries, then we hold all
3982          * entries for the trace and we need to ignore the
3983          * ones before the time stamp.
3984          */
3985         if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3986                 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3987                 /* total is the same as the entries */
3988                 *total = count;
3989         } else
3990                 *total = count +
3991                         ring_buffer_overrun_cpu(buf->buffer, cpu);
3992         *entries = count;
3993 }
3994
3995 static void
3996 get_total_entries(struct array_buffer *buf,
3997                   unsigned long *total, unsigned long *entries)
3998 {
3999         unsigned long t, e;
4000         int cpu;
4001
4002         *total = 0;
4003         *entries = 0;
4004
4005         for_each_tracing_cpu(cpu) {
4006                 get_total_entries_cpu(buf, &t, &e, cpu);
4007                 *total += t;
4008                 *entries += e;
4009         }
4010 }
4011
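     /*
      * Return the number of entries on @cpu in @tr's buffer, defaulting
      * to the top level buffer when @tr is NULL.
      */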
4012 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4013 {
4014         unsigned long total, entries;
4015
4016         if (!tr)
4017                 tr = &global_trace;
4018
4019         get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4020
4021         return entries;
4022 }
4023
4024 unsigned long trace_total_entries(struct trace_array *tr)
4025 {
4026         unsigned long total, entries;
4027
4028         if (!tr)
4029                 tr = &global_trace;
4030
4031         get_total_entries(&tr->array_buffer, &total, &entries);
4032
4033         return entries;
4034 }
4035
4036 static void print_lat_help_header(struct seq_file *m)
4037 {
4038         seq_puts(m, "#                    _------=> CPU#            \n"
4039                     "#                   / _-----=> irqs-off/BH-disabled\n"
4040                     "#                  | / _----=> need-resched    \n"
4041                     "#                  || / _---=> hardirq/softirq \n"
4042                     "#                  ||| / _--=> preempt-depth   \n"
4043                     "#                  |||| / _-=> migrate-disable \n"
4044                     "#                  ||||| /     delay           \n"
4045                     "#  cmd     pid     |||||| time  |   caller     \n"
4046                     "#     \\   /        ||||||  \\    |    /       \n");
4047 }
4048
4049 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4050 {
4051         unsigned long total;
4052         unsigned long entries;
4053
4054         get_total_entries(buf, &total, &entries);
4055         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4056                    entries, total, num_online_cpus());
4057         seq_puts(m, "#\n");
4058 }
4059
4060 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4061                                    unsigned int flags)
4062 {
4063         bool tgid = flags & TRACE_ITER_RECORD_TGID;
4064
4065         print_event_info(buf, m);
4066
4067         seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4068         seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4069 }
4070
4071 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4072                                        unsigned int flags)
4073 {
4074         bool tgid = flags & TRACE_ITER_RECORD_TGID;
4075         static const char space[] = "            ";
4076         int prec = tgid ? 12 : 2;
4077
4078         print_event_info(buf, m);
4079
4080         seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4081         seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4082         seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4083         seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4084         seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4085         seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4086         seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4087         seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4088 }
4089
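     /*
      * Print the latency trace banner: tracer name and kernel release,
      * entry counts, the preemption model, the task that recorded the
      * max latency, and (when recorded) where its critical section
      * started and ended.
      */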
4090 void
4091 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4092 {
4093         unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4094         struct array_buffer *buf = iter->array_buffer;
4095         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4096         struct tracer *type = iter->trace;
4097         unsigned long entries;
4098         unsigned long total;
4099         const char *name = type->name;
4100
4101         get_total_entries(buf, &total, &entries);
4102
4103         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4104                    name, UTS_RELEASE);
4105         seq_puts(m, "# -----------------------------------"
4106                  "---------------------------------\n");
4107         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4108                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4109                    nsecs_to_usecs(data->saved_latency),
4110                    entries,
4111                    total,
4112                    buf->cpu,
4113                    preempt_model_none()      ? "server" :
4114                    preempt_model_voluntary() ? "desktop" :
4115                    preempt_model_full()      ? "preempt" :
4116                    preempt_model_rt()        ? "preempt_rt" :
4117                    "unknown",
4118                    /* These are reserved for later use */
4119                    0, 0, 0, 0);
4120 #ifdef CONFIG_SMP
4121         seq_printf(m, " #P:%d)\n", num_online_cpus());
4122 #else
4123         seq_puts(m, ")\n");
4124 #endif
4125         seq_puts(m, "#    -----------------\n");
4126         seq_printf(m, "#    | task: %.16s-%d "
4127                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4128                    data->comm, data->pid,
4129                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4130                    data->policy, data->rt_priority);
4131         seq_puts(m, "#    -----------------\n");
4132
4133         if (data->critical_start) {
4134                 seq_puts(m, "#  => started at: ");
4135                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4136                 trace_print_seq(m, &iter->seq);
4137                 seq_puts(m, "\n#  => ended at:   ");
4138                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4139                 trace_print_seq(m, &iter->seq);
4140                 seq_puts(m, "\n#\n");
4141         }
4142
4143         seq_puts(m, "#\n");
4144 }
4145
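     /*
      * With the annotate option set, emit a one-time
      * "##### CPU N buffer started ####" marker the first time output
      * switches to a CPU, except for the first entry of the trace and
      * for CPUs whose early entries were skipped.
      */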
4146 static void test_cpu_buff_start(struct trace_iterator *iter)
4147 {
4148         struct trace_seq *s = &iter->seq;
4149         struct trace_array *tr = iter->tr;
4150
4151         if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4152                 return;
4153
4154         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4155                 return;
4156
4157         if (cpumask_available(iter->started) &&
4158             cpumask_test_cpu(iter->cpu, iter->started))
4159                 return;
4160
4161         if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4162                 return;
4163
4164         if (cpumask_available(iter->started))
4165                 cpumask_set_cpu(iter->cpu, iter->started);
4166
4167         /* Don't print started cpu buffer for the first entry of the trace */
4168         if (iter->idx > 1)
4169                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4170                                 iter->cpu);
4171 }
4172
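     /*
      * Default text output: print the context information (when enabled)
      * and then let the entry's registered trace_event format it,
      * falling back to "Unknown type" when no event is registered.
      */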
4173 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4174 {
4175         struct trace_array *tr = iter->tr;
4176         struct trace_seq *s = &iter->seq;
4177         unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4178         struct trace_entry *entry;
4179         struct trace_event *event;
4180
4181         entry = iter->ent;
4182
4183         test_cpu_buff_start(iter);
4184
4185         event = ftrace_find_event(entry->type);
4186
4187         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4188                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4189                         trace_print_lat_context(iter);
4190                 else
4191                         trace_print_context(iter);
4192         }
4193
4194         if (trace_seq_has_overflowed(s))
4195                 return TRACE_TYPE_PARTIAL_LINE;
4196
4197         if (event) {
4198                 if (tr->trace_flags & TRACE_ITER_FIELDS)
4199                         return print_event_fields(iter, event);
4200                 return event->funcs->trace(iter, sym_flags, event);
4201         }
4202
4203         trace_seq_printf(s, "Unknown type %d\n", entry->type);
4204
4205         return trace_handle_return(s);
4206 }
4207
4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4209 {
4210         struct trace_array *tr = iter->tr;
4211         struct trace_seq *s = &iter->seq;
4212         struct trace_entry *entry;
4213         struct trace_event *event;
4214
4215         entry = iter->ent;
4216
4217         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4218                 trace_seq_printf(s, "%d %d %llu ",
4219                                  entry->pid, iter->cpu, iter->ts);
4220
4221         if (trace_seq_has_overflowed(s))
4222                 return TRACE_TYPE_PARTIAL_LINE;
4223
4224         event = ftrace_find_event(entry->type);
4225         if (event)
4226                 return event->funcs->raw(iter, 0, event);
4227
4228         trace_seq_printf(s, "%d ?\n", entry->type);
4229
4230         return trace_handle_return(s);
4231 }
4232
4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4234 {
4235         struct trace_array *tr = iter->tr;
4236         struct trace_seq *s = &iter->seq;
4237         unsigned char newline = '\n';
4238         struct trace_entry *entry;
4239         struct trace_event *event;
4240
4241         entry = iter->ent;
4242
4243         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4244                 SEQ_PUT_HEX_FIELD(s, entry->pid);
4245                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4246                 SEQ_PUT_HEX_FIELD(s, iter->ts);
4247                 if (trace_seq_has_overflowed(s))
4248                         return TRACE_TYPE_PARTIAL_LINE;
4249         }
4250
4251         event = ftrace_find_event(entry->type);
4252         if (event) {
4253                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4254                 if (ret != TRACE_TYPE_HANDLED)
4255                         return ret;
4256         }
4257
4258         SEQ_PUT_FIELD(s, newline);
4259
4260         return trace_handle_return(s);
4261 }
4262
4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4264 {
4265         struct trace_array *tr = iter->tr;
4266         struct trace_seq *s = &iter->seq;
4267         struct trace_entry *entry;
4268         struct trace_event *event;
4269
4270         entry = iter->ent;
4271
4272         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4273                 SEQ_PUT_FIELD(s, entry->pid);
4274                 SEQ_PUT_FIELD(s, iter->cpu);
4275                 SEQ_PUT_FIELD(s, iter->ts);
4276                 if (trace_seq_has_overflowed(s))
4277                         return TRACE_TYPE_PARTIAL_LINE;
4278         }
4279
4280         event = ftrace_find_event(entry->type);
4281         return event ? event->funcs->binary(iter, 0, event) :
4282                 TRACE_TYPE_HANDLED;
4283 }
4284
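     /*
      * Return 1 if there is nothing left to read, checking either the
      * single CPU selected by a per_cpu file or every tracing CPU.
      */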
4285 int trace_empty(struct trace_iterator *iter)
4286 {
4287         struct ring_buffer_iter *buf_iter;
4288         int cpu;
4289
4290         /* If we are looking at one CPU buffer, only check that one */
4291         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4292                 cpu = iter->cpu_file;
4293                 buf_iter = trace_buffer_iter(iter, cpu);
4294                 if (buf_iter) {
4295                         if (!ring_buffer_iter_empty(buf_iter))
4296                                 return 0;
4297                 } else {
4298                         if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4299                                 return 0;
4300                 }
4301                 return 1;
4302         }
4303
4304         for_each_tracing_cpu(cpu) {
4305                 buf_iter = trace_buffer_iter(iter, cpu);
4306                 if (buf_iter) {
4307                         if (!ring_buffer_iter_empty(buf_iter))
4308                                 return 0;
4309                 } else {
4310                         if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4311                                 return 0;
4312                 }
4313         }
4314
4315         return 1;
4316 }
4317
4318 /*  Called with trace_event_read_lock() held. */
4319 enum print_line_t print_trace_line(struct trace_iterator *iter)
4320 {
4321         struct trace_array *tr = iter->tr;
4322         unsigned long trace_flags = tr->trace_flags;
4323         enum print_line_t ret;
4324
4325         if (iter->lost_events) {
4326                 if (iter->lost_events == (unsigned long)-1)
4327                         trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4328                                          iter->cpu);
4329                 else
4330                         trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4331                                          iter->cpu, iter->lost_events);
4332                 if (trace_seq_has_overflowed(&iter->seq))
4333                         return TRACE_TYPE_PARTIAL_LINE;
4334         }
4335
4336         if (iter->trace && iter->trace->print_line) {
4337                 ret = iter->trace->print_line(iter);
4338                 if (ret != TRACE_TYPE_UNHANDLED)
4339                         return ret;
4340         }
4341
4342         if (iter->ent->type == TRACE_BPUTS &&
4343                         trace_flags & TRACE_ITER_PRINTK &&
4344                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4345                 return trace_print_bputs_msg_only(iter);
4346
4347         if (iter->ent->type == TRACE_BPRINT &&
4348                         trace_flags & TRACE_ITER_PRINTK &&
4349                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4350                 return trace_print_bprintk_msg_only(iter);
4351
4352         if (iter->ent->type == TRACE_PRINT &&
4353                         trace_flags & TRACE_ITER_PRINTK &&
4354                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4355                 return trace_print_printk_msg_only(iter);
4356
4357         if (trace_flags & TRACE_ITER_BIN)
4358                 return print_bin_fmt(iter);
4359
4360         if (trace_flags & TRACE_ITER_HEX)
4361                 return print_hex_fmt(iter);
4362
4363         if (trace_flags & TRACE_ITER_RAW)
4364                 return print_raw_fmt(iter);
4365
4366         return print_trace_fmt(iter);
4367 }
4368
4369 void trace_latency_header(struct seq_file *m)
4370 {
4371         struct trace_iterator *iter = m->private;
4372         struct trace_array *tr = iter->tr;
4373
4374         /* print nothing if the buffers are empty */
4375         if (trace_empty(iter))
4376                 return;
4377
4378         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4379                 print_trace_header(m, iter);
4380
4381         if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4382                 print_lat_help_header(m);
4383 }
4384
4385 void trace_default_header(struct seq_file *m)
4386 {
4387         struct trace_iterator *iter = m->private;
4388         struct trace_array *tr = iter->tr;
4389         unsigned long trace_flags = tr->trace_flags;
4390
4391         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4392                 return;
4393
4394         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4395                 /* print nothing if the buffers are empty */
4396                 if (trace_empty(iter))
4397                         return;
4398                 print_trace_header(m, iter);
4399                 if (!(trace_flags & TRACE_ITER_VERBOSE))
4400                         print_lat_help_header(m);
4401         } else {
4402                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4403                         if (trace_flags & TRACE_ITER_IRQ_INFO)
4404                                 print_func_help_header_irq(iter->array_buffer,
4405                                                            m, trace_flags);
4406                         else
4407                                 print_func_help_header(iter->array_buffer, m,
4408                                                        trace_flags);
4409                 }
4410         }
4411 }
4412
4413 static void test_ftrace_alive(struct seq_file *m)
4414 {
4415         if (!ftrace_is_dead())
4416                 return;
4417         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4418                     "#          MAY BE MISSING FUNCTION EVENTS\n");
4419 }
4420
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 static void show_snapshot_main_help(struct seq_file *m)
4423 {
4424         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4425                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4426                     "#                      Takes a snapshot of the main buffer.\n"
4427                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4428                     "#                      (Doesn't have to be '2'; works with any number that\n"
4429                     "#                       is not a '0' or '1')\n");
4430 }
4431
4432 static void show_snapshot_percpu_help(struct seq_file *m)
4433 {
4434         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4436         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4437                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
4438 #else
4439         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4440                     "#                     Must use main snapshot file to allocate.\n");
4441 #endif
4442         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4443                     "#                      (Doesn't have to be '2'; works with any number that\n"
4444                     "#                       is not a '0' or '1')\n");
4445 }
4446
4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4448 {
4449         if (iter->tr->allocated_snapshot)
4450                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4451         else
4452                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4453
4454         seq_puts(m, "# Snapshot commands:\n");
4455         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4456                 show_snapshot_main_help(m);
4457         else
4458                 show_snapshot_percpu_help(m);
4459 }
4460 #else
4461 /* Should never be called */
4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4463 #endif
4464
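     /*
      * seq_file ->show(): print the header when positioned at the start,
      * re-emit a line that overflowed the seq_file on the previous pass,
      * or format and print the current entry.
      */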
4465 static int s_show(struct seq_file *m, void *v)
4466 {
4467         struct trace_iterator *iter = v;
4468         int ret;
4469
4470         if (iter->ent == NULL) {
4471                 if (iter->tr) {
4472                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
4473                         seq_puts(m, "#\n");
4474                         test_ftrace_alive(m);
4475                 }
4476                 if (iter->snapshot && trace_empty(iter))
4477                         print_snapshot_help(m, iter);
4478                 else if (iter->trace && iter->trace->print_header)
4479                         iter->trace->print_header(m);
4480                 else
4481                         trace_default_header(m);
4482
4483         } else if (iter->leftover) {
4484                 /*
4485                  * If we filled the seq_file buffer earlier, we
4486                  * want to just show it now.
4487                  */
4488                 ret = trace_print_seq(m, &iter->seq);
4489
4490                 /* ret should this time be zero, but you never know */
4491                 iter->leftover = ret;
4492
4493         } else {
4494                 ret = print_trace_line(iter);
4495                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4496                         iter->seq.full = 0;
4497                         trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4498                 }
4499                 ret = trace_print_seq(m, &iter->seq);
4500                 /*
4501                  * If we overflow the seq_file buffer, then it will
4502                  * ask us for this data again at start up.
4503                  * Use that instead.
4504                  *  ret is 0 if seq_file write succeeded.
4505                  *        -1 otherwise.
4506                  */
4507                 iter->leftover = ret;
4508         }
4509
4510         return 0;
4511 }
4512
4513 /*
4514  * Should be used after trace_array_get(); trace_types_lock
4515  * ensures that i_cdev was already initialized.
4516  */
4517 static inline int tracing_get_cpu(struct inode *inode)
4518 {
4519         if (inode->i_cdev) /* See trace_create_cpu_file() */
4520                 return (long)inode->i_cdev - 1;
4521         return RING_BUFFER_ALL_CPUS;
4522 }
4523
4524 static const struct seq_operations tracer_seq_ops = {
4525         .start          = s_start,
4526         .next           = s_next,
4527         .stop           = s_stop,
4528         .show           = s_show,
4529 };
4530
4531 /*
4532  * Note, as iter itself can be allocated and freed in different
4533  * ways, this function is only used to free its content, and not
4534  * the iterator itself. The only requirement on all the allocations
4535  * is that they must zero all fields (kzalloc), as freeing works with
4536  * either allocated content or NULL.
4537  */
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4539 {
4540         /* The fmt is either NULL, allocated or points to static_fmt_buf */
4541         if (iter->fmt != static_fmt_buf)
4542                 kfree(iter->fmt);
4543
4544         kfree(iter->temp);
4545         kfree(iter->buffer_iter);
4546         mutex_destroy(&iter->mutex);
4547         free_cpumask_var(iter->started);
4548 }
4549
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4552 {
4553         struct trace_array *tr = inode->i_private;
4554         struct trace_iterator *iter;
4555         int cpu;
4556
4557         if (tracing_disabled)
4558                 return ERR_PTR(-ENODEV);
4559
4560         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4561         if (!iter)
4562                 return ERR_PTR(-ENOMEM);
4563
4564         iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4565                                     GFP_KERNEL);
4566         if (!iter->buffer_iter)
4567                 goto release;
4568
4569         /*
4570          * trace_find_next_entry() may need to save off iter->ent.
4571          * It will place it into the iter->temp buffer. As most
4572          * events are smaller than 128 bytes, allocate a buffer of that size.
4573          * If one is greater, then trace_find_next_entry() will
4574          * allocate a new buffer to adjust for the bigger iter->ent.
4575          * It's not critical if it fails to get allocated here.
4576          */
4577         iter->temp = kmalloc(128, GFP_KERNEL);
4578         if (iter->temp)
4579                 iter->temp_size = 128;
4580
4581         /*
4582          * trace_event_printf() may need to modify the given format
4583          * string to replace %p with %px so that it shows the real address
4584          * instead of a hash value. However, that is only needed for event
4585          * tracing; other tracers may not need it. Defer the allocation
4586          * until it is needed.
4587          */
4588         iter->fmt = NULL;
4589         iter->fmt_size = 0;
4590
4591         mutex_lock(&trace_types_lock);
4592         iter->trace = tr->current_trace;
4593
4594         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4595                 goto fail;
4596
4597         iter->tr = tr;
4598
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600         /* Currently only the top directory has a snapshot */
4601         if (tr->current_trace->print_max || snapshot)
4602                 iter->array_buffer = &tr->max_buffer;
4603         else
4604 #endif
4605                 iter->array_buffer = &tr->array_buffer;
4606         iter->snapshot = snapshot;
4607         iter->pos = -1;
4608         iter->cpu_file = tracing_get_cpu(inode);
4609         mutex_init(&iter->mutex);
4610
4611         /* Notify the tracer early, before we stop tracing. */
4612         if (iter->trace->open)
4613                 iter->trace->open(iter);
4614
4615         /* Annotate start of buffers if we had overruns */
4616         if (ring_buffer_overruns(iter->array_buffer->buffer))
4617                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4618
4619         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4620         if (trace_clocks[tr->clock_id].in_ns)
4621                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4622
4623         /*
4624          * If pause-on-trace is enabled, then stop the trace while
4625          * dumping, unless this is the "snapshot" file
4626          */
4627         if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4628                 tracing_stop_tr(tr);
4629
4630         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4631                 for_each_tracing_cpu(cpu) {
4632                         iter->buffer_iter[cpu] =
4633                                 ring_buffer_read_prepare(iter->array_buffer->buffer,
4634                                                          cpu, GFP_KERNEL);
4635                 }
4636                 ring_buffer_read_prepare_sync();
4637                 for_each_tracing_cpu(cpu) {
4638                         ring_buffer_read_start(iter->buffer_iter[cpu]);
4639                         tracing_iter_reset(iter, cpu);
4640                 }
4641         } else {
4642                 cpu = iter->cpu_file;
4643                 iter->buffer_iter[cpu] =
4644                         ring_buffer_read_prepare(iter->array_buffer->buffer,
4645                                                  cpu, GFP_KERNEL);
4646                 ring_buffer_read_prepare_sync();
4647                 ring_buffer_read_start(iter->buffer_iter[cpu]);
4648                 tracing_iter_reset(iter, cpu);
4649         }
4650
4651         mutex_unlock(&trace_types_lock);
4652
4653         return iter;
4654
4655  fail:
4656         mutex_unlock(&trace_types_lock);
4657         free_trace_iter_content(iter);
4658 release:
4659         seq_release_private(inode, file);
4660         return ERR_PTR(-ENOMEM);
4661 }
4662
4663 int tracing_open_generic(struct inode *inode, struct file *filp)
4664 {
4665         int ret;
4666
4667         ret = tracing_check_open_get_tr(NULL);
4668         if (ret)
4669                 return ret;
4670
4671         filp->private_data = inode->i_private;
4672         return 0;
4673 }
4674
4675 bool tracing_is_disabled(void)
4676 {
4677         return tracing_disabled ? true : false;
4678 }
4679
4680 /*
4681  * Open and update trace_array ref count.
4682  * Must have the current trace_array passed to it.
4683  */
4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4685 {
4686         struct trace_array *tr = inode->i_private;
4687         int ret;
4688
4689         ret = tracing_check_open_get_tr(tr);
4690         if (ret)
4691                 return ret;
4692
4693         filp->private_data = inode->i_private;
4694
4695         return 0;
4696 }
4697
4698 /*
4699  * The private pointer of the inode is the trace_event_file.
4700  * Update the tr ref count associated to it.
4701  */
4702 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4703 {
4704         struct trace_event_file *file = inode->i_private;
4705         int ret;
4706
4707         ret = tracing_check_open_get_tr(file->tr);
4708         if (ret)
4709                 return ret;
4710
4711         mutex_lock(&event_mutex);
4712
4713         /* Fail if the file is marked for removal */
4714         if (file->flags & EVENT_FILE_FL_FREED) {
4715                 trace_array_put(file->tr);
4716                 ret = -ENODEV;
4717         } else {
4718                 event_file_get(file);
4719         }
4720
4721         mutex_unlock(&event_mutex);
4722         if (ret)
4723                 return ret;
4724
4725         filp->private_data = inode->i_private;
4726
4727         return 0;
4728 }
4729
4730 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4731 {
4732         struct trace_event_file *file = inode->i_private;
4733
4734         trace_array_put(file->tr);
4735         event_file_put(file);
4736
4737         return 0;
4738 }
4739
4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4741 {
4742         tracing_release_file_tr(inode, filp);
4743         return single_release(inode, filp);
4744 }
4745
4746 static int tracing_mark_open(struct inode *inode, struct file *filp)
4747 {
4748         stream_open(inode, filp);
4749         return tracing_open_generic_tr(inode, filp);
4750 }
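
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * tracing_mark_open() is used when opening the trace_marker files, and
 * stream_open() means a writer never needs to seek.  Assuming tracefs
 * is mounted at /sys/kernel/tracing, a minimal writer could look like:
 */
#if 0	/* example only, never built */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void mark_trace(const char *msg)
{
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

	if (fd >= 0) {
		/* The written string shows up in the trace output. */
		if (write(fd, msg, strlen(msg)) < 0)
			perror("trace_marker");
		close(fd);
	}
}
#endif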
4751
4752 static int tracing_release(struct inode *inode, struct file *file)
4753 {
4754         struct trace_array *tr = inode->i_private;
4755         struct seq_file *m = file->private_data;
4756         struct trace_iterator *iter;
4757         int cpu;
4758
4759         if (!(file->f_mode & FMODE_READ)) {
4760                 trace_array_put(tr);
4761                 return 0;
4762         }
4763
4764         /* Writes do not use seq_file */
4765         iter = m->private;
4766         mutex_lock(&trace_types_lock);
4767
4768         for_each_tracing_cpu(cpu) {
4769                 if (iter->buffer_iter[cpu])
4770                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
4771         }
4772
4773         if (iter->trace && iter->trace->close)
4774                 iter->trace->close(iter);
4775
4776         if (!iter->snapshot && tr->stop_count)
4777                 /* reenable tracing if it was previously enabled */
4778                 tracing_start_tr(tr);
4779
4780         __trace_array_put(tr);
4781
4782         mutex_unlock(&trace_types_lock);
4783
4784         free_trace_iter_content(iter);
4785         seq_release_private(inode, file);
4786
4787         return 0;
4788 }
4789
4790 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4791 {
4792         struct trace_array *tr = inode->i_private;
4793
4794         trace_array_put(tr);
4795         return 0;
4796 }
4797
4798 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4799 {
4800         struct trace_array *tr = inode->i_private;
4801
4802         trace_array_put(tr);
4803
4804         return single_release(inode, file);
4805 }
4806
4807 static int tracing_open(struct inode *inode, struct file *file)
4808 {
4809         struct trace_array *tr = inode->i_private;
4810         struct trace_iterator *iter;
4811         int ret;
4812
4813         ret = tracing_check_open_get_tr(tr);
4814         if (ret)
4815                 return ret;
4816
4817         /* If this file was open for write, then erase contents */
4818         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4819                 int cpu = tracing_get_cpu(inode);
4820                 struct array_buffer *trace_buf = &tr->array_buffer;
4821
4822 #ifdef CONFIG_TRACER_MAX_TRACE
4823                 if (tr->current_trace->print_max)
4824                         trace_buf = &tr->max_buffer;
4825 #endif
4826
4827                 if (cpu == RING_BUFFER_ALL_CPUS)
4828                         tracing_reset_online_cpus(trace_buf);
4829                 else
4830                         tracing_reset_cpu(trace_buf, cpu);
4831         }
4832
4833         if (file->f_mode & FMODE_READ) {
4834                 iter = __tracing_open(inode, file, false);
4835                 if (IS_ERR(iter))
4836                         ret = PTR_ERR(iter);
4837                 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4838                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
4839         }
4840
4841         if (ret < 0)
4842                 trace_array_put(tr);
4843
4844         return ret;
4845 }
4846
4847 /*
4848  * Some tracers are not suitable for instance buffers.
4849  * A tracer is always available for the global array (toplevel)
4850  * or if it explicitly states that it is.
4851  */
4852 static bool
4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4854 {
4855         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4856 }
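
/*
 * Illustrative sketch (hypothetical tracer, not part of this file): a
 * tracer opts in to instance buffers by setting .allow_instances before
 * registering itself; otherwise trace_ok_for_array() only accepts it
 * for the global (top level) trace array.
 *
 *	static struct tracer sample_tracer __read_mostly = {
 *		.name			= "sample",
 *		.init			= sample_tracer_init,
 *		.reset			= sample_tracer_reset,
 *		.allow_instances	= true,
 *	};
 */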
4857
4858 /* Find the next tracer that this trace array may use */
4859 static struct tracer *
4860 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4861 {
4862         while (t && !trace_ok_for_array(t, tr))
4863                 t = t->next;
4864
4865         return t;
4866 }
4867
4868 static void *
4869 t_next(struct seq_file *m, void *v, loff_t *pos)
4870 {
4871         struct trace_array *tr = m->private;
4872         struct tracer *t = v;
4873
4874         (*pos)++;
4875
4876         if (t)
4877                 t = get_tracer_for_array(tr, t->next);
4878
4879         return t;
4880 }
4881
4882 static void *t_start(struct seq_file *m, loff_t *pos)
4883 {
4884         struct trace_array *tr = m->private;
4885         struct tracer *t;
4886         loff_t l = 0;
4887
4888         mutex_lock(&trace_types_lock);
4889
4890         t = get_tracer_for_array(tr, trace_types);
4891         for (; t && l < *pos; t = t_next(m, t, &l))
4892                 ;
4893
4894         return t;
4895 }
4896
4897 static void t_stop(struct seq_file *m, void *p)
4898 {
4899         mutex_unlock(&trace_types_lock);
4900 }
4901
4902 static int t_show(struct seq_file *m, void *v)
4903 {
4904         struct tracer *t = v;
4905
4906         if (!t)
4907                 return 0;
4908
4909         seq_puts(m, t->name);
4910         if (t->next)
4911                 seq_putc(m, ' ');
4912         else
4913                 seq_putc(m, '\n');
4914
4915         return 0;
4916 }
4917
4918 static const struct seq_operations show_traces_seq_ops = {
4919         .start          = t_start,
4920         .next           = t_next,
4921         .stop           = t_stop,
4922         .show           = t_show,
4923 };
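
/*
 * These seq_file operations back the "available_tracers" file; a read
 * walks the tracers valid for this trace array and prints their names
 * space separated.  Example output (illustrative, depends on the
 * kernel configuration):
 *
 *	# cat /sys/kernel/tracing/available_tracers
 *	function_graph function nop
 */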
4924
4925 static int show_traces_open(struct inode *inode, struct file *file)
4926 {
4927         struct trace_array *tr = inode->i_private;
4928         struct seq_file *m;
4929         int ret;
4930
4931         ret = tracing_check_open_get_tr(tr);
4932         if (ret)
4933                 return ret;
4934
4935         ret = seq_open(file, &show_traces_seq_ops);
4936         if (ret) {
4937                 trace_array_put(tr);
4938                 return ret;
4939         }
4940
4941         m = file->private_data;
4942         m->private = tr;
4943
4944         return 0;
4945 }
4946
4947 static int show_traces_release(struct inode *inode, struct file *file)
4948 {
4949         struct trace_array *tr = inode->i_private;
4950
4951         trace_array_put(tr);
4952         return seq_release(inode, file);
4953 }
4954
4955 static ssize_t
4956 tracing_write_stub(struct file *filp, const char __user *ubuf,
4957                    size_t count, loff_t *ppos)
4958 {
4959         return count;
4960 }
4961
4962 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4963 {
4964         int ret;
4965
4966         if (file->f_mode & FMODE_READ)
4967                 ret = seq_lseek(file, offset, whence);
4968         else
4969                 file->f_pos = ret = 0;
4970
4971         return ret;
4972 }
4973
4974 static const struct file_operations tracing_fops = {
4975         .open           = tracing_open,
4976         .read           = seq_read,
4977         .read_iter      = seq_read_iter,
4978         .splice_read    = copy_splice_read,
4979         .write          = tracing_write_stub,
4980         .llseek         = tracing_lseek,
4981         .release        = tracing_release,
4982 };
4983
4984 static const struct file_operations show_traces_fops = {
4985         .open           = show_traces_open,
4986         .read           = seq_read,
4987         .llseek         = seq_lseek,
4988         .release        = show_traces_release,
4989 };
4990
4991 static ssize_t
4992 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4993                      size_t count, loff_t *ppos)
4994 {
4995         struct trace_array *tr = file_inode(filp)->i_private;
4996         char *mask_str;
4997         int len;
4998
4999         len = snprintf(NULL, 0, "%*pb\n",
5000                        cpumask_pr_args(tr->tracing_cpumask)) + 1;
5001         mask_str = kmalloc(len, GFP_KERNEL);
5002         if (!mask_str)
5003                 return -ENOMEM;
5004
5005         len = snprintf(mask_str, len, "%*pb\n",
5006                        cpumask_pr_args(tr->tracing_cpumask));
5007         if (len >= count) {
5008                 count = -EINVAL;
5009                 goto out_err;
5010         }
5011         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5012
5013 out_err:
5014         kfree(mask_str);
5015
5016         return count;
5017 }
5018
5019 int tracing_set_cpumask(struct trace_array *tr,
5020                         cpumask_var_t tracing_cpumask_new)
5021 {
5022         int cpu;
5023
5024         if (!tr)
5025                 return -EINVAL;
5026
5027         local_irq_disable();
5028         arch_spin_lock(&tr->max_lock);
5029         for_each_tracing_cpu(cpu) {
5030                 /*
5031                  * Increase/decrease the disabled counter if we are
5032                  * about to flip a bit in the cpumask:
5033                  */
5034                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5035                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5036                         atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5037                         ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5038 #ifdef CONFIG_TRACER_MAX_TRACE
5039                         ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5040 #endif
5041                 }
5042                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5043                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5044                         atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5045                         ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5046 #ifdef CONFIG_TRACER_MAX_TRACE
5047                         ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5048 #endif
5049                 }
5050         }
5051         arch_spin_unlock(&tr->max_lock);
5052         local_irq_enable();
5053
5054         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5055
5056         return 0;
5057 }
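
/*
 * Usage sketch: tracing_set_cpumask() services writes to the
 * "tracing_cpumask" file.  The mask is given in the hex bitmap format
 * parsed by cpumask_parse_user(), e.g. restricting tracing to CPUs 0
 * and 1 (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * Reading the file back prints the current mask with the "%*pb" format
 * used in tracing_cpumask_read() above.
 */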
5058
5059 static ssize_t
5060 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5061                       size_t count, loff_t *ppos)
5062 {
5063         struct trace_array *tr = file_inode(filp)->i_private;
5064         cpumask_var_t tracing_cpumask_new;
5065         int err;
5066
5067         if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5068                 return -ENOMEM;
5069
5070         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5071         if (err)
5072                 goto err_free;
5073
5074         err = tracing_set_cpumask(tr, tracing_cpumask_new);
5075         if (err)
5076                 goto err_free;
5077
5078         free_cpumask_var(tracing_cpumask_new);
5079
5080         return count;
5081
5082 err_free:
5083         free_cpumask_var(tracing_cpumask_new);
5084
5085         return err;
5086 }
5087
5088 static const struct file_operations tracing_cpumask_fops = {
5089         .open           = tracing_open_generic_tr,
5090         .read           = tracing_cpumask_read,
5091         .write          = tracing_cpumask_write,
5092         .release        = tracing_release_generic_tr,
5093         .llseek         = generic_file_llseek,
5094 };
5095
5096 static int tracing_trace_options_show(struct seq_file *m, void *v)
5097 {
5098         struct tracer_opt *trace_opts;
5099         struct trace_array *tr = m->private;
5100         u32 tracer_flags;
5101         int i;
5102
5103         mutex_lock(&trace_types_lock);
5104         tracer_flags = tr->current_trace->flags->val;
5105         trace_opts = tr->current_trace->flags->opts;
5106
5107         for (i = 0; trace_options[i]; i++) {
5108                 if (tr->trace_flags & (1 << i))
5109                         seq_printf(m, "%s\n", trace_options[i]);
5110                 else
5111                         seq_printf(m, "no%s\n", trace_options[i]);
5112         }
5113
5114         for (i = 0; trace_opts[i].name; i++) {
5115                 if (tracer_flags & trace_opts[i].bit)
5116                         seq_printf(m, "%s\n", trace_opts[i].name);
5117                 else
5118                         seq_printf(m, "no%s\n", trace_opts[i].name);
5119         }
5120         mutex_unlock(&trace_types_lock);
5121
5122         return 0;
5123 }
5124
5125 static int __set_tracer_option(struct trace_array *tr,
5126                                struct tracer_flags *tracer_flags,
5127                                struct tracer_opt *opts, int neg)
5128 {
5129         struct tracer *trace = tracer_flags->trace;
5130         int ret;
5131
5132         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5133         if (ret)
5134                 return ret;
5135
5136         if (neg)
5137                 tracer_flags->val &= ~opts->bit;
5138         else
5139                 tracer_flags->val |= opts->bit;
5140         return 0;
5141 }
5142
5143 /* Try to assign a tracer specific option */
5144 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5145 {
5146         struct tracer *trace = tr->current_trace;
5147         struct tracer_flags *tracer_flags = trace->flags;
5148         struct tracer_opt *opts = NULL;
5149         int i;
5150
5151         for (i = 0; tracer_flags->opts[i].name; i++) {
5152                 opts = &tracer_flags->opts[i];
5153
5154                 if (strcmp(cmp, opts->name) == 0)
5155                         return __set_tracer_option(tr, trace->flags, opts, neg);
5156         }
5157
5158         return -EINVAL;
5159 }
5160
5161 /* Some tracers require overwrite to stay enabled */
5162 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5163 {
5164         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5165                 return -1;
5166
5167         return 0;
5168 }
5169
5170 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5171 {
5172         if ((mask == TRACE_ITER_RECORD_TGID) ||
5173             (mask == TRACE_ITER_RECORD_CMD))
5174                 lockdep_assert_held(&event_mutex);
5175
5176         /* do nothing if flag is already set */
5177         if (!!(tr->trace_flags & mask) == !!enabled)
5178                 return 0;
5179
5180         /* Give the tracer a chance to approve the change */
5181         if (tr->current_trace->flag_changed)
5182                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5183                         return -EINVAL;
5184
5185         if (enabled)
5186                 tr->trace_flags |= mask;
5187         else
5188                 tr->trace_flags &= ~mask;
5189
5190         if (mask == TRACE_ITER_RECORD_CMD)
5191                 trace_event_enable_cmd_record(enabled);
5192
5193         if (mask == TRACE_ITER_RECORD_TGID) {
5194
5195                 if (trace_alloc_tgid_map() < 0) {
5196                         tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5197                         return -ENOMEM;
5198                 }
5199
5200                 trace_event_enable_tgid_record(enabled);
5201         }
5202
5203         if (mask == TRACE_ITER_EVENT_FORK)
5204                 trace_event_follow_fork(tr, enabled);
5205
5206         if (mask == TRACE_ITER_FUNC_FORK)
5207                 ftrace_pid_follow_fork(tr, enabled);
5208
5209         if (mask == TRACE_ITER_OVERWRITE) {
5210                 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5211 #ifdef CONFIG_TRACER_MAX_TRACE
5212                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5213 #endif
5214         }
5215
5216         if (mask == TRACE_ITER_PRINTK) {
5217                 trace_printk_start_stop_comm(enabled);
5218                 trace_printk_control(enabled);
5219         }
5220
5221         return 0;
5222 }
5223
5224 int trace_set_options(struct trace_array *tr, char *option)
5225 {
5226         char *cmp;
5227         int neg = 0;
5228         int ret;
5229         size_t orig_len = strlen(option);
5230         int len;
5231
5232         cmp = strstrip(option);
5233
5234         len = str_has_prefix(cmp, "no");
5235         if (len)
5236                 neg = 1;
5237
5238         cmp += len;
5239
5240         mutex_lock(&event_mutex);
5241         mutex_lock(&trace_types_lock);
5242
5243         ret = match_string(trace_options, -1, cmp);
5244         /* If no option could be set, test the specific tracer options */
5245         if (ret < 0)
5246                 ret = set_tracer_option(tr, cmp, neg);
5247         else
5248                 ret = set_tracer_flag(tr, 1 << ret, !neg);
5249
5250         mutex_unlock(&trace_types_lock);
5251         mutex_unlock(&event_mutex);
5252
5253         /*
5254          * If the first trailing whitespace is replaced with '\0' by strstrip,
5255          * turn it back into a space.
5256          */
5257         if (orig_len > strlen(option))
5258                 option[strlen(option)] = ' ';
5259
5260         return ret;
5261 }
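
/*
 * Usage sketch: this is what writes to the "trace_options" file end up
 * calling.  Core flags and tracer specific options share the same
 * syntax; prefixing a name with "no" clears it (available names depend
 * on the current tracer):
 *
 *	# echo sym-offset > /sys/kernel/tracing/trace_options
 *	# echo nosym-offset > /sys/kernel/tracing/trace_options
 */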
5262
5263 static void __init apply_trace_boot_options(void)
5264 {
5265         char *buf = trace_boot_options_buf;
5266         char *option;
5267
5268         while (true) {
5269                 option = strsep(&buf, ",");
5270
5271                 if (!option)
5272                         break;
5273
5274                 if (*option)
5275                         trace_set_options(&global_trace, option);
5276
5277                 /* Put back the comma to allow this to be called again */
5278                 if (buf)
5279                         *(buf - 1) = ',';
5280         }
5281 }
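
/*
 * trace_boot_options_buf is filled from the "trace_options=" kernel
 * command line parameter, a comma separated list such as (illustrative):
 *
 *	trace_options=sym-offset,noprint-parent
 *
 * Each entry is handed to trace_set_options() for the global trace
 * array, and the comma is put back so the buffer can be parsed again.
 */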
5282
5283 static ssize_t
5284 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5285                         size_t cnt, loff_t *ppos)
5286 {
5287         struct seq_file *m = filp->private_data;
5288         struct trace_array *tr = m->private;
5289         char buf[64];
5290         int ret;
5291
5292         if (cnt >= sizeof(buf))
5293                 return -EINVAL;
5294
5295         if (copy_from_user(buf, ubuf, cnt))
5296                 return -EFAULT;
5297
5298         buf[cnt] = 0;
5299
5300         ret = trace_set_options(tr, buf);
5301         if (ret < 0)
5302                 return ret;
5303
5304         *ppos += cnt;
5305
5306         return cnt;
5307 }
5308
5309 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5310 {
5311         struct trace_array *tr = inode->i_private;
5312         int ret;
5313
5314         ret = tracing_check_open_get_tr(tr);
5315         if (ret)
5316                 return ret;
5317
5318         ret = single_open(file, tracing_trace_options_show, inode->i_private);
5319         if (ret < 0)
5320                 trace_array_put(tr);
5321
5322         return ret;
5323 }
5324
5325 static const struct file_operations tracing_iter_fops = {
5326         .open           = tracing_trace_options_open,
5327         .read           = seq_read,
5328         .llseek         = seq_lseek,
5329         .release        = tracing_single_release_tr,
5330         .write          = tracing_trace_options_write,
5331 };
5332
5333 static const char readme_msg[] =
5334         "tracing mini-HOWTO:\n\n"
5335         "# echo 0 > tracing_on : quick way to disable tracing\n"
5336         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5337         " Important files:\n"
5338         "  trace\t\t\t- The static contents of the buffer\n"
5339         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
5340         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5341         "  current_tracer\t- function and latency tracers\n"
5342         "  available_tracers\t- list of configured tracers for current_tracer\n"
5343         "  error_log\t- error log for failed commands (that support it)\n"
5344         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5345         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5346         "  trace_clock\t\t- change the clock used to order events\n"
5347         "       local:   Per cpu clock but may not be synced across CPUs\n"
5348         "      global:   Synced across CPUs but slows tracing down.\n"
5349         "     counter:   Not a clock, but just an increment\n"
5350         "      uptime:   Jiffy counter from time of boot\n"
5351         "        perf:   Same clock that perf events use\n"
5352 #ifdef CONFIG_X86_64
5353         "     x86-tsc:   TSC cycle counter\n"
5354 #endif
5355         "\n  timestamp_mode\t- view the mode used to timestamp events\n"
5356         "       delta:   Delta difference against a buffer-wide timestamp\n"
5357         "    absolute:   Absolute (standalone) timestamp\n"
5358         "\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5359         "\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5360         "  tracing_cpumask\t- Limit which CPUs to trace\n"
5361         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5362         "\t\t\t  Remove sub-buffer with rmdir\n"
5363         "  trace_options\t\t- Set format or modify how tracing happens\n"
5364         "\t\t\t  Disable an option by prefixing 'no' to the\n"
5365         "\t\t\t  option name\n"
5366         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5367 #ifdef CONFIG_DYNAMIC_FTRACE
5368         "\n  available_filter_functions - list of functions that can be filtered on\n"
5369         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
5370         "\t\t\t  functions\n"
5371         "\t     accepts: func_full_name or glob-matching-pattern\n"
5372         "\t     modules: Can select a group via module\n"
5373         "\t      Format: :mod:<module-name>\n"
5374         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5375         "\t    triggers: a command to perform when function is hit\n"
5376         "\t      Format: <function>:<trigger>[:count]\n"
5377         "\t     trigger: traceon, traceoff\n"
5378         "\t\t      enable_event:<system>:<event>\n"
5379         "\t\t      disable_event:<system>:<event>\n"
5380 #ifdef CONFIG_STACKTRACE
5381         "\t\t      stacktrace\n"
5382 #endif
5383 #ifdef CONFIG_TRACER_SNAPSHOT
5384         "\t\t      snapshot\n"
5385 #endif
5386         "\t\t      dump\n"
5387         "\t\t      cpudump\n"
5388         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5389         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5390         "\t     The first one will disable tracing every time do_fault is hit\n"
5391         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5392         "\t       The first time do trap is hit and it disables tracing, the\n"
5393         "\t       counter will decrement to 2. If tracing is already disabled,\n"
5394         "\t       the counter will not decrement. It only decrements when the\n"
5395         "\t       trigger did work\n"
5396         "\t     To remove trigger without count:\n"
5397         "\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5398         "\t     To remove trigger with a count:\n"
5399         "\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5400         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5401         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5402         "\t    modules: Can select a group via module command :mod:\n"
5403         "\t    Does not accept triggers\n"
5404 #endif /* CONFIG_DYNAMIC_FTRACE */
5405 #ifdef CONFIG_FUNCTION_TRACER
5406         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5407         "\t\t    (function)\n"
5408         "  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5409         "\t\t    (function)\n"
5410 #endif
5411 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5412         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5413         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5414         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5415 #endif
5416 #ifdef CONFIG_TRACER_SNAPSHOT
5417         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5418         "\t\t\t  snapshot buffer. Read the contents for more\n"
5419         "\t\t\t  information\n"
5420 #endif
5421 #ifdef CONFIG_STACK_TRACER
5422         "  stack_trace\t\t- Shows the max stack trace when active\n"
5423         "  stack_max_size\t- Shows current max stack size that was traced\n"
5424         "\t\t\t  Write into this file to reset the max size (trigger a\n"
5425         "\t\t\t  new trace)\n"
5426 #ifdef CONFIG_DYNAMIC_FTRACE
5427         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5428         "\t\t\t  traces\n"
5429 #endif
5430 #endif /* CONFIG_STACK_TRACER */
5431 #ifdef CONFIG_DYNAMIC_EVENTS
5432         "  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5433         "\t\t\t  Write into this file to define/undefine new trace events.\n"
5434 #endif
5435 #ifdef CONFIG_KPROBE_EVENTS
5436         "  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5437         "\t\t\t  Write into this file to define/undefine new trace events.\n"
5438 #endif
5439 #ifdef CONFIG_UPROBE_EVENTS
5440         "  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5441         "\t\t\t  Write into this file to define/undefine new trace events.\n"
5442 #endif
5443 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5444     defined(CONFIG_FPROBE_EVENTS)
5445         "\t  accepts: event-definitions (one definition per line)\n"
5446 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5447         "\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5448         "\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5449 #endif
5450 #ifdef CONFIG_FPROBE_EVENTS
5451         "\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5452         "\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5453 #endif
5454 #ifdef CONFIG_HIST_TRIGGERS
5455         "\t           s:[synthetic/]<event> <field> [<field>]\n"
5456 #endif
5457         "\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5458         "\t           -:[<group>/][<event>]\n"
5459 #ifdef CONFIG_KPROBE_EVENTS
5460         "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5461   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5462 #endif
5463 #ifdef CONFIG_UPROBE_EVENTS
5464   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5465 #endif
5466         "\t     args: <name>=fetcharg[:type]\n"
5467         "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5468 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5469         "\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5470 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5471         "\t           <argname>[->field[->field|.field...]],\n"
5472 #endif
5473 #else
5474         "\t           $stack<index>, $stack, $retval, $comm,\n"
5475 #endif
5476         "\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5477         "\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5478         "\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5479         "\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5480         "\t           symstr, <type>\\[<array-size>\\]\n"
5481 #ifdef CONFIG_HIST_TRIGGERS
5482         "\t    field: <stype> <name>;\n"
5483         "\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5484         "\t           [unsigned] char/int/long\n"
5485 #endif
5486         "\t    efield: For event probes ('e' types), the field is on of the fields\n"
5487         "\t            of the <attached-group>/<attached-event>.\n"
5488 #endif
5489         "  events/\t\t- Directory containing all trace event subsystems:\n"
5490         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5491         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
5492         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5493         "\t\t\t  events\n"
5494         "      filter\t\t- If set, only events passing filter are traced\n"
5495         "  events/<system>/<event>/\t- Directory containing control files for\n"
5496         "\t\t\t  <event>:\n"
5497         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5498         "      filter\t\t- If set, only events passing filter are traced\n"
5499         "      trigger\t\t- If set, a command to perform when event is hit\n"
5500         "\t    Format: <trigger>[:count][if <filter>]\n"
5501         "\t   trigger: traceon, traceoff\n"
5502         "\t            enable_event:<system>:<event>\n"
5503         "\t            disable_event:<system>:<event>\n"
5504 #ifdef CONFIG_HIST_TRIGGERS
5505         "\t            enable_hist:<system>:<event>\n"
5506         "\t            disable_hist:<system>:<event>\n"
5507 #endif
5508 #ifdef CONFIG_STACKTRACE
5509         "\t\t    stacktrace\n"
5510 #endif
5511 #ifdef CONFIG_TRACER_SNAPSHOT
5512         "\t\t    snapshot\n"
5513 #endif
5514 #ifdef CONFIG_HIST_TRIGGERS
5515         "\t\t    hist (see below)\n"
5516 #endif
5517         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5518         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5519         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5520         "\t                  events/block/block_unplug/trigger\n"
5521         "\t   The first disables tracing every time block_unplug is hit.\n"
5522         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5523         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5524         "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5525         "\t   Like function triggers, the counter is only decremented if it\n"
5526         "\t    enabled or disabled tracing.\n"
5527         "\t   To remove a trigger without a count:\n"
5528         "\t     echo '!<trigger> > <system>/<event>/trigger\n"
5529         "\t   To remove a trigger with a count:\n"
5530         "\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
5531         "\t   Filters can be ignored when removing a trigger.\n"
5532 #ifdef CONFIG_HIST_TRIGGERS
5533         "      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5534         "\t    Format: hist:keys=<field1[,field2,...]>\n"
5535         "\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5536         "\t            [:values=<field1[,field2,...]>]\n"
5537         "\t            [:sort=<field1[,field2,...]>]\n"
5538         "\t            [:size=#entries]\n"
5539         "\t            [:pause][:continue][:clear]\n"
5540         "\t            [:name=histname1]\n"
5541         "\t            [:nohitcount]\n"
5542         "\t            [:<handler>.<action>]\n"
5543         "\t            [if <filter>]\n\n"
5544         "\t    Note, special fields can be used as well:\n"
5545         "\t            common_timestamp - to record current timestamp\n"
5546         "\t            common_cpu - to record the CPU the event happened on\n"
5547         "\n"
5548         "\t    A hist trigger variable can be:\n"
5549         "\t        - a reference to a field e.g. x=current_timestamp,\n"
5550         "\t        - a reference to another variable e.g. y=$x,\n"
5551         "\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5552         "\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5553         "\n"
5554         "\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5555         "\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5556         "\t    variable reference, field or numeric literal.\n"
5557         "\n"
5558         "\t    When a matching event is hit, an entry is added to a hash\n"
5559         "\t    table using the key(s) and value(s) named, and the value of a\n"
5560         "\t    sum called 'hitcount' is incremented.  Keys and values\n"
5561         "\t    correspond to fields in the event's format description.  Keys\n"
5562         "\t    can be any field, or the special string 'common_stacktrace'.\n"
5563         "\t    Compound keys consisting of up to two fields can be specified\n"
5564         "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5565         "\t    fields.  Sort keys consisting of up to two fields can be\n"
5566         "\t    specified using the 'sort' keyword.  The sort direction can\n"
5567         "\t    be modified by appending '.descending' or '.ascending' to a\n"
5568         "\t    sort field.  The 'size' parameter can be used to specify more\n"
5569         "\t    or fewer than the default 2048 entries for the hashtable size.\n"
5570         "\t    If a hist trigger is given a name using the 'name' parameter,\n"
5571         "\t    its histogram data will be shared with other triggers of the\n"
5572         "\t    same name, and trigger hits will update this common data.\n\n"
5573         "\t    Reading the 'hist' file for the event will dump the hash\n"
5574         "\t    table in its entirety to stdout.  If there are multiple hist\n"
5575         "\t    triggers attached to an event, there will be a table for each\n"
5576         "\t    trigger in the output.  The table displayed for a named\n"
5577         "\t    trigger will be the same as any other instance having the\n"
5578         "\t    same name.  The default format used to display a given field\n"
5579         "\t    can be modified by appending any of the following modifiers\n"
5580         "\t    to the field name, as applicable:\n\n"
5581         "\t            .hex        display a number as a hex value\n"
5582         "\t            .sym        display an address as a symbol\n"
5583         "\t            .sym-offset display an address as a symbol and offset\n"
5584         "\t            .execname   display a common_pid as a program name\n"
5585         "\t            .syscall    display a syscall id as a syscall name\n"
5586         "\t            .log2       display log2 value rather than raw number\n"
5587         "\t            .buckets=size  display values in groups of size rather than raw number\n"
5588         "\t            .usecs      display a common_timestamp in microseconds\n"
5589         "\t            .percent    display a number of percentage value\n"
5590         "\t            .graph      display a bar-graph of a value\n\n"
5591         "\t    The 'pause' parameter can be used to pause an existing hist\n"
5592         "\t    trigger or to start a hist trigger but not log any events\n"
5593         "\t    until told to do so.  'continue' can be used to start or\n"
5594         "\t    restart a paused hist trigger.\n\n"
5595         "\t    The 'clear' parameter will clear the contents of a running\n"
5596         "\t    hist trigger and leave its current paused/active state\n"
5597         "\t    unchanged.\n\n"
5598         "\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5599         "\t    raw hitcount in the histogram.\n\n"
5600         "\t    The enable_hist and disable_hist triggers can be used to\n"
5601         "\t    have one event conditionally start and stop another event's\n"
5602         "\t    already-attached hist trigger.  The syntax is analogous to\n"
5603         "\t    the enable_event and disable_event triggers.\n\n"
5604         "\t    Hist trigger handlers and actions are executed whenever a\n"
5605         "\t    a histogram entry is added or updated.  They take the form:\n\n"
5606         "\t        <handler>.<action>\n\n"
5607         "\t    The available handlers are:\n\n"
5608         "\t        onmatch(matching.event)  - invoke on addition or update\n"
5609         "\t        onmax(var)               - invoke if var exceeds current max\n"
5610         "\t        onchange(var)            - invoke action if var changes\n\n"
5611         "\t    The available actions are:\n\n"
5612         "\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5613         "\t        save(field,...)                      - save current event fields\n"
5614 #ifdef CONFIG_TRACER_SNAPSHOT
5615         "\t        snapshot()                           - snapshot the trace buffer\n\n"
5616 #endif
5617 #ifdef CONFIG_SYNTH_EVENTS
5618         "  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5619         "\t  Write into this file to define/undefine new synthetic events.\n"
5620         "\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5621 #endif
5622 #endif
5623 ;
5624
5625 static ssize_t
5626 tracing_readme_read(struct file *filp, char __user *ubuf,
5627                        size_t cnt, loff_t *ppos)
5628 {
5629         return simple_read_from_buffer(ubuf, cnt, ppos,
5630                                         readme_msg, strlen(readme_msg));
5631 }
5632
5633 static const struct file_operations tracing_readme_fops = {
5634         .open           = tracing_open_generic,
5635         .read           = tracing_readme_read,
5636         .llseek         = generic_file_llseek,
5637 };
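
/*
 * The readme_msg help text above is exposed through the read-only
 * tracefs "README" file, e.g.:
 *
 *	# cat /sys/kernel/tracing/README
 */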
5638
5639 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5640 static union trace_eval_map_item *
5641 update_eval_map(union trace_eval_map_item *ptr)
5642 {
5643         if (!ptr->map.eval_string) {
5644                 if (ptr->tail.next) {
5645                         ptr = ptr->tail.next;
5646                         /* Set ptr to the next real item (skip head) */
5647                         ptr++;
5648                 } else
5649                         return NULL;
5650         }
5651         return ptr;
5652 }
5653
5654 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5655 {
5656         union trace_eval_map_item *ptr = v;
5657
5658         /*
5659          * Paranoid! If ptr points to end, we don't want to increment past it.
5660          * This really should never happen.
5661          */
5662         (*pos)++;
5663         ptr = update_eval_map(ptr);
5664         if (WARN_ON_ONCE(!ptr))
5665                 return NULL;
5666
5667         ptr++;
5668         ptr = update_eval_map(ptr);
5669
5670         return ptr;
5671 }
5672
5673 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5674 {
5675         union trace_eval_map_item *v;
5676         loff_t l = 0;
5677
5678         mutex_lock(&trace_eval_mutex);
5679
5680         v = trace_eval_maps;
5681         if (v)
5682                 v++;
5683
5684         while (v && l < *pos) {
5685                 v = eval_map_next(m, v, &l);
5686         }
5687
5688         return v;
5689 }
5690
5691 static void eval_map_stop(struct seq_file *m, void *v)
5692 {
5693         mutex_unlock(&trace_eval_mutex);
5694 }
5695
5696 static int eval_map_show(struct seq_file *m, void *v)
5697 {
5698         union trace_eval_map_item *ptr = v;
5699
5700         seq_printf(m, "%s %ld (%s)\n",
5701                    ptr->map.eval_string, ptr->map.eval_value,
5702                    ptr->map.system);
5703
5704         return 0;
5705 }
5706
5707 static const struct seq_operations tracing_eval_map_seq_ops = {
5708         .start          = eval_map_start,
5709         .next           = eval_map_next,
5710         .stop           = eval_map_stop,
5711         .show           = eval_map_show,
5712 };
5713
5714 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5715 {
5716         int ret;
5717
5718         ret = tracing_check_open_get_tr(NULL);
5719         if (ret)
5720                 return ret;
5721
5722         return seq_open(filp, &tracing_eval_map_seq_ops);
5723 }
5724
5725 static const struct file_operations tracing_eval_map_fops = {
5726         .open           = tracing_eval_map_open,
5727         .read           = seq_read,
5728         .llseek         = seq_lseek,
5729         .release        = seq_release,
5730 };
5731
5732 static inline union trace_eval_map_item *
5733 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5734 {
5735         /* Return tail of array given the head */
5736         return ptr + ptr->head.length + 1;
5737 }
5738
5739 static void
5740 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5741                            int len)
5742 {
5743         struct trace_eval_map **stop;
5744         struct trace_eval_map **map;
5745         union trace_eval_map_item *map_array;
5746         union trace_eval_map_item *ptr;
5747
5748         stop = start + len;
5749
5750         /*
5751          * The trace_eval_maps list contains the maps plus a head and a tail
5752          * item, where the head holds the module and the length of the array,
5753          * and the tail holds a pointer to the next list.
5754          */
5755         map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5756         if (!map_array) {
5757                 pr_warn("Unable to allocate trace eval mapping\n");
5758                 return;
5759         }
5760
5761         mutex_lock(&trace_eval_mutex);
5762
5763         if (!trace_eval_maps)
5764                 trace_eval_maps = map_array;
5765         else {
5766                 ptr = trace_eval_maps;
5767                 for (;;) {
5768                         ptr = trace_eval_jmp_to_tail(ptr);
5769                         if (!ptr->tail.next)
5770                                 break;
5771                         ptr = ptr->tail.next;
5772
5773                 }
5774                 ptr->tail.next = map_array;
5775         }
5776         map_array->head.mod = mod;
5777         map_array->head.length = len;
5778         map_array++;
5779
5780         for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5781                 map_array->map = **map;
5782                 map_array++;
5783         }
5784         memset(map_array, 0, sizeof(*map_array));
5785
5786         mutex_unlock(&trace_eval_mutex);
5787 }
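
/*
 * Resulting layout of one map_array allocation (len == 3 shown purely
 * as an illustration):
 *
 *	[ head: mod, length ][ map 0 ][ map 1 ][ map 2 ][ tail: next ]
 *
 * trace_eval_jmp_to_tail() steps from the head over "length" map
 * entries to reach the tail, whose ->next either links to the next
 * module's array or stays NULL (the memset() above zeroes the tail of
 * the last array).
 */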
5788
5789 static void trace_create_eval_file(struct dentry *d_tracer)
5790 {
5791         trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5792                           NULL, &tracing_eval_map_fops);
5793 }
5794
5795 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5796 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5797 static inline void trace_insert_eval_map_file(struct module *mod,
5798                               struct trace_eval_map **start, int len) { }
5799 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5800
5801 static void trace_insert_eval_map(struct module *mod,
5802                                   struct trace_eval_map **start, int len)
5803 {
5804         struct trace_eval_map **map;
5805
5806         if (len <= 0)
5807                 return;
5808
5809         map = start;
5810
5811         trace_event_eval_update(map, len);
5812
5813         trace_insert_eval_map_file(mod, start, len);
5814 }
5815
5816 static ssize_t
5817 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5818                        size_t cnt, loff_t *ppos)
5819 {
5820         struct trace_array *tr = filp->private_data;
5821         char buf[MAX_TRACER_SIZE+2];
5822         int r;
5823
5824         mutex_lock(&trace_types_lock);
5825         r = sprintf(buf, "%s\n", tr->current_trace->name);
5826         mutex_unlock(&trace_types_lock);
5827
5828         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5829 }
5830
5831 int tracer_init(struct tracer *t, struct trace_array *tr)
5832 {
5833         tracing_reset_online_cpus(&tr->array_buffer);
5834         return t->init(tr);
5835 }
5836
5837 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5838 {
5839         int cpu;
5840
5841         for_each_tracing_cpu(cpu)
5842                 per_cpu_ptr(buf->data, cpu)->entries = val;
5843 }
5844
5845 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5846 {
5847         if (cpu == RING_BUFFER_ALL_CPUS) {
5848                 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5849         } else {
5850                 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5851         }
5852 }
5853
5854 #ifdef CONFIG_TRACER_MAX_TRACE
5855 /* Resize @trace_buf's buffer to the size of @size_buf's entries */
5856 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5857                                         struct array_buffer *size_buf, int cpu_id)
5858 {
5859         int cpu, ret = 0;
5860
5861         if (cpu_id == RING_BUFFER_ALL_CPUS) {
5862                 for_each_tracing_cpu(cpu) {
5863                         ret = ring_buffer_resize(trace_buf->buffer,
5864                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5865                         if (ret < 0)
5866                                 break;
5867                         per_cpu_ptr(trace_buf->data, cpu)->entries =
5868                                 per_cpu_ptr(size_buf->data, cpu)->entries;
5869                 }
5870         } else {
5871                 ret = ring_buffer_resize(trace_buf->buffer,
5872                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5873                 if (ret == 0)
5874                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5875                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5876         }
5877
5878         return ret;
5879 }
5880 #endif /* CONFIG_TRACER_MAX_TRACE */
5881
5882 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5883                                         unsigned long size, int cpu)
5884 {
5885         int ret;
5886
5887         /*
5888          * If the kernel or the user changes the size of the ring buffer,
5889          * we use the size that was given, and we can forget about
5890          * expanding it later.
5891          */
5892         trace_set_ring_buffer_expanded(tr);
5893
5894         /* May be called before buffers are initialized */
5895         if (!tr->array_buffer.buffer)
5896                 return 0;
5897
5898         /* Do not allow tracing while resizing ring buffer */
5899         tracing_stop_tr(tr);
5900
5901         ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5902         if (ret < 0)
5903                 goto out_start;
5904
5905 #ifdef CONFIG_TRACER_MAX_TRACE
5906         if (!tr->allocated_snapshot)
5907                 goto out;
5908
5909         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5910         if (ret < 0) {
5911                 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5912                                                      &tr->array_buffer, cpu);
5913                 if (r < 0) {
5914                         /*
5915                          * AARGH! We are left with a differently
5916                          * sized max buffer!
5917                          * The max buffer is our "snapshot" buffer.
5918                          * When a tracer needs a snapshot (one of the
5919                          * latency tracers), it swaps the max buffer
5920                          * with the saved snapshot. We succeeded in
5921                          * updating the size of the main buffer, but failed
5922                          * to update the size of the max buffer. But when we tried
5923                          * to reset the main buffer to the original size, we
5924                          * failed there too. This is very unlikely to
5925                          * happen, but if it does, warn and kill all
5926                          * tracing.
5927                          */
5928                         WARN_ON(1);
5929                         tracing_disabled = 1;
5930                 }
5931                 goto out_start;
5932         }
5933
5934         update_buffer_entries(&tr->max_buffer, cpu);
5935
5936  out:
5937 #endif /* CONFIG_TRACER_MAX_TRACE */
5938
5939         update_buffer_entries(&tr->array_buffer, cpu);
5940  out_start:
5941         tracing_start_tr(tr);
5942         return ret;
5943 }
5944
5945 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5946                                   unsigned long size, int cpu_id)
5947 {
5948         int ret;
5949
5950         mutex_lock(&trace_types_lock);
5951
5952         if (cpu_id != RING_BUFFER_ALL_CPUS) {
5953                 /* make sure, this cpu is enabled in the mask */
5954                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5955                         ret = -EINVAL;
5956                         goto out;
5957                 }
5958         }
5959
5960         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5961         if (ret < 0)
5962                 ret = -ENOMEM;
5963
5964 out:
5965         mutex_unlock(&trace_types_lock);
5966
5967         return ret;
5968 }
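
/*
 * Usage sketch: this is what ultimately services writes to the
 * "buffer_size_kb" files (the callers convert the KiB value written by
 * userspace into a byte count), either for all CPUs or for one CPU:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# echo 1024 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */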
5969
5970
5971 /**
5972  * tracing_update_buffers - used by tracing facility to expand ring buffers
5973  * @tr: The tracing instance
5974  *
5975  * To save memory when tracing is never used on a system that has it
5976  * configured in, the ring buffers are set to a minimum size. Once
5977  * a user starts to use the tracing facility, they need to grow
5978  * to their default size.
5979  *
5980  * This function is to be called when a tracer is about to be used.
5981  */
5982 int tracing_update_buffers(struct trace_array *tr)
5983 {
5984         int ret = 0;
5985
5986         mutex_lock(&trace_types_lock);
5987         if (!tr->ring_buffer_expanded)
5988                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5989                                                 RING_BUFFER_ALL_CPUS);
5990         mutex_unlock(&trace_types_lock);
5991
5992         return ret;
5993 }
5994
5995 struct trace_option_dentry;
5996
5997 static void
5998 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5999
6000 /*
6001  * Used to clear out the tracer before deletion of an instance.
6002  * Must have trace_types_lock held.
6003  */
6004 static void tracing_set_nop(struct trace_array *tr)
6005 {
6006         if (tr->current_trace == &nop_trace)
6007                 return;
6008
6009         tr->current_trace->enabled--;
6010
6011         if (tr->current_trace->reset)
6012                 tr->current_trace->reset(tr);
6013
6014         tr->current_trace = &nop_trace;
6015 }
6016
6017 static bool tracer_options_updated;
6018
6019 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6020 {
6021         /* Only enable if the directory has been created already. */
6022         if (!tr->dir)
6023                 return;
6024
6025         /* Only create trace option files after update_tracer_options finishes */
6026         if (!tracer_options_updated)
6027                 return;
6028
6029         create_trace_option_files(tr, t);
6030 }
6031
6032 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6033 {
6034         struct tracer *t;
6035 #ifdef CONFIG_TRACER_MAX_TRACE
6036         bool had_max_tr;
6037 #endif
6038         int ret = 0;
6039
6040         mutex_lock(&trace_types_lock);
6041
6042         if (!tr->ring_buffer_expanded) {
6043                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6044                                                 RING_BUFFER_ALL_CPUS);
6045                 if (ret < 0)
6046                         goto out;
6047                 ret = 0;
6048         }
6049
6050         for (t = trace_types; t; t = t->next) {
6051                 if (strcmp(t->name, buf) == 0)
6052                         break;
6053         }
6054         if (!t) {
6055                 ret = -EINVAL;
6056                 goto out;
6057         }
6058         if (t == tr->current_trace)
6059                 goto out;
6060
6061 #ifdef CONFIG_TRACER_SNAPSHOT
6062         if (t->use_max_tr) {
6063                 local_irq_disable();
6064                 arch_spin_lock(&tr->max_lock);
6065                 if (tr->cond_snapshot)
6066                         ret = -EBUSY;
6067                 arch_spin_unlock(&tr->max_lock);
6068                 local_irq_enable();
6069                 if (ret)
6070                         goto out;
6071         }
6072 #endif
6073         /* Some tracers won't work on kernel command line */
6074         if (system_state < SYSTEM_RUNNING && t->noboot) {
6075                 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6076                         t->name);
6077                 goto out;
6078         }
6079
6080         /* Some tracers are only allowed for the top level buffer */
6081         if (!trace_ok_for_array(t, tr)) {
6082                 ret = -EINVAL;
6083                 goto out;
6084         }
6085
6086         /* If trace pipe files are being read, we can't change the tracer */
6087         if (tr->trace_ref) {
6088                 ret = -EBUSY;
6089                 goto out;
6090         }
6091
6092         trace_branch_disable();
6093
6094         tr->current_trace->enabled--;
6095
6096         if (tr->current_trace->reset)
6097                 tr->current_trace->reset(tr);
6098
6099 #ifdef CONFIG_TRACER_MAX_TRACE
6100         had_max_tr = tr->current_trace->use_max_tr;
6101
6102         /* Current trace needs to be nop_trace before synchronize_rcu */
6103         tr->current_trace = &nop_trace;
6104
6105         if (had_max_tr && !t->use_max_tr) {
6106                 /*
6107                  * We need to make sure that the update_max_tr sees that
6108                  * current_trace changed to nop_trace to keep it from
6109                  * swapping the buffers after we resize it.
6110                  * update_max_tr() is called with interrupts disabled,
6111                  * so a synchronize_rcu() is sufficient.
6112                  */
6113                 synchronize_rcu();
6114                 free_snapshot(tr);
6115         }
6116
6117         if (t->use_max_tr && !tr->allocated_snapshot) {
6118                 ret = tracing_alloc_snapshot_instance(tr);
6119                 if (ret < 0)
6120                         goto out;
6121         }
6122 #else
6123         tr->current_trace = &nop_trace;
6124 #endif
6125
6126         if (t->init) {
6127                 ret = tracer_init(t, tr);
6128                 if (ret)
6129                         goto out;
6130         }
6131
6132         tr->current_trace = t;
6133         tr->current_trace->enabled++;
6134         trace_branch_enable(tr);
6135  out:
6136         mutex_unlock(&trace_types_lock);
6137
6138         return ret;
6139 }
6140
6141 static ssize_t
6142 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6143                         size_t cnt, loff_t *ppos)
6144 {
6145         struct trace_array *tr = filp->private_data;
6146         char buf[MAX_TRACER_SIZE+1];
6147         char *name;
6148         size_t ret;
6149         int err;
6150
6151         ret = cnt;
6152
6153         if (cnt > MAX_TRACER_SIZE)
6154                 cnt = MAX_TRACER_SIZE;
6155
6156         if (copy_from_user(buf, ubuf, cnt))
6157                 return -EFAULT;
6158
6159         buf[cnt] = 0;
6160
6161         name = strim(buf);
6162
6163         err = tracing_set_tracer(tr, name);
6164         if (err)
6165                 return err;
6166
6167         *ppos += ret;
6168
6169         return ret;
6170 }
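
/*
 * A minimal usage sketch for the write path above, assuming tracefs is
 * mounted at /sys/kernel/tracing and that set_tracer_fops is exposed as the
 * usual "current_tracer" file:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo nop > /sys/kernel/tracing/current_tracer
 *
 * In-kernel callers can use tracing_set_tracer(tr, "nop") directly; the
 * handler above only trims the user buffer before doing the same.
 */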
6171
6172 static ssize_t
6173 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6174                    size_t cnt, loff_t *ppos)
6175 {
6176         char buf[64];
6177         int r;
6178
6179         r = snprintf(buf, sizeof(buf), "%ld\n",
6180                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6181         if (r > sizeof(buf))
6182                 r = sizeof(buf);
6183         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6184 }
6185
6186 static ssize_t
6187 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6188                     size_t cnt, loff_t *ppos)
6189 {
6190         unsigned long val;
6191         int ret;
6192
6193         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6194         if (ret)
6195                 return ret;
6196
6197         *ptr = val * 1000;
6198
6199         return cnt;
6200 }
6201
6202 static ssize_t
6203 tracing_thresh_read(struct file *filp, char __user *ubuf,
6204                     size_t cnt, loff_t *ppos)
6205 {
6206         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6207 }
6208
6209 static ssize_t
6210 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6211                      size_t cnt, loff_t *ppos)
6212 {
6213         struct trace_array *tr = filp->private_data;
6214         int ret;
6215
6216         mutex_lock(&trace_types_lock);
6217         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6218         if (ret < 0)
6219                 goto out;
6220
6221         if (tr->current_trace->update_thresh) {
6222                 ret = tr->current_trace->update_thresh(tr);
6223                 if (ret < 0)
6224                         goto out;
6225         }
6226
6227         ret = cnt;
6228 out:
6229         mutex_unlock(&trace_types_lock);
6230
6231         return ret;
6232 }
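
/*
 * A short usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and this handler is exposed as the usual "tracing_thresh" file. The
 * helpers above store nanoseconds internally but expose microseconds
 * (note the nsecs_to_usecs() on read and the "val * 1000" on write):
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh	(100 usec threshold)
 *	echo 0 > /sys/kernel/tracing/tracing_thresh	(clear the threshold)
 */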
6233
6234 #ifdef CONFIG_TRACER_MAX_TRACE
6235
6236 static ssize_t
6237 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6238                      size_t cnt, loff_t *ppos)
6239 {
6240         struct trace_array *tr = filp->private_data;
6241
6242         return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6243 }
6244
6245 static ssize_t
6246 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6247                       size_t cnt, loff_t *ppos)
6248 {
6249         struct trace_array *tr = filp->private_data;
6250
6251         return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6252 }
6253
6254 #endif
6255
6256 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6257 {
6258         if (cpu == RING_BUFFER_ALL_CPUS) {
6259                 if (cpumask_empty(tr->pipe_cpumask)) {
6260                         cpumask_setall(tr->pipe_cpumask);
6261                         return 0;
6262                 }
6263         } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6264                 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6265                 return 0;
6266         }
6267         return -EBUSY;
6268 }
6269
6270 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6271 {
6272         if (cpu == RING_BUFFER_ALL_CPUS) {
6273                 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6274                 cpumask_clear(tr->pipe_cpumask);
6275         } else {
6276                 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6277                 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6278         }
6279 }
6280
6281 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6282 {
6283         struct trace_array *tr = inode->i_private;
6284         struct trace_iterator *iter;
6285         int cpu;
6286         int ret;
6287
6288         ret = tracing_check_open_get_tr(tr);
6289         if (ret)
6290                 return ret;
6291
6292         mutex_lock(&trace_types_lock);
6293         cpu = tracing_get_cpu(inode);
6294         ret = open_pipe_on_cpu(tr, cpu);
6295         if (ret)
6296                 goto fail_pipe_on_cpu;
6297
6298         /* create a buffer to store the information to pass to userspace */
6299         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6300         if (!iter) {
6301                 ret = -ENOMEM;
6302                 goto fail_alloc_iter;
6303         }
6304
6305         trace_seq_init(&iter->seq);
6306         iter->trace = tr->current_trace;
6307
6308         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6309                 ret = -ENOMEM;
6310                 goto fail;
6311         }
6312
6313         /* trace pipe does not show start of buffer */
6314         cpumask_setall(iter->started);
6315
6316         if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6317                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6318
6319         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6320         if (trace_clocks[tr->clock_id].in_ns)
6321                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6322
6323         iter->tr = tr;
6324         iter->array_buffer = &tr->array_buffer;
6325         iter->cpu_file = cpu;
6326         mutex_init(&iter->mutex);
6327         filp->private_data = iter;
6328
6329         if (iter->trace->pipe_open)
6330                 iter->trace->pipe_open(iter);
6331
6332         nonseekable_open(inode, filp);
6333
6334         tr->trace_ref++;
6335
6336         mutex_unlock(&trace_types_lock);
6337         return ret;
6338
6339 fail:
6340         kfree(iter);
6341 fail_alloc_iter:
6342         close_pipe_on_cpu(tr, cpu);
6343 fail_pipe_on_cpu:
6344         __trace_array_put(tr);
6345         mutex_unlock(&trace_types_lock);
6346         return ret;
6347 }
6348
6349 static int tracing_release_pipe(struct inode *inode, struct file *file)
6350 {
6351         struct trace_iterator *iter = file->private_data;
6352         struct trace_array *tr = inode->i_private;
6353
6354         mutex_lock(&trace_types_lock);
6355
6356         tr->trace_ref--;
6357
6358         if (iter->trace->pipe_close)
6359                 iter->trace->pipe_close(iter);
6360         close_pipe_on_cpu(tr, iter->cpu_file);
6361         mutex_unlock(&trace_types_lock);
6362
6363         free_trace_iter_content(iter);
6364         kfree(iter);
6365
6366         trace_array_put(tr);
6367
6368         return 0;
6369 }
6370
6371 static __poll_t
6372 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6373 {
6374         struct trace_array *tr = iter->tr;
6375
6376         /* Iterators are static; they should be either filled or empty */
6377         if (trace_buffer_iter(iter, iter->cpu_file))
6378                 return EPOLLIN | EPOLLRDNORM;
6379
6380         if (tr->trace_flags & TRACE_ITER_BLOCK)
6381                 /*
6382                  * Always select as readable when in blocking mode
6383                  */
6384                 return EPOLLIN | EPOLLRDNORM;
6385         else
6386                 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6387                                              filp, poll_table, iter->tr->buffer_percent);
6388 }
6389
6390 static __poll_t
6391 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6392 {
6393         struct trace_iterator *iter = filp->private_data;
6394
6395         return trace_poll(iter, filp, poll_table);
6396 }
6397
6398 /* Must be called with iter->mutex held. */
6399 static int tracing_wait_pipe(struct file *filp)
6400 {
6401         struct trace_iterator *iter = filp->private_data;
6402         int ret;
6403
6404         while (trace_empty(iter)) {
6405
6406                 if ((filp->f_flags & O_NONBLOCK)) {
6407                         return -EAGAIN;
6408                 }
6409
6410                 /*
6411                  * We block until we read something. We still block if
6412                  * tracing is disabled but we have never read anything.
6413                  * This allows a user to cat this file, and then enable
6414                  * tracing. But after we have read something, we give
6415                  * an EOF when tracing is disabled again.
6416                  *
6417                  * iter->pos will be 0 if we haven't read anything.
6418                  */
6419                 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6420                         break;
6421
6422                 mutex_unlock(&iter->mutex);
6423
6424                 ret = wait_on_pipe(iter, 0);
6425
6426                 mutex_lock(&iter->mutex);
6427
6428                 if (ret)
6429                         return ret;
6430         }
6431
6432         return 1;
6433 }
6434
6435 /*
6436  * Consumer reader.
6437  */
6438 static ssize_t
6439 tracing_read_pipe(struct file *filp, char __user *ubuf,
6440                   size_t cnt, loff_t *ppos)
6441 {
6442         struct trace_iterator *iter = filp->private_data;
6443         ssize_t sret;
6444
6445         /*
6446          * Avoid more than one consumer on a single file descriptor.
6447          * This is just a matter of trace coherency; the ring buffer
6448          * itself is protected.
6449          */
6450         mutex_lock(&iter->mutex);
6451
6452         /* return any leftover data */
6453         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6454         if (sret != -EBUSY)
6455                 goto out;
6456
6457         trace_seq_init(&iter->seq);
6458
6459         if (iter->trace->read) {
6460                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6461                 if (sret)
6462                         goto out;
6463         }
6464
6465 waitagain:
6466         sret = tracing_wait_pipe(filp);
6467         if (sret <= 0)
6468                 goto out;
6469
6470         /* stop when tracing is finished */
6471         if (trace_empty(iter)) {
6472                 sret = 0;
6473                 goto out;
6474         }
6475
6476         if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6477                 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6478
6479         /* reset all but tr, trace, and overruns */
6480         trace_iterator_reset(iter);
6481         cpumask_clear(iter->started);
6482         trace_seq_init(&iter->seq);
6483
6484         trace_event_read_lock();
6485         trace_access_lock(iter->cpu_file);
6486         while (trace_find_next_entry_inc(iter) != NULL) {
6487                 enum print_line_t ret;
6488                 int save_len = iter->seq.seq.len;
6489
6490                 ret = print_trace_line(iter);
6491                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6492                         /*
6493                          * If one print_trace_line() fills the entire trace_seq in one
6494                          * shot, trace_seq_to_user() will return -EBUSY because save_len
6495                          * is 0. In this case, we need to consume it; otherwise, the loop
6496                          * will peek at this event next time, resulting in an infinite loop.
6497                          */
6498                         if (save_len == 0) {
6499                                 iter->seq.full = 0;
6500                                 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6501                                 trace_consume(iter);
6502                                 break;
6503                         }
6504
6505                         /* In other cases, don't print partial lines */
6506                         iter->seq.seq.len = save_len;
6507                         break;
6508                 }
6509                 if (ret != TRACE_TYPE_NO_CONSUME)
6510                         trace_consume(iter);
6511
6512                 if (trace_seq_used(&iter->seq) >= cnt)
6513                         break;
6514
6515                 /*
6516                  * The full flag being set means we reached the trace_seq buffer
6517                  * size and should have left via the partial-output condition
6518                  * above; one of the trace_seq_* functions is not used properly.
6519                  */
6520                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6521                           iter->ent->type);
6522         }
6523         trace_access_unlock(iter->cpu_file);
6524         trace_event_read_unlock();
6525
6526         /* Now copy what we have to the user */
6527         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6528         if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6529                 trace_seq_init(&iter->seq);
6530
6531         /*
6532          * If there was nothing to send to user, in spite of consuming trace
6533          * entries, go back to wait for more entries.
6534          */
6535         if (sret == -EBUSY)
6536                 goto waitagain;
6537
6538 out:
6539         mutex_unlock(&iter->mutex);
6540
6541         return sret;
6542 }
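
/*
 * A brief usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and tracing_pipe_fops is exposed as the usual "trace_pipe" file. This is
 * a consuming reader: entries read here are removed from the ring buffer,
 * and an empty buffer blocks unless the file was opened with O_NONBLOCK:
 *
 *	cat /sys/kernel/tracing/trace_pipe
 *	cat /sys/kernel/tracing/per_cpu/cpu0/trace_pipe	(single CPU)
 */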
6543
6544 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6545                                      unsigned int idx)
6546 {
6547         __free_page(spd->pages[idx]);
6548 }
6549
6550 static size_t
6551 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6552 {
6553         size_t count;
6554         int save_len;
6555         int ret;
6556
6557         /* Seq buffer is page-sized, exactly what we need. */
6558         for (;;) {
6559                 save_len = iter->seq.seq.len;
6560                 ret = print_trace_line(iter);
6561
6562                 if (trace_seq_has_overflowed(&iter->seq)) {
6563                         iter->seq.seq.len = save_len;
6564                         break;
6565                 }
6566
6567                 /*
6568                  * This should not be hit, because it should only
6569                  * be set if the iter->seq overflowed. But check it
6570                  * anyway to be safe.
6571                  */
6572                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6573                         iter->seq.seq.len = save_len;
6574                         break;
6575                 }
6576
6577                 count = trace_seq_used(&iter->seq) - save_len;
6578                 if (rem < count) {
6579                         rem = 0;
6580                         iter->seq.seq.len = save_len;
6581                         break;
6582                 }
6583
6584                 if (ret != TRACE_TYPE_NO_CONSUME)
6585                         trace_consume(iter);
6586                 rem -= count;
6587                 if (!trace_find_next_entry_inc(iter))   {
6588                         rem = 0;
6589                         iter->ent = NULL;
6590                         break;
6591                 }
6592         }
6593
6594         return rem;
6595 }
6596
6597 static ssize_t tracing_splice_read_pipe(struct file *filp,
6598                                         loff_t *ppos,
6599                                         struct pipe_inode_info *pipe,
6600                                         size_t len,
6601                                         unsigned int flags)
6602 {
6603         struct page *pages_def[PIPE_DEF_BUFFERS];
6604         struct partial_page partial_def[PIPE_DEF_BUFFERS];
6605         struct trace_iterator *iter = filp->private_data;
6606         struct splice_pipe_desc spd = {
6607                 .pages          = pages_def,
6608                 .partial        = partial_def,
6609                 .nr_pages       = 0, /* This gets updated below. */
6610                 .nr_pages_max   = PIPE_DEF_BUFFERS,
6611                 .ops            = &default_pipe_buf_ops,
6612                 .spd_release    = tracing_spd_release_pipe,
6613         };
6614         ssize_t ret;
6615         size_t rem;
6616         unsigned int i;
6617
6618         if (splice_grow_spd(pipe, &spd))
6619                 return -ENOMEM;
6620
6621         mutex_lock(&iter->mutex);
6622
6623         if (iter->trace->splice_read) {
6624                 ret = iter->trace->splice_read(iter, filp,
6625                                                ppos, pipe, len, flags);
6626                 if (ret)
6627                         goto out_err;
6628         }
6629
6630         ret = tracing_wait_pipe(filp);
6631         if (ret <= 0)
6632                 goto out_err;
6633
6634         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6635                 ret = -EFAULT;
6636                 goto out_err;
6637         }
6638
6639         trace_event_read_lock();
6640         trace_access_lock(iter->cpu_file);
6641
6642         /* Fill as many pages as possible. */
6643         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6644                 spd.pages[i] = alloc_page(GFP_KERNEL);
6645                 if (!spd.pages[i])
6646                         break;
6647
6648                 rem = tracing_fill_pipe_page(rem, iter);
6649
6650                 /* Copy the data into the page, so we can start over. */
6651                 ret = trace_seq_to_buffer(&iter->seq,
6652                                           page_address(spd.pages[i]),
6653                                           trace_seq_used(&iter->seq));
6654                 if (ret < 0) {
6655                         __free_page(spd.pages[i]);
6656                         break;
6657                 }
6658                 spd.partial[i].offset = 0;
6659                 spd.partial[i].len = trace_seq_used(&iter->seq);
6660
6661                 trace_seq_init(&iter->seq);
6662         }
6663
6664         trace_access_unlock(iter->cpu_file);
6665         trace_event_read_unlock();
6666         mutex_unlock(&iter->mutex);
6667
6668         spd.nr_pages = i;
6669
6670         if (i)
6671                 ret = splice_to_pipe(pipe, &spd);
6672         else
6673                 ret = 0;
6674 out:
6675         splice_shrink_spd(&spd);
6676         return ret;
6677
6678 out_err:
6679         mutex_unlock(&iter->mutex);
6680         goto out;
6681 }
6682
6683 static ssize_t
6684 tracing_entries_read(struct file *filp, char __user *ubuf,
6685                      size_t cnt, loff_t *ppos)
6686 {
6687         struct inode *inode = file_inode(filp);
6688         struct trace_array *tr = inode->i_private;
6689         int cpu = tracing_get_cpu(inode);
6690         char buf[64];
6691         int r = 0;
6692         ssize_t ret;
6693
6694         mutex_lock(&trace_types_lock);
6695
6696         if (cpu == RING_BUFFER_ALL_CPUS) {
6697                 int cpu, buf_size_same;
6698                 unsigned long size;
6699
6700                 size = 0;
6701                 buf_size_same = 1;
6702                 /* check if all cpu sizes are same */
6703                 for_each_tracing_cpu(cpu) {
6704                         /* fill in the size from first enabled cpu */
6705                         if (size == 0)
6706                                 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6707                         if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6708                                 buf_size_same = 0;
6709                                 break;
6710                         }
6711                 }
6712
6713                 if (buf_size_same) {
6714                         if (!tr->ring_buffer_expanded)
6715                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
6716                                             size >> 10,
6717                                             trace_buf_size >> 10);
6718                         else
6719                                 r = sprintf(buf, "%lu\n", size >> 10);
6720                 } else
6721                         r = sprintf(buf, "X\n");
6722         } else
6723                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6724
6725         mutex_unlock(&trace_types_lock);
6726
6727         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6728         return ret;
6729 }
6730
6731 static ssize_t
6732 tracing_entries_write(struct file *filp, const char __user *ubuf,
6733                       size_t cnt, loff_t *ppos)
6734 {
6735         struct inode *inode = file_inode(filp);
6736         struct trace_array *tr = inode->i_private;
6737         unsigned long val;
6738         int ret;
6739
6740         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6741         if (ret)
6742                 return ret;
6743
6744         /* must have at least 1 entry */
6745         if (!val)
6746                 return -EINVAL;
6747
6748         /* value is in KB */
6749         val <<= 10;
6750         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6751         if (ret < 0)
6752                 return ret;
6753
6754         *ppos += cnt;
6755
6756         return cnt;
6757 }
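
/*
 * A minimal usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and tracing_entries_fops is exposed as the usual "buffer_size_kb" file.
 * The written value is in kilobytes per CPU (note the "val <<= 10" above):
 *
 *	echo 8192 > /sys/kernel/tracing/buffer_size_kb
 *	echo 4096 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 *
 * The first line resizes every CPU buffer to 8 MB; the per-CPU variant of
 * the file resizes only that CPU.
 */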
6758
6759 static ssize_t
6760 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6761                                 size_t cnt, loff_t *ppos)
6762 {
6763         struct trace_array *tr = filp->private_data;
6764         char buf[64];
6765         int r, cpu;
6766         unsigned long size = 0, expanded_size = 0;
6767
6768         mutex_lock(&trace_types_lock);
6769         for_each_tracing_cpu(cpu) {
6770                 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6771                 if (!tr->ring_buffer_expanded)
6772                         expanded_size += trace_buf_size >> 10;
6773         }
6774         if (tr->ring_buffer_expanded)
6775                 r = sprintf(buf, "%lu\n", size);
6776         else
6777                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6778         mutex_unlock(&trace_types_lock);
6779
6780         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6781 }
6782
6783 static ssize_t
6784 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6785                           size_t cnt, loff_t *ppos)
6786 {
6787         /*
6788          * There is no need to read what the user has written; this function
6789          * just makes sure that there is no error when "echo" is used.
6790          */
6791
6792         *ppos += cnt;
6793
6794         return cnt;
6795 }
6796
6797 static int
6798 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6799 {
6800         struct trace_array *tr = inode->i_private;
6801
6802         /* Disable tracing if requested */
6803         if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6804                 tracer_tracing_off(tr);
6805         /* resize the ring buffer to 0 */
6806         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6807
6808         trace_array_put(tr);
6809
6810         return 0;
6811 }
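
/*
 * A short usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and the handlers above are exposed as the usual "free_buffer" file. The
 * ring buffer is shrunk to zero when the file is closed (and tracing is
 * stopped first if the STOP_ON_FREE trace flag is set), so a typical use is
 * to keep the file open for the lifetime of a tracing session:
 *
 *	exec 9>/sys/kernel/tracing/free_buffer
 *	... run and read the trace ...
 *	exec 9>&-	(closing the descriptor frees the buffer)
 */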
6812
6813 #define TRACE_MARKER_MAX_SIZE           4096
6814
6815 static ssize_t
6816 tracing_mark_write(struct file *filp, const char __user *ubuf,
6817                                         size_t cnt, loff_t *fpos)
6818 {
6819         struct trace_array *tr = filp->private_data;
6820         struct ring_buffer_event *event;
6821         enum event_trigger_type tt = ETT_NONE;
6822         struct trace_buffer *buffer;
6823         struct print_entry *entry;
6824         int meta_size;
6825         ssize_t written;
6826         size_t size;
6827         int len;
6828
6829 /* Used in tracing_mark_raw_write() as well */
6830 #define FAULTED_STR "<faulted>"
6831 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6832
6833         if (tracing_disabled)
6834                 return -EINVAL;
6835
6836         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6837                 return -EINVAL;
6838
6839         if ((ssize_t)cnt < 0)
6840                 return -EINVAL;
6841
6842         if (cnt > TRACE_MARKER_MAX_SIZE)
6843                 cnt = TRACE_MARKER_MAX_SIZE;
6844
6845         meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
6846  again:
6847         size = cnt + meta_size;
6848
6849         /* If less than "<faulted>", then make sure we can still add that */
6850         if (cnt < FAULTED_SIZE)
6851                 size += FAULTED_SIZE - cnt;
6852
6853         buffer = tr->array_buffer.buffer;
6854         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6855                                             tracing_gen_ctx());
6856         if (unlikely(!event)) {
6857                 /*
6858                  * If the size was greater than what was allowed, then
6859                  * make it smaller and try again.
6860                  */
6861                 if (size > ring_buffer_max_event_size(buffer)) {
6862                         /* A cnt smaller than FAULTED_SIZE should never exceed the max */
6863                         if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6864                                 return -EBADF;
6865                         cnt = ring_buffer_max_event_size(buffer) - meta_size;
6866                         /* The above should only happen once */
6867                         if (WARN_ON_ONCE(cnt + meta_size == size))
6868                                 return -EBADF;
6869                         goto again;
6870                 }
6871
6872                 /* Ring buffer disabled, return as if not open for write */
6873                 return -EBADF;
6874         }
6875
6876         entry = ring_buffer_event_data(event);
6877         entry->ip = _THIS_IP_;
6878
6879         len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6880         if (len) {
6881                 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6882                 cnt = FAULTED_SIZE;
6883                 written = -EFAULT;
6884         } else
6885                 written = cnt;
6886
6887         if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6888                 /* do not add \n before testing triggers, but add \0 */
6889                 entry->buf[cnt] = '\0';
6890                 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6891         }
6892
6893         if (entry->buf[cnt - 1] != '\n') {
6894                 entry->buf[cnt] = '\n';
6895                 entry->buf[cnt + 1] = '\0';
6896         } else
6897                 entry->buf[cnt] = '\0';
6898
6899         if (static_branch_unlikely(&trace_marker_exports_enabled))
6900                 ftrace_exports(event, TRACE_EXPORT_MARKER);
6901         __buffer_unlock_commit(buffer, event);
6902
6903         if (tt)
6904                 event_triggers_post_call(tr->trace_marker_file, tt);
6905
6906         return written;
6907 }
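
/*
 * A minimal usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and tracing_mark_fops is exposed as the usual "trace_marker" file.
 * Userspace annotates the trace by writing plain text; the write shows up
 * as a print event, with a trailing newline added above if the writer did
 * not supply one:
 *
 *	echo "starting phase 2" > /sys/kernel/tracing/trace_marker
 */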
6908
6909 static ssize_t
6910 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6911                                         size_t cnt, loff_t *fpos)
6912 {
6913         struct trace_array *tr = filp->private_data;
6914         struct ring_buffer_event *event;
6915         struct trace_buffer *buffer;
6916         struct raw_data_entry *entry;
6917         ssize_t written;
6918         int size;
6919         int len;
6920
6921 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6922
6923         if (tracing_disabled)
6924                 return -EINVAL;
6925
6926         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6927                 return -EINVAL;
6928
6929         /* The marker must at least have a tag id */
6930         if (cnt < sizeof(unsigned int))
6931                 return -EINVAL;
6932
6933         size = sizeof(*entry) + cnt;
6934         if (cnt < FAULT_SIZE_ID)
6935                 size += FAULT_SIZE_ID - cnt;
6936
6937         buffer = tr->array_buffer.buffer;
6938
6939         if (size > ring_buffer_max_event_size(buffer))
6940                 return -EINVAL;
6941
6942         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6943                                             tracing_gen_ctx());
6944         if (!event)
6945                 /* Ring buffer disabled, return as if not open for write */
6946                 return -EBADF;
6947
6948         entry = ring_buffer_event_data(event);
6949
6950         len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6951         if (len) {
6952                 entry->id = -1;
6953                 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6954                 written = -EFAULT;
6955         } else
6956                 written = cnt;
6957
6958         __buffer_unlock_commit(buffer, event);
6959
6960         return written;
6961 }
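
/*
 * A minimal userspace sketch for the raw marker above (illustrative only,
 * assuming the file is exposed as the usual "trace_marker_raw"): the payload
 * must start with an unsigned int tag id, followed by the raw bytes:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	write(fd, &rec, sizeof(rec));
 */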
6962
6963 static int tracing_clock_show(struct seq_file *m, void *v)
6964 {
6965         struct trace_array *tr = m->private;
6966         int i;
6967
6968         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6969                 seq_printf(m,
6970                         "%s%s%s%s", i ? " " : "",
6971                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6972                         i == tr->clock_id ? "]" : "");
6973         seq_putc(m, '\n');
6974
6975         return 0;
6976 }
6977
6978 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6979 {
6980         int i;
6981
6982         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6983                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6984                         break;
6985         }
6986         if (i == ARRAY_SIZE(trace_clocks))
6987                 return -EINVAL;
6988
6989         mutex_lock(&trace_types_lock);
6990
6991         tr->clock_id = i;
6992
6993         ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6994
6995         /*
6996          * New clock may not be consistent with the previous clock.
6997          * Reset the buffer so that it doesn't have incomparable timestamps.
6998          */
6999         tracing_reset_online_cpus(&tr->array_buffer);
7000
7001 #ifdef CONFIG_TRACER_MAX_TRACE
7002         if (tr->max_buffer.buffer)
7003                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7004         tracing_reset_online_cpus(&tr->max_buffer);
7005 #endif
7006
7007         mutex_unlock(&trace_types_lock);
7008
7009         return 0;
7010 }
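
/*
 * A short usage sketch, assuming tracefs is mounted at /sys/kernel/tracing
 * and trace_clock_fops is exposed as the usual "trace_clock" file. Reading
 * lists the available clocks with the current one in brackets; writing a
 * name switches to it. As noted above, switching clocks resets the buffers,
 * so existing entries are lost:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 */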
7011
7012 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7013                                    size_t cnt, loff_t *fpos)
7014 {
7015         struct seq_file *m = filp->private_data;
7016         struct trace_array *tr = m->private;
7017         char buf[64];
7018         const char *clockstr;
7019         int ret;
7020
7021         if (cnt >= sizeof(buf))
7022                 return -EINVAL;
7023
7024         if (copy_from_user(buf, ubuf, cnt))
7025                 return -EFAULT;
7026
7027         buf[cnt] = 0;
7028
7029         clockstr = strstrip(buf);
7030
7031         ret = tracing_set_clock(tr, clockstr);
7032         if (ret)
7033                 return ret;
7034
7035         *fpos += cnt;
7036
7037         return cnt;
7038 }
7039
7040 static int tracing_clock_open(struct inode *inode, struct file *file)
7041 {
7042         struct trace_array *tr = inode->i_private;
7043         int ret;
7044
7045         ret = tracing_check_open_get_tr(tr);
7046         if (ret)
7047                 return ret;
7048
7049         ret = single_open(file, tracing_clock_show, inode->i_private);
7050         if (ret < 0)
7051                 trace_array_put(tr);
7052
7053         return ret;
7054 }
7055
7056 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7057 {
7058         struct trace_array *tr = m->private;
7059
7060         mutex_lock(&trace_types_lock);
7061
7062         if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7063                 seq_puts(m, "delta [absolute]\n");
7064         else
7065                 seq_puts(m, "[delta] absolute\n");
7066
7067         mutex_unlock(&trace_types_lock);
7068
7069         return 0;
7070 }
7071
7072 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7073 {
7074         struct trace_array *tr = inode->i_private;
7075         int ret;
7076
7077         ret = tracing_check_open_get_tr(tr);
7078         if (ret)
7079                 return ret;
7080
7081         ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7082         if (ret < 0)
7083                 trace_array_put(tr);
7084
7085         return ret;
7086 }
7087
7088 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7089 {
7090         if (rbe == this_cpu_read(trace_buffered_event))
7091                 return ring_buffer_time_stamp(buffer);
7092
7093         return ring_buffer_event_time_stamp(buffer, rbe);
7094 }
7095
7096 /*
7097  * Set or disable using the per CPU trace_buffered_event when possible.
7098  */
7099 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7100 {
7101         int ret = 0;
7102
7103         mutex_lock(&trace_types_lock);
7104
7105         if (set && tr->no_filter_buffering_ref++)
7106                 goto out;
7107
7108         if (!set) {
7109                 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7110                         ret = -EINVAL;
7111                         goto out;
7112                 }
7113
7114                 --tr->no_filter_buffering_ref;
7115         }
7116  out:
7117         mutex_unlock(&trace_types_lock);
7118
7119         return ret;
7120 }
7121
7122 struct ftrace_buffer_info {
7123         struct trace_iterator   iter;
7124         void                    *spare;
7125         unsigned int            spare_cpu;
7126         unsigned int            spare_size;
7127         unsigned int            read;
7128 };
7129
7130 #ifdef CONFIG_TRACER_SNAPSHOT
7131 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7132 {
7133         struct trace_array *tr = inode->i_private;
7134         struct trace_iterator *iter;
7135         struct seq_file *m;
7136         int ret;
7137
7138         ret = tracing_check_open_get_tr(tr);
7139         if (ret)
7140                 return ret;
7141
7142         if (file->f_mode & FMODE_READ) {
7143                 iter = __tracing_open(inode, file, true);
7144                 if (IS_ERR(iter))
7145                         ret = PTR_ERR(iter);
7146         } else {
7147                 /* Writes still need the seq_file to hold the private data */
7148                 ret = -ENOMEM;
7149                 m = kzalloc(sizeof(*m), GFP_KERNEL);
7150                 if (!m)
7151                         goto out;
7152                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7153                 if (!iter) {
7154                         kfree(m);
7155                         goto out;
7156                 }
7157                 ret = 0;
7158
7159                 iter->tr = tr;
7160                 iter->array_buffer = &tr->max_buffer;
7161                 iter->cpu_file = tracing_get_cpu(inode);
7162                 m->private = iter;
7163                 file->private_data = m;
7164         }
7165 out:
7166         if (ret < 0)
7167                 trace_array_put(tr);
7168
7169         return ret;
7170 }
7171
7172 static void tracing_swap_cpu_buffer(void *tr)
7173 {
7174         update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7175 }
7176
7177 static ssize_t
7178 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7179                        loff_t *ppos)
7180 {
7181         struct seq_file *m = filp->private_data;
7182         struct trace_iterator *iter = m->private;
7183         struct trace_array *tr = iter->tr;
7184         unsigned long val;
7185         int ret;
7186
7187         ret = tracing_update_buffers(tr);
7188         if (ret < 0)
7189                 return ret;
7190
7191         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7192         if (ret)
7193                 return ret;
7194
7195         mutex_lock(&trace_types_lock);
7196
7197         if (tr->current_trace->use_max_tr) {
7198                 ret = -EBUSY;
7199                 goto out;
7200         }
7201
7202         local_irq_disable();
7203         arch_spin_lock(&tr->max_lock);
7204         if (tr->cond_snapshot)
7205                 ret = -EBUSY;
7206         arch_spin_unlock(&tr->max_lock);
7207         local_irq_enable();
7208         if (ret)
7209                 goto out;
7210
7211         switch (val) {
7212         case 0:
7213                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7214                         ret = -EINVAL;
7215                         break;
7216                 }
7217                 if (tr->allocated_snapshot)
7218                         free_snapshot(tr);
7219                 break;
7220         case 1:
7221 /* Only allow per-cpu swap if the ring buffer supports it */
7222 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7223                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7224                         ret = -EINVAL;
7225                         break;
7226                 }
7227 #endif
7228                 if (tr->allocated_snapshot)
7229                         ret = resize_buffer_duplicate_size(&tr->max_buffer,
7230                                         &tr->array_buffer, iter->cpu_file);
7231                 else
7232                         ret = tracing_alloc_snapshot_instance(tr);
7233                 if (ret < 0)
7234                         break;
7235                 /* Now, we're going to swap */
7236                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7237                         local_irq_disable();
7238                         update_max_tr(tr, current, smp_processor_id(), NULL);
7239                         local_irq_enable();
7240                 } else {
7241                         smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7242                                                  (void *)tr, 1);
7243                 }
7244                 break;
7245         default:
7246                 if (tr->allocated_snapshot) {
7247                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7248                                 tracing_reset_online_cpus(&tr->max_buffer);
7249                         else
7250                                 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7251                 }
7252                 break;
7253         }
7254
7255         if (ret >= 0) {
7256                 *ppos += cnt;
7257                 ret = cnt;
7258         }
7259 out:
7260         mutex_unlock(&trace_types_lock);
7261         return ret;
7262 }
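
/*
 * A brief usage sketch, assuming CONFIG_TRACER_SNAPSHOT is enabled, tracefs
 * is mounted at /sys/kernel/tracing, and snapshot_fops is exposed as the
 * usual "snapshot" file. The values map to the switch statement above:
 *
 *	echo 1 > /sys/kernel/tracing/snapshot	(allocate if needed and swap)
 *	cat /sys/kernel/tracing/snapshot	(read the snapshotted buffer)
 *	echo 2 > /sys/kernel/tracing/snapshot	(clear the snapshot contents)
 *	echo 0 > /sys/kernel/tracing/snapshot	(free the snapshot buffer)
 */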
7263
7264 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7265 {
7266         struct seq_file *m = file->private_data;
7267         int ret;
7268
7269         ret = tracing_release(inode, file);
7270
7271         if (file->f_mode & FMODE_READ)
7272                 return ret;
7273
7274         /* If write only, the seq_file is just a stub */
7275         if (m)
7276                 kfree(m->private);
7277         kfree(m);
7278
7279         return 0;
7280 }
7281
7282 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7283 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7284                                     size_t count, loff_t *ppos);
7285 static int tracing_buffers_release(struct inode *inode, struct file *file);
7286 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7287                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7288
7289 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7290 {
7291         struct ftrace_buffer_info *info;
7292         int ret;
7293
7294         /* The following checks for tracefs lockdown */
7295         ret = tracing_buffers_open(inode, filp);
7296         if (ret < 0)
7297                 return ret;
7298
7299         info = filp->private_data;
7300
7301         if (info->iter.trace->use_max_tr) {
7302                 tracing_buffers_release(inode, filp);
7303                 return -EBUSY;
7304         }
7305
7306         info->iter.snapshot = true;
7307         info->iter.array_buffer = &info->iter.tr->max_buffer;
7308
7309         return ret;
7310 }
7311
7312 #endif /* CONFIG_TRACER_SNAPSHOT */
7313
7314
7315 static const struct file_operations tracing_thresh_fops = {
7316         .open           = tracing_open_generic,
7317         .read           = tracing_thresh_read,
7318         .write          = tracing_thresh_write,
7319         .llseek         = generic_file_llseek,
7320 };
7321
7322 #ifdef CONFIG_TRACER_MAX_TRACE
7323 static const struct file_operations tracing_max_lat_fops = {
7324         .open           = tracing_open_generic_tr,
7325         .read           = tracing_max_lat_read,
7326         .write          = tracing_max_lat_write,
7327         .llseek         = generic_file_llseek,
7328         .release        = tracing_release_generic_tr,
7329 };
7330 #endif
7331
7332 static const struct file_operations set_tracer_fops = {
7333         .open           = tracing_open_generic_tr,
7334         .read           = tracing_set_trace_read,
7335         .write          = tracing_set_trace_write,
7336         .llseek         = generic_file_llseek,
7337         .release        = tracing_release_generic_tr,
7338 };
7339
7340 static const struct file_operations tracing_pipe_fops = {
7341         .open           = tracing_open_pipe,
7342         .poll           = tracing_poll_pipe,
7343         .read           = tracing_read_pipe,
7344         .splice_read    = tracing_splice_read_pipe,
7345         .release        = tracing_release_pipe,
7346         .llseek         = no_llseek,
7347 };
7348
7349 static const struct file_operations tracing_entries_fops = {
7350         .open           = tracing_open_generic_tr,
7351         .read           = tracing_entries_read,
7352         .write          = tracing_entries_write,
7353         .llseek         = generic_file_llseek,
7354         .release        = tracing_release_generic_tr,
7355 };
7356
7357 static const struct file_operations tracing_total_entries_fops = {
7358         .open           = tracing_open_generic_tr,
7359         .read           = tracing_total_entries_read,
7360         .llseek         = generic_file_llseek,
7361         .release        = tracing_release_generic_tr,
7362 };
7363
7364 static const struct file_operations tracing_free_buffer_fops = {
7365         .open           = tracing_open_generic_tr,
7366         .write          = tracing_free_buffer_write,
7367         .release        = tracing_free_buffer_release,
7368 };
7369
7370 static const struct file_operations tracing_mark_fops = {
7371         .open           = tracing_mark_open,
7372         .write          = tracing_mark_write,
7373         .release        = tracing_release_generic_tr,
7374 };
7375
7376 static const struct file_operations tracing_mark_raw_fops = {
7377         .open           = tracing_mark_open,
7378         .write          = tracing_mark_raw_write,
7379         .release        = tracing_release_generic_tr,
7380 };
7381
7382 static const struct file_operations trace_clock_fops = {
7383         .open           = tracing_clock_open,
7384         .read           = seq_read,
7385         .llseek         = seq_lseek,
7386         .release        = tracing_single_release_tr,
7387         .write          = tracing_clock_write,
7388 };
7389
7390 static const struct file_operations trace_time_stamp_mode_fops = {
7391         .open           = tracing_time_stamp_mode_open,
7392         .read           = seq_read,
7393         .llseek         = seq_lseek,
7394         .release        = tracing_single_release_tr,
7395 };
7396
7397 #ifdef CONFIG_TRACER_SNAPSHOT
7398 static const struct file_operations snapshot_fops = {
7399         .open           = tracing_snapshot_open,
7400         .read           = seq_read,
7401         .write          = tracing_snapshot_write,
7402         .llseek         = tracing_lseek,
7403         .release        = tracing_snapshot_release,
7404 };
7405
7406 static const struct file_operations snapshot_raw_fops = {
7407         .open           = snapshot_raw_open,
7408         .read           = tracing_buffers_read,
7409         .release        = tracing_buffers_release,
7410         .splice_read    = tracing_buffers_splice_read,
7411         .llseek         = no_llseek,
7412 };
7413
7414 #endif /* CONFIG_TRACER_SNAPSHOT */
7415
7416 /*
7417  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7418  * @filp: The active open file structure
7419  * @ubuf: The userspace provided buffer containing the value to write
7420  * @cnt: The number of bytes to read from the buffer
7421  * @ppos: The current "file" position
7422  *
7423  * This function implements the write interface for a struct trace_min_max_param.
7424  * The filp->private_data must point to a trace_min_max_param structure that
7425  * defines where to write the value, the min and the max acceptable values,
7426  * and a lock to protect the write.
7427  */
7428 static ssize_t
7429 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7430 {
7431         struct trace_min_max_param *param = filp->private_data;
7432         u64 val;
7433         int err;
7434
7435         if (!param)
7436                 return -EFAULT;
7437
7438         err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7439         if (err)
7440                 return err;
7441
7442         if (param->lock)
7443                 mutex_lock(param->lock);
7444
7445         if (param->min && val < *param->min)
7446                 err = -EINVAL;
7447
7448         if (param->max && val > *param->max)
7449                 err = -EINVAL;
7450
7451         if (!err)
7452                 *param->val = val;
7453
7454         if (param->lock)
7455                 mutex_unlock(param->lock);
7456
7457         if (err)
7458                 return err;
7459
7460         return cnt;
7461 }
7462
7463 /*
7464  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7465  * @filp: The active open file structure
7466  * @ubuf: The userspace provided buffer to read value into
7467  * @cnt: The maximum number of bytes to read
7468  * @ppos: The current "file" position
7469  *
7470  * This function implements the read interface for a struct trace_min_max_param.
7471  * The filp->private_data must point to a trace_min_max_param struct with valid
7472  * data.
7473  */
7474 static ssize_t
7475 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7476 {
7477         struct trace_min_max_param *param = filp->private_data;
7478         char buf[U64_STR_SIZE];
7479         int len;
7480         u64 val;
7481
7482         if (!param)
7483                 return -EFAULT;
7484
7485         val = *param->val;
7486
7487         if (cnt > sizeof(buf))
7488                 cnt = sizeof(buf);
7489
7490         len = snprintf(buf, sizeof(buf), "%llu\n", val);
7491
7492         return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7493 }
7494
7495 const struct file_operations trace_min_max_fops = {
7496         .open           = tracing_open_generic,
7497         .read           = trace_min_max_read,
7498         .write          = trace_min_max_write,
7499 };
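
/*
 * A minimal sketch of how a trace_min_max_param might be wired up (the names
 * below are illustrative, not defined in this file; the struct itself lives
 * in trace.h, with the lock/val/min/max members dereferenced by the handlers
 * above):
 *
 *	static u64 my_val = 50, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_mutex,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 * The corresponding tracefs file would then be created with &my_param as its
 * private data and trace_min_max_fops as its file operations.
 */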
7500
7501 #define TRACING_LOG_ERRS_MAX    8
7502 #define TRACING_LOG_LOC_MAX     128
7503
7504 #define CMD_PREFIX "  Command: "
7505
7506 struct err_info {
7507         const char      **errs; /* ptr to loc-specific array of err strings */
7508         u8              type;   /* index into errs -> specific err string */
7509         u16             pos;    /* caret position */
7510         u64             ts;
7511 };
7512
7513 struct tracing_log_err {
7514         struct list_head        list;
7515         struct err_info         info;
7516         char                    loc[TRACING_LOG_LOC_MAX]; /* err location */
7517         char                    *cmd;                     /* what caused err */
7518 };
7519
7520 static DEFINE_MUTEX(tracing_err_log_lock);
7521
7522 static struct tracing_log_err *alloc_tracing_log_err(int len)
7523 {
7524         struct tracing_log_err *err;
7525
7526         err = kzalloc(sizeof(*err), GFP_KERNEL);
7527         if (!err)
7528                 return ERR_PTR(-ENOMEM);
7529
7530         err->cmd = kzalloc(len, GFP_KERNEL);
7531         if (!err->cmd) {
7532                 kfree(err);
7533                 return ERR_PTR(-ENOMEM);
7534         }
7535
7536         return err;
7537 }
7538
7539 static void free_tracing_log_err(struct tracing_log_err *err)
7540 {
7541         kfree(err->cmd);
7542         kfree(err);
7543 }
7544
7545 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7546                                                    int len)
7547 {
7548         struct tracing_log_err *err;
7549         char *cmd;
7550
7551         if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7552                 err = alloc_tracing_log_err(len);
7553                 if (PTR_ERR(err) != -ENOMEM)
7554                         tr->n_err_log_entries++;
7555
7556                 return err;
7557         }
7558         cmd = kzalloc(len, GFP_KERNEL);
7559         if (!cmd)
7560                 return ERR_PTR(-ENOMEM);
7561         err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7562         kfree(err->cmd);
7563         err->cmd = cmd;
7564         list_del(&err->list);
7565
7566         return err;
7567 }
7568
7569 /**
7570  * err_pos - find the position of a string within a command for error careting
7571  * @cmd: The tracing command that caused the error
7572  * @str: The string to position the caret at within @cmd
7573  *
7574  * Finds the position of the first occurrence of @str within @cmd.  The
7575  * return value can be passed to tracing_log_err() for caret placement
7576  * within @cmd.
7577  *
7578  * Returns the index within @cmd of the first occurrence of @str or 0
7579  * if @str was not found.
7580  */
7581 unsigned int err_pos(char *cmd, const char *str)
7582 {
7583         char *found;
7584
7585         if (WARN_ON(!strlen(cmd)))
7586                 return 0;
7587
7588         found = strstr(cmd, str);
7589         if (found)
7590                 return found - cmd;
7591
7592         return 0;
7593 }
7594
7595 /**
7596  * tracing_log_err - write an error to the tracing error log
7597  * @tr: The associated trace array for the error (NULL for top level array)
7598  * @loc: A string describing where the error occurred
7599  * @cmd: The tracing command that caused the error
7600  * @errs: The array of loc-specific static error strings
7601  * @type: The index into errs[], which produces the specific static err string
7602  * @pos: The position the caret should be placed in the cmd
7603  *
7604  * Writes an error into tracing/error_log of the form:
7605  *
7606  * <loc>: error: <text>
7607  *   Command: <cmd>
7608  *              ^
7609  *
7610  * tracing/error_log is a small log file containing the last
7611  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7612  * unless there has been a tracing error, and the error log can be
7613  * cleared and have its memory freed by writing the empty string in
7614  * truncation mode to it i.e. echo > tracing/error_log.
7615  *
7616  * NOTE: the @errs array along with the @type param are used to
7617  * produce a static error string - this string is not copied and saved
7618  * when the error is logged - only a pointer to it is saved.  See
7619  * existing callers for examples of how static strings are typically
7620  * defined for use with tracing_log_err().
7621  */
7622 void tracing_log_err(struct trace_array *tr,
7623                      const char *loc, const char *cmd,
7624                      const char **errs, u8 type, u16 pos)
7625 {
7626         struct tracing_log_err *err;
7627         int len = 0;
7628
7629         if (!tr)
7630                 tr = &global_trace;
7631
7632         len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7633
7634         mutex_lock(&tracing_err_log_lock);
7635         err = get_tracing_log_err(tr, len);
7636         if (PTR_ERR(err) == -ENOMEM) {
7637                 mutex_unlock(&tracing_err_log_lock);
7638                 return;
7639         }
7640
7641         snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7642         snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7643
7644         err->info.errs = errs;
7645         err->info.type = type;
7646         err->info.pos = pos;
7647         err->info.ts = local_clock();
7648
7649         list_add_tail(&err->list, &tr->err_log);
7650         mutex_unlock(&tracing_err_log_lock);
7651 }
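
/*
 * A minimal sketch of the typical caller pattern (names below are
 * illustrative, not defined in this file): callers keep a static array of
 * error strings, select one by index, and use err_pos() to place the caret
 * within the failing command:
 *
 *	static const char *my_errs[] = { "Duplicate name", "Invalid field" };
 *	enum { ERR_DUP_NAME, ERR_BAD_FIELD };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, ERR_BAD_FIELD,
 *			err_pos(cmd, field_str));
 */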
7652
7653 static void clear_tracing_err_log(struct trace_array *tr)
7654 {
7655         struct tracing_log_err *err, *next;
7656
7657         mutex_lock(&tracing_err_log_lock);
7658         list_for_each_entry_safe(err, next, &tr->err_log, list) {
7659                 list_del(&err->list);
7660                 free_tracing_log_err(err);
7661         }
7662
7663         tr->n_err_log_entries = 0;
7664         mutex_unlock(&tracing_err_log_lock);
7665 }
7666
7667 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7668 {
7669         struct trace_array *tr = m->private;
7670
7671         mutex_lock(&tracing_err_log_lock);
7672
7673         return seq_list_start(&tr->err_log, *pos);
7674 }
7675
7676 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7677 {
7678         struct trace_array *tr = m->private;
7679
7680         return seq_list_next(v, &tr->err_log, pos);
7681 }
7682
7683 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7684 {
7685         mutex_unlock(&tracing_err_log_lock);
7686 }
7687
7688 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7689 {
7690         u16 i;
7691
7692         for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7693                 seq_putc(m, ' ');
7694         for (i = 0; i < pos; i++)
7695                 seq_putc(m, ' ');
7696         seq_puts(m, "^\n");
7697 }
7698
7699 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7700 {
7701         struct tracing_log_err *err = v;
7702
7703         if (err) {
7704                 const char *err_text = err->info.errs[err->info.type];
7705                 u64 sec = err->info.ts;
7706                 u32 nsec;
7707
7708                 nsec = do_div(sec, NSEC_PER_SEC);
7709                 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7710                            err->loc, err_text);
7711                 seq_printf(m, "%s", err->cmd);
7712                 tracing_err_log_show_pos(m, err->info.pos);
7713         }
7714
7715         return 0;
7716 }
7717
7718 static const struct seq_operations tracing_err_log_seq_ops = {
7719         .start  = tracing_err_log_seq_start,
7720         .next   = tracing_err_log_seq_next,
7721         .stop   = tracing_err_log_seq_stop,
7722         .show   = tracing_err_log_seq_show
7723 };
7724
7725 static int tracing_err_log_open(struct inode *inode, struct file *file)
7726 {
7727         struct trace_array *tr = inode->i_private;
7728         int ret = 0;
7729
7730         ret = tracing_check_open_get_tr(tr);
7731         if (ret)
7732                 return ret;
7733
7734         /* If this file was opened for write, then erase contents */
7735         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7736                 clear_tracing_err_log(tr);
7737
7738         if (file->f_mode & FMODE_READ) {
7739                 ret = seq_open(file, &tracing_err_log_seq_ops);
7740                 if (!ret) {
7741                         struct seq_file *m = file->private_data;
7742                         m->private = tr;
7743                 } else {
7744                         trace_array_put(tr);
7745                 }
7746         }
7747         return ret;
7748 }
7749
7750 static ssize_t tracing_err_log_write(struct file *file,
7751                                      const char __user *buffer,
7752                                      size_t count, loff_t *ppos)
7753 {
7754         return count;
7755 }
7756
7757 static int tracing_err_log_release(struct inode *inode, struct file *file)
7758 {
7759         struct trace_array *tr = inode->i_private;
7760
7761         trace_array_put(tr);
7762
7763         if (file->f_mode & FMODE_READ)
7764                 seq_release(inode, file);
7765
7766         return 0;
7767 }
7768
7769 static const struct file_operations tracing_err_log_fops = {
7770         .open           = tracing_err_log_open,
7771         .write          = tracing_err_log_write,
7772         .read           = seq_read,
7773         .llseek         = tracing_lseek,
7774         .release        = tracing_err_log_release,
7775 };
7776
7777 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7778 {
7779         struct trace_array *tr = inode->i_private;
7780         struct ftrace_buffer_info *info;
7781         int ret;
7782
7783         ret = tracing_check_open_get_tr(tr);
7784         if (ret)
7785                 return ret;
7786
7787         info = kvzalloc(sizeof(*info), GFP_KERNEL);
7788         if (!info) {
7789                 trace_array_put(tr);
7790                 return -ENOMEM;
7791         }
7792
7793         mutex_lock(&trace_types_lock);
7794
7795         info->iter.tr           = tr;
7796         info->iter.cpu_file     = tracing_get_cpu(inode);
7797         info->iter.trace        = tr->current_trace;
7798         info->iter.array_buffer = &tr->array_buffer;
7799         info->spare             = NULL;
7800         /* Force reading ring buffer for first read */
7801         info->read              = (unsigned int)-1;
7802
7803         filp->private_data = info;
7804
7805         tr->trace_ref++;
7806
7807         mutex_unlock(&trace_types_lock);
7808
7809         ret = nonseekable_open(inode, filp);
7810         if (ret < 0)
7811                 trace_array_put(tr);
7812
7813         return ret;
7814 }
7815
7816 static __poll_t
7817 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7818 {
7819         struct ftrace_buffer_info *info = filp->private_data;
7820         struct trace_iterator *iter = &info->iter;
7821
7822         return trace_poll(iter, filp, poll_table);
7823 }
7824
7825 static ssize_t
7826 tracing_buffers_read(struct file *filp, char __user *ubuf,
7827                      size_t count, loff_t *ppos)
7828 {
7829         struct ftrace_buffer_info *info = filp->private_data;
7830         struct trace_iterator *iter = &info->iter;
7831         void *trace_data;
7832         int page_size;
7833         ssize_t ret = 0;
7834         ssize_t size;
7835
7836         if (!count)
7837                 return 0;
7838
7839 #ifdef CONFIG_TRACER_MAX_TRACE
7840         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7841                 return -EBUSY;
7842 #endif
7843
7844         page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7845
7846         /* Make sure the spare matches the current sub buffer size */
7847         if (info->spare) {
7848                 if (page_size != info->spare_size) {
7849                         ring_buffer_free_read_page(iter->array_buffer->buffer,
7850                                                    info->spare_cpu, info->spare);
7851                         info->spare = NULL;
7852                 }
7853         }
7854
7855         if (!info->spare) {
7856                 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7857                                                           iter->cpu_file);
7858                 if (IS_ERR(info->spare)) {
7859                         ret = PTR_ERR(info->spare);
7860                         info->spare = NULL;
7861                 } else {
7862                         info->spare_cpu = iter->cpu_file;
7863                         info->spare_size = page_size;
7864                 }
7865         }
7866         if (!info->spare)
7867                 return ret;
7868
7869         /* Do we have previous read data to read? */
7870         if (info->read < page_size)
7871                 goto read;
7872
7873  again:
7874         trace_access_lock(iter->cpu_file);
7875         ret = ring_buffer_read_page(iter->array_buffer->buffer,
7876                                     info->spare,
7877                                     count,
7878                                     iter->cpu_file, 0);
7879         trace_access_unlock(iter->cpu_file);
7880
7881         if (ret < 0) {
7882                 if (trace_empty(iter)) {
7883                         if ((filp->f_flags & O_NONBLOCK))
7884                                 return -EAGAIN;
7885
7886                         ret = wait_on_pipe(iter, 0);
7887                         if (ret)
7888                                 return ret;
7889
7890                         goto again;
7891                 }
7892                 return 0;
7893         }
7894
7895         info->read = 0;
7896  read:
7897         size = page_size - info->read;
7898         if (size > count)
7899                 size = count;
7900         trace_data = ring_buffer_read_page_data(info->spare);
7901         ret = copy_to_user(ubuf, trace_data + info->read, size);
7902         if (ret == size)
7903                 return -EFAULT;
7904
7905         size -= ret;
7906
7907         *ppos += size;
7908         info->read += size;
7909
7910         return size;
7911 }
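/*
 * Editor's illustrative sketch, not part of the kernel source: a minimal
 * userspace reader of per_cpu/cpuN/trace_pipe_raw built on the read()
 * behaviour above.  It assumes tracefs is mounted at /sys/kernel/tracing;
 * consume_raw() is a hypothetical consumer of the binary sub-buffer data.
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	char buf[4096];
 *	ssize_t r;
 *
 *	// Each read() hands out data from at most one sub-buffer; with
 *	// O_NONBLOCK it fails with EAGAIN when the buffer is empty.
 *	while ((r = read(fd, buf, sizeof(buf))) > 0)
 *		consume_raw(buf, r);
 *	close(fd);
 */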
7912
7913 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
7914 {
7915         struct ftrace_buffer_info *info = file->private_data;
7916         struct trace_iterator *iter = &info->iter;
7917
7918         iter->closed = true;
7919         /* Make sure the waiters see the new wait_index */
7920         (void)atomic_fetch_inc_release(&iter->wait_index);
7921
7922         ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
7923
7924         return 0;
7925 }
7926
7927 static int tracing_buffers_release(struct inode *inode, struct file *file)
7928 {
7929         struct ftrace_buffer_info *info = file->private_data;
7930         struct trace_iterator *iter = &info->iter;
7931
7932         mutex_lock(&trace_types_lock);
7933
7934         iter->tr->trace_ref--;
7935
7936         __trace_array_put(iter->tr);
7937
7938         if (info->spare)
7939                 ring_buffer_free_read_page(iter->array_buffer->buffer,
7940                                            info->spare_cpu, info->spare);
7941         kvfree(info);
7942
7943         mutex_unlock(&trace_types_lock);
7944
7945         return 0;
7946 }
7947
7948 struct buffer_ref {
7949         struct trace_buffer     *buffer;
7950         void                    *page;
7951         int                     cpu;
7952         refcount_t              refcount;
7953 };
7954
7955 static void buffer_ref_release(struct buffer_ref *ref)
7956 {
7957         if (!refcount_dec_and_test(&ref->refcount))
7958                 return;
7959         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7960         kfree(ref);
7961 }
7962
7963 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7964                                     struct pipe_buffer *buf)
7965 {
7966         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7967
7968         buffer_ref_release(ref);
7969         buf->private = 0;
7970 }
7971
7972 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7973                                 struct pipe_buffer *buf)
7974 {
7975         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7976
7977         if (refcount_read(&ref->refcount) > INT_MAX/2)
7978                 return false;
7979
7980         refcount_inc(&ref->refcount);
7981         return true;
7982 }
7983
7984 /* Pipe buffer operations for a buffer. */
7985 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7986         .release                = buffer_pipe_buf_release,
7987         .get                    = buffer_pipe_buf_get,
7988 };
7989
7990 /*
7991  * Callback from splice_to_pipe(), if we need to release some pages
7992  * at the end of the spd in case we errored out in filling the pipe.
7993  */
7994 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7995 {
7996         struct buffer_ref *ref =
7997                 (struct buffer_ref *)spd->partial[i].private;
7998
7999         buffer_ref_release(ref);
8000         spd->partial[i].private = 0;
8001 }
8002
8003 static ssize_t
8004 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8005                             struct pipe_inode_info *pipe, size_t len,
8006                             unsigned int flags)
8007 {
8008         struct ftrace_buffer_info *info = file->private_data;
8009         struct trace_iterator *iter = &info->iter;
8010         struct partial_page partial_def[PIPE_DEF_BUFFERS];
8011         struct page *pages_def[PIPE_DEF_BUFFERS];
8012         struct splice_pipe_desc spd = {
8013                 .pages          = pages_def,
8014                 .partial        = partial_def,
8015                 .nr_pages_max   = PIPE_DEF_BUFFERS,
8016                 .ops            = &buffer_pipe_buf_ops,
8017                 .spd_release    = buffer_spd_release,
8018         };
8019         struct buffer_ref *ref;
8020         bool woken = false;
8021         int page_size;
8022         int entries, i;
8023         ssize_t ret = 0;
8024
8025 #ifdef CONFIG_TRACER_MAX_TRACE
8026         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8027                 return -EBUSY;
8028 #endif
8029
8030         page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8031         if (*ppos & (page_size - 1))
8032                 return -EINVAL;
8033
8034         if (len & (page_size - 1)) {
8035                 if (len < page_size)
8036                         return -EINVAL;
8037                 len &= (~(page_size - 1));
8038         }
8039
8040         if (splice_grow_spd(pipe, &spd))
8041                 return -ENOMEM;
8042
8043  again:
8044         trace_access_lock(iter->cpu_file);
8045         entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8046
8047         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8048                 struct page *page;
8049                 int r;
8050
8051                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8052                 if (!ref) {
8053                         ret = -ENOMEM;
8054                         break;
8055                 }
8056
8057                 refcount_set(&ref->refcount, 1);
8058                 ref->buffer = iter->array_buffer->buffer;
8059                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8060                 if (IS_ERR(ref->page)) {
8061                         ret = PTR_ERR(ref->page);
8062                         ref->page = NULL;
8063                         kfree(ref);
8064                         break;
8065                 }
8066                 ref->cpu = iter->cpu_file;
8067
8068                 r = ring_buffer_read_page(ref->buffer, ref->page,
8069                                           len, iter->cpu_file, 1);
8070                 if (r < 0) {
8071                         ring_buffer_free_read_page(ref->buffer, ref->cpu,
8072                                                    ref->page);
8073                         kfree(ref);
8074                         break;
8075                 }
8076
8077                 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8078
8079                 spd.pages[i] = page;
8080                 spd.partial[i].len = page_size;
8081                 spd.partial[i].offset = 0;
8082                 spd.partial[i].private = (unsigned long)ref;
8083                 spd.nr_pages++;
8084                 *ppos += page_size;
8085
8086                 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8087         }
8088
8089         trace_access_unlock(iter->cpu_file);
8090         spd.nr_pages = i;
8091
8092         /* did we read anything? */
8093         if (!spd.nr_pages) {
8094
8095                 if (ret)
8096                         goto out;
8097
8098                 if (woken)
8099                         goto out;
8100
8101                 ret = -EAGAIN;
8102                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8103                         goto out;
8104
8105                 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8106                 if (ret)
8107                         goto out;
8108
8109                 /* No need to wait after waking up when tracing is off */
8110                 if (!tracer_tracing_is_on(iter->tr))
8111                         goto out;
8112
8113                 /* Iterate one more time to collect any new data then exit */
8114                 woken = true;
8115
8116                 goto again;
8117         }
8118
8119         ret = splice_to_pipe(pipe, &spd);
8120 out:
8121         splice_shrink_spd(&spd);
8122
8123         return ret;
8124 }
8125
8126 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8127 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8128 {
8129         struct ftrace_buffer_info *info = file->private_data;
8130         struct trace_iterator *iter = &info->iter;
8131
8132         if (cmd)
8133                 return -ENOIOCTLCMD;
8134
8135         mutex_lock(&trace_types_lock);
8136
8137         /* Make sure the waiters see the new wait_index */
8138         (void)atomic_fetch_inc_release(&iter->wait_index);
8139
8140         ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8141
8142         mutex_unlock(&trace_types_lock);
8143         return 0;
8144 }
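/*
 * Editor's note, illustration only: per the comment above, a reader blocked
 * in read() or splice() on trace_pipe_raw can be kicked from another thread
 * with the cmd == 0 ioctl; any other cmd returns -ENOIOCTLCMD.
 *
 *	ioctl(fd, 0);	// fd: an open trace_pipe_raw descriptor
 */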
8145
8146 static const struct file_operations tracing_buffers_fops = {
8147         .open           = tracing_buffers_open,
8148         .read           = tracing_buffers_read,
8149         .poll           = tracing_buffers_poll,
8150         .release        = tracing_buffers_release,
8151         .flush          = tracing_buffers_flush,
8152         .splice_read    = tracing_buffers_splice_read,
8153         .unlocked_ioctl = tracing_buffers_ioctl,
8154         .llseek         = no_llseek,
8155 };
8156
8157 static ssize_t
8158 tracing_stats_read(struct file *filp, char __user *ubuf,
8159                    size_t count, loff_t *ppos)
8160 {
8161         struct inode *inode = file_inode(filp);
8162         struct trace_array *tr = inode->i_private;
8163         struct array_buffer *trace_buf = &tr->array_buffer;
8164         int cpu = tracing_get_cpu(inode);
8165         struct trace_seq *s;
8166         unsigned long cnt;
8167         unsigned long long t;
8168         unsigned long usec_rem;
8169
8170         s = kmalloc(sizeof(*s), GFP_KERNEL);
8171         if (!s)
8172                 return -ENOMEM;
8173
8174         trace_seq_init(s);
8175
8176         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8177         trace_seq_printf(s, "entries: %ld\n", cnt);
8178
8179         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8180         trace_seq_printf(s, "overrun: %ld\n", cnt);
8181
8182         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8183         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8184
8185         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8186         trace_seq_printf(s, "bytes: %ld\n", cnt);
8187
8188         if (trace_clocks[tr->clock_id].in_ns) {
8189                 /* local or global for trace_clock */
8190                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8191                 usec_rem = do_div(t, USEC_PER_SEC);
8192                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8193                                                                 t, usec_rem);
8194
8195                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8196                 usec_rem = do_div(t, USEC_PER_SEC);
8197                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8198         } else {
8199                 /* counter or tsc mode for trace_clock */
8200                 trace_seq_printf(s, "oldest event ts: %llu\n",
8201                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8202
8203                 trace_seq_printf(s, "now ts: %llu\n",
8204                                 ring_buffer_time_stamp(trace_buf->buffer));
8205         }
8206
8207         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8208         trace_seq_printf(s, "dropped events: %ld\n", cnt);
8209
8210         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8211         trace_seq_printf(s, "read events: %ld\n", cnt);
8212
8213         count = simple_read_from_buffer(ubuf, count, ppos,
8214                                         s->buffer, trace_seq_used(s));
8215
8216         kfree(s);
8217
8218         return count;
8219 }
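/*
 * Editor's note, illustration only: the per_cpu/cpuN/stats file generated
 * above is a plain key/value report.  With a nanosecond trace clock it has
 * the shape (numbers made up):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53248
 *	oldest event ts:  1369.474523
 *	now ts:  1370.001891
 *	dropped events: 0
 *	read events: 512
 */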
8220
8221 static const struct file_operations tracing_stats_fops = {
8222         .open           = tracing_open_generic_tr,
8223         .read           = tracing_stats_read,
8224         .llseek         = generic_file_llseek,
8225         .release        = tracing_release_generic_tr,
8226 };
8227
8228 #ifdef CONFIG_DYNAMIC_FTRACE
8229
8230 static ssize_t
8231 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8232                   size_t cnt, loff_t *ppos)
8233 {
8234         ssize_t ret;
8235         char *buf;
8236         int r;
8237
8238         /* 256 should be plenty to hold the amount needed */
8239         buf = kmalloc(256, GFP_KERNEL);
8240         if (!buf)
8241                 return -ENOMEM;
8242
8243         r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8244                       ftrace_update_tot_cnt,
8245                       ftrace_number_of_pages,
8246                       ftrace_number_of_groups);
8247
8248         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8249         kfree(buf);
8250         return ret;
8251 }
8252
8253 static const struct file_operations tracing_dyn_info_fops = {
8254         .open           = tracing_open_generic,
8255         .read           = tracing_read_dyn_info,
8256         .llseek         = generic_file_llseek,
8257 };
8258 #endif /* CONFIG_DYNAMIC_FTRACE */
8259
8260 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8261 static void
8262 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8263                 struct trace_array *tr, struct ftrace_probe_ops *ops,
8264                 void *data)
8265 {
8266         tracing_snapshot_instance(tr);
8267 }
8268
8269 static void
8270 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8271                       struct trace_array *tr, struct ftrace_probe_ops *ops,
8272                       void *data)
8273 {
8274         struct ftrace_func_mapper *mapper = data;
8275         long *count = NULL;
8276
8277         if (mapper)
8278                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8279
8280         if (count) {
8281
8282                 if (*count <= 0)
8283                         return;
8284
8285                 (*count)--;
8286         }
8287
8288         tracing_snapshot_instance(tr);
8289 }
8290
8291 static int
8292 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8293                       struct ftrace_probe_ops *ops, void *data)
8294 {
8295         struct ftrace_func_mapper *mapper = data;
8296         long *count = NULL;
8297
8298         seq_printf(m, "%ps:", (void *)ip);
8299
8300         seq_puts(m, "snapshot");
8301
8302         if (mapper)
8303                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8304
8305         if (count)
8306                 seq_printf(m, ":count=%ld\n", *count);
8307         else
8308                 seq_puts(m, ":unlimited\n");
8309
8310         return 0;
8311 }
8312
8313 static int
8314 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8315                      unsigned long ip, void *init_data, void **data)
8316 {
8317         struct ftrace_func_mapper *mapper = *data;
8318
8319         if (!mapper) {
8320                 mapper = allocate_ftrace_func_mapper();
8321                 if (!mapper)
8322                         return -ENOMEM;
8323                 *data = mapper;
8324         }
8325
8326         return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8327 }
8328
8329 static void
8330 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8331                      unsigned long ip, void *data)
8332 {
8333         struct ftrace_func_mapper *mapper = data;
8334
8335         if (!ip) {
8336                 if (!mapper)
8337                         return;
8338                 free_ftrace_func_mapper(mapper, NULL);
8339                 return;
8340         }
8341
8342         ftrace_func_mapper_remove_ip(mapper, ip);
8343 }
8344
8345 static struct ftrace_probe_ops snapshot_probe_ops = {
8346         .func                   = ftrace_snapshot,
8347         .print                  = ftrace_snapshot_print,
8348 };
8349
8350 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8351         .func                   = ftrace_count_snapshot,
8352         .print                  = ftrace_snapshot_print,
8353         .init                   = ftrace_snapshot_init,
8354         .free                   = ftrace_snapshot_free,
8355 };
8356
8357 static int
8358 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8359                                char *glob, char *cmd, char *param, int enable)
8360 {
8361         struct ftrace_probe_ops *ops;
8362         void *count = (void *)-1;
8363         char *number;
8364         int ret;
8365
8366         if (!tr)
8367                 return -ENODEV;
8368
8369         /* hash funcs only work with set_ftrace_filter */
8370         if (!enable)
8371                 return -EINVAL;
8372
8373         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8374
8375         if (glob[0] == '!')
8376                 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8377
8378         if (!param)
8379                 goto out_reg;
8380
8381         number = strsep(&param, ":");
8382
8383         if (!strlen(number))
8384                 goto out_reg;
8385
8386         /*
8387          * We use the callback data field (which is a pointer)
8388          * as our counter.
8389          */
8390         ret = kstrtoul(number, 0, (unsigned long *)&count);
8391         if (ret)
8392                 return ret;
8393
8394  out_reg:
8395         ret = tracing_alloc_snapshot_instance(tr);
8396         if (ret < 0)
8397                 goto out;
8398
8399         ret = register_ftrace_function_probe(glob, tr, ops, count);
8400
8401  out:
8402         return ret < 0 ? ret : 0;
8403 }
8404
8405 static struct ftrace_func_command ftrace_snapshot_cmd = {
8406         .name                   = "snapshot",
8407         .func                   = ftrace_trace_snapshot_callback,
8408 };
8409
8410 static __init int register_snapshot_cmd(void)
8411 {
8412         return register_ftrace_command(&ftrace_snapshot_cmd);
8413 }
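/*
 * Editor's usage note, illustration only ("schedule" is just an example
 * function): the "snapshot" command registered above is driven through
 * set_ftrace_filter, and the optional ":N" count is what
 * ftrace_trace_snapshot_callback() parses into the probe data:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# every hit
 *	echo 'schedule:snapshot:3' > set_ftrace_filter	# first 3 hits only
 *	echo '!schedule:snapshot' >> set_ftrace_filter	# remove the probe
 */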
8414 #else
8415 static inline __init int register_snapshot_cmd(void) { return 0; }
8416 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8417
8418 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8419 {
8420         if (WARN_ON(!tr->dir))
8421                 return ERR_PTR(-ENODEV);
8422
8423         /* Top directory uses NULL as the parent */
8424         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8425                 return NULL;
8426
8427         /* All sub buffers have a descriptor */
8428         return tr->dir;
8429 }
8430
8431 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8432 {
8433         struct dentry *d_tracer;
8434
8435         if (tr->percpu_dir)
8436                 return tr->percpu_dir;
8437
8438         d_tracer = tracing_get_dentry(tr);
8439         if (IS_ERR(d_tracer))
8440                 return NULL;
8441
8442         tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8443
8444         MEM_FAIL(!tr->percpu_dir,
8445                   "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8446
8447         return tr->percpu_dir;
8448 }
8449
8450 static struct dentry *
8451 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8452                       void *data, long cpu, const struct file_operations *fops)
8453 {
8454         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8455
8456         if (ret) /* See tracing_get_cpu() */
8457                 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8458         return ret;
8459 }
8460
8461 static void
8462 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8463 {
8464         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8465         struct dentry *d_cpu;
8466         char cpu_dir[30]; /* 30 characters should be more than enough */
8467
8468         if (!d_percpu)
8469                 return;
8470
8471         snprintf(cpu_dir, 30, "cpu%ld", cpu);
8472         d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8473         if (!d_cpu) {
8474                 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8475                 return;
8476         }
8477
8478         /* per cpu trace_pipe */
8479         trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8480                                 tr, cpu, &tracing_pipe_fops);
8481
8482         /* per cpu trace */
8483         trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8484                                 tr, cpu, &tracing_fops);
8485
8486         trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8487                                 tr, cpu, &tracing_buffers_fops);
8488
8489         trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8490                                 tr, cpu, &tracing_stats_fops);
8491
8492         trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8493                                 tr, cpu, &tracing_entries_fops);
8494
8495 #ifdef CONFIG_TRACER_SNAPSHOT
8496         trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8497                                 tr, cpu, &snapshot_fops);
8498
8499         trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8500                                 tr, cpu, &snapshot_raw_fops);
8501 #endif
8502 }
8503
8504 #ifdef CONFIG_FTRACE_SELFTEST
8505 /* Let selftest have access to static functions in this file */
8506 #include "trace_selftest.c"
8507 #endif
8508
8509 static ssize_t
8510 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8511                         loff_t *ppos)
8512 {
8513         struct trace_option_dentry *topt = filp->private_data;
8514         char *buf;
8515
8516         if (topt->flags->val & topt->opt->bit)
8517                 buf = "1\n";
8518         else
8519                 buf = "0\n";
8520
8521         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8522 }
8523
8524 static ssize_t
8525 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8526                          loff_t *ppos)
8527 {
8528         struct trace_option_dentry *topt = filp->private_data;
8529         unsigned long val;
8530         int ret;
8531
8532         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8533         if (ret)
8534                 return ret;
8535
8536         if (val != 0 && val != 1)
8537                 return -EINVAL;
8538
8539         if (!!(topt->flags->val & topt->opt->bit) != val) {
8540                 mutex_lock(&trace_types_lock);
8541                 ret = __set_tracer_option(topt->tr, topt->flags,
8542                                           topt->opt, !val);
8543                 mutex_unlock(&trace_types_lock);
8544                 if (ret)
8545                         return ret;
8546         }
8547
8548         *ppos += cnt;
8549
8550         return cnt;
8551 }
8552
8553 static int tracing_open_options(struct inode *inode, struct file *filp)
8554 {
8555         struct trace_option_dentry *topt = inode->i_private;
8556         int ret;
8557
8558         ret = tracing_check_open_get_tr(topt->tr);
8559         if (ret)
8560                 return ret;
8561
8562         filp->private_data = inode->i_private;
8563         return 0;
8564 }
8565
8566 static int tracing_release_options(struct inode *inode, struct file *file)
8567 {
8568         struct trace_option_dentry *topt = file->private_data;
8569
8570         trace_array_put(topt->tr);
8571         return 0;
8572 }
8573
8574 static const struct file_operations trace_options_fops = {
8575         .open = tracing_open_options,
8576         .read = trace_options_read,
8577         .write = trace_options_write,
8578         .llseek = generic_file_llseek,
8579         .release = tracing_release_options,
8580 };
8581
8582 /*
8583  * In order to pass in both the trace_array descriptor and the index
8584  * to the flag that the trace option file represents, the trace_array
8585  * has a character array of trace_flags_index[], which holds the index
8586  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8587  * The address of this character array is passed to the flag option file
8588  * read/write callbacks.
8589  *
8590  * In order to extract both the index and the trace_array descriptor,
8591  * get_tr_index() uses the following algorithm.
8592  *
8593  *   idx = *ptr;
8594  *
8595  * The pointer itself is the address of an index entry, so dereferencing
8596  * it yields the index value (remember index[1] == 1).
8597  *
8598  * Then, to get the trace_array descriptor, subtract that index from
8599  * the pointer to get to the start of the index array.
8600  *
8601  *   ptr - idx == &index[0]
8602  *
8603  * Then a simple container_of() from that pointer gets us to the
8604  * trace_array descriptor.
8605  */
8606 static void get_tr_index(void *data, struct trace_array **ptr,
8607                          unsigned int *pindex)
8608 {
8609         *pindex = *(unsigned char *)data;
8610
8611         *ptr = container_of(data - *pindex, struct trace_array,
8612                             trace_flags_index);
8613 }
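/*
 * Editor's worked example, illustration only: if a flag file's private data
 * is &tr->trace_flags_index[5], then *pindex becomes 5, data - 5 is
 * &tr->trace_flags_index[0], and container_of() recovers tr:
 *
 *	unsigned int idx;
 *	struct trace_array *found;
 *
 *	get_tr_index(&tr->trace_flags_index[5], &found, &idx);
 *	// found == tr, idx == 5
 */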
8614
8615 static ssize_t
8616 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8617                         loff_t *ppos)
8618 {
8619         void *tr_index = filp->private_data;
8620         struct trace_array *tr;
8621         unsigned int index;
8622         char *buf;
8623
8624         get_tr_index(tr_index, &tr, &index);
8625
8626         if (tr->trace_flags & (1 << index))
8627                 buf = "1\n";
8628         else
8629                 buf = "0\n";
8630
8631         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8632 }
8633
8634 static ssize_t
8635 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8636                          loff_t *ppos)
8637 {
8638         void *tr_index = filp->private_data;
8639         struct trace_array *tr;
8640         unsigned int index;
8641         unsigned long val;
8642         int ret;
8643
8644         get_tr_index(tr_index, &tr, &index);
8645
8646         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8647         if (ret)
8648                 return ret;
8649
8650         if (val != 0 && val != 1)
8651                 return -EINVAL;
8652
8653         mutex_lock(&event_mutex);
8654         mutex_lock(&trace_types_lock);
8655         ret = set_tracer_flag(tr, 1 << index, val);
8656         mutex_unlock(&trace_types_lock);
8657         mutex_unlock(&event_mutex);
8658
8659         if (ret < 0)
8660                 return ret;
8661
8662         *ppos += cnt;
8663
8664         return cnt;
8665 }
8666
8667 static const struct file_operations trace_options_core_fops = {
8668         .open = tracing_open_generic,
8669         .read = trace_options_core_read,
8670         .write = trace_options_core_write,
8671         .llseek = generic_file_llseek,
8672 };
8673
8674 struct dentry *trace_create_file(const char *name,
8675                                  umode_t mode,
8676                                  struct dentry *parent,
8677                                  void *data,
8678                                  const struct file_operations *fops)
8679 {
8680         struct dentry *ret;
8681
8682         ret = tracefs_create_file(name, mode, parent, data, fops);
8683         if (!ret)
8684                 pr_warn("Could not create tracefs '%s' entry\n", name);
8685
8686         return ret;
8687 }
8688
8689
8690 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8691 {
8692         struct dentry *d_tracer;
8693
8694         if (tr->options)
8695                 return tr->options;
8696
8697         d_tracer = tracing_get_dentry(tr);
8698         if (IS_ERR(d_tracer))
8699                 return NULL;
8700
8701         tr->options = tracefs_create_dir("options", d_tracer);
8702         if (!tr->options) {
8703                 pr_warn("Could not create tracefs directory 'options'\n");
8704                 return NULL;
8705         }
8706
8707         return tr->options;
8708 }
8709
8710 static void
8711 create_trace_option_file(struct trace_array *tr,
8712                          struct trace_option_dentry *topt,
8713                          struct tracer_flags *flags,
8714                          struct tracer_opt *opt)
8715 {
8716         struct dentry *t_options;
8717
8718         t_options = trace_options_init_dentry(tr);
8719         if (!t_options)
8720                 return;
8721
8722         topt->flags = flags;
8723         topt->opt = opt;
8724         topt->tr = tr;
8725
8726         topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8727                                         t_options, topt, &trace_options_fops);
8728
8729 }
8730
8731 static void
8732 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8733 {
8734         struct trace_option_dentry *topts;
8735         struct trace_options *tr_topts;
8736         struct tracer_flags *flags;
8737         struct tracer_opt *opts;
8738         int cnt;
8739         int i;
8740
8741         if (!tracer)
8742                 return;
8743
8744         flags = tracer->flags;
8745
8746         if (!flags || !flags->opts)
8747                 return;
8748
8749         /*
8750          * If this is an instance, only create flags for tracers
8751          * the instance may have.
8752          */
8753         if (!trace_ok_for_array(tracer, tr))
8754                 return;
8755
8756         for (i = 0; i < tr->nr_topts; i++) {
8757                 /* Make sure there are no duplicate flags. */
8758                 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8759                         return;
8760         }
8761
8762         opts = flags->opts;
8763
8764         for (cnt = 0; opts[cnt].name; cnt++)
8765                 ;
8766
8767         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8768         if (!topts)
8769                 return;
8770
8771         tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8772                             GFP_KERNEL);
8773         if (!tr_topts) {
8774                 kfree(topts);
8775                 return;
8776         }
8777
8778         tr->topts = tr_topts;
8779         tr->topts[tr->nr_topts].tracer = tracer;
8780         tr->topts[tr->nr_topts].topts = topts;
8781         tr->nr_topts++;
8782
8783         for (cnt = 0; opts[cnt].name; cnt++) {
8784                 create_trace_option_file(tr, &topts[cnt], flags,
8785                                          &opts[cnt]);
8786                 MEM_FAIL(topts[cnt].entry == NULL,
8787                           "Failed to create trace option: %s",
8788                           opts[cnt].name);
8789         }
8790 }
8791
8792 static struct dentry *
8793 create_trace_option_core_file(struct trace_array *tr,
8794                               const char *option, long index)
8795 {
8796         struct dentry *t_options;
8797
8798         t_options = trace_options_init_dentry(tr);
8799         if (!t_options)
8800                 return NULL;
8801
8802         return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8803                                  (void *)&tr->trace_flags_index[index],
8804                                  &trace_options_core_fops);
8805 }
8806
8807 static void create_trace_options_dir(struct trace_array *tr)
8808 {
8809         struct dentry *t_options;
8810         bool top_level = tr == &global_trace;
8811         int i;
8812
8813         t_options = trace_options_init_dentry(tr);
8814         if (!t_options)
8815                 return;
8816
8817         for (i = 0; trace_options[i]; i++) {
8818                 if (top_level ||
8819                     !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8820                         create_trace_option_core_file(tr, trace_options[i], i);
8821         }
8822 }
8823
8824 static ssize_t
8825 rb_simple_read(struct file *filp, char __user *ubuf,
8826                size_t cnt, loff_t *ppos)
8827 {
8828         struct trace_array *tr = filp->private_data;
8829         char buf[64];
8830         int r;
8831
8832         r = tracer_tracing_is_on(tr);
8833         r = sprintf(buf, "%d\n", r);
8834
8835         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8836 }
8837
8838 static ssize_t
8839 rb_simple_write(struct file *filp, const char __user *ubuf,
8840                 size_t cnt, loff_t *ppos)
8841 {
8842         struct trace_array *tr = filp->private_data;
8843         struct trace_buffer *buffer = tr->array_buffer.buffer;
8844         unsigned long val;
8845         int ret;
8846
8847         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8848         if (ret)
8849                 return ret;
8850
8851         if (buffer) {
8852                 mutex_lock(&trace_types_lock);
8853                 if (!!val == tracer_tracing_is_on(tr)) {
8854                         val = 0; /* do nothing */
8855                 } else if (val) {
8856                         tracer_tracing_on(tr);
8857                         if (tr->current_trace->start)
8858                                 tr->current_trace->start(tr);
8859                 } else {
8860                         tracer_tracing_off(tr);
8861                         if (tr->current_trace->stop)
8862                                 tr->current_trace->stop(tr);
8863                         /* Wake up any waiters */
8864                         ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
8865                 }
8866                 mutex_unlock(&trace_types_lock);
8867         }
8868
8869         (*ppos)++;
8870
8871         return cnt;
8872 }
8873
8874 static const struct file_operations rb_simple_fops = {
8875         .open           = tracing_open_generic_tr,
8876         .read           = rb_simple_read,
8877         .write          = rb_simple_write,
8878         .release        = tracing_release_generic_tr,
8879         .llseek         = default_llseek,
8880 };
8881
8882 static ssize_t
8883 buffer_percent_read(struct file *filp, char __user *ubuf,
8884                     size_t cnt, loff_t *ppos)
8885 {
8886         struct trace_array *tr = filp->private_data;
8887         char buf[64];
8888         int r;
8889
8890         r = tr->buffer_percent;
8891         r = sprintf(buf, "%d\n", r);
8892
8893         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8894 }
8895
8896 static ssize_t
8897 buffer_percent_write(struct file *filp, const char __user *ubuf,
8898                      size_t cnt, loff_t *ppos)
8899 {
8900         struct trace_array *tr = filp->private_data;
8901         unsigned long val;
8902         int ret;
8903
8904         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8905         if (ret)
8906                 return ret;
8907
8908         if (val > 100)
8909                 return -EINVAL;
8910
8911         tr->buffer_percent = val;
8912
8913         (*ppos)++;
8914
8915         return cnt;
8916 }
8917
8918 static const struct file_operations buffer_percent_fops = {
8919         .open           = tracing_open_generic_tr,
8920         .read           = buffer_percent_read,
8921         .write          = buffer_percent_write,
8922         .release        = tracing_release_generic_tr,
8923         .llseek         = default_llseek,
8924 };
8925
8926 static ssize_t
8927 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
8928 {
8929         struct trace_array *tr = filp->private_data;
8930         size_t size;
8931         char buf[64];
8932         int order;
8933         int r;
8934
8935         order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
8936         size = (PAGE_SIZE << order) / 1024;
8937
8938         r = sprintf(buf, "%zd\n", size);
8939
8940         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8941 }
8942
8943 static ssize_t
8944 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
8945                          size_t cnt, loff_t *ppos)
8946 {
8947         struct trace_array *tr = filp->private_data;
8948         unsigned long val;
8949         int old_order;
8950         int order;
8951         int pages;
8952         int ret;
8953
8954         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8955         if (ret)
8956                 return ret;
8957
8958         val *= 1024; /* value passed in is in KB */
8959
8960         pages = DIV_ROUND_UP(val, PAGE_SIZE);
8961         order = fls(pages - 1);
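	/*
	 * Editor's note: e.g. with 4K pages, val = 8 KB gives pages = 2 and
	 * order = fls(1) = 1 (an 8 KB sub-buffer), while val = 4 KB gives
	 * pages = 1 and order = fls(0) = 0 (a single page).
	 */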
8962
8963         /* limit between 1 and 128 system pages */
8964         if (order < 0 || order > 7)
8965                 return -EINVAL;
8966
8967         /* Do not allow tracing while changing the order of the ring buffer */
8968         tracing_stop_tr(tr);
8969
8970         old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
8971         if (old_order == order)
8972                 goto out;
8973
8974         ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
8975         if (ret)
8976                 goto out;
8977
8978 #ifdef CONFIG_TRACER_MAX_TRACE
8979
8980         if (!tr->allocated_snapshot)
8981                 goto out_max;
8982
8983         ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
8984         if (ret) {
8985                 /* Put back the old order */
8986                 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
8987                 if (WARN_ON_ONCE(cnt)) {
8988                         /*
8989                          * AARGH! We are left with different orders!
8990                          * The max buffer is our "snapshot" buffer.
8991                          * When a tracer needs a snapshot (one of the
8992                          * latency tracers), it swaps the max buffer
8993                          * with the saved snapshot. We succeeded in
8994                          * updating the order of the main buffer, but failed to
8995                          * update the order of the max buffer. But when we tried
8996                          * to reset the main buffer to the original size, we
8997                          * failed there too. This is very unlikely to
8998                          * happen, but if it does, warn and kill all
8999                          * tracing.
9000                          */
9001                         tracing_disabled = 1;
9002                 }
9003                 goto out;
9004         }
9005  out_max:
9006 #endif
9007         (*ppos)++;
9008  out:
9009         if (ret)
9010                 cnt = ret;
9011         tracing_start_tr(tr);
9012         return cnt;
9013 }
9014
9015 static const struct file_operations buffer_subbuf_size_fops = {
9016         .open           = tracing_open_generic_tr,
9017         .read           = buffer_subbuf_size_read,
9018         .write          = buffer_subbuf_size_write,
9019         .release        = tracing_release_generic_tr,
9020         .llseek         = default_llseek,
9021 };
9022
9023 static struct dentry *trace_instance_dir;
9024
9025 static void
9026 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9027
9028 static int
9029 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9030 {
9031         enum ring_buffer_flags rb_flags;
9032
9033         rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9034
9035         buf->tr = tr;
9036
9037         buf->buffer = ring_buffer_alloc(size, rb_flags);
9038         if (!buf->buffer)
9039                 return -ENOMEM;
9040
9041         buf->data = alloc_percpu(struct trace_array_cpu);
9042         if (!buf->data) {
9043                 ring_buffer_free(buf->buffer);
9044                 buf->buffer = NULL;
9045                 return -ENOMEM;
9046         }
9047
9048         /* Allocate the first page for all buffers */
9049         set_buffer_entries(&tr->array_buffer,
9050                            ring_buffer_size(tr->array_buffer.buffer, 0));
9051
9052         return 0;
9053 }
9054
9055 static void free_trace_buffer(struct array_buffer *buf)
9056 {
9057         if (buf->buffer) {
9058                 ring_buffer_free(buf->buffer);
9059                 buf->buffer = NULL;
9060                 free_percpu(buf->data);
9061                 buf->data = NULL;
9062         }
9063 }
9064
9065 static int allocate_trace_buffers(struct trace_array *tr, int size)
9066 {
9067         int ret;
9068
9069         ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9070         if (ret)
9071                 return ret;
9072
9073 #ifdef CONFIG_TRACER_MAX_TRACE
9074         ret = allocate_trace_buffer(tr, &tr->max_buffer,
9075                                     allocate_snapshot ? size : 1);
9076         if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9077                 free_trace_buffer(&tr->array_buffer);
9078                 return -ENOMEM;
9079         }
9080         tr->allocated_snapshot = allocate_snapshot;
9081
9082         allocate_snapshot = false;
9083 #endif
9084
9085         return 0;
9086 }
9087
9088 static void free_trace_buffers(struct trace_array *tr)
9089 {
9090         if (!tr)
9091                 return;
9092
9093         free_trace_buffer(&tr->array_buffer);
9094
9095 #ifdef CONFIG_TRACER_MAX_TRACE
9096         free_trace_buffer(&tr->max_buffer);
9097 #endif
9098 }
9099
9100 static void init_trace_flags_index(struct trace_array *tr)
9101 {
9102         int i;
9103
9104         /* Used by the trace options files */
9105         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9106                 tr->trace_flags_index[i] = i;
9107 }
9108
9109 static void __update_tracer_options(struct trace_array *tr)
9110 {
9111         struct tracer *t;
9112
9113         for (t = trace_types; t; t = t->next)
9114                 add_tracer_options(tr, t);
9115 }
9116
9117 static void update_tracer_options(struct trace_array *tr)
9118 {
9119         mutex_lock(&trace_types_lock);
9120         tracer_options_updated = true;
9121         __update_tracer_options(tr);
9122         mutex_unlock(&trace_types_lock);
9123 }
9124
9125 /* Must have trace_types_lock held */
9126 struct trace_array *trace_array_find(const char *instance)
9127 {
9128         struct trace_array *tr, *found = NULL;
9129
9130         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9131                 if (tr->name && strcmp(tr->name, instance) == 0) {
9132                         found = tr;
9133                         break;
9134                 }
9135         }
9136
9137         return found;
9138 }
9139
9140 struct trace_array *trace_array_find_get(const char *instance)
9141 {
9142         struct trace_array *tr;
9143
9144         mutex_lock(&trace_types_lock);
9145         tr = trace_array_find(instance);
9146         if (tr)
9147                 tr->ref++;
9148         mutex_unlock(&trace_types_lock);
9149
9150         return tr;
9151 }
9152
9153 static int trace_array_create_dir(struct trace_array *tr)
9154 {
9155         int ret;
9156
9157         tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9158         if (!tr->dir)
9159                 return -EINVAL;
9160
9161         ret = event_trace_add_tracer(tr->dir, tr);
9162         if (ret) {
9163                 tracefs_remove(tr->dir);
9164                 return ret;
9165         }
9166
9167         init_tracer_tracefs(tr, tr->dir);
9168         __update_tracer_options(tr);
9169
9170         return ret;
9171 }
9172
9173 static struct trace_array *
9174 trace_array_create_systems(const char *name, const char *systems)
9175 {
9176         struct trace_array *tr;
9177         int ret;
9178
9179         ret = -ENOMEM;
9180         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9181         if (!tr)
9182                 return ERR_PTR(ret);
9183
9184         tr->name = kstrdup(name, GFP_KERNEL);
9185         if (!tr->name)
9186                 goto out_free_tr;
9187
9188         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9189                 goto out_free_tr;
9190
9191         if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9192                 goto out_free_tr;
9193
9194         if (systems) {
9195                 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9196                 if (!tr->system_names)
9197                         goto out_free_tr;
9198         }
9199
9200         tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9201
9202         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9203
9204         raw_spin_lock_init(&tr->start_lock);
9205
9206         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9207
9208         tr->current_trace = &nop_trace;
9209
9210         INIT_LIST_HEAD(&tr->systems);
9211         INIT_LIST_HEAD(&tr->events);
9212         INIT_LIST_HEAD(&tr->hist_vars);
9213         INIT_LIST_HEAD(&tr->err_log);
9214
9215         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9216                 goto out_free_tr;
9217
9218         /* The ring buffer is expanded by default */
9219         trace_set_ring_buffer_expanded(tr);
9220
9221         if (ftrace_allocate_ftrace_ops(tr) < 0)
9222                 goto out_free_tr;
9223
9224         ftrace_init_trace_array(tr);
9225
9226         init_trace_flags_index(tr);
9227
9228         if (trace_instance_dir) {
9229                 ret = trace_array_create_dir(tr);
9230                 if (ret)
9231                         goto out_free_tr;
9232         } else
9233                 __trace_early_add_events(tr);
9234
9235         list_add(&tr->list, &ftrace_trace_arrays);
9236
9237         tr->ref++;
9238
9239         return tr;
9240
9241  out_free_tr:
9242         ftrace_free_ftrace_ops(tr);
9243         free_trace_buffers(tr);
9244         free_cpumask_var(tr->pipe_cpumask);
9245         free_cpumask_var(tr->tracing_cpumask);
9246         kfree_const(tr->system_names);
9247         kfree(tr->name);
9248         kfree(tr);
9249
9250         return ERR_PTR(ret);
9251 }
9252
9253 static struct trace_array *trace_array_create(const char *name)
9254 {
9255         return trace_array_create_systems(name, NULL);
9256 }
9257
9258 static int instance_mkdir(const char *name)
9259 {
9260         struct trace_array *tr;
9261         int ret;
9262
9263         mutex_lock(&event_mutex);
9264         mutex_lock(&trace_types_lock);
9265
9266         ret = -EEXIST;
9267         if (trace_array_find(name))
9268                 goto out_unlock;
9269
9270         tr = trace_array_create(name);
9271
9272         ret = PTR_ERR_OR_ZERO(tr);
9273
9274 out_unlock:
9275         mutex_unlock(&trace_types_lock);
9276         mutex_unlock(&event_mutex);
9277         return ret;
9278 }
9279
9280 /**
9281  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9282  * @name: The name of the trace array to be looked up/created.
9283  * @systems: A list of systems to create event directories for (NULL for all)
9284  *
9285  * Returns pointer to trace array with given name.
9286  * NULL if it cannot be created.
9287  *
9288  * NOTE: This function increments the reference counter associated with the
9289  * trace array returned. This makes sure it cannot be freed while in use.
9290  * Use trace_array_put() once the trace array is no longer needed.
9291  * If the trace_array is to be freed, trace_array_destroy() needs to
9292  * be called after the trace_array_put(), or simply let user space delete
9293  * it from the tracefs instances directory. But until the
9294  * trace_array_put() is called, user space cannot delete it.
9295  *
9296  */
9297 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9298 {
9299         struct trace_array *tr;
9300
9301         mutex_lock(&event_mutex);
9302         mutex_lock(&trace_types_lock);
9303
9304         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9305                 if (tr->name && strcmp(tr->name, name) == 0)
9306                         goto out_unlock;
9307         }
9308
9309         tr = trace_array_create_systems(name, systems);
9310
9311         if (IS_ERR(tr))
9312                 tr = NULL;
9313 out_unlock:
9314         if (tr)
9315                 tr->ref++;
9316
9317         mutex_unlock(&trace_types_lock);
9318         mutex_unlock(&event_mutex);
9319         return tr;
9320 }
9321 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
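/*
 * Editor's illustrative sketch, not part of the source: typical module-side
 * use of the instance API exported above (instance name is arbitrary); see
 * also samples/ftrace/sample-trace-array.c.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example_instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	// only if the instance should go away
 */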
9322
9323 static int __remove_instance(struct trace_array *tr)
9324 {
9325         int i;
9326
9327         /* Reference counter for a newly created trace array = 1. */
9328         if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9329                 return -EBUSY;
9330
9331         list_del(&tr->list);
9332
9333         /* Disable all the flags that were enabled coming in */
9334         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9335                 if ((1 << i) & ZEROED_TRACE_FLAGS)
9336                         set_tracer_flag(tr, 1 << i, 0);
9337         }
9338
9339         tracing_set_nop(tr);
9340         clear_ftrace_function_probes(tr);
9341         event_trace_del_tracer(tr);
9342         ftrace_clear_pids(tr);
9343         ftrace_destroy_function_files(tr);
9344         tracefs_remove(tr->dir);
9345         free_percpu(tr->last_func_repeats);
9346         free_trace_buffers(tr);
9347         clear_tracing_err_log(tr);
9348
9349         for (i = 0; i < tr->nr_topts; i++) {
9350                 kfree(tr->topts[i].topts);
9351         }
9352         kfree(tr->topts);
9353
9354         free_cpumask_var(tr->pipe_cpumask);
9355         free_cpumask_var(tr->tracing_cpumask);
9356         kfree_const(tr->system_names);
9357         kfree(tr->name);
9358         kfree(tr);
9359
9360         return 0;
9361 }
9362
9363 int trace_array_destroy(struct trace_array *this_tr)
9364 {
9365         struct trace_array *tr;
9366         int ret;
9367
9368         if (!this_tr)
9369                 return -EINVAL;
9370
9371         mutex_lock(&event_mutex);
9372         mutex_lock(&trace_types_lock);
9373
9374         ret = -ENODEV;
9375
9376         /* Make sure the trace array exists before destroying it. */
9377         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9378                 if (tr == this_tr) {
9379                         ret = __remove_instance(tr);
9380                         break;
9381                 }
9382         }
9383
9384         mutex_unlock(&trace_types_lock);
9385         mutex_unlock(&event_mutex);
9386
9387         return ret;
9388 }
9389 EXPORT_SYMBOL_GPL(trace_array_destroy);
9390
9391 static int instance_rmdir(const char *name)
9392 {
9393         struct trace_array *tr;
9394         int ret;
9395
9396         mutex_lock(&event_mutex);
9397         mutex_lock(&trace_types_lock);
9398
9399         ret = -ENODEV;
9400         tr = trace_array_find(name);
9401         if (tr)
9402                 ret = __remove_instance(tr);
9403
9404         mutex_unlock(&trace_types_lock);
9405         mutex_unlock(&event_mutex);
9406
9407         return ret;
9408 }
9409
9410 static __init void create_trace_instances(struct dentry *d_tracer)
9411 {
9412         struct trace_array *tr;
9413
9414         trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9415                                                          instance_mkdir,
9416                                                          instance_rmdir);
9417         if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9418                 return;
9419
9420         mutex_lock(&event_mutex);
9421         mutex_lock(&trace_types_lock);
9422
9423         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9424                 if (!tr->name)
9425                         continue;
9426                 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9427                              "Failed to create instance directory\n"))
9428                         break;
9429         }
9430
9431         mutex_unlock(&trace_types_lock);
9432         mutex_unlock(&event_mutex);
9433 }
9434
9435 static void
9436 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9437 {
9438         int cpu;
9439
9440         trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9441                         tr, &show_traces_fops);
9442
9443         trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9444                         tr, &set_tracer_fops);
9445
9446         trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9447                           tr, &tracing_cpumask_fops);
9448
9449         trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9450                           tr, &tracing_iter_fops);
9451
9452         trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9453                           tr, &tracing_fops);
9454
9455         trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9456                           tr, &tracing_pipe_fops);
9457
9458         trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9459                           tr, &tracing_entries_fops);
9460
9461         trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9462                           tr, &tracing_total_entries_fops);
9463
9464         trace_create_file("free_buffer", 0200, d_tracer,
9465                           tr, &tracing_free_buffer_fops);
9466
9467         trace_create_file("trace_marker", 0220, d_tracer,
9468                           tr, &tracing_mark_fops);
9469
9470         tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9471
9472         trace_create_file("trace_marker_raw", 0220, d_tracer,
9473                           tr, &tracing_mark_raw_fops);
9474
9475         trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9476                           &trace_clock_fops);
9477
9478         trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9479                           tr, &rb_simple_fops);
9480
9481         trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9482                           &trace_time_stamp_mode_fops);
9483
9484         tr->buffer_percent = 50;
9485
9486         trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9487                         tr, &buffer_percent_fops);
9488
9489         trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9490                           tr, &buffer_subbuf_size_fops);
9491
9492         create_trace_options_dir(tr);
9493
9494 #ifdef CONFIG_TRACER_MAX_TRACE
9495         trace_create_maxlat_file(tr, d_tracer);
9496 #endif
9497
9498         if (ftrace_create_function_files(tr, d_tracer))
9499                 MEM_FAIL(1, "Could not allocate function filter files");
9500
9501 #ifdef CONFIG_TRACER_SNAPSHOT
9502         trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9503                           tr, &snapshot_fops);
9504 #endif
9505
9506         trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9507                           tr, &tracing_err_log_fops);
9508
9509         for_each_tracing_cpu(cpu)
9510                 tracing_init_tracefs_percpu(tr, cpu);
9511
9512         ftrace_init_tracefs(tr, d_tracer);
9513 }
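
/*
 * Sketch (not in the kernel): a new per-instance control file would follow
 * the same trace_create_file() pattern used throughout init_tracer_tracefs()
 * above.  "example_stats", example_stats_read() and example_stats_fops are
 * hypothetical names; the open/release helpers mirror buffer_percent_fops.
 */
static ssize_t example_stats_read(struct file *filp, char __user *ubuf,
                                  size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[32];
        int r;

        r = scnprintf(buf, sizeof(buf), "%d\n", tr->buffer_percent);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations example_stats_fops = {
        .open           = tracing_open_generic_tr,
        .read           = example_stats_read,
        .release        = tracing_release_generic_tr,
        .llseek         = default_llseek,
};

/* Would be called from init_tracer_tracefs() with the instance's dentry: */
static void __maybe_unused example_add_instance_file(struct trace_array *tr,
                                                     struct dentry *d_tracer)
{
        trace_create_file("example_stats", TRACE_MODE_READ, d_tracer,
                          tr, &example_stats_fops);
}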
9514
9515 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9516 {
9517         struct vfsmount *mnt;
9518         struct file_system_type *type;
9519
9520         /*
9521          * To maintain backward compatibility for tools that mount
9522          * debugfs to get to the tracing facility, tracefs is automatically
9523          * mounted to the debugfs/tracing directory.
9524          */
9525         type = get_fs_type("tracefs");
9526         if (!type)
9527                 return NULL;
9528         mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9529         put_filesystem(type);
9530         if (IS_ERR(mnt))
9531                 return NULL;
9532         mntget(mnt);
9533
9534         return mnt;
9535 }
9536
9537 /**
9538  * tracing_init_dentry - initialize top level trace array
9539  *
9540  * This is called when creating files or directories in the tracing
9541  * directory. It is called via fs_initcall() by the boot up code and
9542  * returns 0 on success or a negative error code.
9543  */
9544 int tracing_init_dentry(void)
9545 {
9546         struct trace_array *tr = &global_trace;
9547
9548         if (security_locked_down(LOCKDOWN_TRACEFS)) {
9549                 pr_warn("Tracing disabled due to lockdown\n");
9550                 return -EPERM;
9551         }
9552
9553         /* The top level trace array uses NULL as parent */
9554         if (tr->dir)
9555                 return 0;
9556
9557         if (WARN_ON(!tracefs_initialized()))
9558                 return -ENODEV;
9559
9560         /*
9561          * As there may still be users that expect the tracing
9562          * files to exist in debugfs/tracing, we must automount
9563          * the tracefs file system there, so older tools still
9564          * work with the newer kernel.
9565          */
9566         tr->dir = debugfs_create_automount("tracing", NULL,
9567                                            trace_automount, NULL);
9568
9569         return 0;
9570 }
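
/*
 * Sketch (hypothetical helper): the usual calling pattern, as also seen in
 * tracer_init_tracefs() below -- make sure the top level directory exists,
 * then create files with a NULL parent so they land in that directory.
 */
static int __maybe_unused example_create_toplevel_file(void)
{
        int ret;

        ret = tracing_init_dentry();
        if (ret)
                return ret;

        /* NULL parent == top of the tracefs mount */
        trace_create_file("README", TRACE_MODE_READ, NULL,
                          NULL, &tracing_readme_fops);
        return 0;
}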
9571
9572 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9573 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9574
9575 static struct workqueue_struct *eval_map_wq __initdata;
9576 static struct work_struct eval_map_work __initdata;
9577 static struct work_struct tracerfs_init_work __initdata;
9578
9579 static void __init eval_map_work_func(struct work_struct *work)
9580 {
9581         int len;
9582
9583         len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9584         trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9585 }
9586
9587 static int __init trace_eval_init(void)
9588 {
9589         INIT_WORK(&eval_map_work, eval_map_work_func);
9590
9591         eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9592         if (!eval_map_wq) {
9593                 pr_err("Unable to allocate eval_map_wq\n");
9594                 /* Fall back to doing the work synchronously */
9595                 eval_map_work_func(&eval_map_work);
9596                 return -ENOMEM;
9597         }
9598
9599         queue_work(eval_map_wq, &eval_map_work);
9600         return 0;
9601 }
9602
9603 subsys_initcall(trace_eval_init);
9604
9605 static int __init trace_eval_sync(void)
9606 {
9607         /* Make sure the eval map updates are finished */
9608         if (eval_map_wq)
9609                 destroy_workqueue(eval_map_wq);
9610         return 0;
9611 }
9612
9613 late_initcall_sync(trace_eval_sync);
9614
9615
9616 #ifdef CONFIG_MODULES
9617 static void trace_module_add_evals(struct module *mod)
9618 {
9619         if (!mod->num_trace_evals)
9620                 return;
9621
9622         /*
9623          * Modules with bad taint do not have events created; do
9624          * not bother with their enums either.
9625          */
9626         if (trace_module_has_bad_taint(mod))
9627                 return;
9628
9629         trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9630 }
9631
9632 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9633 static void trace_module_remove_evals(struct module *mod)
9634 {
9635         union trace_eval_map_item *map;
9636         union trace_eval_map_item **last = &trace_eval_maps;
9637
9638         if (!mod->num_trace_evals)
9639                 return;
9640
9641         mutex_lock(&trace_eval_mutex);
9642
9643         map = trace_eval_maps;
9644
9645         while (map) {
9646                 if (map->head.mod == mod)
9647                         break;
9648                 map = trace_eval_jmp_to_tail(map);
9649                 last = &map->tail.next;
9650                 map = map->tail.next;
9651         }
9652         if (!map)
9653                 goto out;
9654
9655         *last = trace_eval_jmp_to_tail(map)->tail.next;
9656         kfree(map);
9657  out:
9658         mutex_unlock(&trace_eval_mutex);
9659 }
9660 #else
9661 static inline void trace_module_remove_evals(struct module *mod) { }
9662 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9663
9664 static int trace_module_notify(struct notifier_block *self,
9665                                unsigned long val, void *data)
9666 {
9667         struct module *mod = data;
9668
9669         switch (val) {
9670         case MODULE_STATE_COMING:
9671                 trace_module_add_evals(mod);
9672                 break;
9673         case MODULE_STATE_GOING:
9674                 trace_module_remove_evals(mod);
9675                 break;
9676         }
9677
9678         return NOTIFY_OK;
9679 }
9680
9681 static struct notifier_block trace_module_nb = {
9682         .notifier_call = trace_module_notify,
9683         .priority = 0,
9684 };
9685 #endif /* CONFIG_MODULES */
9686
9687 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9688 {
9689
9690         event_trace_init();
9691
9692         init_tracer_tracefs(&global_trace, NULL);
9693         ftrace_init_tracefs_toplevel(&global_trace, NULL);
9694
9695         trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9696                         &global_trace, &tracing_thresh_fops);
9697
9698         trace_create_file("README", TRACE_MODE_READ, NULL,
9699                         NULL, &tracing_readme_fops);
9700
9701         trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9702                         NULL, &tracing_saved_cmdlines_fops);
9703
9704         trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9705                           NULL, &tracing_saved_cmdlines_size_fops);
9706
9707         trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9708                         NULL, &tracing_saved_tgids_fops);
9709
9710         trace_create_eval_file(NULL);
9711
9712 #ifdef CONFIG_MODULES
9713         register_module_notifier(&trace_module_nb);
9714 #endif
9715
9716 #ifdef CONFIG_DYNAMIC_FTRACE
9717         trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9718                         NULL, &tracing_dyn_info_fops);
9719 #endif
9720
9721         create_trace_instances(NULL);
9722
9723         update_tracer_options(&global_trace);
9724 }
9725
9726 static __init int tracer_init_tracefs(void)
9727 {
9728         int ret;
9729
9730         trace_access_lock_init();
9731
9732         ret = tracing_init_dentry();
9733         if (ret)
9734                 return 0;
9735
9736         if (eval_map_wq) {
9737                 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9738                 queue_work(eval_map_wq, &tracerfs_init_work);
9739         } else {
9740                 tracer_init_tracefs_work_func(NULL);
9741         }
9742
9743         rv_init_interface();
9744
9745         return 0;
9746 }
9747
9748 fs_initcall(tracer_init_tracefs);
9749
9750 static int trace_die_panic_handler(struct notifier_block *self,
9751                                 unsigned long ev, void *unused);
9752
9753 static struct notifier_block trace_panic_notifier = {
9754         .notifier_call = trace_die_panic_handler,
9755         .priority = INT_MAX - 1,
9756 };
9757
9758 static struct notifier_block trace_die_notifier = {
9759         .notifier_call = trace_die_panic_handler,
9760         .priority = INT_MAX - 1,
9761 };
9762
9763 /*
9764  * The idea is to execute the following die/panic callback early, in order
9765  * to avoid showing irrelevant information in the trace (like other panic
9766  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
9767  * warnings get disabled (to prevent potential log flooding).
9768  */
9769 static int trace_die_panic_handler(struct notifier_block *self,
9770                                 unsigned long ev, void *unused)
9771 {
9772         if (!ftrace_dump_on_oops)
9773                 return NOTIFY_DONE;
9774
9775         /* The die notifier requires DIE_OOPS to trigger */
9776         if (self == &trace_die_notifier && ev != DIE_OOPS)
9777                 return NOTIFY_DONE;
9778
9779         ftrace_dump(ftrace_dump_on_oops);
9780
9781         return NOTIFY_DONE;
9782 }
9783
9784 /*
9785  * printk is capped at a max of 1024 characters; we really don't need it
9786  * that big here. Nothing should be printing 1000 characters anyway.
9787  */
9788 #define TRACE_MAX_PRINT         1000
9789
9790 /*
9791  * Define here KERN_TRACE so that we have one place to modify
9792  * it if we decide to change what log level the ftrace dump
9793  * should be at.
9794  */
9795 #define KERN_TRACE              KERN_EMERG
9796
9797 void
9798 trace_printk_seq(struct trace_seq *s)
9799 {
9800         /* Probably should print a warning here. */
9801         if (s->seq.len >= TRACE_MAX_PRINT)
9802                 s->seq.len = TRACE_MAX_PRINT;
9803
9804         /*
9805          * More paranoid code. Although the buffer size is set to
9806          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9807          * an extra layer of protection.
9808          */
9809         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9810                 s->seq.len = s->seq.size - 1;
9811
9812         /* Should already be NUL-terminated, but we are paranoid. */
9813         s->buffer[s->seq.len] = 0;
9814
9815         printk(KERN_TRACE "%s", s->buffer);
9816
9817         trace_seq_init(s);
9818 }
9819
9820 void trace_init_global_iter(struct trace_iterator *iter)
9821 {
9822         iter->tr = &global_trace;
9823         iter->trace = iter->tr->current_trace;
9824         iter->cpu_file = RING_BUFFER_ALL_CPUS;
9825         iter->array_buffer = &global_trace.array_buffer;
9826
9827         if (iter->trace && iter->trace->open)
9828                 iter->trace->open(iter);
9829
9830         /* Annotate start of buffers if we had overruns */
9831         if (ring_buffer_overruns(iter->array_buffer->buffer))
9832                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9833
9834         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9835         if (trace_clocks[iter->tr->clock_id].in_ns)
9836                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9837
9838         /* Cannot use kmalloc for iter.temp and iter.fmt */
9839         iter->temp = static_temp_buf;
9840         iter->temp_size = STATIC_TEMP_BUF_SIZE;
9841         iter->fmt = static_fmt_buf;
9842         iter->fmt_size = STATIC_FMT_BUF_SIZE;
9843 }
9844
9845 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9846 {
9847         /* use static because iter can be a bit big for the stack */
9848         static struct trace_iterator iter;
9849         static atomic_t dump_running;
9850         struct trace_array *tr = &global_trace;
9851         unsigned int old_userobj;
9852         unsigned long flags;
9853         int cnt = 0, cpu;
9854
9855         /* Only allow one dump user at a time. */
9856         if (atomic_inc_return(&dump_running) != 1) {
9857                 atomic_dec(&dump_running);
9858                 return;
9859         }
9860
9861         /*
9862          * Always turn off tracing when we dump.
9863          * We don't need to show trace output of what happens
9864          * between multiple crashes.
9865          *
9866          * If the user does a sysrq-z, then they can re-enable
9867          * tracing with echo 1 > tracing_on.
9868          */
9869         tracing_off();
9870
9871         local_irq_save(flags);
9872
9873         /* Simulate the iterator */
9874         trace_init_global_iter(&iter);
9875
9876         for_each_tracing_cpu(cpu) {
9877                 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9878         }
9879
9880         old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9881
9882         /* don't look at user memory in panic mode */
9883         tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9884
9885         switch (oops_dump_mode) {
9886         case DUMP_ALL:
9887                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9888                 break;
9889         case DUMP_ORIG:
9890                 iter.cpu_file = raw_smp_processor_id();
9891                 break;
9892         case DUMP_NONE:
9893                 goto out_enable;
9894         default:
9895                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9896                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9897         }
9898
9899         printk(KERN_TRACE "Dumping ftrace buffer:\n");
9900
9901         /* Did function tracer already get disabled? */
9902         if (ftrace_is_dead()) {
9903                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9904                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
9905         }
9906
9907         /*
9908          * We need to stop all tracing on all CPUs to read
9909          * the next buffer. This is a bit expensive, but is
9910          * not done often. We fill in all that we can read,
9911          * and then release the locks again.
9912          */
9913
9914         while (!trace_empty(&iter)) {
9915
9916                 if (!cnt)
9917                         printk(KERN_TRACE "---------------------------------\n");
9918
9919                 cnt++;
9920
9921                 trace_iterator_reset(&iter);
9922                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9923
9924                 if (trace_find_next_entry_inc(&iter) != NULL) {
9925                         int ret;
9926
9927                         ret = print_trace_line(&iter);
9928                         if (ret != TRACE_TYPE_NO_CONSUME)
9929                                 trace_consume(&iter);
9930                 }
9931                 touch_nmi_watchdog();
9932
9933                 trace_printk_seq(&iter.seq);
9934         }
9935
9936         if (!cnt)
9937                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
9938         else
9939                 printk(KERN_TRACE "---------------------------------\n");
9940
9941  out_enable:
9942         tr->trace_flags |= old_userobj;
9943
9944         for_each_tracing_cpu(cpu) {
9945                 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9946         }
9947         atomic_dec(&dump_running);
9948         local_irq_restore(flags);
9949 }
9950 EXPORT_SYMBOL_GPL(ftrace_dump);
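
/*
 * Sketch (hypothetical caller): ftrace_dump() can also be invoked directly
 * from an error path in other kernel code, not just via the die/panic
 * notifier above or the ftrace_dump_on_oops command line option / sysctl.
 * DUMP_ORIG restricts the dump to the CPU that hit the error, while
 * DUMP_ALL would dump every CPU's buffer.
 */
static void __maybe_unused example_dump_on_error(int err)
{
        if (WARN_ON(err))
                ftrace_dump(DUMP_ORIG);
}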
9951
9952 #define WRITE_BUFSIZE  4096
9953
9954 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9955                                 size_t count, loff_t *ppos,
9956                                 int (*createfn)(const char *))
9957 {
9958         char *kbuf, *buf, *tmp;
9959         int ret = 0;
9960         size_t done = 0;
9961         size_t size;
9962
9963         kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9964         if (!kbuf)
9965                 return -ENOMEM;
9966
9967         while (done < count) {
9968                 size = count - done;
9969
9970                 if (size >= WRITE_BUFSIZE)
9971                         size = WRITE_BUFSIZE - 1;
9972
9973                 if (copy_from_user(kbuf, buffer + done, size)) {
9974                         ret = -EFAULT;
9975                         goto out;
9976                 }
9977                 kbuf[size] = '\0';
9978                 buf = kbuf;
9979                 do {
9980                         tmp = strchr(buf, '\n');
9981                         if (tmp) {
9982                                 *tmp = '\0';
9983                                 size = tmp - buf + 1;
9984                         } else {
9985                                 size = strlen(buf);
9986                                 if (done + size < count) {
9987                                         if (buf != kbuf)
9988                                                 break;
9989                                         /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9990                                         pr_warn("Line length is too long: Should be less than %d\n",
9991                                                 WRITE_BUFSIZE - 2);
9992                                         ret = -EINVAL;
9993                                         goto out;
9994                                 }
9995                         }
9996                         done += size;
9997
9998                         /* Remove comments */
9999                         tmp = strchr(buf, '#');
10000
10001                         if (tmp)
10002                                 *tmp = '\0';
10003
10004                         ret = createfn(buf);
10005                         if (ret)
10006                                 goto out;
10007                         buf += size;
10008
10009                 } while (done < count);
10010         }
10011         ret = done;
10012
10013 out:
10014         kfree(kbuf);
10015
10016         return ret;
10017 }
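
/*
 * Sketch of the intended calling convention (example_write and
 * example_create_cmd are hypothetical): a tracefs ->write handler hands the
 * user buffer to trace_parse_run_command(), which calls the callback once
 * per newline-terminated command, with '#' comments already stripped.
 * This mirrors how the kprobe/uprobe event files use this helper.
 */
static int example_create_cmd(const char *cmd)
{
        pr_debug("parsed command: %s\n", cmd);
        return 0;
}

static ssize_t __maybe_unused example_write(struct file *file,
                                            const char __user *buffer,
                                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       example_create_cmd);
}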
10018
10019 #ifdef CONFIG_TRACER_MAX_TRACE
10020 __init static bool tr_needs_alloc_snapshot(const char *name)
10021 {
10022         char *test;
10023         int len = strlen(name);
10024         bool ret;
10025
10026         if (!boot_snapshot_index)
10027                 return false;
10028
10029         if (strncmp(name, boot_snapshot_info, len) == 0 &&
10030             boot_snapshot_info[len] == '\t')
10031                 return true;
10032
10033         test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10034         if (!test)
10035                 return false;
10036
10037         sprintf(test, "\t%s\t", name);
10038         ret = strstr(boot_snapshot_info, test) != NULL; /* listed => needs snapshot */
10039         kfree(test);
10040         return ret;
10041 }
10042
10043 __init static void do_allocate_snapshot(const char *name)
10044 {
10045         if (!tr_needs_alloc_snapshot(name))
10046                 return;
10047
10048         /*
10049          * When allocate_snapshot is set, the next call to
10050          * allocate_trace_buffers() (called by trace_array_get_by_name())
10051          * will allocate the snapshot buffer. That will also clear
10052          * this flag.
10053          */
10054         allocate_snapshot = true;
10055 }
10056 #else
10057 static inline void do_allocate_snapshot(const char *name) { }
10058 #endif
10059
10060 __init static void enable_instances(void)
10061 {
10062         struct trace_array *tr;
10063         char *curr_str;
10064         char *str;
10065         char *tok;
10066
10067         /* A tab is always appended */
10068         boot_instance_info[boot_instance_index - 1] = '\0';
10069         str = boot_instance_info;
10070
10071         while ((curr_str = strsep(&str, "\t"))) {
10072
10073                 tok = strsep(&curr_str, ",");
10074
10075                 if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10076                         do_allocate_snapshot(tok);
10077
10078                 tr = trace_array_get_by_name(tok, NULL);
10079                 if (!tr) {
10080                         pr_warn("Failed to create instance buffer %s\n", tok);
10081                         continue;
10082                 }
10083                 /* Allow user space to delete it */
10084                 trace_array_put(tr);
10085
10086                 while ((tok = strsep(&curr_str, ","))) {
10087                         early_enable_events(tr, tok, true);
10088                 }
10089         }
10090 }
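
/*
 * Example (the exact command line syntax is an assumption based on the
 * parsing above): booting with
 *
 *     trace_instance=foo,sched:sched_switch,sched:sched_wakeup
 *
 * makes enable_instances() create an instance "foo" and enable the two
 * listed events in it before user space comes up.
 */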
10091
10092 __init static int tracer_alloc_buffers(void)
10093 {
10094         int ring_buf_size;
10095         int ret = -ENOMEM;
10096
10097
10098         if (security_locked_down(LOCKDOWN_TRACEFS)) {
10099                 pr_warn("Tracing disabled due to lockdown\n");
10100                 return -EPERM;
10101         }
10102
10103         /*
10104          * Make sure we don't accidentally add more trace options
10105          * than we have bits for.
10106          */
10107         BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10108
10109         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10110                 goto out;
10111
10112         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10113                 goto out_free_buffer_mask;
10114
10115         /* Only allocate trace_printk buffers if a trace_printk exists */
10116         if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10117                 /* Must be called before global_trace.buffer is allocated */
10118                 trace_printk_init_buffers();
10119
10120         /* To save memory, keep the ring buffer size to its minimum */
10121         if (global_trace.ring_buffer_expanded)
10122                 ring_buf_size = trace_buf_size;
10123         else
10124                 ring_buf_size = 1;
10125
10126         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10127         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10128
10129         raw_spin_lock_init(&global_trace.start_lock);
10130
10131         /*
10132          * The prepare callback allocates some memory for the ring buffer. We
10133          * don't free the buffer if the CPU goes down. If we were to free
10134          * the buffer, then the user would lose any trace that was in the
10135          * buffer. The memory will be removed once the "instance" is removed.
10136          */
10137         ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10138                                       "trace/RB:prepare", trace_rb_cpu_prepare,
10139                                       NULL);
10140         if (ret < 0)
10141                 goto out_free_cpumask;
10142         /* Used for event triggers */
10143         ret = -ENOMEM;
10144         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10145         if (!temp_buffer)
10146                 goto out_rm_hp_state;
10147
10148         if (trace_create_savedcmd() < 0)
10149                 goto out_free_temp_buffer;
10150
10151         if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10152                 goto out_free_savedcmd;
10153
10154         /* TODO: make the number of buffers hot pluggable with CPUs */
10155         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10156                 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10157                 goto out_free_pipe_cpumask;
10158         }
10159         if (global_trace.buffer_disabled)
10160                 tracing_off();
10161
10162         if (trace_boot_clock) {
10163                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
10164                 if (ret < 0)
10165                         pr_warn("Trace clock %s not defined, going back to default\n",
10166                                 trace_boot_clock);
10167         }
10168
10169         /*
10170          * register_tracer() might reference current_trace, so it
10171          * needs to be set before we register anything. This is
10172          * just a bootstrap of current_trace anyway.
10173          */
10174         global_trace.current_trace = &nop_trace;
10175
10176         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10177
10178         ftrace_init_global_array_ops(&global_trace);
10179
10180         init_trace_flags_index(&global_trace);
10181
10182         register_tracer(&nop_trace);
10183
10184         /* Function tracing may start here (via kernel command line) */
10185         init_function_trace();
10186
10187         /* All seems OK, enable tracing */
10188         tracing_disabled = 0;
10189
10190         atomic_notifier_chain_register(&panic_notifier_list,
10191                                        &trace_panic_notifier);
10192
10193         register_die_notifier(&trace_die_notifier);
10194
10195         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10196
10197         INIT_LIST_HEAD(&global_trace.systems);
10198         INIT_LIST_HEAD(&global_trace.events);
10199         INIT_LIST_HEAD(&global_trace.hist_vars);
10200         INIT_LIST_HEAD(&global_trace.err_log);
10201         list_add(&global_trace.list, &ftrace_trace_arrays);
10202
10203         apply_trace_boot_options();
10204
10205         register_snapshot_cmd();
10206
10207         test_can_verify();
10208
10209         return 0;
10210
10211 out_free_pipe_cpumask:
10212         free_cpumask_var(global_trace.pipe_cpumask);
10213 out_free_savedcmd:
10214         trace_free_saved_cmdlines_buffer();
10215 out_free_temp_buffer:
10216         ring_buffer_free(temp_buffer);
10217 out_rm_hp_state:
10218         cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10219 out_free_cpumask:
10220         free_cpumask_var(global_trace.tracing_cpumask);
10221 out_free_buffer_mask:
10222         free_cpumask_var(tracing_buffer_mask);
10223 out:
10224         return ret;
10225 }
10226
10227 void __init ftrace_boot_snapshot(void)
10228 {
10229 #ifdef CONFIG_TRACER_MAX_TRACE
10230         struct trace_array *tr;
10231
10232         if (!snapshot_at_boot)
10233                 return;
10234
10235         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10236                 if (!tr->allocated_snapshot)
10237                         continue;
10238
10239                 tracing_snapshot_instance(tr);
10240                 trace_array_puts(tr, "** Boot snapshot taken **\n");
10241         }
10242 #endif
10243 }
10244
10245 void __init early_trace_init(void)
10246 {
10247         if (tracepoint_printk) {
10248                 tracepoint_print_iter =
10249                         kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10250                 if (MEM_FAIL(!tracepoint_print_iter,
10251                              "Failed to allocate trace iterator\n"))
10252                         tracepoint_printk = 0;
10253                 else
10254                         static_key_enable(&tracepoint_printk_key.key);
10255         }
10256         tracer_alloc_buffers();
10257
10258         init_events();
10259 }
10260
10261 void __init trace_init(void)
10262 {
10263         trace_event_init();
10264
10265         if (boot_instance_index)
10266                 enable_instances();
10267 }
10268
10269 __init static void clear_boot_tracer(void)
10270 {
10271         /*
10272          * The default bootup tracer string lives in an init section.
10273          * This function runs from a late initcall. If we did not
10274          * find the boot tracer, then clear it out, to prevent
10275          * later registration from accessing the buffer that is
10276          * about to be freed.
10277          */
10278         if (!default_bootup_tracer)
10279                 return;
10280
10281         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10282                default_bootup_tracer);
10283         default_bootup_tracer = NULL;
10284 }
10285
10286 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10287 __init static void tracing_set_default_clock(void)
10288 {
10289         /* sched_clock_stable() is determined in late_initcall */
10290         if (!trace_boot_clock && !sched_clock_stable()) {
10291                 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10292                         pr_warn("Can not set tracing clock due to lockdown\n");
10293                         return;
10294                 }
10295
10296                 printk(KERN_WARNING
10297                        "Unstable clock detected, switching default tracing clock to \"global\"\n"
10298                        "If you want to keep using the local clock, then add:\n"
10299                        "  \"trace_clock=local\"\n"
10300                        "on the kernel command line\n");
10301                 tracing_set_clock(&global_trace, "global");
10302         }
10303 }
10304 #else
10305 static inline void tracing_set_default_clock(void) { }
10306 #endif
10307
10308 __init static int late_trace_init(void)
10309 {
10310         if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10311                 static_key_disable(&tracepoint_printk_key.key);
10312                 tracepoint_printk = 0;
10313         }
10314
10315         tracing_set_default_clock();
10316         clear_boot_tracer();
10317         return 0;
10318 }
10319
10320 late_initcall_sync(late_trace_init);