/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a knob to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of the function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * the last traced call actually stops coming in.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused with a reference read from the code while
 * parsing the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
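
/*
 * A worked example (sizes are illustrative, not guaranteed): on a
 * 64-bit build with 4 KiB pages, a 32-byte dyn_ftrace (list, ip,
 * flags) and a 16-byte ftrace_page header give
 * (4096 - 16) / 32 = 127 records per page, so the NR_TO_INIT
 * estimate below of 10000 entries works out to 79 pages.
 */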
/* estimate from running different kernels */
#define NR_TO_INIT		10000
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
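
/*
 * Note on the freeze helpers above: when a kprobe is installed on an
 * mcount call site, the site contains the kprobe's breakpoint rather
 * than the call ftrace expects, so ftrace_replace_code() freezes such
 * records and skips them instead of patching over the kprobe.
 */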
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
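
/*
 * Freed records are chained through their ip field: after freeing
 * record A and then record B, ftrace_free_records points to B and
 * B->ip holds the address of A. ftrace_alloc_dyn_node() pops records
 * off this list before carving new ones out of a page.
 */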
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			enable = 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			enable = 1;
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (enable)
		return ftrace_make_call(rec, FTRACE_ADDR);
	else
		return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
}
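
/*
 * A compact summary of the filtered case above, where fl is the
 * record's FILTER/NOTRACE/ENABLED bits:
 *
 *	FILTER|ENABLED, FILTER|NOTRACE, NOTRACE, none  -> leave as is
 *	FILTER alone                                   -> patch in the call
 *	any other combination with ENABLED set         -> patch in the nop
 */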
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
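
/*
 * stop_machine() runs __ftrace_modify_code() with all other CPUs
 * quiesced, so no CPU can be executing or fetching an mcount call
 * site while its instruction bytes are rewritten.
 */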
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	if (ftrace_start_up == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */
	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %lu entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start	= t_start,
	.next	= t_next,
	.stop	= t_stop,
	.show	= t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
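
/*
 * Examples of the glob forms parsed above (symbol names are
 * hypothetical):
 *
 *	"sys_open"	MATCH_FULL		exactly sys_open
 *	"sys_*"		MATCH_FRONT_ONLY	starts with "sys_"
 *	"*_lock"	MATCH_END_ONLY		ends with "_lock"
 *	"*lock*"	MATCH_MIDDLE_ONLY	contains "lock"
 */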
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}
		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}
		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
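
/*
 * Usage sketch (a hypothetical in-kernel caller, not part of this
 * file): trace everything matching "sched_*" except sched_clock:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace("sched_clock", strlen("sched_clock"), 1);
 */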
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
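
/*
 * The files created above are the user-space control interface,
 * normally found under the tracing directory of a mounted debugfs
 * (e.g. /sys/kernel/debug/tracing). A sample session:
 *
 *	echo 'sys_*' > set_ftrace_filter	# trace only sys_* functions
 *	echo sched_clock > set_ftrace_notrace	# ...but never sched_clock
 *	cat available_filter_functions		# every patchable call site
 *	cat failures				# sites ftrace failed to patch
 */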
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. It takes no locks, so it is safe to
 * call from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
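
/*
 * A minimal sketch of a caller (hypothetical, not part of this file),
 * showing the notrace requirement from the comment above:
 *
 *	static atomic_t my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */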
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
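
/*
 * This handler backs the kernel.ftrace_enabled sysctl, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	# stub out tracing
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	# restore registered ops
 */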
#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
	(trace_function_return_t)ftrace_stub;

void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif