Git Repo - linux.git / blame / kernel/trace/trace_events.c
tracing: Add set_event_pid directory for future use
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <[email protected]>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <[email protected]>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

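/*
 * Example usage of the double-loop helpers (this exact pattern appears in
 * event_remove() further down); note that "break" only leaves the inner
 * per-tr list and moves on to the next trace_array:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		ftrace_event_enable_disable(file, 0);
 *		break;
 *	} while_for_each_event_file();
 */
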
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

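/*
 * A typical (hypothetical) caller describes one member of its entry
 * structure so the filtering code can locate it; e.g. for an "int foo"
 * member of a made-up struct my_entry:
 *
 *	trace_define_field(call, "int", "foo",
 *			   offsetof(struct my_entry, foo),
 *			   sizeof(int), is_signed_type(int), FILTER_OTHER);
 */
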
#define __generic_field(type, item, filter_type)		\
	ret = __trace_define_field(&ftrace_generic_fields,	\
				   #type, #item, 0, 0,		\
				   is_signed_type(type),	\
				   filter_type);		\
	if (ret)						\
		return ret;

#define __common_field(type, item)				\
	ret = __trace_define_field(&ftrace_common_fields,	\
				   #type, "common_" #item,	\
				   offsetof(typeof(ent), item),	\
				   sizeof(ent.item),		\
				   is_signed_type(type),	\
				   FILTER_OTHER);		\
	if (ret)						\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, cpu, FILTER_OTHER);
	__generic_field(char *, comm, FILTER_PTR_STRING);

	return ret;
}
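
/*
 * For illustration, __generic_field(int, cpu, FILTER_OTHER) above expands
 * roughly to:
 *
 *	ret = __trace_define_field(&ftrace_generic_fields, "int", "cpu",
 *				   0, 0, is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */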

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

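/*
 * A short summary of the flags juggled above:
 *
 *   ENABLED       - the tracepoint probe is registered and recording.
 *   SOFT_MODE     - something holds a soft reference (sm_ref) on the
 *                   event, so the probe must stay registered.
 *   SOFT_DISABLED - the probe is registered, but the event should not
 *                   record until the bit is cleared (a "soft enable").
 */
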
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static int cmp_pid(const void *key, const void *elt)
{
	const pid_t *search_pid = key;
	const pid_t *pid = elt;

	if (*search_pid == *pid)
		return 0;
	if (*search_pid < *pid)
		return -1;
	return 1;
}
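
/*
 * cmp_pid() is used with sort() in ftrace_event_pid_write() below, and its
 * (key, element) signature also suits bsearch() (hence the
 * <linux/bsearch.h> include above) for O(log n) lookups in the sorted
 * pid array.
 */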

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	free_pages((unsigned long)pid_list->pids, pid_list->order);
	kfree(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * Its events cannot be enabled or disabled via the
		 * event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *p_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list || *pos >= pid_list->nr_pids)
		return NULL;

	return (void *)&pid_list->pids[*pos];
}

static void p_stop(struct seq_file *m, void *p)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	(*pos)++;

	if (*pos >= pid_list->nr_pids)
		return NULL;

	return (void *)&pid_list->pids[*pos];
}

static int p_show(struct seq_file *m, void *v)
{
	pid_t *pid = v;

	seq_printf(m, "%d\n", *pid);
	return 0;
}
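
/*
 * The t_*, s_* and p_* callbacks above form three seq_file iterators:
 * t_* walks every available event, s_* walks only the enabled events,
 * and p_* walks the filtered-pid array behind the set_event_pid file
 * (see the seq_operations tables further down).
 */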

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static int max_pids(struct trace_pid_list *pid_list)
{
	return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
}
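
/*
 * For example, with 4K pages and a 4-byte pid_t, order 0 holds
 * 4096 / 4 = 1024 pids, and each increment of ->order doubles that.
 */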
1430
1431static ssize_t
1432ftrace_event_pid_write(struct file *file, const char __user *ubuf,
1433 size_t cnt, loff_t *ppos)
1434{
1435 struct seq_file *m = file->private_data;
1436 struct trace_array *tr = m->private;
1437 struct trace_pid_list *filtered_pids = NULL;
1438 struct trace_pid_list *pid_list = NULL;
1439 struct trace_parser parser;
1440 unsigned long val;
1441 loff_t this_pos;
1442 ssize_t read = 0;
1443 ssize_t ret = 0;
1444 pid_t pid;
1445 int i;
1446
1447 if (!cnt)
1448 return 0;
1449
1450 ret = tracing_update_buffers();
1451 if (ret < 0)
1452 return ret;
1453
1454 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
1455 return -ENOMEM;
1456
1457 mutex_lock(&event_mutex);
1458 /*
1459 * Load as many pids into the array before doing a
1460 * swap from the tr->filtered_pids to the new list.
1461 */
1462 while (cnt > 0) {
1463
1464 this_pos = 0;
1465
1466 ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
1467 if (ret < 0 || !trace_parser_loaded(&parser))
1468 break;
1469
1470 read += ret;
1471 ubuf += ret;
1472 cnt -= ret;
1473
1474 parser.buffer[parser.idx] = 0;
1475
1476 ret = -EINVAL;
1477 if (kstrtoul(parser.buffer, 0, &val))
1478 break;
1479 if (val > INT_MAX)
1480 break;
1481
1482 pid = (pid_t)val;
1483
1484 ret = -ENOMEM;
1485 if (!pid_list) {
1486 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
1487 if (!pid_list)
1488 break;
1489
1490 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1491 lockdep_is_held(&event_mutex));
1492 if (filtered_pids)
1493 pid_list->order = filtered_pids->order;
1494 else
1495 pid_list->order = 0;
1496
1497 pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
1498 pid_list->order);
1499 if (!pid_list->pids)
1500 break;
1501
1502 if (filtered_pids) {
1503 pid_list->nr_pids = filtered_pids->nr_pids;
1504 memcpy(pid_list->pids, filtered_pids->pids,
1505 pid_list->nr_pids * sizeof(pid_t));
1506 } else
1507 pid_list->nr_pids = 0;
1508 }
1509
1510 if (pid_list->nr_pids >= max_pids(pid_list)) {
1511 pid_t *pid_page;
1512
1513 pid_page = (void *)__get_free_pages(GFP_KERNEL,
1514 pid_list->order + 1);
1515 if (!pid_page)
1516 break;
1517 memcpy(pid_page, pid_list->pids,
1518 pid_list->nr_pids * sizeof(pid_t));
1519 free_pages((unsigned long)pid_list->pids, pid_list->order);
1520
1521 pid_list->order++;
1522 pid_list->pids = pid_page;
1523 }
1524
1525 pid_list->pids[pid_list->nr_pids++] = pid;
1526 trace_parser_clear(&parser);
1527 ret = 0;
1528 }
1529 trace_parser_put(&parser);
1530
1531 if (ret < 0) {
1532 if (pid_list)
1533 free_pages((unsigned long)pid_list->pids, pid_list->order);
1534 kfree(pid_list);
1535 mutex_unlock(&event_mutex);
1536 return ret;
1537 }
1538
1539 if (!pid_list) {
1540 mutex_unlock(&event_mutex);
1541 return ret;
1542 }
1543
1544 sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
1545
1546 /* Remove duplicates */
1547 for (i = 1; i < pid_list->nr_pids; i++) {
1548 int start = i;
1549
1550 while (i < pid_list->nr_pids &&
1551 pid_list->pids[i - 1] == pid_list->pids[i])
1552 i++;
1553
1554 if (start != i) {
1555 if (i < pid_list->nr_pids) {
1556 memmove(&pid_list->pids[start], &pid_list->pids[i],
1557 (pid_list->nr_pids - i) * sizeof(pid_t));
1558 pid_list->nr_pids -= i - start;
1559 i = start;
1560 } else
1561 pid_list->nr_pids = start;
1562 }
1563 }
1564
1565 rcu_assign_pointer(tr->filtered_pids, pid_list);
1566
1567 mutex_unlock(&event_mutex);
1568
1569 if (filtered_pids) {
1570 synchronize_sched();
1571
1572 free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
1573 kfree(filtered_pids);
1574 }
1575
1576 ret = read;
1577 *ppos += read;
1578
1579 return ret;
1580}
1581
15075cac
SR
1582static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1583static int ftrace_event_set_open(struct inode *inode, struct file *file);
49090107 1584static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
f77d09a3 1585static int ftrace_event_release(struct inode *inode, struct file *file);
15075cac 1586
b77e38aa
SR
1587static const struct seq_operations show_event_seq_ops = {
1588 .start = t_start,
1589 .next = t_next,
1590 .show = t_show,
1591 .stop = t_stop,
1592};
1593
1594static const struct seq_operations show_set_event_seq_ops = {
1595 .start = s_start,
1596 .next = s_next,
1597 .show = t_show,
1598 .stop = t_stop,
1599};
1600
49090107
SRRH
1601static const struct seq_operations show_set_pid_seq_ops = {
1602 .start = p_start,
1603 .next = p_next,
1604 .show = p_show,
1605 .stop = p_stop,
1606};
1607
2314c4ae 1608static const struct file_operations ftrace_avail_fops = {
15075cac 1609 .open = ftrace_event_avail_open,
2314c4ae
SR
1610 .read = seq_read,
1611 .llseek = seq_lseek,
1612 .release = seq_release,
1613};
1614
b77e38aa 1615static const struct file_operations ftrace_set_event_fops = {
15075cac 1616 .open = ftrace_event_set_open,
b77e38aa
SR
1617 .read = seq_read,
1618 .write = ftrace_event_write,
1619 .llseek = seq_lseek,
f77d09a3 1620 .release = ftrace_event_release,
b77e38aa
SR
1621};
1622
49090107
SRRH
1623static const struct file_operations ftrace_set_event_pid_fops = {
1624 .open = ftrace_event_set_pid_open,
1625 .read = seq_read,
1626 .write = ftrace_event_pid_write,
1627 .llseek = seq_lseek,
1628 .release = ftrace_event_release,
1629};
1630
1473e441 1631static const struct file_operations ftrace_enable_fops = {
bf682c31 1632 .open = tracing_open_generic,
1473e441
SR
1633 .read = event_enable_read,
1634 .write = event_enable_write,
6038f373 1635 .llseek = default_llseek,
1473e441
SR
1636};
1637
981d081e 1638static const struct file_operations ftrace_event_format_fops = {
2a37a3df
SR
1639 .open = trace_format_open,
1640 .read = seq_read,
1641 .llseek = seq_lseek,
1642 .release = seq_release,
981d081e
SR
1643};
1644
23725aee 1645static const struct file_operations ftrace_event_id_fops = {
23725aee 1646 .read = event_id_read,
6038f373 1647 .llseek = default_llseek,
23725aee
PZ
1648};
1649
7ce7e424
TZ
1650static const struct file_operations ftrace_event_filter_fops = {
1651 .open = tracing_open_generic,
1652 .read = event_filter_read,
1653 .write = event_filter_write,
6038f373 1654 .llseek = default_llseek,
7ce7e424
TZ
1655};
1656
cfb180f3 1657static const struct file_operations ftrace_subsystem_filter_fops = {
e9dbfae5 1658 .open = subsystem_open,
cfb180f3
TZ
1659 .read = subsystem_filter_read,
1660 .write = subsystem_filter_write,
6038f373 1661 .llseek = default_llseek,
e9dbfae5 1662 .release = subsystem_release,
cfb180f3
TZ
1663};
1664
8ae79a13 1665static const struct file_operations ftrace_system_enable_fops = {
40ee4dff 1666 .open = subsystem_open,
8ae79a13
SR
1667 .read = system_enable_read,
1668 .write = system_enable_write,
6038f373 1669 .llseek = default_llseek,
40ee4dff 1670 .release = subsystem_release,
8ae79a13
SR
1671};
1672
ae63b31e
SR
1673static const struct file_operations ftrace_tr_enable_fops = {
1674 .open = system_tr_open,
1675 .read = system_enable_read,
1676 .write = system_enable_write,
1677 .llseek = default_llseek,
1678 .release = subsystem_release,
1679};
1680
d1b182a8
SR
1681static const struct file_operations ftrace_show_header_fops = {
1682 .open = tracing_open_generic,
1683 .read = show_header,
6038f373 1684 .llseek = default_llseek,
d1b182a8
SR
1685};
1686
ae63b31e
SR
1687static int
1688ftrace_event_open(struct inode *inode, struct file *file,
1689 const struct seq_operations *seq_ops)
1473e441 1690{
ae63b31e
SR
1691 struct seq_file *m;
1692 int ret;
1473e441 1693
ae63b31e
SR
1694 ret = seq_open(file, seq_ops);
1695 if (ret < 0)
1696 return ret;
1697 m = file->private_data;
1698 /* copy tr over to seq ops */
1699 m->private = inode->i_private;
1473e441 1700
ae63b31e 1701 return ret;
1473e441
SR
1702}
1703
f77d09a3
AL
1704static int ftrace_event_release(struct inode *inode, struct file *file)
1705{
1706 struct trace_array *tr = inode->i_private;
1707
1708 trace_array_put(tr);
1709
1710 return seq_release(inode, file);
1711}
1712
15075cac
SR
1713static int
1714ftrace_event_avail_open(struct inode *inode, struct file *file)
1715{
1716 const struct seq_operations *seq_ops = &show_event_seq_ops;
1717
ae63b31e 1718 return ftrace_event_open(inode, file, seq_ops);
15075cac
SR
1719}
1720
1721static int
1722ftrace_event_set_open(struct inode *inode, struct file *file)
1723{
1724 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
ae63b31e 1725 struct trace_array *tr = inode->i_private;
f77d09a3
AL
1726 int ret;
1727
1728 if (trace_array_get(tr) < 0)
1729 return -ENODEV;
15075cac
SR
1730
1731 if ((file->f_mode & FMODE_WRITE) &&
1732 (file->f_flags & O_TRUNC))
ae63b31e 1733 ftrace_clear_events(tr);
15075cac 1734
f77d09a3
AL
1735 ret = ftrace_event_open(inode, file, seq_ops);
1736 if (ret < 0)
1737 trace_array_put(tr);
1738 return ret;
ae63b31e
SR
1739}
1740
49090107
SRRH
1741static int
1742ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1743{
1744 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1745 struct trace_array *tr = inode->i_private;
1746 int ret;
1747
1748 if (trace_array_get(tr) < 0)
1749 return -ENODEV;
1750
1751 if ((file->f_mode & FMODE_WRITE) &&
1752 (file->f_flags & O_TRUNC))
1753 ftrace_clear_event_pids(tr);
1754
1755 ret = ftrace_event_open(inode, file, seq_ops);
1756 if (ret < 0)
1757 trace_array_put(tr);
1758 return ret;
1759}
1760
ae63b31e
SR
1761static struct event_subsystem *
1762create_new_subsystem(const char *name)
1763{
1764 struct event_subsystem *system;
1765
1766 /* need to create new entry */
1767 system = kmalloc(sizeof(*system), GFP_KERNEL);
1768 if (!system)
1769 return NULL;
1770
1771 system->ref_count = 1;
6e94a780
SR
1772
1773 /* Only allocate if dynamic (kprobes and modules) */
79ac6ef5
RV
1774 system->name = kstrdup_const(name, GFP_KERNEL);
1775 if (!system->name)
1776 goto out_free;
ae63b31e
SR
1777
1778 system->filter = NULL;
1779
1780 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1781 if (!system->filter)
1782 goto out_free;
1783
1784 list_add(&system->list, &event_subsystems);
1785
1786 return system;
1787
1788 out_free:
79ac6ef5 1789 kfree_const(system->name);
ae63b31e
SR
1790 kfree(system);
1791 return NULL;
15075cac
SR
1792}
1793
6ecc2d1c 1794static struct dentry *
ae63b31e 1795event_subsystem_dir(struct trace_array *tr, const char *name,
7f1d2f82 1796 struct trace_event_file *file, struct dentry *parent)
6ecc2d1c 1797{
7967b3e0 1798 struct trace_subsystem_dir *dir;
6ecc2d1c 1799 struct event_subsystem *system;
e1112b4d 1800 struct dentry *entry;
6ecc2d1c
SR
1801
1802 /* First see if we did not already create this dir */
ae63b31e
SR
1803 list_for_each_entry(dir, &tr->systems, list) {
1804 system = dir->subsystem;
dc82ec98 1805 if (strcmp(system->name, name) == 0) {
ae63b31e
SR
1806 dir->nr_events++;
1807 file->system = dir;
1808 return dir->entry;
dc82ec98 1809 }
6ecc2d1c
SR
1810 }
1811
ae63b31e
SR
1812 /* Now see if the system itself exists. */
1813 list_for_each_entry(system, &event_subsystems, list) {
1814 if (strcmp(system->name, name) == 0)
1815 break;
6ecc2d1c 1816 }
ae63b31e
SR
1817 /* Reset system variable when not found */
1818 if (&system->list == &event_subsystems)
1819 system = NULL;
6ecc2d1c 1820
ae63b31e
SR
1821 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1822 if (!dir)
1823 goto out_fail;
6ecc2d1c 1824
ae63b31e
SR
1825 if (!system) {
1826 system = create_new_subsystem(name);
1827 if (!system)
1828 goto out_free;
1829 } else
1830 __get_system(system);
1831
8434dc93 1832 dir->entry = tracefs_create_dir(name, parent);
ae63b31e 1833 if (!dir->entry) {
3448bac3 1834 pr_warn("Failed to create system directory %s\n", name);
ae63b31e
SR
1835 __put_system(system);
1836 goto out_free;
6d723736
SR
1837 }
1838
ae63b31e
SR
1839 dir->tr = tr;
1840 dir->ref_count = 1;
1841 dir->nr_events = 1;
1842 dir->subsystem = system;
1843 file->system = dir;
8b372562 1844
8434dc93 1845 entry = tracefs_create_file("filter", 0644, dir->entry, dir,
e1112b4d 1846 &ftrace_subsystem_filter_fops);
8b372562
TZ
1847 if (!entry) {
1848 kfree(system->filter);
1849 system->filter = NULL;
8434dc93 1850 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
8b372562 1851 }
e1112b4d 1852
ae63b31e 1853 trace_create_file("enable", 0644, dir->entry, dir,
f3f3f009 1854 &ftrace_system_enable_fops);
8ae79a13 1855
ae63b31e
SR
1856 list_add(&dir->list, &tr->systems);
1857
1858 return dir->entry;
1859
1860 out_free:
1861 kfree(dir);
1862 out_fail:
1863 /* Only print this message if failed on memory allocation */
1864 if (!dir || !system)
3448bac3 1865 pr_warn("No memory to create event subsystem %s\n", name);
ae63b31e 1866 return NULL;
6ecc2d1c
SR
1867}
1868
1473e441 1869static int
7f1d2f82 1870event_create_dir(struct dentry *parent, struct trace_event_file *file)
1473e441 1871{
2425bcb9 1872 struct trace_event_call *call = file->event_call;
ae63b31e 1873 struct trace_array *tr = file->tr;
2e33af02 1874 struct list_head *head;
ae63b31e 1875 struct dentry *d_events;
de7b2973 1876 const char *name;
fd994989 1877 int ret;
1473e441 1878
6ecc2d1c
SR
1879 /*
1880 * If the trace point header did not define TRACE_SYSTEM
1881 * then the system would be called "TRACE_SYSTEM".
1882 */
ae63b31e
SR
1883 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1884 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1885 if (!d_events)
1886 return -ENOMEM;
1887 } else
1888 d_events = parent;
1889
687fcc4a 1890 name = trace_event_name(call);
8434dc93 1891 file->dir = tracefs_create_dir(name, d_events);
ae63b31e 1892 if (!file->dir) {
8434dc93 1893 pr_warn("Could not create tracefs '%s' directory\n", name);
1473e441
SR
1894 return -1;
1895 }
1896
9b63776f 1897 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
ae63b31e 1898 trace_create_file("enable", 0644, file->dir, file,
620a30e9 1899 &ftrace_enable_fops);
1473e441 1900
2239291a 1901#ifdef CONFIG_PERF_EVENTS
a1d0ce82 1902 if (call->event.type && call->class->reg)
1a11126b 1903 trace_create_file("id", 0444, file->dir,
620a30e9
ON
1904 (void *)(long)call->event.type,
1905 &ftrace_event_id_fops);
2239291a 1906#endif
23725aee 1907
c9d932cf
LZ
1908 /*
1909 * Other events may have the same class. Only update
1910 * the fields if they are not already defined.
1911 */
1912 head = trace_get_fields(call);
1913 if (list_empty(head)) {
1914 ret = call->class->define_fields(call);
1915 if (ret < 0) {
3448bac3
FF
1916 pr_warn("Could not initialize trace point events/%s\n",
1917 name);
ae63b31e 1918 return -1;
cf027f64
TZ
1919 }
1920 }
f306cc82 1921 trace_create_file("filter", 0644, file->dir, file,
620a30e9 1922 &ftrace_event_filter_fops);
cf027f64 1923
85f2b082
TZ
1924 trace_create_file("trigger", 0644, file->dir, file,
1925 &event_trigger_fops);
1926
ae63b31e 1927 trace_create_file("format", 0444, file->dir, call,
620a30e9 1928 &ftrace_event_format_fops);
6d723736
SR
1929
1930 return 0;
1931}
1932
2425bcb9 1933static void remove_event_from_tracers(struct trace_event_call *call)
ae63b31e 1934{
7f1d2f82 1935 struct trace_event_file *file;
ae63b31e
SR
1936 struct trace_array *tr;
1937
1938 do_for_each_event_file_safe(tr, file) {
ae63b31e
SR
1939 if (file->event_call != call)
1940 continue;
1941
f6a84bdc 1942 remove_event_file_dir(file);
ae63b31e
SR
1943 /*
1944 * The do_for_each_event_file_safe() is
1945 * a double loop. After finding the call for this
1946 * trace_array, we use break to jump to the next
1947 * trace_array.
1948 */
1949 break;
1950 } while_for_each_event_file();
1951}
1952
2425bcb9 1953static void event_remove(struct trace_event_call *call)
8781915a 1954{
ae63b31e 1955 struct trace_array *tr;
7f1d2f82 1956 struct trace_event_file *file;
ae63b31e
SR
1957
1958 do_for_each_event_file(tr, file) {
1959 if (file->event_call != call)
1960 continue;
1961 ftrace_event_enable_disable(file, 0);
1962 /*
1963 * The do_for_each_event_file() is
1964 * a double loop. After finding the call for this
1965 * trace_array, we use break to jump to the next
1966 * trace_array.
1967 */
1968 break;
1969 } while_for_each_event_file();
1970
8781915a 1971 if (call->event.funcs)
9023c930 1972 __unregister_trace_event(&call->event);
ae63b31e 1973 remove_event_from_tracers(call);
8781915a
EG
1974 list_del(&call->list);
1975}
1976
2425bcb9 1977static int event_init(struct trace_event_call *call)
8781915a
EG
1978{
1979 int ret = 0;
de7b2973 1980 const char *name;
8781915a 1981
687fcc4a 1982 name = trace_event_name(call);
de7b2973 1983 if (WARN_ON(!name))
8781915a
EG
1984 return -EINVAL;
1985
1986 if (call->class->raw_init) {
1987 ret = call->class->raw_init(call);
1988 if (ret < 0 && ret != -ENOSYS)
3448bac3 1989 pr_warn("Could not initialize trace events/%s\n", name);
8781915a
EG
1990 }
1991
1992 return ret;
1993}
1994
67ead0a6 1995static int
2425bcb9 1996__register_event(struct trace_event_call *call, struct module *mod)
bd1a5c84 1997{
bd1a5c84 1998 int ret;
6d723736 1999
8781915a
EG
2000 ret = event_init(call);
2001 if (ret < 0)
2002 return ret;
701970b3 2003
ae63b31e 2004 list_add(&call->list, &ftrace_events);
67ead0a6 2005 call->mod = mod;
88f70d75 2006
ae63b31e 2007 return 0;
bd1a5c84
MH
2008}
2009
0c564a53
SRRH
2010static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2011{
2012 int rlen;
2013 int elen;
2014
2015 /* Find the length of the enum value as a string */
2016 elen = snprintf(ptr, 0, "%ld", map->enum_value);
2017 /* Make sure there's enough room to replace the string with the value */
2018 if (len < elen)
2019 return NULL;
2020
2021 snprintf(ptr, elen + 1, "%ld", map->enum_value);
2022
2023 /* Get the rest of the string of ptr */
2024 rlen = strlen(ptr + len);
2025 memmove(ptr + elen, ptr + len, rlen);
2026 /* Make sure we end the new string */
2027 ptr[elen + rlen] = 0;
2028
2029 return ptr + elen;
2030}
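/*
 * Editor's sketch (illustrative, user-space): a self-contained model of
 * the snprintf()/memmove() trick enum_replace() uses above. The token
 * "EVENT_A" and the value 42 stand in for map->enum_string and
 * map->enum_value; replace_token() is a hypothetical helper, not a
 * kernel API.
 */
#include <stdio.h>
#include <string.h>

static char *replace_token(char *ptr, long value, int len)
{
	/* snprintf() with size 0 writes nothing but reports the length */
	int elen = snprintf(NULL, 0, "%ld", value);
	int rlen;

	if (len < elen)		/* value does not fit over the token */
		return NULL;

	snprintf(ptr, elen + 1, "%ld", value);	/* digits plus a NUL */
	rlen = strlen(ptr + len);		/* tail after the token */
	memmove(ptr + elen, ptr + len, rlen + 1); /* pull tail + NUL up */
	return ptr + elen;
}

int main(void)
{
	char fmt[] = "prev_state == EVENT_A || prev_state == 0";
	char *p = strstr(fmt, "EVENT_A");

	replace_token(p, 42, (int)strlen("EVENT_A"));
	printf("%s\n", fmt);	/* prev_state == 42 || prev_state == 0 */
	return 0;
}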
2031
2425bcb9 2032static void update_event_printk(struct trace_event_call *call,
0c564a53
SRRH
2033 struct trace_enum_map *map)
2034{
2035 char *ptr;
2036 int quote = 0;
2037 int len = strlen(map->enum_string);
2038
2039 for (ptr = call->print_fmt; *ptr; ptr++) {
2040 if (*ptr == '\\') {
2041 ptr++;
2042 /* paranoid */
2043 if (!*ptr)
2044 break;
2045 continue;
2046 }
2047 if (*ptr == '"') {
2048 quote ^= 1;
2049 continue;
2050 }
2051 if (quote)
2052 continue;
2053 if (isdigit(*ptr)) {
2054 /* skip numbers */
2055 do {
2056 ptr++;
2057 /* Check for alpha chars like ULL */
2058 } while (isalnum(*ptr));
3193899d
SRRH
2059 if (!*ptr)
2060 break;
0c564a53
SRRH
2061 /*
2062 * A number must have some kind of delimiter after
2063 * it, and we can ignore that too.
2064 */
2065 continue;
2066 }
2067 if (isalpha(*ptr) || *ptr == '_') {
2068 if (strncmp(map->enum_string, ptr, len) == 0 &&
2069 !isalnum(ptr[len]) && ptr[len] != '_') {
2070 ptr = enum_replace(ptr, map, len);
2071 /* Hmm, enum string smaller than value */
2072 if (WARN_ON_ONCE(!ptr))
2073 return;
2074 /*
2075 * No need to decrement here, as enum_replace()
 2076	 * returns the pointer to the character past
 2077	 * the enum, and two enums cannot be placed
2078 * back to back without something in between.
2079 * We can skip that something in between.
2080 */
2081 continue;
2082 }
2083 skip_more:
2084 do {
2085 ptr++;
2086 } while (isalnum(*ptr) || *ptr == '_');
3193899d
SRRH
2087 if (!*ptr)
2088 break;
0c564a53
SRRH
2089 /*
2090 * If what comes after this variable is a '.' or
2091 * '->' then we can continue to ignore that string.
2092 */
2093 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2094 ptr += *ptr == '.' ? 1 : 2;
3193899d
SRRH
2095 if (!*ptr)
2096 break;
0c564a53
SRRH
2097 goto skip_more;
2098 }
2099 /*
2100 * Once again, we can skip the delimiter that came
2101 * after the string.
2102 */
2103 continue;
2104 }
2105 }
2106}
2107
2108void trace_event_enum_update(struct trace_enum_map **map, int len)
2109{
2425bcb9 2110 struct trace_event_call *call, *p;
0c564a53
SRRH
2111 const char *last_system = NULL;
2112 int last_i;
2113 int i;
2114
2115 down_write(&trace_event_sem);
2116 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2117 /* events are usually grouped together with systems */
2118 if (!last_system || call->class->system != last_system) {
2119 last_i = 0;
2120 last_system = call->class->system;
2121 }
2122
2123 for (i = last_i; i < len; i++) {
2124 if (call->class->system == map[i]->system) {
2125 /* Save the first system if need be */
2126 if (!last_i)
2127 last_i = i;
2128 update_event_printk(call, map[i]);
2129 }
2130 }
2131 }
2132 up_write(&trace_event_sem);
2133}
2134
7f1d2f82 2135static struct trace_event_file *
2425bcb9 2136trace_create_new_event(struct trace_event_call *call,
da511bf3
SRRH
2137 struct trace_array *tr)
2138{
7f1d2f82 2139 struct trace_event_file *file;
da511bf3
SRRH
2140
2141 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2142 if (!file)
2143 return NULL;
2144
2145 file->event_call = call;
2146 file->tr = tr;
2147 atomic_set(&file->sm_ref, 0);
85f2b082
TZ
2148 atomic_set(&file->tm_ref, 0);
2149 INIT_LIST_HEAD(&file->triggers);
da511bf3
SRRH
2150 list_add(&file->list, &tr->events);
2151
2152 return file;
2153}
2154
ae63b31e
SR
2155/* Add an event to a trace directory */
2156static int
2425bcb9 2157__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
ae63b31e 2158{
7f1d2f82 2159 struct trace_event_file *file;
ae63b31e 2160
da511bf3 2161 file = trace_create_new_event(call, tr);
ae63b31e
SR
2162 if (!file)
2163 return -ENOMEM;
2164
620a30e9 2165 return event_create_dir(tr->event_dir, file);
ae63b31e
SR
2166}
2167
77248221
SR
2168/*
 2169	 * Just create a descriptor for early init. A descriptor is required
2170 * for enabling events at boot. We want to enable events before
2171 * the filesystem is initialized.
2172 */
2173static __init int
2425bcb9 2174__trace_early_add_new_event(struct trace_event_call *call,
77248221
SR
2175 struct trace_array *tr)
2176{
7f1d2f82 2177 struct trace_event_file *file;
77248221 2178
da511bf3 2179 file = trace_create_new_event(call, tr);
77248221
SR
2180 if (!file)
2181 return -ENOMEM;
2182
77248221
SR
2183 return 0;
2184}
2185
ae63b31e 2186struct ftrace_module_file_ops;
2425bcb9 2187static void __add_event_to_tracers(struct trace_event_call *call);
ae63b31e 2188
bd1a5c84 2189/* Add an additional event_call dynamically */
2425bcb9 2190int trace_add_event_call(struct trace_event_call *call)
bd1a5c84
MH
2191{
2192 int ret;
a8227415 2193 mutex_lock(&trace_types_lock);
bd1a5c84 2194 mutex_lock(&event_mutex);
701970b3 2195
ae63b31e
SR
2196 ret = __register_event(call, NULL);
2197 if (ret >= 0)
779c5e37 2198 __add_event_to_tracers(call);
a2ca5e03 2199
ae63b31e 2200 mutex_unlock(&event_mutex);
a8227415 2201 mutex_unlock(&trace_types_lock);
ae63b31e 2202 return ret;
a2ca5e03
FW
2203}
2204
4fead8e4 2205/*
a8227415
AL
2206 * Must be called under locking of trace_types_lock, event_mutex and
2207 * trace_event_sem.
4fead8e4 2208 */
2425bcb9 2209static void __trace_remove_event_call(struct trace_event_call *call)
bd1a5c84 2210{
8781915a 2211 event_remove(call);
bd1a5c84 2212 trace_destroy_fields(call);
57375747
ON
2213 free_event_filter(call->filter);
2214 call->filter = NULL;
bd1a5c84
MH
2215}
2216
2425bcb9 2217static int probe_remove_event_call(struct trace_event_call *call)
2816c551
ON
2218{
2219 struct trace_array *tr;
7f1d2f82 2220 struct trace_event_file *file;
2816c551
ON
2221
2222#ifdef CONFIG_PERF_EVENTS
2223 if (call->perf_refcount)
2224 return -EBUSY;
2225#endif
2226 do_for_each_event_file(tr, file) {
2227 if (file->event_call != call)
2228 continue;
2229 /*
 2230	 * We can't rely on the ftrace_event_enable_disable(enable => 0)
5d6ad960 2231	 * call we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
2816c551
ON
2232 * TRACE_REG_UNREGISTER.
2233 */
5d6ad960 2234 if (file->flags & EVENT_FILE_FL_ENABLED)
2816c551 2235 return -EBUSY;
2ba64035
SRRH
2236 /*
 2237	 * The do_for_each_event_file() is
2238 * a double loop. After finding the call for this
2239 * trace_array, we use break to jump to the next
2240 * trace_array.
2241 */
2816c551
ON
2242 break;
2243 } while_for_each_event_file();
2244
2245 __trace_remove_event_call(call);
2246
2247 return 0;
2248}
2249
bd1a5c84 2250/* Remove an event_call */
2425bcb9 2251int trace_remove_event_call(struct trace_event_call *call)
bd1a5c84 2252{
2816c551
ON
2253 int ret;
2254
a8227415 2255 mutex_lock(&trace_types_lock);
bd1a5c84 2256 mutex_lock(&event_mutex);
52f6ad6d 2257 down_write(&trace_event_sem);
2816c551 2258 ret = probe_remove_event_call(call);
52f6ad6d 2259 up_write(&trace_event_sem);
bd1a5c84 2260 mutex_unlock(&event_mutex);
a8227415 2261 mutex_unlock(&trace_types_lock);
2816c551
ON
2262
2263 return ret;
bd1a5c84
MH
2264}
2265
2266#define for_each_event(event, start, end) \
2267 for (event = start; \
2268 (unsigned long)event < (unsigned long)end; \
2269 event++)
2270
2271#ifdef CONFIG_MODULES
2272
6d723736
SR
2273static void trace_module_add_events(struct module *mod)
2274{
2425bcb9 2275 struct trace_event_call **call, **start, **end;
6d723736 2276
45ab2813
SRRH
2277 if (!mod->num_trace_events)
2278 return;
2279
2280 /* Don't add infrastructure for mods without tracepoints */
2281 if (trace_module_has_bad_taint(mod)) {
2282 pr_err("%s: module has bad taint, not creating trace events\n",
2283 mod->name);
2284 return;
2285 }
2286
6d723736
SR
2287 start = mod->trace_events;
2288 end = mod->trace_events + mod->num_trace_events;
2289
6d723736 2290 for_each_event(call, start, end) {
ae63b31e 2291 __register_event(*call, mod);
779c5e37 2292 __add_event_to_tracers(*call);
6d723736
SR
2293 }
2294}
2295
2296static void trace_module_remove_events(struct module *mod)
2297{
2425bcb9 2298 struct trace_event_call *call, *p;
575380da 2299 bool clear_trace = false;
6d723736 2300
52f6ad6d 2301 down_write(&trace_event_sem);
6d723736
SR
2302 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2303 if (call->mod == mod) {
575380da
SRRH
2304 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2305 clear_trace = true;
bd1a5c84 2306 __trace_remove_event_call(call);
6d723736
SR
2307 }
2308 }
52f6ad6d 2309 up_write(&trace_event_sem);
9456f0fa
SR
2310
2311 /*
2312 * It is safest to reset the ring buffer if the module being unloaded
873c642f
SRRH
2313 * registered any events that were used. The only worry is if
2314 * a new module gets loaded, and takes on the same id as the events
2315 * of this module. When printing out the buffer, traced events left
2316 * over from this module may be passed to the new module events and
2317 * unexpected results may occur.
9456f0fa 2318 */
575380da 2319 if (clear_trace)
873c642f 2320 tracing_reset_all_online_cpus();
6d723736
SR
2321}
2322
61f919a1
SR
2323static int trace_module_notify(struct notifier_block *self,
2324 unsigned long val, void *data)
6d723736
SR
2325{
2326 struct module *mod = data;
2327
a8227415 2328 mutex_lock(&trace_types_lock);
6d723736
SR
2329 mutex_lock(&event_mutex);
2330 switch (val) {
2331 case MODULE_STATE_COMING:
2332 trace_module_add_events(mod);
2333 break;
2334 case MODULE_STATE_GOING:
2335 trace_module_remove_events(mod);
2336 break;
2337 }
2338 mutex_unlock(&event_mutex);
a8227415 2339 mutex_unlock(&trace_types_lock);
fd994989 2340
1473e441
SR
2341 return 0;
2342}
315326c1 2343
836d481e
ON
2344static struct notifier_block trace_module_nb = {
2345 .notifier_call = trace_module_notify,
3673b8e4 2346 .priority = 1, /* higher than trace.c module notify */
836d481e 2347};
61f919a1 2348#endif /* CONFIG_MODULES */
1473e441 2349
ae63b31e
SR
2350/* Create a new event directory structure for a trace directory. */
2351static void
2352__trace_add_event_dirs(struct trace_array *tr)
2353{
2425bcb9 2354 struct trace_event_call *call;
ae63b31e
SR
2355 int ret;
2356
2357 list_for_each_entry(call, &ftrace_events, list) {
620a30e9 2358 ret = __trace_add_new_event(call, tr);
ae63b31e 2359 if (ret < 0)
3448bac3 2360 pr_warn("Could not create directory for event %s\n",
687fcc4a 2361 trace_event_name(call));
ae63b31e
SR
2362 }
2363}
2364
7f1d2f82 2365struct trace_event_file *
3cd715de
SRRH
2366find_event_file(struct trace_array *tr, const char *system, const char *event)
2367{
7f1d2f82 2368 struct trace_event_file *file;
2425bcb9 2369 struct trace_event_call *call;
de7b2973 2370 const char *name;
3cd715de
SRRH
2371
2372 list_for_each_entry(file, &tr->events, list) {
2373
2374 call = file->event_call;
687fcc4a 2375 name = trace_event_name(call);
3cd715de 2376
de7b2973 2377 if (!name || !call->class || !call->class->reg)
3cd715de
SRRH
2378 continue;
2379
2380 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2381 continue;
2382
de7b2973 2383 if (strcmp(event, name) == 0 &&
3cd715de
SRRH
2384 strcmp(system, call->class->system) == 0)
2385 return file;
2386 }
2387 return NULL;
2388}
2389
2875a08b
SRRH
2390#ifdef CONFIG_DYNAMIC_FTRACE
2391
2392/* Avoid typos */
2393#define ENABLE_EVENT_STR "enable_event"
2394#define DISABLE_EVENT_STR "disable_event"
2395
2396struct event_probe_data {
7f1d2f82 2397 struct trace_event_file *file;
2875a08b
SRRH
2398 unsigned long count;
2399 int ref;
2400 bool enable;
2401};
2402
3cd715de
SRRH
2403static void
2404event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2405{
2406 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2407 struct event_probe_data *data = *pdata;
2408
2409 if (!data)
2410 return;
2411
2412 if (data->enable)
5d6ad960 2413 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3cd715de 2414 else
5d6ad960 2415 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3cd715de
SRRH
2416}
2417
2418static void
2419event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2420{
2421 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2422 struct event_probe_data *data = *pdata;
2423
2424 if (!data)
2425 return;
2426
2427 if (!data->count)
2428 return;
2429
2430 /* Skip if the event is in a state we want to switch to */
5d6ad960 2431 if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3cd715de
SRRH
2432 return;
2433
2434 if (data->count != -1)
2435 (data->count)--;
2436
2437 event_enable_probe(ip, parent_ip, _data);
2438}
2439
2440static int
2441event_enable_print(struct seq_file *m, unsigned long ip,
2442 struct ftrace_probe_ops *ops, void *_data)
2443{
2444 struct event_probe_data *data = _data;
2445
2446 seq_printf(m, "%ps:", (void *)ip);
2447
2448 seq_printf(m, "%s:%s:%s",
2449 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2450 data->file->event_call->class->system,
687fcc4a 2451 trace_event_name(data->file->event_call));
3cd715de
SRRH
2452
2453 if (data->count == -1)
fa6f0cc7 2454 seq_puts(m, ":unlimited\n");
3cd715de
SRRH
2455 else
2456 seq_printf(m, ":count=%ld\n", data->count);
2457
2458 return 0;
2459}
2460
2461static int
2462event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2463 void **_data)
2464{
2465 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2466 struct event_probe_data *data = *pdata;
2467
2468 data->ref++;
2469 return 0;
2470}
2471
2472static void
2473event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2474 void **_data)
2475{
2476 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2477 struct event_probe_data *data = *pdata;
2478
2479 if (WARN_ON_ONCE(data->ref <= 0))
2480 return;
2481
2482 data->ref--;
2483 if (!data->ref) {
2484 /* Remove the SOFT_MODE flag */
2485 __ftrace_event_enable_disable(data->file, 0, 1);
2486 module_put(data->file->event_call->mod);
2487 kfree(data);
2488 }
2489 *pdata = NULL;
2490}
2491
2492static struct ftrace_probe_ops event_enable_probe_ops = {
2493 .func = event_enable_probe,
2494 .print = event_enable_print,
2495 .init = event_enable_init,
2496 .free = event_enable_free,
2497};
2498
2499static struct ftrace_probe_ops event_enable_count_probe_ops = {
2500 .func = event_enable_count_probe,
2501 .print = event_enable_print,
2502 .init = event_enable_init,
2503 .free = event_enable_free,
2504};
2505
2506static struct ftrace_probe_ops event_disable_probe_ops = {
2507 .func = event_enable_probe,
2508 .print = event_enable_print,
2509 .init = event_enable_init,
2510 .free = event_enable_free,
2511};
2512
2513static struct ftrace_probe_ops event_disable_count_probe_ops = {
2514 .func = event_enable_count_probe,
2515 .print = event_enable_print,
2516 .init = event_enable_init,
2517 .free = event_enable_free,
2518};
2519
2520static int
2521event_enable_func(struct ftrace_hash *hash,
2522 char *glob, char *cmd, char *param, int enabled)
2523{
2524 struct trace_array *tr = top_trace_array();
7f1d2f82 2525 struct trace_event_file *file;
3cd715de
SRRH
2526 struct ftrace_probe_ops *ops;
2527 struct event_probe_data *data;
2528 const char *system;
2529 const char *event;
2530 char *number;
2531 bool enable;
2532 int ret;
2533
dc81e5e3
YY
2534 if (!tr)
2535 return -ENODEV;
2536
3cd715de 2537 /* hash funcs only work with set_ftrace_filter */
8092e808 2538 if (!enabled || !param)
3cd715de
SRRH
2539 return -EINVAL;
2540
2541 system = strsep(&param, ":");
2542 if (!param)
2543 return -EINVAL;
2544
2545 event = strsep(&param, ":");
2546
2547 mutex_lock(&event_mutex);
2548
2549 ret = -EINVAL;
2550 file = find_event_file(tr, system, event);
2551 if (!file)
2552 goto out;
2553
2554 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2555
2556 if (enable)
2557 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2558 else
2559 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2560
2561 if (glob[0] == '!') {
2562 unregister_ftrace_function_probe_func(glob+1, ops);
2563 ret = 0;
2564 goto out;
2565 }
2566
2567 ret = -ENOMEM;
2568 data = kzalloc(sizeof(*data), GFP_KERNEL);
2569 if (!data)
2570 goto out;
2571
2572 data->enable = enable;
2573 data->count = -1;
2574 data->file = file;
2575
2576 if (!param)
2577 goto out_reg;
2578
2579 number = strsep(&param, ":");
2580
2581 ret = -EINVAL;
2582 if (!strlen(number))
2583 goto out_free;
2584
2585 /*
2586 * We use the callback data field (which is a pointer)
2587 * as our counter.
2588 */
2589 ret = kstrtoul(number, 0, &data->count);
2590 if (ret)
2591 goto out_free;
2592
2593 out_reg:
2594 /* Don't let event modules unload while probe registered */
2595 ret = try_module_get(file->event_call->mod);
6ed01066
MH
2596 if (!ret) {
2597 ret = -EBUSY;
3cd715de 2598 goto out_free;
6ed01066 2599 }
3cd715de
SRRH
2600
2601 ret = __ftrace_event_enable_disable(file, 1, 1);
2602 if (ret < 0)
2603 goto out_put;
2604 ret = register_ftrace_function_probe(glob, ops, data);
ff305ded
SRRH
2605 /*
 2606	 * On success, the above returns the number of functions enabled,
2607 * but if it didn't find any functions it returns zero.
2608 * Consider no functions a failure too.
2609 */
a5b85bd1
MH
2610 if (!ret) {
2611 ret = -ENOENT;
3cd715de 2612 goto out_disable;
ff305ded
SRRH
2613 } else if (ret < 0)
2614 goto out_disable;
2615 /* Just return zero, not the number of enabled functions */
2616 ret = 0;
3cd715de
SRRH
2617 out:
2618 mutex_unlock(&event_mutex);
2619 return ret;
2620
2621 out_disable:
2622 __ftrace_event_enable_disable(file, 0, 1);
2623 out_put:
2624 module_put(file->event_call->mod);
2625 out_free:
2626 kfree(data);
2627 goto out;
2628}
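/*
 * Editor's sketch (illustrative, user-space): exercising the command that
 * event_enable_func() above parses. The format written through
 * set_ftrace_filter is
 *	<function>:enable_event:<system>:<event>[:count]
 * The probed function, the event, and the mount point are assumptions.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/set_ftrace_filter", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* enable sched:sched_switch the first 3 times __schedule() is hit */
	fprintf(f, "__schedule:enable_event:sched:sched_switch:3\n");
	return fclose(f) ? 1 : 0;
}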
2629
2630static struct ftrace_func_command event_enable_cmd = {
2631 .name = ENABLE_EVENT_STR,
2632 .func = event_enable_func,
2633};
2634
2635static struct ftrace_func_command event_disable_cmd = {
2636 .name = DISABLE_EVENT_STR,
2637 .func = event_enable_func,
2638};
2639
2640static __init int register_event_cmds(void)
2641{
2642 int ret;
2643
2644 ret = register_ftrace_command(&event_enable_cmd);
2645 if (WARN_ON(ret < 0))
2646 return ret;
2647 ret = register_ftrace_command(&event_disable_cmd);
2648 if (WARN_ON(ret < 0))
2649 unregister_ftrace_command(&event_enable_cmd);
2650 return ret;
2651}
2652#else
2653static inline int register_event_cmds(void) { return 0; }
2654#endif /* CONFIG_DYNAMIC_FTRACE */
2655
77248221 2656/*
7f1d2f82 2657 * The top level array has already had its trace_event_file
77248221 2658 * descriptors created in order to allow for early events to
8434dc93 2659	 * be recorded. This function is called after tracefs has been
77248221
SR
2660 * initialized, and we now have to create the files associated
2661 * to the events.
2662 */
2663static __init void
2664__trace_early_add_event_dirs(struct trace_array *tr)
2665{
7f1d2f82 2666 struct trace_event_file *file;
77248221
SR
2667 int ret;
2668
2669
2670 list_for_each_entry(file, &tr->events, list) {
620a30e9 2671 ret = event_create_dir(tr->event_dir, file);
77248221 2672 if (ret < 0)
3448bac3 2673 pr_warn("Could not create directory for event %s\n",
687fcc4a 2674 trace_event_name(file->event_call));
77248221
SR
2675 }
2676}
2677
2678/*
 2679	 * For early boot up, the top trace array needs to have
2680 * a list of events that can be enabled. This must be done before
2681 * the filesystem is set up in order to allow events to be traced
2682 * early.
2683 */
2684static __init void
2685__trace_early_add_events(struct trace_array *tr)
2686{
2425bcb9 2687 struct trace_event_call *call;
77248221
SR
2688 int ret;
2689
2690 list_for_each_entry(call, &ftrace_events, list) {
2691 /* Early boot up should not have any modules loaded */
2692 if (WARN_ON_ONCE(call->mod))
2693 continue;
2694
2695 ret = __trace_early_add_new_event(call, tr);
2696 if (ret < 0)
3448bac3 2697 pr_warn("Could not create early event %s\n",
687fcc4a 2698 trace_event_name(call));
77248221
SR
2699 }
2700}
2701
0c8916c3
SR
2702/* Remove the event directory structure for a trace directory. */
2703static void
2704__trace_remove_event_dirs(struct trace_array *tr)
2705{
7f1d2f82 2706 struct trace_event_file *file, *next;
0c8916c3 2707
f6a84bdc
ON
2708 list_for_each_entry_safe(file, next, &tr->events, list)
2709 remove_event_file_dir(file);
0c8916c3
SR
2710}
2711
2425bcb9 2712static void __add_event_to_tracers(struct trace_event_call *call)
ae63b31e
SR
2713{
2714 struct trace_array *tr;
2715
620a30e9
ON
2716 list_for_each_entry(tr, &ftrace_trace_arrays, list)
2717 __trace_add_new_event(call, tr);
ae63b31e
SR
2718}
2719
2425bcb9
SRRH
2720extern struct trace_event_call *__start_ftrace_events[];
2721extern struct trace_event_call *__stop_ftrace_events[];
a59fd602 2722
020e5f85
LZ
2723static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2724
2725static __init int setup_trace_event(char *str)
2726{
2727 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
55034cd6
SRRH
2728 ring_buffer_expanded = true;
2729 tracing_selftest_disabled = true;
020e5f85
LZ
2730
2731 return 1;
2732}
2733__setup("trace_event=", setup_trace_event);
2734
77248221
SR
2735/* Expects to have event_mutex held when called */
2736static int
2737create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
ae63b31e
SR
2738{
2739 struct dentry *d_events;
2740 struct dentry *entry;
2741
8434dc93 2742 entry = tracefs_create_file("set_event", 0644, parent,
ae63b31e
SR
2743 tr, &ftrace_set_event_fops);
2744 if (!entry) {
8434dc93 2745 pr_warn("Could not create tracefs 'set_event' entry\n");
ae63b31e
SR
2746 return -ENOMEM;
2747 }
2748
8434dc93 2749 d_events = tracefs_create_dir("events", parent);
277ba044 2750 if (!d_events) {
8434dc93 2751 pr_warn("Could not create tracefs 'events' directory\n");
277ba044
SR
2752 return -ENOMEM;
2753 }
ae63b31e 2754
49090107
SRRH
2755 entry = tracefs_create_file("set_event_pid", 0644, parent,
2756 tr, &ftrace_set_event_pid_fops);
2757
ae63b31e
SR
2758 /* ring buffer internal formats */
2759 trace_create_file("header_page", 0444, d_events,
2760 ring_buffer_print_page_header,
2761 &ftrace_show_header_fops);
2762
2763 trace_create_file("header_event", 0444, d_events,
2764 ring_buffer_print_entry_header,
2765 &ftrace_show_header_fops);
2766
2767 trace_create_file("enable", 0644, d_events,
2768 tr, &ftrace_tr_enable_fops);
2769
2770 tr->event_dir = d_events;
77248221
SR
2771
2772 return 0;
2773}
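/*
 * Editor's sketch (illustrative, user-space): the top-level "set_event"
 * file created above takes event names to enable; "system:" covers a
 * whole subsystem and a '!' prefix disables. The names and the tracefs
 * mount point here are examples only.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/set_event", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "sched:sched_switch\n");	/* a single event */
	fprintf(f, "irq:\n");			/* every event in a system */
	return fclose(f) ? 1 : 0;
}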
2774
2775/**
 2776	 * event_trace_add_tracer - add an instance of a trace_array to events
2777 * @parent: The parent dentry to place the files/directories for events in
2778 * @tr: The trace array associated with these events
2779 *
2780 * When a new instance is created, it needs to set up its events
2781 * directory, as well as other files associated with events. It also
 2782	 * creates the event hierarchy in the @parent/events directory.
2783 *
2784 * Returns 0 on success.
2785 */
2786int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2787{
2788 int ret;
2789
2790 mutex_lock(&event_mutex);
2791
2792 ret = create_event_toplevel_files(parent, tr);
2793 if (ret)
2794 goto out_unlock;
2795
52f6ad6d 2796 down_write(&trace_event_sem);
ae63b31e 2797 __trace_add_event_dirs(tr);
52f6ad6d 2798 up_write(&trace_event_sem);
277ba044 2799
77248221 2800 out_unlock:
277ba044 2801 mutex_unlock(&event_mutex);
ae63b31e 2802
77248221
SR
2803 return ret;
2804}
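/*
 * Editor's sketch (illustrative, user-space): event_trace_add_tracer()
 * above runs when a new tracing instance is created, which user space
 * triggers with a plain mkdir() under tracefs. The instance name "demo"
 * and the mount point are assumptions.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	if (mkdir("/sys/kernel/debug/tracing/instances/demo", 0755)) {
		perror("mkdir");
		return 1;
	}
	/* the new instance now carries its own events/ hierarchy */
	puts("created tracing instance 'demo'");
	return 0;
}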
2805
2806/*
2807 * The top trace array already had its file descriptors created.
2808 * Now the files themselves need to be created.
2809 */
2810static __init int
2811early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2812{
2813 int ret;
2814
2815 mutex_lock(&event_mutex);
2816
2817 ret = create_event_toplevel_files(parent, tr);
2818 if (ret)
2819 goto out_unlock;
2820
52f6ad6d 2821 down_write(&trace_event_sem);
77248221 2822 __trace_early_add_event_dirs(tr);
52f6ad6d 2823 up_write(&trace_event_sem);
77248221
SR
2824
2825 out_unlock:
2826 mutex_unlock(&event_mutex);
2827
2828 return ret;
ae63b31e
SR
2829}
2830
0c8916c3
SR
2831int event_trace_del_tracer(struct trace_array *tr)
2832{
0c8916c3
SR
2833 mutex_lock(&event_mutex);
2834
85f2b082
TZ
2835 /* Disable any event triggers and associated soft-disabled events */
2836 clear_event_triggers(tr);
2837
49090107
SRRH
2838 /* Clear the pid list */
2839 __ftrace_clear_event_pids(tr);
2840
2a6c24af
SRRH
2841 /* Disable any running events */
2842 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2843
3ccb0123
SR
2844 /* Access to events are within rcu_read_lock_sched() */
2845 synchronize_sched();
2846
52f6ad6d 2847 down_write(&trace_event_sem);
0c8916c3 2848 __trace_remove_event_dirs(tr);
8434dc93 2849 tracefs_remove_recursive(tr->event_dir);
52f6ad6d 2850 up_write(&trace_event_sem);
0c8916c3
SR
2851
2852 tr->event_dir = NULL;
2853
2854 mutex_unlock(&event_mutex);
2855
2856 return 0;
2857}
2858
d1a29143
SR
2859static __init int event_trace_memsetup(void)
2860{
2861 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
7f1d2f82 2862 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
d1a29143
SR
2863 return 0;
2864}
2865
ce1039bd
SRRH
2866static __init void
2867early_enable_events(struct trace_array *tr, bool disable_first)
2868{
2869 char *buf = bootup_event_buf;
2870 char *token;
2871 int ret;
2872
2873 while (true) {
2874 token = strsep(&buf, ",");
2875
2876 if (!token)
2877 break;
2878 if (!*token)
2879 continue;
2880
2881 /* Restarting syscalls requires that we stop them first */
2882 if (disable_first)
2883 ftrace_set_clr_event(tr, token, 0);
2884
2885 ret = ftrace_set_clr_event(tr, token, 1);
2886 if (ret)
2887 pr_warn("Failed to enable trace event: %s\n", token);
2888
2889 /* Put back the comma to allow this to be called again */
2890 if (buf)
2891 *(buf - 1) = ',';
2892 }
2893}
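/*
 * Editor's sketch (illustrative, user-space): a model of the strsep()
 * loop in early_enable_events() above, showing why the comma is put
 * back: strsep() NUL-terminates each token in place, and restoring the
 * ',' lets the same buffer be parsed again on a later pass (as
 * event_trace_enable_again() requires). The event list is invented.
 */
#include <stdio.h>
#include <string.h>

static char bootup_buf[] = "sched:sched_switch,irq:irq_handler_entry";

static void walk_events(void)
{
	char *buf = bootup_buf;
	char *token;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (!*token)
			continue;
		printf("enable: %s\n", token);
		if (buf)		/* restore the ',' strsep() zeroed */
			*(buf - 1) = ',';
	}
}

int main(void)
{
	walk_events();
	walk_events();	/* still works thanks to the restored commas */
	return 0;
}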
2894
8781915a
EG
2895static __init int event_trace_enable(void)
2896{
ae63b31e 2897 struct trace_array *tr = top_trace_array();
2425bcb9 2898 struct trace_event_call **iter, *call;
8781915a
EG
2899 int ret;
2900
dc81e5e3
YY
2901 if (!tr)
2902 return -ENODEV;
2903
8781915a
EG
2904 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2905
2906 call = *iter;
2907 ret = event_init(call);
2908 if (!ret)
2909 list_add(&call->list, &ftrace_events);
2910 }
2911
77248221
SR
2912 /*
2913 * We need the top trace array to have a working set of trace
2914 * points at early init, before the debug files and directories
2915 * are created. Create the file entries now, and attach them
2916 * to the actual file dentries later.
2917 */
2918 __trace_early_add_events(tr);
2919
ce1039bd 2920 early_enable_events(tr, false);
81698831
SR
2921
2922 trace_printk_start_comm();
2923
3cd715de
SRRH
2924 register_event_cmds();
2925
85f2b082
TZ
2926 register_trigger_cmds();
2927
8781915a
EG
2928 return 0;
2929}
2930
ce1039bd
SRRH
2931/*
2932 * event_trace_enable() is called from trace_event_init() first to
2933 * initialize events and perhaps start any events that are on the
2934 * command line. Unfortunately, there are some events that will not
2935 * start this early, like the system call tracepoints that need
2936 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
 2937	 * is called before pid 1 starts, so this flag is never set, meaning
 2938	 * the syscall tracepoints are never reached even though the events
 2939	 * are enabled (and do nothing).
2940 */
2941static __init int event_trace_enable_again(void)
2942{
2943 struct trace_array *tr;
2944
2945 tr = top_trace_array();
2946 if (!tr)
2947 return -ENODEV;
2948
2949 early_enable_events(tr, true);
2950
2951 return 0;
2952}
2953
2954early_initcall(event_trace_enable_again);
2955
b77e38aa
SR
2956static __init int event_trace_init(void)
2957{
ae63b31e 2958 struct trace_array *tr;
b77e38aa
SR
2959 struct dentry *d_tracer;
2960 struct dentry *entry;
6d723736 2961 int ret;
b77e38aa 2962
ae63b31e 2963 tr = top_trace_array();
dc81e5e3
YY
2964 if (!tr)
2965 return -ENODEV;
ae63b31e 2966
b77e38aa 2967 d_tracer = tracing_init_dentry();
14a5ae40 2968 if (IS_ERR(d_tracer))
b77e38aa
SR
2969 return 0;
2970
8434dc93 2971 entry = tracefs_create_file("available_events", 0444, d_tracer,
ae63b31e 2972 tr, &ftrace_avail_fops);
2314c4ae 2973 if (!entry)
8434dc93 2974 pr_warn("Could not create tracefs 'available_events' entry\n");
2314c4ae 2975
9f616680
DW
2976 if (trace_define_generic_fields())
 2977	 pr_warn("tracing: Failed to allocate generic fields\n");
2978
8728fe50 2979 if (trace_define_common_fields())
3448bac3 2980	 pr_warn("tracing: Failed to allocate common fields\n");
8728fe50 2981
77248221 2982 ret = early_event_add_tracer(d_tracer, tr);
ae63b31e
SR
2983 if (ret)
2984 return ret;
020e5f85 2985
836d481e 2986#ifdef CONFIG_MODULES
6d723736 2987 ret = register_module_notifier(&trace_module_nb);
55379376 2988 if (ret)
3448bac3 2989 pr_warn("Failed to register trace events module notifier\n");
836d481e 2990#endif
b77e38aa
SR
2991 return 0;
2992}
5f893b26
SRRH
2993
2994void __init trace_event_init(void)
2995{
2996 event_trace_memsetup();
2997 init_ftrace_syscalls();
2998 event_trace_enable();
2999}
3000
b77e38aa 3001fs_initcall(event_trace_init);
e6187007
SR
3002
3003#ifdef CONFIG_FTRACE_STARTUP_TEST
3004
3005static DEFINE_SPINLOCK(test_spinlock);
3006static DEFINE_SPINLOCK(test_spinlock_irq);
3007static DEFINE_MUTEX(test_mutex);
3008
3009static __init void test_work(struct work_struct *dummy)
3010{
3011 spin_lock(&test_spinlock);
3012 spin_lock_irq(&test_spinlock_irq);
3013 udelay(1);
3014 spin_unlock_irq(&test_spinlock_irq);
3015 spin_unlock(&test_spinlock);
3016
3017 mutex_lock(&test_mutex);
3018 msleep(1);
3019 mutex_unlock(&test_mutex);
3020}
3021
3022static __init int event_test_thread(void *unused)
3023{
3024 void *test_malloc;
3025
3026 test_malloc = kmalloc(1234, GFP_KERNEL);
3027 if (!test_malloc)
3028 pr_info("failed to kmalloc\n");
3029
3030 schedule_on_each_cpu(test_work);
3031
3032 kfree(test_malloc);
3033
3034 set_current_state(TASK_INTERRUPTIBLE);
fe0e01c7 3035 while (!kthread_should_stop()) {
e6187007 3036 schedule();
fe0e01c7
PZ
3037 set_current_state(TASK_INTERRUPTIBLE);
3038 }
3039 __set_current_state(TASK_RUNNING);
e6187007
SR
3040
3041 return 0;
3042}
3043
3044/*
3045 * Do various things that may trigger events.
3046 */
3047static __init void event_test_stuff(void)
3048{
3049 struct task_struct *test_thread;
3050
3051 test_thread = kthread_run(event_test_thread, NULL, "test-events");
3052 msleep(1);
3053 kthread_stop(test_thread);
3054}
3055
3056/*
3057 * For every trace event defined, we will test each trace point separately,
3058 * and then by groups, and finally all trace points.
3059 */
9ea21c1e 3060static __init void event_trace_self_tests(void)
e6187007 3061{
7967b3e0 3062 struct trace_subsystem_dir *dir;
7f1d2f82 3063 struct trace_event_file *file;
2425bcb9 3064 struct trace_event_call *call;
e6187007 3065 struct event_subsystem *system;
ae63b31e 3066 struct trace_array *tr;
e6187007
SR
3067 int ret;
3068
ae63b31e 3069 tr = top_trace_array();
dc81e5e3
YY
3070 if (!tr)
3071 return;
ae63b31e 3072
e6187007
SR
3073 pr_info("Running tests on trace events:\n");
3074
ae63b31e
SR
3075 list_for_each_entry(file, &tr->events, list) {
3076
3077 call = file->event_call;
e6187007 3078
2239291a
SR
3079 /* Only test those that have a probe */
3080 if (!call->class || !call->class->probe)
e6187007
SR
3081 continue;
3082
1f5a6b45
SR
3083/*
3084 * Testing syscall events here is pretty useless, but
3085 * we still do it if configured. But this is time consuming.
3086 * What we really need is a user thread to perform the
3087 * syscalls as we test.
3088 */
3089#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
3090 if (call->class->system &&
3091 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
3092 continue;
3093#endif
3094
687fcc4a 3095 pr_info("Testing event %s: ", trace_event_name(call));
e6187007
SR
3096
3097 /*
3098 * If an event is already enabled, someone is using
3099 * it and the self test should not be on.
3100 */
5d6ad960 3101 if (file->flags & EVENT_FILE_FL_ENABLED) {
3448bac3 3102 pr_warn("Enabled event during self test!\n");
e6187007
SR
3103 WARN_ON_ONCE(1);
3104 continue;
3105 }
3106
ae63b31e 3107 ftrace_event_enable_disable(file, 1);
e6187007 3108 event_test_stuff();
ae63b31e 3109 ftrace_event_enable_disable(file, 0);
e6187007
SR
3110
3111 pr_cont("OK\n");
3112 }
3113
3114 /* Now test at the sub system level */
3115
3116 pr_info("Running tests on trace event systems:\n");
3117
ae63b31e
SR
3118 list_for_each_entry(dir, &tr->systems, list) {
3119
3120 system = dir->subsystem;
e6187007
SR
3121
3122 /* the ftrace system is special, skip it */
3123 if (strcmp(system->name, "ftrace") == 0)
3124 continue;
3125
3126 pr_info("Testing event system %s: ", system->name);
3127
ae63b31e 3128 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007 3129 if (WARN_ON_ONCE(ret)) {
3448bac3
FF
3130 pr_warn("error enabling system %s\n",
3131 system->name);
e6187007
SR
3132 continue;
3133 }
3134
3135 event_test_stuff();
3136
ae63b31e 3137 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 3138 if (WARN_ON_ONCE(ret)) {
3448bac3
FF
3139 pr_warn("error disabling system %s\n",
3140 system->name);
76bab1b7
YL
3141 continue;
3142 }
e6187007
SR
3143
3144 pr_cont("OK\n");
3145 }
3146
3147 /* Test with all events enabled */
3148
3149 pr_info("Running tests on all trace events:\n");
3150 pr_info("Testing all events: ");
3151
ae63b31e 3152 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 3153 if (WARN_ON_ONCE(ret)) {
3448bac3 3154 pr_warn("error enabling all events\n");
9ea21c1e 3155 return;
e6187007
SR
3156 }
3157
3158 event_test_stuff();
3159
3160 /* reset sysname */
ae63b31e 3161 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007 3162 if (WARN_ON_ONCE(ret)) {
3448bac3 3163 pr_warn("error disabling all events\n");
9ea21c1e 3164 return;
e6187007
SR
3165 }
3166
3167 pr_cont("OK\n");
9ea21c1e
SR
3168}
3169
3170#ifdef CONFIG_FUNCTION_TRACER
3171
245b2e70 3172static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e 3173
b7f0c959
SRRH
3174static struct trace_array *event_tr;
3175
3176static void __init
2f5f6ad9 3177function_test_events_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 3178 struct ftrace_ops *op, struct pt_regs *pt_regs)
9ea21c1e
SR
3179{
3180 struct ring_buffer_event *event;
e77405ad 3181 struct ring_buffer *buffer;
9ea21c1e
SR
3182 struct ftrace_entry *entry;
3183 unsigned long flags;
3184 long disabled;
9ea21c1e
SR
3185 int cpu;
3186 int pc;
3187
3188 pc = preempt_count();
5168ae50 3189 preempt_disable_notrace();
9ea21c1e 3190 cpu = raw_smp_processor_id();
245b2e70 3191 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
3192
3193 if (disabled != 1)
3194 goto out;
3195
3196 local_save_flags(flags);
3197
e77405ad
SR
3198 event = trace_current_buffer_lock_reserve(&buffer,
3199 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
3200 flags, pc);
3201 if (!event)
3202 goto out;
3203 entry = ring_buffer_event_data(event);
3204 entry->ip = ip;
3205 entry->parent_ip = parent_ip;
3206
b7f0c959 3207 trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
9ea21c1e
SR
3208
3209 out:
245b2e70 3210 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 3211 preempt_enable_notrace();
9ea21c1e
SR
3212}
3213
3214static struct ftrace_ops trace_ops __initdata =
3215{
3216 .func = function_test_events_call,
4740974a 3217 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
9ea21c1e
SR
3218};
3219
3220static __init void event_trace_self_test_with_function(void)
3221{
17bb615a 3222 int ret;
2d34f489
SRRH
3223 event_tr = top_trace_array();
3224 if (WARN_ON(!event_tr))
3225 return;
17bb615a
SR
3226 ret = register_ftrace_function(&trace_ops);
3227 if (WARN_ON(ret < 0)) {
3228 pr_info("Failed to enable function tracer for event tests\n");
3229 return;
3230 }
9ea21c1e
SR
3231 pr_info("Running tests again, along with the function tracer\n");
3232 event_trace_self_tests();
3233 unregister_ftrace_function(&trace_ops);
3234}
3235#else
3236static __init void event_trace_self_test_with_function(void)
3237{
3238}
3239#endif
3240
3241static __init int event_trace_self_tests_init(void)
3242{
020e5f85
LZ
3243 if (!tracing_selftest_disabled) {
3244 event_trace_self_tests();
3245 event_trace_self_test_with_function();
3246 }
e6187007
SR
3247
3248 return 0;
3249}
3250
28d20e2d 3251late_initcall(event_trace_self_tests_init);
e6187007
SR
3252
3253#endif