/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <[email protected]>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

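/*
 * Permission check done once per perf event at perf_event_open() time.
 * Inherited children are trusted once the parent has been allowed, and
 * raw sample data (PERF_SAMPLE_RAW) is restricted to privileged users
 * unless the event is per-task and flagged TRACE_EVENT_FL_CAP_ANY.
 */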
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because the code below is only called via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace event is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler and the overall trickiness of
		 * doing so.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

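/*
 * Register the trace event for perf use.  The first perf event attached
 * to a given trace event allocates that event's per-cpu hlist of active
 * perf events; the first user system-wide (total_ref_count == 0) also
 * allocates the shared per-cpu perf_trace_buf scratch buffers, one per
 * recursion context.
 */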
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

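/*
 * Drop one perf reference on the trace event.  The last unregister tears
 * down the perf probe, waits for in-flight callbacks with
 * tracepoint_synchronize_unregister(), and then frees the per-cpu lists
 * and, once nothing uses them anymore, the shared buffers.
 */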
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

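/*
 * Bind a perf event to a trace event: permission check, registration,
 * then the per-event TRACE_REG_PERF_OPEN callback, undone on failure.
 */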
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

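/*
 * Called from the perf core when a tracepoint perf event is created.
 * Find the trace event whose id matches attr.config, pin its owning
 * module and bind the perf event to it.
 */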
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

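/*
 * pmu ->add()/->del() callbacks: link the event into (or unlink it from)
 * this CPU's hlist so the trace event's perf probe can see it, then let
 * the event class react via TRACE_REG_PERF_ADD/TRACE_REG_PERF_DEL.
 */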
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

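/*
 * Reserve space for one trace record in the per-cpu perf scratch buffer.
 * On success this returns a buffer of at most PERF_MAX_TRACE_SIZE bytes
 * with the generic trace_entry header already filled in, *rctxp set to
 * the recursion context that must later be handed to
 * perf_trace_buf_submit(), and, if @regs is non-NULL, *regs pointing to
 * a pt_regs the caller may populate.  A typical probe does roughly:
 *
 *	entry = perf_trace_buf_prepare(size, type, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	...fill in the event-specific fields of *entry...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs,
 *			      head, task);
 *
 * (sketch only; see perf_ftrace_function_call() below for a real caller)
 */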
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs **regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* Zero the dead bytes from alignment so we don't leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

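/*
 * perf glue for the function tracer: each perf "function" event carries
 * its own ftrace_ops whose callback below turns every traced function
 * call into a TRACE_FN record, using the shared perf_trace_buf machinery
 * above.
 */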
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

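/*
 * The ops is marked PER_CPU so perf_trace_add()/perf_trace_del() can
 * enable and disable the callback on just the local CPU, and RCU so the
 * callback is only invoked while RCU is watching.
 */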
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

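/*
 * ->reg() callback of the ftrace function trace event: dispatch the
 * generic trace_reg requests issued by the code above onto this event's
 * private ftrace_ops.
 */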
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */