Commit | Line | Data |
---|---|---|
f8a95309 ACM |
1 | /* |
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]> | |
3 | * | |
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | |
5 | * copyright notes. | |
6 | * | |
7 | * Released under the GPL v2. (and only v2, not any later version) | |
8 | */ | |
9 | ||
936be503 | 10 | #include <byteswap.h> |
0f6a3015 | 11 | #include <linux/bitops.h> |
553873e1 | 12 | #include <api/fs/debugfs.h> |
4e319027 RR |
13 | #include <traceevent/event-parse.h> |
14 | #include <linux/hw_breakpoint.h> | |
15 | #include <linux/perf_event.h> | |
bec19672 | 16 | #include <sys/resource.h> |
4e319027 | 17 | #include "asm/bug.h" |
69aad6f1 | 18 | #include "evsel.h" |
70082dd9 | 19 | #include "evlist.h" |
69aad6f1 | 20 | #include "util.h" |
86bd5e86 | 21 | #include "cpumap.h" |
fd78260b | 22 | #include "thread_map.h" |
12864b31 | 23 | #include "target.h" |
26d33022 | 24 | #include "perf_regs.h" |
e3e1a54f | 25 | #include "debug.h" |
97978b3e | 26 | #include "trace-event.h" |
69aad6f1 | 27 | |
594ac61a ACM |
28 | static struct { |
29 | bool sample_id_all; | |
30 | bool exclude_guest; | |
5c5e854b | 31 | bool mmap2; |
594ac61a ACM |
32 | } perf_missing_features; |
33 | ||
c52b12ed ACM |
34 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
35 | ||
75562573 | 36 | int __perf_evsel__sample_size(u64 sample_type) |
c2a70653 ACM |
37 | { |
38 | u64 mask = sample_type & PERF_SAMPLE_MASK; | |
39 | int size = 0; | |
40 | int i; | |
41 | ||
42 | for (i = 0; i < 64; i++) { | |
43 | if (mask & (1ULL << i)) | |
44 | size++; | |
45 | } | |
46 | ||
47 | size *= sizeof(u64); | |
48 | ||
49 | return size; | |
50 | } | |
51 | ||
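__perf_evsel__sample_size() simply counts the bits set in the PERF_SAMPLE_MASK portion of sample_type and multiplies by sizeof(u64), since each set bit contributes one fixed-size field to a sample. A minimal standalone sketch of that rule (stand-in bit values, not the real PERF_SAMPLE_* definitions):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend IP, TID and TIME are bits 0, 1 and 2 of sample_type */
	uint64_t sample_type = (1ULL << 0) | (1ULL << 1) | (1ULL << 2);
	int size = __builtin_popcountll(sample_type) * (int)sizeof(uint64_t);

	printf("fixed sample payload: %d bytes\n", size); /* 3 * 8 = 24 */
	return 0;
}
```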
75562573 AH |
52 | /** |
53 | * __perf_evsel__calc_id_pos - calculate id_pos. | |
54 | * @sample_type: sample type | |
55 | * | |
56 | * This function returns the position of the event id (PERF_SAMPLE_ID or | |
57 | * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct | |
58 | * sample_event. | |
59 | */ | |
60 | static int __perf_evsel__calc_id_pos(u64 sample_type) | |
61 | { | |
62 | int idx = 0; | |
63 | ||
64 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | |
65 | return 0; | |
66 | ||
67 | if (!(sample_type & PERF_SAMPLE_ID)) | |
68 | return -1; | |
69 | ||
70 | if (sample_type & PERF_SAMPLE_IP) | |
71 | idx += 1; | |
72 | ||
73 | if (sample_type & PERF_SAMPLE_TID) | |
74 | idx += 1; | |
75 | ||
76 | if (sample_type & PERF_SAMPLE_TIME) | |
77 | idx += 1; | |
78 | ||
79 | if (sample_type & PERF_SAMPLE_ADDR) | |
80 | idx += 1; | |
81 | ||
82 | return idx; | |
83 | } | |
84 | ||
85 | /** | |
86 | * __perf_evsel__calc_is_pos - calculate is_pos. | |
87 | * @sample_type: sample type | |
88 | * | |
89 | * This function returns the position (counting backwards) of the event id | |
90 | * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. in | |
91 | * the id sample appended to non-sample events when sample_id_all is used. | |
92 | */ | |
93 | static int __perf_evsel__calc_is_pos(u64 sample_type) | |
94 | { | |
95 | int idx = 1; | |
96 | ||
97 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | |
98 | return 1; | |
99 | ||
100 | if (!(sample_type & PERF_SAMPLE_ID)) | |
101 | return -1; | |
102 | ||
103 | if (sample_type & PERF_SAMPLE_CPU) | |
104 | idx += 1; | |
105 | ||
106 | if (sample_type & PERF_SAMPLE_STREAM_ID) | |
107 | idx += 1; | |
108 | ||
109 | return idx; | |
110 | } | |
111 | ||
112 | void perf_evsel__calc_id_pos(struct perf_evsel *evsel) | |
113 | { | |
114 | evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); | |
115 | evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); | |
116 | } | |
117 | ||
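To make the two helpers above concrete: with sample_type = IP | TID | TIME | ID, three u64s (ip, pid/tid, time) precede the id inside a sample, so id_pos is 3, while in a non-sample event the id lands at the end of the appended id sample (no CPU or STREAM_ID follow it here), so is_pos is 1. A standalone sketch of the id_pos counting, using made-up bit positions rather than the real ABI values:

```c
#include <stdio.h>
#include <stdint.h>

/* stand-ins for PERF_SAMPLE_IP/TID/TIME/ADDR/ID; the values are made up */
enum { S_IP = 1, S_TID = 2, S_TIME = 4, S_ADDR = 8, S_ID = 16 };

static int calc_id_pos(uint64_t type)
{
	int idx = 0;

	if (!(type & S_ID))
		return -1;
	if (type & S_IP)
		idx++;
	if (type & S_TID)
		idx++;
	if (type & S_TIME)
		idx++;
	if (type & S_ADDR)
		idx++;
	return idx;
}

int main(void)
{
	/* IP | TID | TIME | ID -> the id is the 4th u64, index 3 */
	printf("id_pos = %d\n", calc_id_pos(S_IP | S_TID | S_TIME | S_ID));
	return 0;
}
```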
4bf9ce1b | 118 | void hists__init(struct hists *hists) |
0e2a5f10 ACM |
119 | { |
120 | memset(hists, 0, sizeof(*hists)); | |
121 | hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT; | |
122 | hists->entries_in = &hists->entries_in_array[0]; | |
123 | hists->entries_collapsed = RB_ROOT; | |
124 | hists->entries = RB_ROOT; | |
125 | pthread_mutex_init(&hists->lock, NULL); | |
126 | } | |
127 | ||
7be5ebe8 ACM |
128 | void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, |
129 | enum perf_event_sample_format bit) | |
130 | { | |
131 | if (!(evsel->attr.sample_type & bit)) { | |
132 | evsel->attr.sample_type |= bit; | |
133 | evsel->sample_size += sizeof(u64); | |
75562573 | 134 | perf_evsel__calc_id_pos(evsel); |
7be5ebe8 ACM |
135 | } |
136 | } | |
137 | ||
138 | void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, | |
139 | enum perf_event_sample_format bit) | |
140 | { | |
141 | if (evsel->attr.sample_type & bit) { | |
142 | evsel->attr.sample_type &= ~bit; | |
143 | evsel->sample_size -= sizeof(u64); | |
75562573 | 144 | perf_evsel__calc_id_pos(evsel); |
7be5ebe8 ACM |
145 | } |
146 | } | |
147 | ||
75562573 AH |
148 | void perf_evsel__set_sample_id(struct perf_evsel *evsel, |
149 | bool can_sample_identifier) | |
7a5a5ca5 | 150 | { |
75562573 AH |
151 | if (can_sample_identifier) { |
152 | perf_evsel__reset_sample_bit(evsel, ID); | |
153 | perf_evsel__set_sample_bit(evsel, IDENTIFIER); | |
154 | } else { | |
155 | perf_evsel__set_sample_bit(evsel, ID); | |
156 | } | |
7a5a5ca5 ACM |
157 | evsel->attr.read_format |= PERF_FORMAT_ID; |
158 | } | |
159 | ||
ef1d1af2 ACM |
160 | void perf_evsel__init(struct perf_evsel *evsel, |
161 | struct perf_event_attr *attr, int idx) | |
162 | { | |
163 | evsel->idx = idx; | |
164 | evsel->attr = *attr; | |
2cfda562 | 165 | evsel->leader = evsel; |
410136f5 SE |
166 | evsel->unit = ""; |
167 | evsel->scale = 1.0; | |
ef1d1af2 | 168 | INIT_LIST_HEAD(&evsel->node); |
1980c2eb | 169 | hists__init(&evsel->hists); |
bde09467 | 170 | evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); |
75562573 | 171 | perf_evsel__calc_id_pos(evsel); |
ef1d1af2 ACM |
172 | } |
173 | ||
ef503831 | 174 | struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) |
69aad6f1 ACM |
175 | { |
176 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | |
177 | ||
ef1d1af2 ACM |
178 | if (evsel != NULL) |
179 | perf_evsel__init(evsel, attr, idx); | |
69aad6f1 ACM |
180 | |
181 | return evsel; | |
182 | } | |
183 | ||
ef503831 | 184 | struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) |
efd2b924 ACM |
185 | { |
186 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | |
187 | ||
188 | if (evsel != NULL) { | |
189 | struct perf_event_attr attr = { | |
0b80f8b3 ACM |
190 | .type = PERF_TYPE_TRACEPOINT, |
191 | .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | | |
192 | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), | |
efd2b924 ACM |
193 | }; |
194 | ||
e48ffe2b ACM |
195 | if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) |
196 | goto out_free; | |
197 | ||
97978b3e | 198 | evsel->tp_format = trace_event__tp_format(sys, name); |
efd2b924 ACM |
199 | if (evsel->tp_format == NULL) |
200 | goto out_free; | |
201 | ||
0b80f8b3 | 202 | event_attr_init(&attr); |
efd2b924 | 203 | attr.config = evsel->tp_format->id; |
0b80f8b3 | 204 | attr.sample_period = 1; |
efd2b924 | 205 | perf_evsel__init(evsel, &attr, idx); |
efd2b924 ACM |
206 | } |
207 | ||
208 | return evsel; | |
209 | ||
210 | out_free: | |
74cf249d | 211 | zfree(&evsel->name); |
efd2b924 ACM |
212 | free(evsel); |
213 | return NULL; | |
214 | } | |
215 | ||
8ad7013b | 216 | const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { |
c410431c ACM |
217 | "cycles", |
218 | "instructions", | |
219 | "cache-references", | |
220 | "cache-misses", | |
221 | "branches", | |
222 | "branch-misses", | |
223 | "bus-cycles", | |
224 | "stalled-cycles-frontend", | |
225 | "stalled-cycles-backend", | |
226 | "ref-cycles", | |
227 | }; | |
228 | ||
dd4f5223 | 229 | static const char *__perf_evsel__hw_name(u64 config) |
c410431c ACM |
230 | { |
231 | if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config]) | |
232 | return perf_evsel__hw_names[config]; | |
233 | ||
234 | return "unknown-hardware"; | |
235 | } | |
236 | ||
27f18617 | 237 | static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size) |
c410431c | 238 | { |
27f18617 | 239 | int colon = 0, r = 0; |
c410431c | 240 | struct perf_event_attr *attr = &evsel->attr; |
c410431c ACM |
241 | bool exclude_guest_default = false; |
242 | ||
243 | #define MOD_PRINT(context, mod) do { \ | |
244 | if (!attr->exclude_##context) { \ | |
27f18617 | 245 | if (!colon) colon = ++r; \ |
c410431c ACM |
246 | r += scnprintf(bf + r, size - r, "%c", mod); \ |
247 | } } while(0) | |
248 | ||
249 | if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) { | |
250 | MOD_PRINT(kernel, 'k'); | |
251 | MOD_PRINT(user, 'u'); | |
252 | MOD_PRINT(hv, 'h'); | |
253 | exclude_guest_default = true; | |
254 | } | |
255 | ||
256 | if (attr->precise_ip) { | |
257 | if (!colon) | |
27f18617 | 258 | colon = ++r; |
c410431c ACM |
259 | r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); |
260 | exclude_guest_default = true; | |
261 | } | |
262 | ||
263 | if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) { | |
264 | MOD_PRINT(host, 'H'); | |
265 | MOD_PRINT(guest, 'G'); | |
266 | } | |
267 | #undef MOD_PRINT | |
268 | if (colon) | |
27f18617 | 269 | bf[colon - 1] = ':'; |
c410431c ACM |
270 | return r; |
271 | } | |
272 | ||
27f18617 ACM |
273 | static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) |
274 | { | |
275 | int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config)); | |
276 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
277 | } | |
278 | ||
8ad7013b | 279 | const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { |
335c2f5d ACM |
280 | "cpu-clock", |
281 | "task-clock", | |
282 | "page-faults", | |
283 | "context-switches", | |
8ad7013b | 284 | "cpu-migrations", |
335c2f5d ACM |
285 | "minor-faults", |
286 | "major-faults", | |
287 | "alignment-faults", | |
288 | "emulation-faults", | |
d22d1a2a | 289 | "dummy", |
335c2f5d ACM |
290 | }; |
291 | ||
dd4f5223 | 292 | static const char *__perf_evsel__sw_name(u64 config) |
335c2f5d ACM |
293 | { |
294 | if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config]) | |
295 | return perf_evsel__sw_names[config]; | |
296 | return "unknown-software"; | |
297 | } | |
298 | ||
299 | static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size) | |
300 | { | |
301 | int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config)); | |
302 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
303 | } | |
304 | ||
287e74aa JO |
305 | static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) |
306 | { | |
307 | int r; | |
308 | ||
309 | r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); | |
310 | ||
311 | if (type & HW_BREAKPOINT_R) | |
312 | r += scnprintf(bf + r, size - r, "r"); | |
313 | ||
314 | if (type & HW_BREAKPOINT_W) | |
315 | r += scnprintf(bf + r, size - r, "w"); | |
316 | ||
317 | if (type & HW_BREAKPOINT_X) | |
318 | r += scnprintf(bf + r, size - r, "x"); | |
319 | ||
320 | return r; | |
321 | } | |
322 | ||
323 | static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size) | |
324 | { | |
325 | struct perf_event_attr *attr = &evsel->attr; | |
326 | int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); | |
327 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
328 | } | |
329 | ||
0b668bc9 ACM |
330 | const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] |
331 | [PERF_EVSEL__MAX_ALIASES] = { | |
332 | { "L1-dcache", "l1-d", "l1d", "L1-data", }, | |
333 | { "L1-icache", "l1-i", "l1i", "L1-instruction", }, | |
334 | { "LLC", "L2", }, | |
335 | { "dTLB", "d-tlb", "Data-TLB", }, | |
336 | { "iTLB", "i-tlb", "Instruction-TLB", }, | |
337 | { "branch", "branches", "bpu", "btb", "bpc", }, | |
338 | { "node", }, | |
339 | }; | |
340 | ||
341 | const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] | |
342 | [PERF_EVSEL__MAX_ALIASES] = { | |
343 | { "load", "loads", "read", }, | |
344 | { "store", "stores", "write", }, | |
345 | { "prefetch", "prefetches", "speculative-read", "speculative-load", }, | |
346 | }; | |
347 | ||
348 | const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] | |
349 | [PERF_EVSEL__MAX_ALIASES] = { | |
350 | { "refs", "Reference", "ops", "access", }, | |
351 | { "misses", "miss", }, | |
352 | }; | |
353 | ||
354 | #define C(x) PERF_COUNT_HW_CACHE_##x | |
355 | #define CACHE_READ (1 << C(OP_READ)) | |
356 | #define CACHE_WRITE (1 << C(OP_WRITE)) | |
357 | #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) | |
358 | #define COP(x) (1 << x) | |
359 | ||
360 | /* | |
361 | * cache operation stats | |
362 | * L1I : Read and prefetch only | |
363 | * ITLB and BPU : Read-only | |
364 | */ | |
365 | static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = { | |
366 | [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | |
367 | [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), | |
368 | [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | |
369 | [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | |
370 | [C(ITLB)] = (CACHE_READ), | |
371 | [C(BPU)] = (CACHE_READ), | |
372 | [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | |
373 | }; | |
374 | ||
375 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op) | |
376 | { | |
377 | if (perf_evsel__hw_cache_stat[type] & COP(op)) | |
378 | return true; /* valid */ | |
379 | else | |
380 | return false; /* invalid */ | |
381 | } | |
382 | ||
383 | int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, | |
384 | char *bf, size_t size) | |
385 | { | |
386 | if (result) { | |
387 | return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0], | |
388 | perf_evsel__hw_cache_op[op][0], | |
389 | perf_evsel__hw_cache_result[result][0]); | |
390 | } | |
391 | ||
392 | return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0], | |
393 | perf_evsel__hw_cache_op[op][1]); | |
394 | } | |
395 | ||
dd4f5223 | 396 | static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size) |
0b668bc9 ACM |
397 | { |
398 | u8 op, result, type = (config >> 0) & 0xff; | |
399 | const char *err = "unknown-ext-hardware-cache-type"; | |
400 | ||
401 | if (type >= PERF_COUNT_HW_CACHE_MAX) | |
402 | goto out_err; | |
403 | ||
404 | op = (config >> 8) & 0xff; | |
405 | err = "unknown-ext-hardware-cache-op"; | |
406 | if (op >= PERF_COUNT_HW_CACHE_OP_MAX) | |
407 | goto out_err; | |
408 | ||
409 | result = (config >> 16) & 0xff; | |
410 | err = "unknown-ext-hardware-cache-result"; | |
411 | if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | |
412 | goto out_err; | |
413 | ||
414 | err = "invalid-cache"; | |
415 | if (!perf_evsel__is_cache_op_valid(type, op)) | |
416 | goto out_err; | |
417 | ||
418 | return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size); | |
419 | out_err: | |
420 | return scnprintf(bf, size, "%s", err); | |
421 | } | |
422 | ||
423 | static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) | |
424 | { | |
425 | int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); | |
426 | return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); | |
427 | } | |
428 | ||
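The decoding above unpacks a PERF_TYPE_HW_CACHE config into its three byte-wide fields: cache type in byte 0, operation in byte 1, result in byte 2. A standalone sketch of packing such a config the same way (the PERF_COUNT_* enums come from the perf_event UAPI header):

```c
#include <stdio.h>
#include <inttypes.h>
#include <linux/perf_event.h>

int main(void)
{
	/* L1 data cache, read op, miss result: the helpers above would
	 * render this as "L1-dcache-load-misses" */
	uint64_t config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	printf("attr.config = %#" PRIx64 "\n", config);
	return 0;
}
```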
6eef3d9c ACM |
429 | static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) |
430 | { | |
431 | int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); | |
432 | return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); | |
433 | } | |
434 | ||
7289f83c | 435 | const char *perf_evsel__name(struct perf_evsel *evsel) |
a4460836 | 436 | { |
7289f83c | 437 | char bf[128]; |
a4460836 | 438 | |
7289f83c ACM |
439 | if (evsel->name) |
440 | return evsel->name; | |
c410431c ACM |
441 | |
442 | switch (evsel->attr.type) { | |
443 | case PERF_TYPE_RAW: | |
6eef3d9c | 444 | perf_evsel__raw_name(evsel, bf, sizeof(bf)); |
c410431c ACM |
445 | break; |
446 | ||
447 | case PERF_TYPE_HARDWARE: | |
7289f83c | 448 | perf_evsel__hw_name(evsel, bf, sizeof(bf)); |
c410431c | 449 | break; |
0b668bc9 ACM |
450 | |
451 | case PERF_TYPE_HW_CACHE: | |
7289f83c | 452 | perf_evsel__hw_cache_name(evsel, bf, sizeof(bf)); |
0b668bc9 ACM |
453 | break; |
454 | ||
335c2f5d | 455 | case PERF_TYPE_SOFTWARE: |
7289f83c | 456 | perf_evsel__sw_name(evsel, bf, sizeof(bf)); |
335c2f5d ACM |
457 | break; |
458 | ||
a4460836 | 459 | case PERF_TYPE_TRACEPOINT: |
7289f83c | 460 | scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); |
a4460836 ACM |
461 | break; |
462 | ||
287e74aa JO |
463 | case PERF_TYPE_BREAKPOINT: |
464 | perf_evsel__bp_name(evsel, bf, sizeof(bf)); | |
465 | break; | |
466 | ||
c410431c | 467 | default: |
ca1b1457 RR |
468 | scnprintf(bf, sizeof(bf), "unknown attr type: %d", |
469 | evsel->attr.type); | |
a4460836 | 470 | break; |
c410431c ACM |
471 | } |
472 | ||
7289f83c ACM |
473 | evsel->name = strdup(bf); |
474 | ||
475 | return evsel->name ?: "unknown"; | |
c410431c ACM |
476 | } |
477 | ||
717e263f NK |
478 | const char *perf_evsel__group_name(struct perf_evsel *evsel) |
479 | { | |
480 | return evsel->group_name ?: "anon group"; | |
481 | } | |
482 | ||
483 | int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) | |
484 | { | |
485 | int ret; | |
486 | struct perf_evsel *pos; | |
487 | const char *group_name = perf_evsel__group_name(evsel); | |
488 | ||
489 | ret = scnprintf(buf, size, "%s", group_name); | |
490 | ||
491 | ret += scnprintf(buf + ret, size - ret, " { %s", | |
492 | perf_evsel__name(evsel)); | |
493 | ||
494 | for_each_group_member(pos, evsel) | |
495 | ret += scnprintf(buf + ret, size - ret, ", %s", | |
496 | perf_evsel__name(pos)); | |
497 | ||
498 | ret += scnprintf(buf + ret, size - ret, " }"); | |
499 | ||
500 | return ret; | |
501 | } | |
502 | ||
774cb499 JO |
503 | /* |
504 | * The enable_on_exec/disabled value strategy: | |
505 | * | |
506 | * 1) For any type of traced program: | |
507 | * - all independent events and group leaders are disabled | |
508 | * - all group members are enabled | |
509 | * | |
510 | * Group members are ruled by group leaders. They need to | |
511 | * be enabled, because the group scheduling relies on that. | |
512 | * | |
513 | * 2) For traced programs executed by perf: | |
514 | * - all independent events and group leaders have | |
515 | * enable_on_exec set | |
516 | * - we don't specifically enable or disable any event during | |
517 | * the record command | |
518 | * | |
519 | * Independent events and group leaders are initially disabled | |
520 | * and get enabled by exec. Group members are ruled by group | |
521 | * leaders as stated in 1). | |
522 | * | |
523 | * 3) For traced programs attached by perf (pid/tid): | |
524 | * - we specifically enable or disable all events during | |
525 | * the record command | |
526 | * | |
527 | * When attaching events to an already running traced program we | |
528 | * enable/disable events specifically, as there's no | |
529 | * initial traced exec call. | |
530 | */ | |
b4006796 | 531 | void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) |
0f82ebc4 | 532 | { |
3c176311 | 533 | struct perf_evsel *leader = evsel->leader; |
0f82ebc4 ACM |
534 | struct perf_event_attr *attr = &evsel->attr; |
535 | int track = !evsel->idx; /* only the first counter needs these */ | |
3aa5939d | 536 | bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; |
0f82ebc4 | 537 | |
594ac61a | 538 | attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; |
0f82ebc4 | 539 | attr->inherit = !opts->no_inherit; |
0f82ebc4 | 540 | |
7be5ebe8 ACM |
541 | perf_evsel__set_sample_bit(evsel, IP); |
542 | perf_evsel__set_sample_bit(evsel, TID); | |
0f82ebc4 | 543 | |
3c176311 JO |
544 | if (evsel->sample_read) { |
545 | perf_evsel__set_sample_bit(evsel, READ); | |
546 | ||
547 | /* | |
548 | * We need ID even in the case of a single event, because | |
549 | * PERF_SAMPLE_READ processes ID-specific data. | |
550 | */ | |
75562573 | 551 | perf_evsel__set_sample_id(evsel, false); |
3c176311 JO |
552 | |
553 | /* | |
554 | * Apply the group format only if we belong to a group | |
555 | * with more than one member. | |
556 | */ | |
557 | if (leader->nr_members > 1) { | |
558 | attr->read_format |= PERF_FORMAT_GROUP; | |
559 | attr->inherit = 0; | |
560 | } | |
561 | } | |
562 | ||
0f82ebc4 ACM |
563 | /* |
564 | * We default some events to a sample period of 1. But keep | |
565 | * it a weak assumption, overridable by the user. | |
566 | */ | |
567 | if (!attr->sample_period || (opts->user_freq != UINT_MAX && | |
568 | opts->user_interval != ULLONG_MAX)) { | |
569 | if (opts->freq) { | |
7be5ebe8 | 570 | perf_evsel__set_sample_bit(evsel, PERIOD); |
0f82ebc4 ACM |
571 | attr->freq = 1; |
572 | attr->sample_freq = opts->freq; | |
573 | } else { | |
574 | attr->sample_period = opts->default_interval; | |
575 | } | |
576 | } | |
577 | ||
3c176311 JO |
578 | /* |
579 | * Disable sampling for all group members other | |
580 | * than the leader, in case the leader 'leads' the sampling. | |
581 | */ | |
582 | if ((leader != evsel) && leader->sample_read) { | |
583 | attr->sample_freq = 0; | |
584 | attr->sample_period = 0; | |
585 | } | |
586 | ||
0f82ebc4 ACM |
587 | if (opts->no_samples) |
588 | attr->sample_freq = 0; | |
589 | ||
590 | if (opts->inherit_stat) | |
591 | attr->inherit_stat = 1; | |
592 | ||
593 | if (opts->sample_address) { | |
7be5ebe8 | 594 | perf_evsel__set_sample_bit(evsel, ADDR); |
0f82ebc4 ACM |
595 | attr->mmap_data = track; |
596 | } | |
597 | ||
26d33022 | 598 | if (opts->call_graph) { |
7be5ebe8 | 599 | perf_evsel__set_sample_bit(evsel, CALLCHAIN); |
0f82ebc4 | 600 | |
26d33022 | 601 | if (opts->call_graph == CALLCHAIN_DWARF) { |
7be5ebe8 ACM |
602 | perf_evsel__set_sample_bit(evsel, REGS_USER); |
603 | perf_evsel__set_sample_bit(evsel, STACK_USER); | |
26d33022 JO |
604 | attr->sample_regs_user = PERF_REGS_MASK; |
605 | attr->sample_stack_user = opts->stack_dump_size; | |
606 | attr->exclude_callchain_user = 1; | |
607 | } | |
608 | } | |
609 | ||
3aa5939d | 610 | if (target__has_cpu(&opts->target)) |
7be5ebe8 | 611 | perf_evsel__set_sample_bit(evsel, CPU); |
0f82ebc4 | 612 | |
3e76ac78 | 613 | if (opts->period) |
7be5ebe8 | 614 | perf_evsel__set_sample_bit(evsel, PERIOD); |
3e76ac78 | 615 | |
594ac61a | 616 | if (!perf_missing_features.sample_id_all && |
d67356e7 | 617 | (opts->sample_time || !opts->no_inherit || |
3aa5939d | 618 | target__has_cpu(&opts->target) || per_cpu)) |
7be5ebe8 | 619 | perf_evsel__set_sample_bit(evsel, TIME); |
0f82ebc4 ACM |
620 | |
621 | if (opts->raw_samples) { | |
7be5ebe8 ACM |
622 | perf_evsel__set_sample_bit(evsel, TIME); |
623 | perf_evsel__set_sample_bit(evsel, RAW); | |
624 | perf_evsel__set_sample_bit(evsel, CPU); | |
0f82ebc4 ACM |
625 | } |
626 | ||
ccf49bfc | 627 | if (opts->sample_address) |
1e7ed5ec | 628 | perf_evsel__set_sample_bit(evsel, DATA_SRC); |
ccf49bfc | 629 | |
0f82ebc4 ACM |
630 | if (opts->no_delay) { |
631 | attr->watermark = 0; | |
632 | attr->wakeup_events = 1; | |
633 | } | |
bdfebd84 | 634 | if (opts->branch_stack) { |
7be5ebe8 | 635 | perf_evsel__set_sample_bit(evsel, BRANCH_STACK); |
bdfebd84 RAV |
636 | attr->branch_sample_type = opts->branch_stack; |
637 | } | |
0f82ebc4 | 638 | |
05484298 | 639 | if (opts->sample_weight) |
1e7ed5ec | 640 | perf_evsel__set_sample_bit(evsel, WEIGHT); |
05484298 | 641 | |
5c5e854b | 642 | attr->mmap = track; |
5c5e854b | 643 | attr->comm = track; |
0f82ebc4 | 644 | |
475eeab9 | 645 | if (opts->sample_transaction) |
1e7ed5ec | 646 | perf_evsel__set_sample_bit(evsel, TRANSACTION); |
475eeab9 | 647 | |
774cb499 JO |
648 | /* |
649 | * XXX see the function comment above | |
650 | * | |
651 | * Disabling only independent events or group leaders, | |
652 | * keeping group members enabled. | |
653 | */ | |
823254ed | 654 | if (perf_evsel__is_group_leader(evsel)) |
774cb499 JO |
655 | attr->disabled = 1; |
656 | ||
657 | /* | |
658 | * Setting enable_on_exec for independent events and | |
659 | * group leaders for traced programs executed by perf. | |
660 | */ | |
602ad878 | 661 | if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) |
0f82ebc4 | 662 | attr->enable_on_exec = 1; |
0f82ebc4 ACM |
663 | } |
664 | ||
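A compact restatement of the enable_on_exec/disabled strategy implemented by perf_evsel__config() above, in terms of the attr bits that end up set (a sketch derived from the code and its leading comment, not additional behaviour):

```c
/*
 *  workload forked by perf, group leader:  .disabled = 1, .enable_on_exec = 1
 *  workload forked by perf, group member:  .disabled = 0  (ruled by the leader)
 *  attached pid/tid/cpu,    group leader:  .disabled = 1, enabled later by the
 *                                          record command via PERF_EVENT_IOC_ENABLE
 *  attached pid/tid/cpu,    group member:  .disabled = 0  (ruled by the leader)
 */
```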
69aad6f1 ACM |
665 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
666 | { | |
4af4c955 | 667 | int cpu, thread; |
69aad6f1 | 668 | evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); |
4af4c955 DA |
669 | |
670 | if (evsel->fd) { | |
671 | for (cpu = 0; cpu < ncpus; cpu++) { | |
672 | for (thread = 0; thread < nthreads; thread++) { | |
673 | FD(evsel, cpu, thread) = -1; | |
674 | } | |
675 | } | |
676 | } | |
677 | ||
69aad6f1 ACM |
678 | return evsel->fd != NULL ? 0 : -ENOMEM; |
679 | } | |
680 | ||
e2407bef AK |
681 | static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, |
682 | int ioc, void *arg) | |
745cefc5 ACM |
683 | { |
684 | int cpu, thread; | |
685 | ||
686 | for (cpu = 0; cpu < ncpus; cpu++) { | |
687 | for (thread = 0; thread < nthreads; thread++) { | |
688 | int fd = FD(evsel, cpu, thread), | |
e2407bef | 689 | err = ioctl(fd, ioc, arg); |
745cefc5 ACM |
690 | |
691 | if (err) | |
692 | return err; | |
693 | } | |
694 | } | |
695 | ||
696 | return 0; | |
697 | } | |
698 | ||
e2407bef AK |
699 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, |
700 | const char *filter) | |
701 | { | |
702 | return perf_evsel__run_ioctl(evsel, ncpus, nthreads, | |
703 | PERF_EVENT_IOC_SET_FILTER, | |
704 | (void *)filter); | |
705 | } | |
706 | ||
707 | int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) | |
708 | { | |
709 | return perf_evsel__run_ioctl(evsel, ncpus, nthreads, | |
710 | PERF_EVENT_IOC_ENABLE, | |
711 | 0); | |
712 | } | |
713 | ||
70db7533 ACM |
714 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) |
715 | { | |
a91e5431 ACM |
716 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); |
717 | if (evsel->sample_id == NULL) | |
718 | return -ENOMEM; | |
719 | ||
720 | evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); | |
721 | if (evsel->id == NULL) { | |
722 | xyarray__delete(evsel->sample_id); | |
723 | evsel->sample_id = NULL; | |
724 | return -ENOMEM; | |
725 | } | |
726 | ||
727 | return 0; | |
70db7533 ACM |
728 | } |
729 | ||
a7e191c3 FD |
730 | void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) |
731 | { | |
732 | memset(evsel->counts, 0, (sizeof(*evsel->counts) + | |
733 | (ncpus * sizeof(struct perf_counts_values)))); | |
734 | } | |
735 | ||
c52b12ed ACM |
736 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) |
737 | { | |
738 | evsel->counts = zalloc((sizeof(*evsel->counts) + | |
739 | (ncpus * sizeof(struct perf_counts_values)))); | |
740 | return evsel->counts != NULL ? 0 : -ENOMEM; | |
741 | } | |
742 | ||
69aad6f1 ACM |
743 | void perf_evsel__free_fd(struct perf_evsel *evsel) |
744 | { | |
745 | xyarray__delete(evsel->fd); | |
746 | evsel->fd = NULL; | |
747 | } | |
748 | ||
70db7533 ACM |
749 | void perf_evsel__free_id(struct perf_evsel *evsel) |
750 | { | |
a91e5431 ACM |
751 | xyarray__delete(evsel->sample_id); |
752 | evsel->sample_id = NULL; | |
04662523 | 753 | zfree(&evsel->id); |
70db7533 ACM |
754 | } |
755 | ||
c52b12ed ACM |
756 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
757 | { | |
758 | int cpu, thread; | |
759 | ||
760 | for (cpu = 0; cpu < ncpus; cpu++) | |
761 | for (thread = 0; thread < nthreads; ++thread) { | |
762 | close(FD(evsel, cpu, thread)); | |
763 | FD(evsel, cpu, thread) = -1; | |
764 | } | |
765 | } | |
766 | ||
43f8e76e NK |
767 | void perf_evsel__free_counts(struct perf_evsel *evsel) |
768 | { | |
74cf249d | 769 | zfree(&evsel->counts); |
43f8e76e NK |
770 | } |
771 | ||
ef1d1af2 | 772 | void perf_evsel__exit(struct perf_evsel *evsel) |
69aad6f1 ACM |
773 | { |
774 | assert(list_empty(&evsel->node)); | |
736b05a0 NK |
775 | perf_evsel__free_fd(evsel); |
776 | perf_evsel__free_id(evsel); | |
ef1d1af2 ACM |
777 | } |
778 | ||
779 | void perf_evsel__delete(struct perf_evsel *evsel) | |
780 | { | |
781 | perf_evsel__exit(evsel); | |
023695d9 | 782 | close_cgroup(evsel->cgrp); |
74cf249d | 783 | zfree(&evsel->group_name); |
e48ffe2b | 784 | if (evsel->tp_format) |
efd2b924 | 785 | pevent_free_format(evsel->tp_format); |
74cf249d | 786 | zfree(&evsel->name); |
69aad6f1 ACM |
787 | free(evsel); |
788 | } | |
c52b12ed | 789 | |
c7a79c47 SE |
790 | static inline void compute_deltas(struct perf_evsel *evsel, |
791 | int cpu, | |
792 | struct perf_counts_values *count) | |
793 | { | |
794 | struct perf_counts_values tmp; | |
795 | ||
796 | if (!evsel->prev_raw_counts) | |
797 | return; | |
798 | ||
799 | if (cpu == -1) { | |
800 | tmp = evsel->prev_raw_counts->aggr; | |
801 | evsel->prev_raw_counts->aggr = *count; | |
802 | } else { | |
803 | tmp = evsel->prev_raw_counts->cpu[cpu]; | |
804 | evsel->prev_raw_counts->cpu[cpu] = *count; | |
805 | } | |
806 | ||
807 | count->val = count->val - tmp.val; | |
808 | count->ena = count->ena - tmp.ena; | |
809 | count->run = count->run - tmp.run; | |
810 | } | |
811 | ||
c52b12ed ACM |
812 | int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, |
813 | int cpu, int thread, bool scale) | |
814 | { | |
815 | struct perf_counts_values count; | |
816 | size_t nv = scale ? 3 : 1; | |
817 | ||
818 | if (FD(evsel, cpu, thread) < 0) | |
819 | return -EINVAL; | |
820 | ||
4eed11d5 ACM |
821 | if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) |
822 | return -ENOMEM; | |
823 | ||
c52b12ed ACM |
824 | if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) |
825 | return -errno; | |
826 | ||
c7a79c47 SE |
827 | compute_deltas(evsel, cpu, &count); |
828 | ||
c52b12ed ACM |
829 | if (scale) { |
830 | if (count.run == 0) | |
831 | count.val = 0; | |
832 | else if (count.run < count.ena) | |
833 | count.val = (u64)((double)count.val * count.ena / count.run + 0.5); | |
834 | } else | |
835 | count.ena = count.run = 0; | |
836 | ||
837 | evsel->counts->cpu[cpu] = count; | |
838 | return 0; | |
839 | } | |
840 | ||
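The scaling above compensates for counter multiplexing: when a counter only ran for part of the time it was enabled, the raw value is scaled up by ena/run. A worked standalone sketch with made-up numbers:

```c
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t val = 1000000;            /* raw count                 */
	uint64_t ena = 100000000;          /* time enabled, e.g. 100 ms */
	uint64_t run = 40000000;           /* time running, e.g. 40 ms  */

	if (run == 0)
		val = 0;
	else if (run < ena)                /* same rounding as above */
		val = (uint64_t)((double)val * ena / run + 0.5);

	printf("scaled estimate = %" PRIu64 "\n", val); /* 2500000 */
	return 0;
}
```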
841 | int __perf_evsel__read(struct perf_evsel *evsel, | |
842 | int ncpus, int nthreads, bool scale) | |
843 | { | |
844 | size_t nv = scale ? 3 : 1; | |
845 | int cpu, thread; | |
846 | struct perf_counts_values *aggr = &evsel->counts->aggr, count; | |
847 | ||
52bcd994 | 848 | aggr->val = aggr->ena = aggr->run = 0; |
c52b12ed ACM |
849 | |
850 | for (cpu = 0; cpu < ncpus; cpu++) { | |
851 | for (thread = 0; thread < nthreads; thread++) { | |
852 | if (FD(evsel, cpu, thread) < 0) | |
853 | continue; | |
854 | ||
855 | if (readn(FD(evsel, cpu, thread), | |
856 | &count, nv * sizeof(u64)) < 0) | |
857 | return -errno; | |
858 | ||
859 | aggr->val += count.val; | |
860 | if (scale) { | |
861 | aggr->ena += count.ena; | |
862 | aggr->run += count.run; | |
863 | } | |
864 | } | |
865 | } | |
866 | ||
c7a79c47 SE |
867 | compute_deltas(evsel, -1, aggr); |
868 | ||
c52b12ed ACM |
869 | evsel->counts->scaled = 0; |
870 | if (scale) { | |
871 | if (aggr->run == 0) { | |
872 | evsel->counts->scaled = -1; | |
873 | aggr->val = 0; | |
874 | return 0; | |
875 | } | |
876 | ||
877 | if (aggr->run < aggr->ena) { | |
878 | evsel->counts->scaled = 1; | |
879 | aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5); | |
880 | } | |
881 | } else | |
882 | aggr->ena = aggr->run = 0; | |
883 | ||
884 | return 0; | |
885 | } | |
48290609 | 886 | |
6a4bb04c JO |
887 | static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) |
888 | { | |
889 | struct perf_evsel *leader = evsel->leader; | |
890 | int fd; | |
891 | ||
823254ed | 892 | if (perf_evsel__is_group_leader(evsel)) |
6a4bb04c JO |
893 | return -1; |
894 | ||
895 | /* | |
896 | * The leader must already be processed/open; | |
897 | * if not, it's a bug. | |
898 | */ | |
899 | BUG_ON(!leader->fd); | |
900 | ||
901 | fd = FD(leader, cpu, thread); | |
902 | BUG_ON(fd == -1); | |
903 | ||
904 | return fd; | |
905 | } | |
906 | ||
e3e1a54f AH |
907 | #define __PRINT_ATTR(fmt, cast, field) \ |
908 | fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field) | |
909 | ||
910 | #define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field) | |
911 | #define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field) | |
912 | #define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field) | |
913 | #define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field) | |
914 | ||
915 | #define PRINT_ATTR2N(name1, field1, name2, field2) \ | |
916 | fprintf(fp, " %-19s %u %-19s %u\n", \ | |
917 | name1, attr->field1, name2, attr->field2) | |
918 | ||
919 | #define PRINT_ATTR2(field1, field2) \ | |
920 | PRINT_ATTR2N(#field1, field1, #field2, field2) | |
921 | ||
922 | static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) | |
923 | { | |
924 | size_t ret = 0; | |
925 | ||
926 | ret += fprintf(fp, "%.60s\n", graph_dotted_line); | |
927 | ret += fprintf(fp, "perf_event_attr:\n"); | |
928 | ||
929 | ret += PRINT_ATTR_U32(type); | |
930 | ret += PRINT_ATTR_U32(size); | |
931 | ret += PRINT_ATTR_X64(config); | |
932 | ret += PRINT_ATTR_U64(sample_period); | |
933 | ret += PRINT_ATTR_U64(sample_freq); | |
934 | ret += PRINT_ATTR_X64(sample_type); | |
935 | ret += PRINT_ATTR_X64(read_format); | |
936 | ||
937 | ret += PRINT_ATTR2(disabled, inherit); | |
938 | ret += PRINT_ATTR2(pinned, exclusive); | |
939 | ret += PRINT_ATTR2(exclude_user, exclude_kernel); | |
940 | ret += PRINT_ATTR2(exclude_hv, exclude_idle); | |
941 | ret += PRINT_ATTR2(mmap, comm); | |
942 | ret += PRINT_ATTR2(freq, inherit_stat); | |
943 | ret += PRINT_ATTR2(enable_on_exec, task); | |
944 | ret += PRINT_ATTR2(watermark, precise_ip); | |
945 | ret += PRINT_ATTR2(mmap_data, sample_id_all); | |
946 | ret += PRINT_ATTR2(exclude_host, exclude_guest); | |
947 | ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel, | |
948 | "excl.callchain_user", exclude_callchain_user); | |
40d54ec2 | 949 | ret += PRINT_ATTR_U32(mmap2); |
e3e1a54f AH |
950 | |
951 | ret += PRINT_ATTR_U32(wakeup_events); | |
952 | ret += PRINT_ATTR_U32(wakeup_watermark); | |
953 | ret += PRINT_ATTR_X32(bp_type); | |
954 | ret += PRINT_ATTR_X64(bp_addr); | |
955 | ret += PRINT_ATTR_X64(config1); | |
956 | ret += PRINT_ATTR_U64(bp_len); | |
957 | ret += PRINT_ATTR_X64(config2); | |
958 | ret += PRINT_ATTR_X64(branch_sample_type); | |
959 | ret += PRINT_ATTR_X64(sample_regs_user); | |
960 | ret += PRINT_ATTR_U32(sample_stack_user); | |
961 | ||
962 | ret += fprintf(fp, "%.60s\n", graph_dotted_line); | |
963 | ||
964 | return ret; | |
965 | } | |
966 | ||
0252208e | 967 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
6a4bb04c | 968 | struct thread_map *threads) |
48290609 | 969 | { |
0252208e | 970 | int cpu, thread; |
023695d9 | 971 | unsigned long flags = 0; |
727ab04e | 972 | int pid = -1, err; |
bec19672 | 973 | enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; |
48290609 | 974 | |
0252208e ACM |
975 | if (evsel->fd == NULL && |
976 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) | |
727ab04e | 977 | return -ENOMEM; |
4eed11d5 | 978 | |
023695d9 SE |
979 | if (evsel->cgrp) { |
980 | flags = PERF_FLAG_PID_CGROUP; | |
981 | pid = evsel->cgrp->fd; | |
982 | } | |
983 | ||
594ac61a | 984 | fallback_missing_features: |
5c5e854b SE |
985 | if (perf_missing_features.mmap2) |
986 | evsel->attr.mmap2 = 0; | |
594ac61a ACM |
987 | if (perf_missing_features.exclude_guest) |
988 | evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; | |
989 | retry_sample_id: | |
990 | if (perf_missing_features.sample_id_all) | |
991 | evsel->attr.sample_id_all = 0; | |
992 | ||
e3e1a54f AH |
993 | if (verbose >= 2) |
994 | perf_event_attr__fprintf(&evsel->attr, stderr); | |
995 | ||
86bd5e86 | 996 | for (cpu = 0; cpu < cpus->nr; cpu++) { |
9d04f178 | 997 | |
0252208e | 998 | for (thread = 0; thread < threads->nr; thread++) { |
6a4bb04c | 999 | int group_fd; |
023695d9 SE |
1000 | |
1001 | if (!evsel->cgrp) | |
1002 | pid = threads->map[thread]; | |
1003 | ||
6a4bb04c | 1004 | group_fd = get_group_fd(evsel, cpu, thread); |
bec19672 | 1005 | retry_open: |
e3e1a54f AH |
1006 | pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n", |
1007 | pid, cpus->map[cpu], group_fd, flags); | |
1008 | ||
0252208e | 1009 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, |
023695d9 | 1010 | pid, |
f08199d3 | 1011 | cpus->map[cpu], |
023695d9 | 1012 | group_fd, flags); |
727ab04e ACM |
1013 | if (FD(evsel, cpu, thread) < 0) { |
1014 | err = -errno; | |
f852fd62 AH |
1015 | pr_debug2("perf_event_open failed, error %d\n", |
1016 | err); | |
594ac61a | 1017 | goto try_fallback; |
727ab04e | 1018 | } |
bec19672 | 1019 | set_rlimit = NO_CHANGE; |
0252208e | 1020 | } |
48290609 ACM |
1021 | } |
1022 | ||
1023 | return 0; | |
1024 | ||
594ac61a | 1025 | try_fallback: |
bec19672 AK |
1026 | /* |
1027 | * perf stat needs between 5 and 22 fds per CPU. When we run out | |
1028 | * of them, try to increase the limits. | |
1029 | */ | |
1030 | if (err == -EMFILE && set_rlimit < INCREASED_MAX) { | |
1031 | struct rlimit l; | |
1032 | int old_errno = errno; | |
1033 | ||
1034 | if (getrlimit(RLIMIT_NOFILE, &l) == 0) { | |
1035 | if (set_rlimit == NO_CHANGE) | |
1036 | l.rlim_cur = l.rlim_max; | |
1037 | else { | |
1038 | l.rlim_cur = l.rlim_max + 1000; | |
1039 | l.rlim_max = l.rlim_cur; | |
1040 | } | |
1041 | if (setrlimit(RLIMIT_NOFILE, &l) == 0) { | |
1042 | set_rlimit++; | |
1043 | errno = old_errno; | |
1044 | goto retry_open; | |
1045 | } | |
1046 | } | |
1047 | errno = old_errno; | |
1048 | } | |
1049 | ||
594ac61a ACM |
1050 | if (err != -EINVAL || cpu > 0 || thread > 0) |
1051 | goto out_close; | |
1052 | ||
5c5e854b SE |
1053 | if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { |
1054 | perf_missing_features.mmap2 = true; | |
1055 | goto fallback_missing_features; | |
1056 | } else if (!perf_missing_features.exclude_guest && | |
1057 | (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { | |
594ac61a ACM |
1058 | perf_missing_features.exclude_guest = true; |
1059 | goto fallback_missing_features; | |
1060 | } else if (!perf_missing_features.sample_id_all) { | |
1061 | perf_missing_features.sample_id_all = true; | |
1062 | goto retry_sample_id; | |
1063 | } | |
1064 | ||
48290609 | 1065 | out_close: |
0252208e ACM |
1066 | do { |
1067 | while (--thread >= 0) { | |
1068 | close(FD(evsel, cpu, thread)); | |
1069 | FD(evsel, cpu, thread) = -1; | |
1070 | } | |
1071 | thread = threads->nr; | |
1072 | } while (--cpu >= 0); | |
727ab04e ACM |
1073 | return err; |
1074 | } | |
1075 | ||
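The EMFILE fallback above first raises the soft RLIMIT_NOFILE to the hard limit and only then tries to raise the hard limit itself. A minimal standalone sketch of that first step (error handling trimmed):

```c
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		l.rlim_cur = l.rlim_max;       /* soft limit -> hard limit */
		if (setrlimit(RLIMIT_NOFILE, &l) != 0)
			perror("setrlimit");
		else
			printf("fd limit now %llu\n",
			       (unsigned long long)l.rlim_cur);
	}
	return 0;
}
```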
1076 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) | |
1077 | { | |
1078 | if (evsel->fd == NULL) | |
1079 | return; | |
1080 | ||
1081 | perf_evsel__close_fd(evsel, ncpus, nthreads); | |
1082 | perf_evsel__free_fd(evsel); | |
1083 | evsel->fd = NULL; | |
48290609 ACM |
1084 | } |
1085 | ||
0252208e ACM |
1086 | static struct { |
1087 | struct cpu_map map; | |
1088 | int cpus[1]; | |
1089 | } empty_cpu_map = { | |
1090 | .map.nr = 1, | |
1091 | .cpus = { -1, }, | |
1092 | }; | |
1093 | ||
1094 | static struct { | |
1095 | struct thread_map map; | |
1096 | int threads[1]; | |
1097 | } empty_thread_map = { | |
1098 | .map.nr = 1, | |
1099 | .threads = { -1, }, | |
1100 | }; | |
1101 | ||
f08199d3 | 1102 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
6a4bb04c | 1103 | struct thread_map *threads) |
48290609 | 1104 | { |
0252208e ACM |
1105 | if (cpus == NULL) { |
1106 | /* Work around old compiler warnings about strict aliasing */ | |
1107 | cpus = &empty_cpu_map.map; | |
48290609 ACM |
1108 | } |
1109 | ||
0252208e ACM |
1110 | if (threads == NULL) |
1111 | threads = &empty_thread_map.map; | |
48290609 | 1112 | |
6a4bb04c | 1113 | return __perf_evsel__open(evsel, cpus, threads); |
48290609 ACM |
1114 | } |
1115 | ||
f08199d3 | 1116 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
6a4bb04c | 1117 | struct cpu_map *cpus) |
48290609 | 1118 | { |
6a4bb04c | 1119 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); |
0252208e | 1120 | } |
48290609 | 1121 | |
f08199d3 | 1122 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, |
6a4bb04c | 1123 | struct thread_map *threads) |
0252208e | 1124 | { |
6a4bb04c | 1125 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); |
48290609 | 1126 | } |
70082dd9 | 1127 | |
0807d2d8 ACM |
1128 | static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, |
1129 | const union perf_event *event, | |
1130 | struct perf_sample *sample) | |
d0dd74e8 | 1131 | { |
0807d2d8 | 1132 | u64 type = evsel->attr.sample_type; |
d0dd74e8 | 1133 | const u64 *array = event->sample.array; |
0807d2d8 | 1134 | bool swapped = evsel->needs_swap; |
37073f9e | 1135 | union u64_swap u; |
d0dd74e8 ACM |
1136 | |
1137 | array += ((event->header.size - | |
1138 | sizeof(event->header)) / sizeof(u64)) - 1; | |
1139 | ||
75562573 AH |
1140 | if (type & PERF_SAMPLE_IDENTIFIER) { |
1141 | sample->id = *array; | |
1142 | array--; | |
1143 | } | |
1144 | ||
d0dd74e8 | 1145 | if (type & PERF_SAMPLE_CPU) { |
37073f9e JO |
1146 | u.val64 = *array; |
1147 | if (swapped) { | |
1148 | /* undo swap of u64, then swap on individual u32s */ | |
1149 | u.val64 = bswap_64(u.val64); | |
1150 | u.val32[0] = bswap_32(u.val32[0]); | |
1151 | } | |
1152 | ||
1153 | sample->cpu = u.val32[0]; | |
d0dd74e8 ACM |
1154 | array--; |
1155 | } | |
1156 | ||
1157 | if (type & PERF_SAMPLE_STREAM_ID) { | |
1158 | sample->stream_id = *array; | |
1159 | array--; | |
1160 | } | |
1161 | ||
1162 | if (type & PERF_SAMPLE_ID) { | |
1163 | sample->id = *array; | |
1164 | array--; | |
1165 | } | |
1166 | ||
1167 | if (type & PERF_SAMPLE_TIME) { | |
1168 | sample->time = *array; | |
1169 | array--; | |
1170 | } | |
1171 | ||
1172 | if (type & PERF_SAMPLE_TID) { | |
37073f9e JO |
1173 | u.val64 = *array; |
1174 | if (swapped) { | |
1175 | /* undo swap of u64, then swap on individual u32s */ | |
1176 | u.val64 = bswap_64(u.val64); | |
1177 | u.val32[0] = bswap_32(u.val32[0]); | |
1178 | u.val32[1] = bswap_32(u.val32[1]); | |
1179 | } | |
1180 | ||
1181 | sample->pid = u.val32[0]; | |
1182 | sample->tid = u.val32[1]; | |
dd44bc6b | 1183 | array--; |
d0dd74e8 ACM |
1184 | } |
1185 | ||
1186 | return 0; | |
1187 | } | |
1188 | ||
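Both parsers in this file use the same cross-endian trick for the packed u32 pairs (pid/tid, cpu): the generic event swap already byte-swapped the containing u64, so that swap is undone and the two u32 halves are then swapped individually. A standalone sketch of that dance:

```c
#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

union u64_swap_sketch {                 /* mirrors the u64_swap union used here */
	uint64_t val64;
	uint32_t val32[2];
};

int main(void)
{
	union u64_swap_sketch u;

	u.val64 = 0x1122334455667788ULL;    /* made-up pid/tid pair        */
	u.val64 = bswap_64(u.val64);        /* undo the generic u64 swap   */
	u.val32[0] = bswap_32(u.val32[0]);  /* then swap each u32 in place */
	u.val32[1] = bswap_32(u.val32[1]);

	printf("pid=%#x tid=%#x\n", u.val32[0], u.val32[1]);
	return 0;
}
```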
03b6ea9b AH |
1189 | static inline bool overflow(const void *endp, u16 max_size, const void *offset, |
1190 | u64 size) | |
98e1da90 | 1191 | { |
03b6ea9b AH |
1192 | return size > max_size || offset + size > endp; |
1193 | } | |
98e1da90 | 1194 | |
03b6ea9b AH |
1195 | #define OVERFLOW_CHECK(offset, size, max_size) \ |
1196 | do { \ | |
1197 | if (overflow(endp, (max_size), (offset), (size))) \ | |
1198 | return -EFAULT; \ | |
1199 | } while (0) | |
98e1da90 | 1200 | |
03b6ea9b AH |
1201 | #define OVERFLOW_CHECK_u64(offset) \ |
1202 | OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) | |
98e1da90 | 1203 | |
a3f698fe | 1204 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, |
0807d2d8 | 1205 | struct perf_sample *data) |
d0dd74e8 | 1206 | { |
a3f698fe | 1207 | u64 type = evsel->attr.sample_type; |
0807d2d8 | 1208 | bool swapped = evsel->needs_swap; |
d0dd74e8 | 1209 | const u64 *array; |
03b6ea9b AH |
1210 | u16 max_size = event->header.size; |
1211 | const void *endp = (void *)event + max_size; | |
1212 | u64 sz; | |
d0dd74e8 | 1213 | |
936be503 DA |
1214 | /* |
1215 | * used for cross-endian analysis. See git commit 65014ab3 | |
1216 | * for why this goofiness is needed. | |
1217 | */ | |
6a11f92e | 1218 | union u64_swap u; |
936be503 | 1219 | |
f3bda2c9 | 1220 | memset(data, 0, sizeof(*data)); |
d0dd74e8 ACM |
1221 | data->cpu = data->pid = data->tid = -1; |
1222 | data->stream_id = data->id = data->time = -1ULL; | |
a4a03fc7 | 1223 | data->period = 1; |
05484298 | 1224 | data->weight = 0; |
d0dd74e8 ACM |
1225 | |
1226 | if (event->header.type != PERF_RECORD_SAMPLE) { | |
a3f698fe | 1227 | if (!evsel->attr.sample_id_all) |
d0dd74e8 | 1228 | return 0; |
0807d2d8 | 1229 | return perf_evsel__parse_id_sample(evsel, event, data); |
d0dd74e8 ACM |
1230 | } |
1231 | ||
1232 | array = event->sample.array; | |
1233 | ||
03b6ea9b AH |
1234 | /* |
1235 | * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes | |
1236 | * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to | |
1237 | * check that the format does not go past the end of the event. | |
1238 | */ | |
a3f698fe | 1239 | if (evsel->sample_size + sizeof(event->header) > event->header.size) |
a2854124 FW |
1240 | return -EFAULT; |
1241 | ||
75562573 AH |
1242 | data->id = -1ULL; |
1243 | if (type & PERF_SAMPLE_IDENTIFIER) { | |
1244 | data->id = *array; | |
1245 | array++; | |
1246 | } | |
1247 | ||
d0dd74e8 | 1248 | if (type & PERF_SAMPLE_IP) { |
ef89325f | 1249 | data->ip = *array; |
d0dd74e8 ACM |
1250 | array++; |
1251 | } | |
1252 | ||
1253 | if (type & PERF_SAMPLE_TID) { | |
936be503 DA |
1254 | u.val64 = *array; |
1255 | if (swapped) { | |
1256 | /* undo swap of u64, then swap on individual u32s */ | |
1257 | u.val64 = bswap_64(u.val64); | |
1258 | u.val32[0] = bswap_32(u.val32[0]); | |
1259 | u.val32[1] = bswap_32(u.val32[1]); | |
1260 | } | |
1261 | ||
1262 | data->pid = u.val32[0]; | |
1263 | data->tid = u.val32[1]; | |
d0dd74e8 ACM |
1264 | array++; |
1265 | } | |
1266 | ||
1267 | if (type & PERF_SAMPLE_TIME) { | |
1268 | data->time = *array; | |
1269 | array++; | |
1270 | } | |
1271 | ||
7cec0922 | 1272 | data->addr = 0; |
d0dd74e8 ACM |
1273 | if (type & PERF_SAMPLE_ADDR) { |
1274 | data->addr = *array; | |
1275 | array++; | |
1276 | } | |
1277 | ||
d0dd74e8 ACM |
1278 | if (type & PERF_SAMPLE_ID) { |
1279 | data->id = *array; | |
1280 | array++; | |
1281 | } | |
1282 | ||
1283 | if (type & PERF_SAMPLE_STREAM_ID) { | |
1284 | data->stream_id = *array; | |
1285 | array++; | |
1286 | } | |
1287 | ||
1288 | if (type & PERF_SAMPLE_CPU) { | |
936be503 DA |
1289 | |
1290 | u.val64 = *array; | |
1291 | if (swapped) { | |
1292 | /* undo swap of u64, then swap on individual u32s */ | |
1293 | u.val64 = bswap_64(u.val64); | |
1294 | u.val32[0] = bswap_32(u.val32[0]); | |
1295 | } | |
1296 | ||
1297 | data->cpu = u.val32[0]; | |
d0dd74e8 ACM |
1298 | array++; |
1299 | } | |
1300 | ||
1301 | if (type & PERF_SAMPLE_PERIOD) { | |
1302 | data->period = *array; | |
1303 | array++; | |
1304 | } | |
1305 | ||
1306 | if (type & PERF_SAMPLE_READ) { | |
9ede473c JO |
1307 | u64 read_format = evsel->attr.read_format; |
1308 | ||
03b6ea9b | 1309 | OVERFLOW_CHECK_u64(array); |
9ede473c JO |
1310 | if (read_format & PERF_FORMAT_GROUP) |
1311 | data->read.group.nr = *array; | |
1312 | else | |
1313 | data->read.one.value = *array; | |
1314 | ||
1315 | array++; | |
1316 | ||
1317 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | |
03b6ea9b | 1318 | OVERFLOW_CHECK_u64(array); |
9ede473c JO |
1319 | data->read.time_enabled = *array; |
1320 | array++; | |
1321 | } | |
1322 | ||
1323 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | |
03b6ea9b | 1324 | OVERFLOW_CHECK_u64(array); |
9ede473c JO |
1325 | data->read.time_running = *array; |
1326 | array++; | |
1327 | } | |
1328 | ||
1329 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | |
1330 | if (read_format & PERF_FORMAT_GROUP) { | |
03b6ea9b AH |
1331 | const u64 max_group_nr = UINT64_MAX / |
1332 | sizeof(struct sample_read_value); | |
1333 | ||
1334 | if (data->read.group.nr > max_group_nr) | |
1335 | return -EFAULT; | |
1336 | sz = data->read.group.nr * | |
1337 | sizeof(struct sample_read_value); | |
1338 | OVERFLOW_CHECK(array, sz, max_size); | |
1339 | data->read.group.values = | |
1340 | (struct sample_read_value *)array; | |
1341 | array = (void *)array + sz; | |
9ede473c | 1342 | } else { |
03b6ea9b | 1343 | OVERFLOW_CHECK_u64(array); |
9ede473c JO |
1344 | data->read.one.id = *array; |
1345 | array++; | |
1346 | } | |
d0dd74e8 ACM |
1347 | } |
1348 | ||
1349 | if (type & PERF_SAMPLE_CALLCHAIN) { | |
03b6ea9b | 1350 | const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); |
98e1da90 | 1351 | |
03b6ea9b AH |
1352 | OVERFLOW_CHECK_u64(array); |
1353 | data->callchain = (struct ip_callchain *)array++; | |
1354 | if (data->callchain->nr > max_callchain_nr) | |
98e1da90 | 1355 | return -EFAULT; |
03b6ea9b AH |
1356 | sz = data->callchain->nr * sizeof(u64); |
1357 | OVERFLOW_CHECK(array, sz, max_size); | |
1358 | array = (void *)array + sz; | |
d0dd74e8 ACM |
1359 | } |
1360 | ||
1361 | if (type & PERF_SAMPLE_RAW) { | |
03b6ea9b | 1362 | OVERFLOW_CHECK_u64(array); |
936be503 DA |
1363 | u.val64 = *array; |
1364 | if (WARN_ONCE(swapped, | |
1365 | "Endianness of raw data not corrected!\n")) { | |
1366 | /* undo swap of u64, then swap on individual u32s */ | |
1367 | u.val64 = bswap_64(u.val64); | |
1368 | u.val32[0] = bswap_32(u.val32[0]); | |
1369 | u.val32[1] = bswap_32(u.val32[1]); | |
1370 | } | |
936be503 | 1371 | data->raw_size = u.val32[0]; |
03b6ea9b | 1372 | array = (void *)array + sizeof(u32); |
98e1da90 | 1373 | |
03b6ea9b AH |
1374 | OVERFLOW_CHECK(array, data->raw_size, max_size); |
1375 | data->raw_data = (void *)array; | |
1376 | array = (void *)array + data->raw_size; | |
d0dd74e8 ACM |
1377 | } |
1378 | ||
b5387528 | 1379 | if (type & PERF_SAMPLE_BRANCH_STACK) { |
03b6ea9b AH |
1380 | const u64 max_branch_nr = UINT64_MAX / |
1381 | sizeof(struct branch_entry); | |
b5387528 | 1382 | |
03b6ea9b AH |
1383 | OVERFLOW_CHECK_u64(array); |
1384 | data->branch_stack = (struct branch_stack *)array++; | |
b5387528 | 1385 | |
03b6ea9b AH |
1386 | if (data->branch_stack->nr > max_branch_nr) |
1387 | return -EFAULT; | |
b5387528 | 1388 | sz = data->branch_stack->nr * sizeof(struct branch_entry); |
03b6ea9b AH |
1389 | OVERFLOW_CHECK(array, sz, max_size); |
1390 | array = (void *)array + sz; | |
b5387528 | 1391 | } |
0f6a3015 JO |
1392 | |
1393 | if (type & PERF_SAMPLE_REGS_USER) { | |
03b6ea9b | 1394 | OVERFLOW_CHECK_u64(array); |
5b95a4a3 AH |
1395 | data->user_regs.abi = *array; |
1396 | array++; | |
0f6a3015 | 1397 | |
5b95a4a3 | 1398 | if (data->user_regs.abi) { |
03b6ea9b AH |
1399 | u64 regs_user = evsel->attr.sample_regs_user; |
1400 | ||
1401 | sz = hweight_long(regs_user) * sizeof(u64); | |
1402 | OVERFLOW_CHECK(array, sz, max_size); | |
0f6a3015 | 1403 | data->user_regs.regs = (u64 *)array; |
03b6ea9b | 1404 | array = (void *)array + sz; |
0f6a3015 JO |
1405 | } |
1406 | } | |
1407 | ||
1408 | if (type & PERF_SAMPLE_STACK_USER) { | |
03b6ea9b AH |
1409 | OVERFLOW_CHECK_u64(array); |
1410 | sz = *array++; | |
0f6a3015 JO |
1411 | |
1412 | data->user_stack.offset = ((char *)(array - 1) | |
1413 | - (char *) event); | |
1414 | ||
03b6ea9b | 1415 | if (!sz) { |
0f6a3015 JO |
1416 | data->user_stack.size = 0; |
1417 | } else { | |
03b6ea9b | 1418 | OVERFLOW_CHECK(array, sz, max_size); |
0f6a3015 | 1419 | data->user_stack.data = (char *)array; |
03b6ea9b AH |
1420 | array = (void *)array + sz; |
1421 | OVERFLOW_CHECK_u64(array); | |
54bd2692 | 1422 | data->user_stack.size = *array++; |
a65cb4b9 JO |
1423 | if (WARN_ONCE(data->user_stack.size > sz, |
1424 | "user stack dump failure\n")) | |
1425 | return -EFAULT; | |
0f6a3015 JO |
1426 | } |
1427 | } | |
1428 | ||
05484298 AK |
1429 | data->weight = 0; |
1430 | if (type & PERF_SAMPLE_WEIGHT) { | |
03b6ea9b | 1431 | OVERFLOW_CHECK_u64(array); |
05484298 AK |
1432 | data->weight = *array; |
1433 | array++; | |
1434 | } | |
1435 | ||
98a3b32c SE |
1436 | data->data_src = PERF_MEM_DATA_SRC_NONE; |
1437 | if (type & PERF_SAMPLE_DATA_SRC) { | |
03b6ea9b | 1438 | OVERFLOW_CHECK_u64(array); |
98a3b32c SE |
1439 | data->data_src = *array; |
1440 | array++; | |
1441 | } | |
1442 | ||
475eeab9 AK |
1443 | data->transaction = 0; |
1444 | if (type & PERF_SAMPLE_TRANSACTION) { | |
87b95524 | 1445 | OVERFLOW_CHECK_u64(array); |
475eeab9 AK |
1446 | data->transaction = *array; |
1447 | array++; | |
1448 | } | |
1449 | ||
d0dd74e8 ACM |
1450 | return 0; |
1451 | } | |
74eec26f | 1452 | |
b1cf6f65 AH |
1453 | size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, |
1454 | u64 sample_regs_user, u64 read_format) | |
1455 | { | |
1456 | size_t sz, result = sizeof(struct sample_event); | |
1457 | ||
1458 | if (type & PERF_SAMPLE_IDENTIFIER) | |
1459 | result += sizeof(u64); | |
1460 | ||
1461 | if (type & PERF_SAMPLE_IP) | |
1462 | result += sizeof(u64); | |
1463 | ||
1464 | if (type & PERF_SAMPLE_TID) | |
1465 | result += sizeof(u64); | |
1466 | ||
1467 | if (type & PERF_SAMPLE_TIME) | |
1468 | result += sizeof(u64); | |
1469 | ||
1470 | if (type & PERF_SAMPLE_ADDR) | |
1471 | result += sizeof(u64); | |
1472 | ||
1473 | if (type & PERF_SAMPLE_ID) | |
1474 | result += sizeof(u64); | |
1475 | ||
1476 | if (type & PERF_SAMPLE_STREAM_ID) | |
1477 | result += sizeof(u64); | |
1478 | ||
1479 | if (type & PERF_SAMPLE_CPU) | |
1480 | result += sizeof(u64); | |
1481 | ||
1482 | if (type & PERF_SAMPLE_PERIOD) | |
1483 | result += sizeof(u64); | |
1484 | ||
1485 | if (type & PERF_SAMPLE_READ) { | |
1486 | result += sizeof(u64); | |
1487 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | |
1488 | result += sizeof(u64); | |
1489 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | |
1490 | result += sizeof(u64); | |
1491 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | |
1492 | if (read_format & PERF_FORMAT_GROUP) { | |
1493 | sz = sample->read.group.nr * | |
1494 | sizeof(struct sample_read_value); | |
1495 | result += sz; | |
1496 | } else { | |
1497 | result += sizeof(u64); | |
1498 | } | |
1499 | } | |
1500 | ||
1501 | if (type & PERF_SAMPLE_CALLCHAIN) { | |
1502 | sz = (sample->callchain->nr + 1) * sizeof(u64); | |
1503 | result += sz; | |
1504 | } | |
1505 | ||
1506 | if (type & PERF_SAMPLE_RAW) { | |
1507 | result += sizeof(u32); | |
1508 | result += sample->raw_size; | |
1509 | } | |
1510 | ||
1511 | if (type & PERF_SAMPLE_BRANCH_STACK) { | |
1512 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | |
1513 | sz += sizeof(u64); | |
1514 | result += sz; | |
1515 | } | |
1516 | ||
1517 | if (type & PERF_SAMPLE_REGS_USER) { | |
1518 | if (sample->user_regs.abi) { | |
1519 | result += sizeof(u64); | |
1520 | sz = hweight_long(sample_regs_user) * sizeof(u64); | |
1521 | result += sz; | |
1522 | } else { | |
1523 | result += sizeof(u64); | |
1524 | } | |
1525 | } | |
1526 | ||
1527 | if (type & PERF_SAMPLE_STACK_USER) { | |
1528 | sz = sample->user_stack.size; | |
1529 | result += sizeof(u64); | |
1530 | if (sz) { | |
1531 | result += sz; | |
1532 | result += sizeof(u64); | |
1533 | } | |
1534 | } | |
1535 | ||
1536 | if (type & PERF_SAMPLE_WEIGHT) | |
1537 | result += sizeof(u64); | |
1538 | ||
1539 | if (type & PERF_SAMPLE_DATA_SRC) | |
1540 | result += sizeof(u64); | |
1541 | ||
42d88910 AH |
1542 | if (type & PERF_SAMPLE_TRANSACTION) |
1543 | result += sizeof(u64); | |
1544 | ||
b1cf6f65 AH |
1545 | return result; |
1546 | } | |
1547 | ||
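perf_event__sample_event_size() is the sizing counterpart of the synthesizer that follows: a caller computes the size, allocates a buffer of that size, and lets perf_event__synthesize_sample() fill it. A hedged in-tree sketch of that pairing (assumes the perf headers declaring these two functions, and that sample, type, sample_regs_user and read_format are already set up by the caller):

```c
size_t sz = perf_event__sample_event_size(sample, type,
					  sample_regs_user, read_format);
union perf_event *event = zalloc(sz);

if (event != NULL) {
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.size = sz;
	perf_event__synthesize_sample(event, type, sample_regs_user,
				      read_format, sample, false);
	/* ... hand the event to a processing callback, then free(event) */
}
```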
74eec26f | 1548 | int perf_event__synthesize_sample(union perf_event *event, u64 type, |
d03f2170 | 1549 | u64 sample_regs_user, u64 read_format, |
74eec26f AV |
1550 | const struct perf_sample *sample, |
1551 | bool swapped) | |
1552 | { | |
1553 | u64 *array; | |
d03f2170 | 1554 | size_t sz; |
74eec26f AV |
1555 | /* |
1556 | * used for cross-endian analysis. See git commit 65014ab3 | |
1557 | * for why this goofiness is needed. | |
1558 | */ | |
6a11f92e | 1559 | union u64_swap u; |
74eec26f AV |
1560 | |
1561 | array = event->sample.array; | |
1562 | ||
75562573 AH |
1563 | if (type & PERF_SAMPLE_IDENTIFIER) { |
1564 | *array = sample->id; | |
1565 | array++; | |
1566 | } | |
1567 | ||
74eec26f | 1568 | if (type & PERF_SAMPLE_IP) { |
ef89325f | 1569 | *array = sample->ip; |
74eec26f AV |
1570 | array++; |
1571 | } | |
1572 | ||
1573 | if (type & PERF_SAMPLE_TID) { | |
1574 | u.val32[0] = sample->pid; | |
1575 | u.val32[1] = sample->tid; | |
1576 | if (swapped) { | |
1577 | /* | |
a3f698fe | 1578 | * Inverse of what is done in perf_evsel__parse_sample |
74eec26f AV |
1579 | */ |
1580 | u.val32[0] = bswap_32(u.val32[0]); | |
1581 | u.val32[1] = bswap_32(u.val32[1]); | |
1582 | u.val64 = bswap_64(u.val64); | |
1583 | } | |
1584 | ||
1585 | *array = u.val64; | |
1586 | array++; | |
1587 | } | |
1588 | ||
1589 | if (type & PERF_SAMPLE_TIME) { | |
1590 | *array = sample->time; | |
1591 | array++; | |
1592 | } | |
1593 | ||
1594 | if (type & PERF_SAMPLE_ADDR) { | |
1595 | *array = sample->addr; | |
1596 | array++; | |
1597 | } | |
1598 | ||
1599 | if (type & PERF_SAMPLE_ID) { | |
1600 | *array = sample->id; | |
1601 | array++; | |
1602 | } | |
1603 | ||
1604 | if (type & PERF_SAMPLE_STREAM_ID) { | |
1605 | *array = sample->stream_id; | |
1606 | array++; | |
1607 | } | |
1608 | ||
1609 | if (type & PERF_SAMPLE_CPU) { | |
1610 | u.val32[0] = sample->cpu; | |
1611 | if (swapped) { | |
1612 | /* | |
a3f698fe | 1613 | * Inverse of what is done in perf_evsel__parse_sample |
74eec26f AV |
1614 | */ |
1615 | u.val32[0] = bswap_32(u.val32[0]); | |
1616 | u.val64 = bswap_64(u.val64); | |
1617 | } | |
1618 | *array = u.val64; | |
1619 | array++; | |
1620 | } | |
1621 | ||
1622 | if (type & PERF_SAMPLE_PERIOD) { | |
1623 | *array = sample->period; | |
1624 | array++; | |
1625 | } | |
1626 | ||
d03f2170 AH |
1627 | if (type & PERF_SAMPLE_READ) { |
1628 | if (read_format & PERF_FORMAT_GROUP) | |
1629 | *array = sample->read.group.nr; | |
1630 | else | |
1631 | *array = sample->read.one.value; | |
1632 | array++; | |
1633 | ||
1634 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | |
1635 | *array = sample->read.time_enabled; | |
1636 | array++; | |
1637 | } | |
1638 | ||
1639 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | |
1640 | *array = sample->read.time_running; | |
1641 | array++; | |
1642 | } | |
1643 | ||
1644 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | |
1645 | if (read_format & PERF_FORMAT_GROUP) { | |
1646 | sz = sample->read.group.nr * | |
1647 | sizeof(struct sample_read_value); | |
1648 | memcpy(array, sample->read.group.values, sz); | |
1649 | array = (void *)array + sz; | |
1650 | } else { | |
1651 | *array = sample->read.one.id; | |
1652 | array++; | |
1653 | } | |
1654 | } | |
1655 | ||
1656 | if (type & PERF_SAMPLE_CALLCHAIN) { | |
1657 | sz = (sample->callchain->nr + 1) * sizeof(u64); | |
1658 | memcpy(array, sample->callchain, sz); | |
1659 | array = (void *)array + sz; | |
1660 | } | |
1661 | ||
1662 | if (type & PERF_SAMPLE_RAW) { | |
1663 | u.val32[0] = sample->raw_size; | |
1664 | if (WARN_ONCE(swapped, | |
1665 | "Endianness of raw data not corrected!\n")) { | |
1666 | /* | |
1667 | * Inverse of what is done in perf_evsel__parse_sample | |
1668 | */ | |
1669 | u.val32[0] = bswap_32(u.val32[0]); | |
1670 | u.val32[1] = bswap_32(u.val32[1]); | |
1671 | u.val64 = bswap_64(u.val64); | |
1672 | } | |
1673 | *array = u.val64; | |
1674 | array = (void *)array + sizeof(u32); | |
1675 | ||
1676 | memcpy(array, sample->raw_data, sample->raw_size); | |
1677 | array = (void *)array + sample->raw_size; | |
1678 | } | |
1679 | ||
1680 | if (type & PERF_SAMPLE_BRANCH_STACK) { | |
1681 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | |
1682 | sz += sizeof(u64); | |
1683 | memcpy(array, sample->branch_stack, sz); | |
1684 | array = (void *)array + sz; | |
1685 | } | |
1686 | ||
1687 | if (type & PERF_SAMPLE_REGS_USER) { | |
1688 | if (sample->user_regs.abi) { | |
1689 | *array++ = sample->user_regs.abi; | |
1690 | sz = hweight_long(sample_regs_user) * sizeof(u64); | |
1691 | memcpy(array, sample->user_regs.regs, sz); | |
1692 | array = (void *)array + sz; | |
1693 | } else { | |
1694 | *array++ = 0; | |
1695 | } | |
1696 | } | |
1697 | ||
1698 | if (type & PERF_SAMPLE_STACK_USER) { | |
1699 | sz = sample->user_stack.size; | |
1700 | *array++ = sz; | |
1701 | if (sz) { | |
1702 | memcpy(array, sample->user_stack.data, sz); | |
1703 | array = (void *)array + sz; | |
1704 | *array++ = sz; | |
1705 | } | |
1706 | } | |
1707 | ||
1708 | if (type & PERF_SAMPLE_WEIGHT) { | |
1709 | *array = sample->weight; | |
1710 | array++; | |
1711 | } | |
1712 | ||
1713 | if (type & PERF_SAMPLE_DATA_SRC) { | |
1714 | *array = sample->data_src; | |
1715 | array++; | |
1716 | } | |
1717 | ||
42d88910 AH |
1718 | if (type & PERF_SAMPLE_TRANSACTION) { |
1719 | *array = sample->transaction; | |
1720 | array++; | |
1721 | } | |
1722 | ||
74eec26f AV |
1723 | return 0; |
1724 | } | |
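/*
 * Illustrative sketch of a caller (not code from this file): given a
 * parsed struct perf_sample and the evsel it belongs to, the record can
 * be rebuilt into a suitably sized buffer with something like
 *
 *	perf_event__synthesize_sample(event, evsel->attr.sample_type,
 *				      evsel->attr.sample_regs_user,
 *				      evsel->attr.read_format, sample,
 *				      false);
 *
 * where "event" already has its header filled in and was sized using the
 * calculation above, and the final "swapped" argument is set only when
 * the multi-word fields need byte-swapping, as in the cross-endian cases
 * handled in the function body.
 */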
5555ded4 | 1725 | |
efd2b924 ACM |
1726 | struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) |
1727 | { | |
1728 | return pevent_find_field(evsel->tp_format, name); | |
1729 | } | |
1730 | ||
5d2074ea | 1731 | void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, |
5555ded4 ACM |
1732 | const char *name) |
1733 | { | |
efd2b924 | 1734 | struct format_field *field = perf_evsel__field(evsel, name); |
5555ded4 ACM |
1735 | int offset; |
1736 | ||
efd2b924 ACM |
1737 | if (!field) |
1738 | return NULL; | |
5555ded4 ACM |
1739 | |
1740 | offset = field->offset; | |
1741 | ||
1742 | if (field->flags & FIELD_IS_DYNAMIC) { | |
1743 | offset = *(int *)(sample->raw_data + field->offset); | |
1744 | offset &= 0xffff; | |
1745 | } | |
1746 | ||
1747 | return sample->raw_data + offset; | |
1748 | } | |
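/*
 * Note on the FIELD_IS_DYNAMIC branch above: dynamic ("__data_loc")
 * tracepoint fields store a descriptor at field->offset rather than the
 * data itself; its low 16 bits are the real offset of the data inside
 * the raw payload (the upper bits carry the length), hence the extra
 * dereference and the 0xffff mask.
 */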
1749 | ||
1750 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | |
1751 | const char *name) | |
1752 | { | |
efd2b924 | 1753 | struct format_field *field = perf_evsel__field(evsel, name); |
e6b6f679 ACM |
1754 | void *ptr; |
1755 | u64 value; | |
5555ded4 | 1756 | |
efd2b924 ACM |
1757 | if (!field) |
1758 | return 0; | |
5555ded4 | 1759 | |
e6b6f679 | 1760 | ptr = sample->raw_data + field->offset; |
5555ded4 | 1761 | |
e6b6f679 ACM |
1762 | switch (field->size) { |
1763 | case 1: | |
1764 | return *(u8 *)ptr; | |
1765 | case 2: | |
1766 | value = *(u16 *)ptr; | |
1767 | break; | |
1768 | case 4: | |
1769 | value = *(u32 *)ptr; | |
1770 | break; | |
1771 | case 8: | |
1772 | value = *(u64 *)ptr; | |
1773 | break; | |
1774 | default: | |
1775 | return 0; | |
1776 | } | |
1777 | ||
1778 | if (!evsel->needs_swap) | |
1779 | return value; | |
1780 | ||
1781 | switch (field->size) { | |
1782 | case 2: | |
1783 | return bswap_16(value); | |
1784 | case 4: | |
1785 | return bswap_32(value); | |
1786 | case 8: | |
1787 | return bswap_64(value); | |
1788 | default: | |
1789 | return 0; | |
1790 | } | |
1791 | ||
1792 | return 0; | |
5555ded4 | 1793 | } |
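/*
 * Illustrative use (a sketch; the actual field names depend on the
 * tracepoint being handled):
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *	void *comm   = perf_evsel__rawptr(evsel, sample, "prev_comm");
 *
 * A missing field yields 0 (or NULL from perf_evsel__rawptr()); integer
 * fields are byte-swapped when evsel->needs_swap is set.
 */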
0698aedd ACM |
1794 | |
1795 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) | |
1796 | { | |
1797 | va_list args; | |
1798 | int ret = 0; | |
1799 | ||
1800 | if (!*first) { | |
1801 | ret += fprintf(fp, ","); | |
1802 | } else { | |
1803 | ret += fprintf(fp, ":"); | |
1804 | *first = false; | |
1805 | } | |
1806 | ||
1807 | va_start(args, fmt); | |
1808 | ret += vfprintf(fp, fmt, args); | |
1809 | va_end(args); | |
1810 | return ret; | |
1811 | } | |
1812 | ||
1813 | static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) | |
1814 | { | |
1815 | if (value == 0) | |
1816 | return 0; | |
1817 | ||
1818 | return comma_fprintf(fp, first, " %s: %" PRIu64, field, value); | |
1819 | } | |
1820 | ||
1821 | #define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field) | |
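/*
 * __if_fprintf() (and the if_print() wrapper) print an attr field only
 * when its value is non-zero, so the verbose dump below omits attributes
 * left at their default of zero.
 */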
1822 | ||
c79a4393 ACM |
1823 | struct bit_names { |
1824 | int bit; | |
1825 | const char *name; | |
1826 | }; | |
1827 | ||
1828 | static int bits__fprintf(FILE *fp, const char *field, u64 value, | |
1829 | struct bit_names *bits, bool *first) | |
1830 | { | |
1831 | int i = 0, printed = comma_fprintf(fp, first, " %s: ", field); | |
1832 | bool first_bit = true; | |
1833 | ||
1834 | do { | |
1835 | if (value & bits[i].bit) { | |
1836 | printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name); | |
1837 | first_bit = false; | |
1838 | } | |
1839 | } while (bits[++i].name != NULL); | |
1840 | ||
1841 | return printed; | |
1842 | } | |
1843 | ||
1844 | static int sample_type__fprintf(FILE *fp, bool *first, u64 value) | |
1845 | { | |
1846 | #define bit_name(n) { PERF_SAMPLE_##n, #n } | |
1847 | struct bit_names bits[] = { | |
1848 | bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), | |
1849 | bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), | |
1850 | bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), | |
1851 | bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), | |
75562573 | 1852 | bit_name(IDENTIFIER), |
c79a4393 ACM |
1853 | { .name = NULL, } |
1854 | }; | |
1855 | #undef bit_name | |
1856 | return bits__fprintf(fp, "sample_type", value, bits, first); | |
1857 | } | |
1858 | ||
1859 | static int read_format__fprintf(FILE *fp, bool *first, u64 value) | |
1860 | { | |
1861 | #define bit_name(n) { PERF_FORMAT_##n, #n } | |
1862 | struct bit_names bits[] = { | |
1863 | bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), | |
1864 | bit_name(ID), bit_name(GROUP), | |
1865 | { .name = NULL, } | |
1866 | }; | |
1867 | #undef bit_name | |
1868 | return bits__fprintf(fp, "read_format", value, bits, first); | |
1869 | } | |
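/*
 * Both helpers above render their bitmask symbolically, joining the set
 * bits with '|'; e.g. a sample_type of IP|TID|TIME|PERIOD would be shown
 * as "sample_type: IP|TID|TIME|PERIOD" (illustrative output, not captured
 * from a real run).
 */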
1870 | ||
0698aedd ACM |
1871 | int perf_evsel__fprintf(struct perf_evsel *evsel, |
1872 | struct perf_attr_details *details, FILE *fp) | |
1873 | { | |
1874 | bool first = true; | |
e6ab07d0 NK |
1875 | int printed = 0; |
1876 | ||
e35ef355 | 1877 | if (details->event_group) { |
e6ab07d0 NK |
1878 | struct perf_evsel *pos; |
1879 | ||
1880 | if (!perf_evsel__is_group_leader(evsel)) | |
1881 | return 0; | |
1882 | ||
1883 | if (evsel->nr_members > 1) | |
1884 | printed += fprintf(fp, "%s{", evsel->group_name ?: ""); | |
1885 | ||
1886 | printed += fprintf(fp, "%s", perf_evsel__name(evsel)); | |
1887 | for_each_group_member(pos, evsel) | |
1888 | printed += fprintf(fp, ",%s", perf_evsel__name(pos)); | |
1889 | ||
1890 | if (evsel->nr_members > 1) | |
1891 | printed += fprintf(fp, "}"); | |
1892 | goto out; | |
1893 | } | |
1894 | ||
1895 | printed += fprintf(fp, "%s", perf_evsel__name(evsel)); | |
0698aedd ACM |
1896 | |
1897 | if (details->verbose || details->freq) { | |
1898 | printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, | |
1899 | (u64)evsel->attr.sample_freq); | |
1900 | } | |
1901 | ||
1902 | if (details->verbose) { | |
1903 | if_print(type); | |
1904 | if_print(config); | |
1905 | if_print(config1); | |
1906 | if_print(config2); | |
1907 | if_print(size); | |
c79a4393 ACM |
1908 | printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type); |
1909 | if (evsel->attr.read_format) | |
1910 | printed += read_format__fprintf(fp, &first, evsel->attr.read_format); | |
0698aedd ACM |
1911 | if_print(disabled); |
1912 | if_print(inherit); | |
1913 | if_print(pinned); | |
1914 | if_print(exclusive); | |
1915 | if_print(exclude_user); | |
1916 | if_print(exclude_kernel); | |
1917 | if_print(exclude_hv); | |
1918 | if_print(exclude_idle); | |
1919 | if_print(mmap); | |
5c5e854b | 1920 | if_print(mmap2); |
0698aedd ACM |
1921 | if_print(comm); |
1922 | if_print(freq); | |
1923 | if_print(inherit_stat); | |
1924 | if_print(enable_on_exec); | |
1925 | if_print(task); | |
1926 | if_print(watermark); | |
1927 | if_print(precise_ip); | |
1928 | if_print(mmap_data); | |
1929 | if_print(sample_id_all); | |
1930 | if_print(exclude_host); | |
1931 | if_print(exclude_guest); | |
1932 | if_print(__reserved_1); | |
1933 | if_print(wakeup_events); | |
1934 | if_print(bp_type); | |
1935 | if_print(branch_sample_type); | |
1936 | } | |
e6ab07d0 | 1937 | out: |
0698aedd ACM |
1938 | fputc('\n', fp); |
1939 | return ++printed; | |
1940 | } | |
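/*
 * Sketch of how this printer is driven (an assumption about the callers,
 * which live outside this file): a command such as 'perf evlist' fills
 * struct perf_attr_details - .event_group, .freq, .verbose - from its
 * options and calls perf_evsel__fprintf() per evsel, yielding either a
 * plain event name, a "{leader,member,...}" group line, or the full
 * attribute dump from the verbose branch above.
 */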
c0a54341 ACM |
1941 | |
1942 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, | |
1943 | char *msg, size_t msgsize) | |
1944 | { | |
2b821cce | 1945 | if ((err == ENOENT || err == ENXIO || err == ENODEV) && |
c0a54341 ACM |
1946 | evsel->attr.type == PERF_TYPE_HARDWARE && |
1947 | evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { | |
1948 | /* | |
1949 | * If it's cycles then fall back to hrtimer based | |
1950 | * cpu-clock-tick sw counter, which is always available even if | |
1951 | * there is no PMU support. | |
1952 | * | |
1953 | * PPC returns ENXIO until 2.6.37 (behavior changed with commit | |
1954 | * b0a873e). | |
1955 | */ | |
1956 | scnprintf(msg, msgsize, "%s", | |
1957 | "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); | |
1958 | ||
1959 | evsel->attr.type = PERF_TYPE_SOFTWARE; | |
1960 | evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; | |
1961 | ||
04662523 | 1962 | zfree(&evsel->name); |
c0a54341 ACM |
1963 | return true; |
1964 | } | |
1965 | ||
1966 | return false; | |
1967 | } | |
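/*
 * Note: a true return above means the attr was rewritten (and msg filled
 * in), which is the caller's cue to retry the failed
 * sys_perf_event_open(); a false return means no fallback applies and
 * the original error should be reported instead.
 */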
56e52e85 | 1968 | |
602ad878 | 1969 | int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, |
56e52e85 ACM |
1970 | int err, char *msg, size_t size) |
1971 | { | |
1972 | switch (err) { | |
1973 | case EPERM: | |
1974 | case EACCES: | |
b69e63a4 | 1975 | return scnprintf(msg, size, |
56e52e85 ACM |
1976 | "You may not have permission to collect %sstats.\n" |
1977 | "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" | |
1978 | " -1 - Not paranoid at all\n" | |
1979 | " 0 - Disallow raw tracepoint access for unpriv\n" | |
1980 | " 1 - Disallow cpu events for unpriv\n" | |
1981 | " 2 - Disallow kernel profiling for unpriv", | |
1982 | target->system_wide ? "system-wide " : ""); | |
1983 | case ENOENT: | |
1984 | return scnprintf(msg, size, "The %s event is not supported.", | |
1985 | perf_evsel__name(evsel)); | |
1986 | case EMFILE: | |
1987 | return scnprintf(msg, size, "%s", | |
1988 | "Too many events are opened.\n" | |
1989 | "Try again after reducing the number of events."); | |
1990 | case ENODEV: | |
1991 | if (target->cpu_list) | |
1992 | return scnprintf(msg, size, "%s", | |
1993 | "No such device - did you specify an out-of-range profile CPU?\n"); | |
1994 | break; | |
1995 | case EOPNOTSUPP: | |
1996 | if (evsel->attr.precise_ip) | |
1997 | return scnprintf(msg, size, "%s", | |
1998 | "\'precise\' request may not be supported. Try removing 'p' modifier."); | |
1999 | #if defined(__i386__) || defined(__x86_64__) | |
2000 | if (evsel->attr.type == PERF_TYPE_HARDWARE) | |
2001 | return scnprintf(msg, size, "%s", | |
2002 | "No hardware sampling interrupt available.\n" | |
2003 | "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); | |
2004 | #endif | |
2005 | break; | |
2006 | default: | |
2007 | break; | |
2008 | } | |
2009 | ||
2010 | return scnprintf(msg, size, | |
2011 | "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n" | |
2012 | "/bin/dmesg may provide additional information.\n" | |
2013 | "No CONFIG_PERF_EVENTS=y kernel support configured?\n", | |
2014 | err, strerror(err), perf_evsel__name(evsel)); | |
2015 | } |
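/*
 * Illustrative error path combining the helpers above (a sketch, not code
 * from this file; perf_evsel__open(), the pr_*() printers, the retry
 * label and the msg buffer are assumed from the calling tool):
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *			pr_warning("%s\n", msg);
 *			goto retry;
 *		}
 *		perf_evsel__open_strerror(evsel, target, errno, msg, sizeof(msg));
 *		pr_err("%s\n", msg);
 *	}
 */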