/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

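/*
 * Accessors for the fd and sample_id xyarrays of an evsel, indexed by
 * cpu (x) and thread (y).
 */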
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

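/*
 * Propagate the record options to every event on the list; PERF_SAMPLE_ID
 * is only requested when more than one event needs demultiplexing.
 */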
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = perf_evlist__first(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

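/*
 * The first event on the list becomes the group leader: its own ->leader
 * pointer is NULL, and every other event points back at it.
 */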
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

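/*
 * Look up a tracepoint's numeric id by reading the "id" file below
 * tracing_events_path, e.g. ".../events/sched/sched_switch/id" for
 * "sched:sched_switch". Returns the id, or -1 on failure.
 */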
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0'; /* NUL-terminate before atoi() */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type = PERF_TYPE_TRACEPOINT;
		attrs[i].config = err;
		attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

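/*
 * Associate a handler with each named tracepoint already on the list;
 * names that match no event are skipped, and -EEXIST is returned if an
 * event already has a handler installed.
 */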
int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

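/*
 * Stop/resume counting on every open fd of every event via the
 * PERF_EVENT_IOC_DISABLE/ENABLE ioctls.
 */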
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

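/*
 * Sample IDs handed out by the kernel are hashed into evlist->heads so
 * that perf_evlist__id2evsel() can map the ID found in a sample back to
 * the evsel that produced it.
 */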
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

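/*
 * Fetch the next event from ring buffer 'idx'. A typical consumer loop
 * (sketch only; process_event() stands in for the caller's handler):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *		process_event(event);
 */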
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

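/*
 * One ring buffer is mmap'ed per cpu (or per thread when no cpu map is
 * in use); every other event on the same cpu/thread redirects its output
 * into that buffer with PERF_EVENT_IOC_SET_OUTPUT instead of mapping its
 * own.
 */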
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

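/*
 * Attach each evsel's filter string to all of its open fds, stopping at
 * the first event the kernel rejects.
 */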
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

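/*
 * Size in bytes of the sample_id_all block that the kernel appends to
 * non-sample events, derived from the first evsel's sample_type.
 */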
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

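/*
 * Fork the workload but keep it "corked" on a pipe: the child blocks in
 * read(go_pipe[0]) until perf_evlist__start_workload() closes the write
 * end, so all counters can be set up before the workload's first
 * instruction executes.
 */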
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}