#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

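/*
 * Open the perf.data file (or read from stdin when the filename is "-"),
 * check ownership and size, and read the file header.
 */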
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

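/*
 * Work out how many bytes of per-event id data (tid, time, id, stream_id,
 * cpu) trail each non-sample event when sample_id_all is set.
 */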
static void perf_session__id_header_size(struct perf_session *session)
{
	struct sample_data *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
	session->sample_id_all = value;
	perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
	session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
	self->sample_id_all = perf_header__sample_id_all(&self->header);
	perf_session__id_header_size(self);
}

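/*
 * Kernel maps for the host and any guest machines, needed to resolve
 * kernel addresses to symbols.
 */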
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

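/*
 * Allocate and set up a session: open the data file (O_RDONLY) or create
 * the kernel maps (O_WRONLY), then derive sample_type and the id header
 * size from the file header.
 *
 * Typical use (a simplified sketch of what the perf built-ins do):
 *
 *	struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *						    false, false, &ops);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &ops);
 *		perf_session__delete(s);
 *	}
 */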
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

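/* Teardown helpers: free dead threads and the live thread rb-tree. */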
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

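/*
 * Resolve each address in a sampled callchain to a map/symbol pair,
 * switching cpumode whenever a PERF_CONTEXT_* marker is seen, and note
 * the first symbol that matches the parent regex.
 */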
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

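/*
 * Stub handlers: any callback the tool does not provide is filled in by
 * perf_event_ops__fill_defaults() so the event loop never calls through
 * a NULL pointer.
 */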
static int process_event_synth_stub(event_t *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(event_t *event __used,
			      struct sample_data *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

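/*
 * Byte-swapping helpers, used when the perf.data file was recorded on a
 * machine of the opposite endianness (header.needs_swap).
 */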
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

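/*
 * Ordered-samples machinery: events are queued by timestamp and only
 * delivered once a finished-round event guarantees that no earlier
 * event can still show up.
 */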
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	event_t			*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct sample_data sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		event__parse_sample(iter->event, s, &sample);
		perf_session_deliver_event(s, iter->event, &sample, ops,
					   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

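/*
 * Pick a sample_queue entry (from the free cache or a bulk-allocated
 * buffer) and insert the event into the time-ordered queue.
 */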
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, event_t *event,
				    struct sample_data *data, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

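/* Debug printout helpers, only active when dump_trace is enabled. */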
static void callchain__printf(struct sample_data *sample)
{
	unsigned int i;

	printf("... chain: nr:%Lu\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       event_t *event,
				       struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%Lu ", sample->time);
}

static void dump_event(struct perf_session *session, event_t *event,
		       u64 file_offset, struct sample_data *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
	       event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
	       event__get_event_name(event->header.type));
}

static void dump_sample(struct perf_session *session, event_t *event,
			struct sample_data *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
	       sample->pid, sample->tid, sample->ip, sample->period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

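/* Hand one event to the tool's callback for its record type. */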
static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		return ops->sample(event, sample, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

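/* Validate callchains up front so broken ones are counted and skipped. */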
static int perf_session__preprocess_sample(struct perf_session *session,
					   event_t *event, struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, event_t *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
					event_t *event,
					struct perf_event_ops *ops,
					u64 file_offset)
{
	struct sample_data sample;
	int ret;

	if (session->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	event__parse_sample(event, session, &sample);

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

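/*
 * Read exactly @size bytes from @fd, looping over short reads; returns
 * <= 0 on error or end of stream.
 */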
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

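/*
 * Event loop for piped input, i.e. when the session reads a stream from
 * stdin instead of a seekable perf.data file: read one event header at a
 * time, then its payload, and process it in place.
 */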
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session_free_sample_buffers(self);
	return err;
}

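/*
 * Event loop for an on-disk perf.data file: mmap the data section in
 * mmap_window sized slices and walk the event headers.
 */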
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	event_t *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (event_t *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);

	if (ops->lost == event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to [email protected].\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to [email protected].\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	perf_session_free_sample_buffers(session);
	return err;
}

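/* Entry point: choose the pipe or the mmap based event loop. */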
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

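/*
 * Remember a reference symbol and its address in each kernel map so the
 * kernel maps can later be adjusted (relocated) against it.
 */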
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}