4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace-like, but any other
8 * event may be specified using --event.
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
16 * Released under the GPL v2. (and only v2, not any later version)
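 *
 * Typical invocations (illustrative, not an exhaustive option list):
 *
 *	perf trace ls                     # strace-like trace of a workload
 *	perf trace -e open,close -p 1234  # restrict to some syscalls of a PID
 *	perf trace --event sched:sched_switch -a sleep 1  # other events, system wide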
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
22 #include "util/color.h"
23 #include "util/debug.h"
24 #include "util/evlist.h"
25 #include <subcmd/exec-cmd.h>
26 #include "util/machine.h"
27 #include "util/session.h"
28 #include "util/thread.h"
29 #include <subcmd/parse-options.h>
30 #include "util/strlist.h"
31 #include "util/intlist.h"
32 #include "util/thread_map.h"
33 #include "util/stat.h"
34 #include "trace-event.h"
35 #include "util/parse-events.h"
36 #include "util/bpf-loader.h"
37 #include "callchain.h"
38 #include "syscalltbl.h"
39 #include "rb_resort.h"
41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
43 #include <linux/err.h>
44 #include <linux/filter.h>
45 #include <linux/audit.h>
46 #include <sys/ptrace.h>
47 #include <linux/random.h>
48 #include <linux/stringify.h>
51 # define O_CLOEXEC 02000000
55 struct perf_tool tool;
56 struct syscalltbl *sctbl;
59 struct syscall *table;
61 struct perf_evsel *sys_enter,
65 struct record_opts opts;
66 struct perf_evlist *evlist;
68 struct thread *current;
71 unsigned long nr_events;
72 struct strlist *ev_qualifier;
77 struct intlist *tid_list;
78 struct intlist *pid_list;
83 double duration_filter;
89 unsigned int max_stack;
90 unsigned int min_stack;
91 bool not_ev_qualifier;
95 bool multiple_threads;
101 bool kernel_syscallchains;
111 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
112 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
116 #define TP_UINT_FIELD(bits) \
117 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
120 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
129 #define TP_UINT_FIELD__SWAPPED(bits) \
130 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
133 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
134 return bswap_##bits(value);\
137 TP_UINT_FIELD__SWAPPED(16);
138 TP_UINT_FIELD__SWAPPED(32);
139 TP_UINT_FIELD__SWAPPED(64);
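/*
 * For reference, a sketch of what TP_UINT_FIELD(16) expands to; the
 * __SWAPPED variant differs only in returning bswap_16(value):
 *
 *	static u64 tp_field__u16(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u16 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 */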
141 static int tp_field__init_uint(struct tp_field *field,
142 struct format_field *format_field,
145 field->offset = format_field->offset;
147 switch (format_field->size) {
149 field->integer = tp_field__u8;
152 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
155 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
158 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
167 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
169 return sample->raw_data + field->offset;
172 static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
174 field->offset = format_field->offset;
175 field->pointer = tp_field__ptr;
182 struct tp_field args, ret;
186 static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
187 struct tp_field *field,
190 struct format_field *format_field = perf_evsel__field(evsel, name);
192 if (format_field == NULL)
195 return tp_field__init_uint(field, format_field, evsel->needs_swap);
198 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
199 ({ struct syscall_tp *sc = evsel->priv;\
200 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
202 static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
203 struct tp_field *field,
206 struct format_field *format_field = perf_evsel__field(evsel, name);
208 if (format_field == NULL)
211 return tp_field__init_ptr(field, format_field);
214 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
215 ({ struct syscall_tp *sc = evsel->priv;\
216 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
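/*
 * Illustrative expansion: with evsel->priv pointing to a struct syscall_tp,
 * perf_evsel__init_sc_tp_uint_field(evsel, id) becomes
 *
 *	({ struct syscall_tp *sc = evsel->priv;
 *	   perf_evsel__init_tp_uint_field(evsel, &sc->id, "id"); })
 *
 * i.e. it wires up the accessor for the tracepoint field named "id".
 */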
218 static void perf_evsel__delete_priv(struct perf_evsel *evsel)
221 perf_evsel__delete(evsel);
224 static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
226 evsel->priv = malloc(sizeof(struct syscall_tp));
227 if (evsel->priv != NULL) {
228 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
231 evsel->handler = handler;
242 static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
244 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
246 /* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
248 evsel = perf_evsel__newtp("syscalls", direction);
253 if (perf_evsel__init_syscall_tp(evsel, handler))
259 perf_evsel__delete_priv(evsel);
263 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
264 ({ struct syscall_tp *fields = evsel->priv; \
265 fields->name.integer(&fields->name, sample); })
267 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
268 ({ struct syscall_tp *fields = evsel->priv; \
269 fields->name.pointer(&fields->name, sample); })
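/*
 * These accessors are what the sys_enter/sys_exit handlers below use to pull
 * values out of a sample, e.g. perf_evsel__sc_tp_uint(evsel, id, sample) to
 * fetch the syscall number and perf_evsel__sc_tp_ptr(evsel, args, sample) to
 * get at the raw argument block.
 */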
273 struct thread *thread;
283 const char **entries;
286 #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
287 .nr_entries = ARRAY_SIZE(array), \
291 #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
293 .nr_entries = ARRAY_SIZE(array), \
297 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
299 struct syscall_arg *arg)
301 struct strarray *sa = arg->parm;
302 int idx = arg->val - sa->offset;
304 if (idx < 0 || idx >= sa->nr_entries)
305 return scnprintf(bf, size, intfmt, arg->val);
307 return scnprintf(bf, size, "%s", sa->entries[idx]);
310 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
311 struct syscall_arg *arg)
313 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
316 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
318 #if defined(__i386__) || defined(__x86_64__)
320 * FIXME: Make this available to all arches as soon as the ioctl beautifier
321 * gets rewritten to support all arches.
323 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
324 struct syscall_arg *arg)
326 return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
329 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
330 #endif /* defined(__i386__) || defined(__x86_64__) */
332 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
333 struct syscall_arg *arg);
335 #define SCA_FD syscall_arg__scnprintf_fd
337 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
338 struct syscall_arg *arg)
343 return scnprintf(bf, size, "CWD");
345 return syscall_arg__scnprintf_fd(bf, size, arg);
348 #define SCA_FDAT syscall_arg__scnprintf_fd_at
350 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
351 struct syscall_arg *arg);
353 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
355 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
356 struct syscall_arg *arg)
358 return scnprintf(bf, size, "%#lx", arg->val);
361 #define SCA_HEX syscall_arg__scnprintf_hex
363 static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
364 struct syscall_arg *arg)
366 return scnprintf(bf, size, "%d", arg->val);
369 #define SCA_INT syscall_arg__scnprintf_int
371 static const char *bpf_cmd[] = {
372 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
373 "MAP_GET_NEXT_KEY", "PROG_LOAD",
375 static DEFINE_STRARRAY(bpf_cmd);
377 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
378 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
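/*
 * Note: the offset passed to DEFINE_STRARRAY_OFFSET() is subtracted from the
 * raw argument value before indexing, so e.g. EPOLL_CTL_ADD (1) lands on
 * entry 0, "ADD", above.
 */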
380 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
381 static DEFINE_STRARRAY(itimers);
383 static const char *keyctl_options[] = {
384 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
385 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
386 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
387 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
388 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
390 static DEFINE_STRARRAY(keyctl_options);
392 static const char *whences[] = { "SET", "CUR", "END",
400 static DEFINE_STRARRAY(whences);
402 static const char *fcntl_cmds[] = {
403 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
404 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
405 "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
408 static DEFINE_STRARRAY(fcntl_cmds);
410 static const char *rlimit_resources[] = {
411 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
412 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
415 static DEFINE_STRARRAY(rlimit_resources);
417 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
418 static DEFINE_STRARRAY(sighow);
420 static const char *clockid[] = {
421 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
422 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
423 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
425 static DEFINE_STRARRAY(clockid);
427 static const char *socket_families[] = {
428 "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
429 "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
430 "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
431 "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
432 "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
433 "ALG", "NFC", "VSOCK",
435 static DEFINE_STRARRAY(socket_families);
437 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
438 struct syscall_arg *arg)
443 if (mode == F_OK) /* 0 */
444 return scnprintf(bf, size, "F");
446 if (mode & n##_OK) { \
447 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
457 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
462 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
464 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
465 struct syscall_arg *arg);
467 #define SCA_FILENAME syscall_arg__scnprintf_filename
469 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
470 struct syscall_arg *arg)
472 int printed = 0, flags = arg->val;
475 if (flags & O_##n) { \
476 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
485 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
490 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
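/*
 * As an example of the flag beautifiers, a pipe2() call made with
 * O_CLOEXEC|O_NONBLOCK would have its flags argument rendered as
 * "CLOEXEC|NONBLOCK" by the printer above (the O_ prefix is dropped
 * by the stringification).
 */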
492 #if defined(__i386__) || defined(__x86_64__)
494 * FIXME: Make this available to all arches.
496 #define TCGETS 0x5401
498 static const char *tioctls[] = {
499 "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
500 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
501 "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
502 "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
503 "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
504 "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
505 "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
506 "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
507 "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
508 "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
509 "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
510 [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
511 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
512 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
513 "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
516 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
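/* The 0x5401 offset above is TCGETS, so raw ioctl cmd values index tioctls[] directly. */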
517 #endif /* defined(__i386__) || defined(__x86_64__) */
519 #ifndef GRND_NONBLOCK
520 #define GRND_NONBLOCK 0x0001
523 #define GRND_RANDOM 0x0002
526 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
527 struct syscall_arg *arg)
529 int printed = 0, flags = arg->val;
532 if (flags & GRND_##n) { \
533 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
534 flags &= ~GRND_##n; \
542 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
547 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
549 #define STRARRAY(arg, name, array) \
550 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
551 .arg_parm = { [arg] = &strarray__##array, }
553 #include "trace/beauty/eventfd.c"
554 #include "trace/beauty/flock.c"
555 #include "trace/beauty/futex_op.c"
556 #include "trace/beauty/mmap.c"
557 #include "trace/beauty/mode_t.c"
558 #include "trace/beauty/msg_flags.c"
559 #include "trace/beauty/open_flags.c"
560 #include "trace/beauty/perf_event_open.c"
561 #include "trace/beauty/pid.c"
562 #include "trace/beauty/sched_policy.c"
563 #include "trace/beauty/seccomp.c"
564 #include "trace/beauty/signum.c"
565 #include "trace/beauty/socket_type.c"
566 #include "trace/beauty/waitid_options.c"
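/*
 * The trace/beauty/ includes above provide the remaining SCA_* argument
 * beautifiers (SCA_OPEN_FLAGS, SCA_MMAP_PROT, SCA_SIGNUM, SCA_PID, ...)
 * referenced in the syscall_fmts table below.
 */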
568 static struct syscall_fmt {
571 size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
578 { .name = "access", .errmsg = true,
579 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
580 [1] = SCA_ACCMODE, /* mode */ }, },
581 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
582 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
583 { .name = "brk", .hexret = true,
584 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
585 { .name = "chdir", .errmsg = true,
586 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
587 { .name = "chmod", .errmsg = true,
588 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
589 { .name = "chroot", .errmsg = true,
590 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
591 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
592 { .name = "clone", .errpid = true, },
593 { .name = "close", .errmsg = true,
594 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
595 { .name = "connect", .errmsg = true, },
596 { .name = "creat", .errmsg = true,
597 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
598 { .name = "dup", .errmsg = true,
599 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
600 { .name = "dup2", .errmsg = true,
601 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
602 { .name = "dup3", .errmsg = true,
603 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
604 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
605 { .name = "eventfd2", .errmsg = true,
606 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
607 { .name = "faccessat", .errmsg = true,
608 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
609 [1] = SCA_FILENAME, /* filename */ }, },
610 { .name = "fadvise64", .errmsg = true,
611 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
612 { .name = "fallocate", .errmsg = true,
613 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
614 { .name = "fchdir", .errmsg = true,
615 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
616 { .name = "fchmod", .errmsg = true,
617 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
618 { .name = "fchmodat", .errmsg = true,
619 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
620 [1] = SCA_FILENAME, /* filename */ }, },
621 { .name = "fchown", .errmsg = true,
622 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
623 { .name = "fchownat", .errmsg = true,
624 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
625 [1] = SCA_FILENAME, /* filename */ }, },
626 { .name = "fcntl", .errmsg = true,
627 .arg_scnprintf = { [0] = SCA_FD, /* fd */
628 [1] = SCA_STRARRAY, /* cmd */ },
629 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
630 { .name = "fdatasync", .errmsg = true,
631 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
632 { .name = "flock", .errmsg = true,
633 .arg_scnprintf = { [0] = SCA_FD, /* fd */
634 [1] = SCA_FLOCK, /* cmd */ }, },
635 { .name = "fsetxattr", .errmsg = true,
636 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
637 { .name = "fstat", .errmsg = true, .alias = "newfstat",
638 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
639 { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
640 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
641 [1] = SCA_FILENAME, /* filename */ }, },
642 { .name = "fstatfs", .errmsg = true,
643 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
644 { .name = "fsync", .errmsg = true,
645 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
646 { .name = "ftruncate", .errmsg = true,
647 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
648 { .name = "futex", .errmsg = true,
649 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
650 { .name = "futimesat", .errmsg = true,
651 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
652 [1] = SCA_FILENAME, /* filename */ }, },
653 { .name = "getdents", .errmsg = true,
654 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
655 { .name = "getdents64", .errmsg = true,
656 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
657 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
658 { .name = "getpid", .errpid = true, },
659 { .name = "getpgid", .errpid = true, },
660 { .name = "getppid", .errpid = true, },
661 { .name = "getrandom", .errmsg = true,
662 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
663 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
664 { .name = "getxattr", .errmsg = true,
665 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
666 { .name = "inotify_add_watch", .errmsg = true,
667 .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
668 { .name = "ioctl", .errmsg = true,
669 .arg_scnprintf = { [0] = SCA_FD, /* fd */
670 #if defined(__i386__) || defined(__x86_64__)
672 * FIXME: Make this available to all arches.
674 [1] = SCA_STRHEXARRAY, /* cmd */
675 [2] = SCA_HEX, /* arg */ },
676 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
678 [2] = SCA_HEX, /* arg */ }, },
680 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
681 { .name = "kill", .errmsg = true,
682 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
683 { .name = "lchown", .errmsg = true,
684 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
685 { .name = "lgetxattr", .errmsg = true,
686 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
687 { .name = "linkat", .errmsg = true,
688 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
689 { .name = "listxattr", .errmsg = true,
690 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
691 { .name = "llistxattr", .errmsg = true,
692 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
693 { .name = "lremovexattr", .errmsg = true,
694 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
695 { .name = "lseek", .errmsg = true,
696 .arg_scnprintf = { [0] = SCA_FD, /* fd */
697 [2] = SCA_STRARRAY, /* whence */ },
698 .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
699 { .name = "lsetxattr", .errmsg = true,
700 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
701 { .name = "lstat", .errmsg = true, .alias = "newlstat",
702 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
703 { .name = "lsxattr", .errmsg = true,
704 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
705 { .name = "madvise", .errmsg = true,
706 .arg_scnprintf = { [0] = SCA_HEX, /* start */
707 [2] = SCA_MADV_BHV, /* behavior */ }, },
708 { .name = "mkdir", .errmsg = true,
709 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
710 { .name = "mkdirat", .errmsg = true,
711 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
712 [1] = SCA_FILENAME, /* pathname */ }, },
713 { .name = "mknod", .errmsg = true,
714 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
715 { .name = "mknodat", .errmsg = true,
716 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
717 [1] = SCA_FILENAME, /* filename */ }, },
718 { .name = "mlock", .errmsg = true,
719 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
720 { .name = "mlockall", .errmsg = true,
721 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
722 { .name = "mmap", .hexret = true,
723 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
724 [2] = SCA_MMAP_PROT, /* prot */
725 [3] = SCA_MMAP_FLAGS, /* flags */
726 [4] = SCA_FD, /* fd */ }, },
727 { .name = "mprotect", .errmsg = true,
728 .arg_scnprintf = { [0] = SCA_HEX, /* start */
729 [2] = SCA_MMAP_PROT, /* prot */ }, },
730 { .name = "mq_unlink", .errmsg = true,
731 .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
732 { .name = "mremap", .hexret = true,
733 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
734 [3] = SCA_MREMAP_FLAGS, /* flags */
735 [4] = SCA_HEX, /* new_addr */ }, },
736 { .name = "munlock", .errmsg = true,
737 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
738 { .name = "munmap", .errmsg = true,
739 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
740 { .name = "name_to_handle_at", .errmsg = true,
741 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
742 { .name = "newfstatat", .errmsg = true,
743 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
744 [1] = SCA_FILENAME, /* filename */ }, },
745 { .name = "open", .errmsg = true,
746 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
747 [1] = SCA_OPEN_FLAGS, /* flags */ }, },
748 { .name = "open_by_handle_at", .errmsg = true,
749 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
750 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
751 { .name = "openat", .errmsg = true,
752 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
753 [1] = SCA_FILENAME, /* filename */
754 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
755 { .name = "perf_event_open", .errmsg = true,
756 .arg_scnprintf = { [2] = SCA_INT, /* cpu */
757 [3] = SCA_FD, /* group_fd */
758 [4] = SCA_PERF_FLAGS, /* flags */ }, },
759 { .name = "pipe2", .errmsg = true,
760 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
761 { .name = "poll", .errmsg = true, .timeout = true, },
762 { .name = "ppoll", .errmsg = true, .timeout = true, },
763 { .name = "pread", .errmsg = true, .alias = "pread64",
764 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
765 { .name = "preadv", .errmsg = true, .alias = "pread",
766 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
767 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
768 { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
769 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
770 { .name = "pwritev", .errmsg = true,
771 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
772 { .name = "read", .errmsg = true,
773 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
774 { .name = "readlink", .errmsg = true,
775 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
776 { .name = "readlinkat", .errmsg = true,
777 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
778 [1] = SCA_FILENAME, /* pathname */ }, },
779 { .name = "readv", .errmsg = true,
780 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
781 { .name = "recvfrom", .errmsg = true,
782 .arg_scnprintf = { [0] = SCA_FD, /* fd */
783 [3] = SCA_MSG_FLAGS, /* flags */ }, },
784 { .name = "recvmmsg", .errmsg = true,
785 .arg_scnprintf = { [0] = SCA_FD, /* fd */
786 [3] = SCA_MSG_FLAGS, /* flags */ }, },
787 { .name = "recvmsg", .errmsg = true,
788 .arg_scnprintf = { [0] = SCA_FD, /* fd */
789 [2] = SCA_MSG_FLAGS, /* flags */ }, },
790 { .name = "removexattr", .errmsg = true,
791 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
792 { .name = "renameat", .errmsg = true,
793 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
794 { .name = "rmdir", .errmsg = true,
795 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
796 { .name = "rt_sigaction", .errmsg = true,
797 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
798 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
799 { .name = "rt_sigqueueinfo", .errmsg = true,
800 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
801 { .name = "rt_tgsigqueueinfo", .errmsg = true,
802 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
803 { .name = "sched_setscheduler", .errmsg = true,
804 .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
805 { .name = "seccomp", .errmsg = true,
806 .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
807 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
808 { .name = "select", .errmsg = true, .timeout = true, },
809 { .name = "sendmmsg", .errmsg = true,
810 .arg_scnprintf = { [0] = SCA_FD, /* fd */
811 [3] = SCA_MSG_FLAGS, /* flags */ }, },
812 { .name = "sendmsg", .errmsg = true,
813 .arg_scnprintf = { [0] = SCA_FD, /* fd */
814 [2] = SCA_MSG_FLAGS, /* flags */ }, },
815 { .name = "sendto", .errmsg = true,
816 .arg_scnprintf = { [0] = SCA_FD, /* fd */
817 [3] = SCA_MSG_FLAGS, /* flags */ }, },
818 { .name = "set_tid_address", .errpid = true, },
819 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
820 { .name = "setpgid", .errmsg = true, },
821 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
822 { .name = "setxattr", .errmsg = true,
823 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
824 { .name = "shutdown", .errmsg = true,
825 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
826 { .name = "socket", .errmsg = true,
827 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
828 [1] = SCA_SK_TYPE, /* type */ },
829 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
830 { .name = "socketpair", .errmsg = true,
831 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
832 [1] = SCA_SK_TYPE, /* type */ },
833 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
834 { .name = "stat", .errmsg = true, .alias = "newstat",
835 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
836 { .name = "statfs", .errmsg = true,
837 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
838 { .name = "swapoff", .errmsg = true,
839 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
840 { .name = "swapon", .errmsg = true,
841 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
842 { .name = "symlinkat", .errmsg = true,
843 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
844 { .name = "tgkill", .errmsg = true,
845 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
846 { .name = "tkill", .errmsg = true,
847 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
848 { .name = "truncate", .errmsg = true,
849 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
850 { .name = "uname", .errmsg = true, .alias = "newuname", },
851 { .name = "unlinkat", .errmsg = true,
852 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
853 [1] = SCA_FILENAME, /* pathname */ }, },
854 { .name = "utime", .errmsg = true,
855 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
856 { .name = "utimensat", .errmsg = true,
857 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
858 [1] = SCA_FILENAME, /* filename */ }, },
859 { .name = "utimes", .errmsg = true,
860 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
861 { .name = "vmsplice", .errmsg = true,
862 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
863 { .name = "wait4", .errpid = true,
864 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
865 { .name = "waitid", .errpid = true,
866 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
867 { .name = "write", .errmsg = true,
868 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
869 { .name = "writev", .errmsg = true,
870 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
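/*
 * Keep syscall_fmts[] sorted by ->name: syscall_fmt__find() below relies on
 * bsearch() over this table.
 */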
873 static int syscall_fmt__cmp(const void *name, const void *fmtp)
875 const struct syscall_fmt *fmt = fmtp;
876 return strcmp(name, fmt->name);
879 static struct syscall_fmt *syscall_fmt__find(const char *name)
881 const int nmemb = ARRAY_SIZE(syscall_fmts);
882 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
886 struct event_format *tp_format;
888 struct format_field *args;
891 struct syscall_fmt *fmt;
892 size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
896 static size_t fprintf_duration(unsigned long t, FILE *fp)
898 double duration = (double)t / NSEC_PER_MSEC;
899 size_t printed = fprintf(fp, "(");
902 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
903 else if (duration >= 0.01)
904 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
906 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
907 return printed + fprintf(fp, "): ");
911 * filename.ptr: The filename char pointer that will be vfs_getname'd
912 * filename.entry_str_pos: Where to insert the string translated from
913 * filename.ptr by the vfs_getname tracepoint/kprobe.
915 struct thread_trace {
919 unsigned long nr_events;
920 unsigned long pfmaj, pfmin;
925 short int entry_str_pos;
927 unsigned int namelen;
935 struct intlist *syscall_stats;
938 static struct thread_trace *thread_trace__new(void)
940 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
943 ttrace->paths.max = -1;
945 ttrace->syscall_stats = intlist__new(NULL);
950 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
952 struct thread_trace *ttrace;
957 if (thread__priv(thread) == NULL)
958 thread__set_priv(thread, thread_trace__new());
960 if (thread__priv(thread) == NULL)
963 ttrace = thread__priv(thread);
968 color_fprintf(fp, PERF_COLOR_RED,
969 "WARNING: not enough memory, dropping samples!\n");
973 #define TRACE_PFMAJ (1 << 0)
974 #define TRACE_PFMIN (1 << 1)
976 static const size_t trace__entry_str_size = 2048;
978 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
980 struct thread_trace *ttrace = thread__priv(thread);
982 if (fd > ttrace->paths.max) {
983 char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
988 if (ttrace->paths.max != -1) {
989 memset(npath + ttrace->paths.max + 1, 0,
990 (fd - ttrace->paths.max) * sizeof(char *));
992 memset(npath, 0, (fd + 1) * sizeof(char *));
995 ttrace->paths.table = npath;
996 ttrace->paths.max = fd;
999 ttrace->paths.table[fd] = strdup(pathname);
1001 return ttrace->paths.table[fd] != NULL ? 0 : -1;
1004 static int thread__read_fd_path(struct thread *thread, int fd)
1006 char linkname[PATH_MAX], pathname[PATH_MAX];
1010 if (thread->pid_ == thread->tid) {
1011 scnprintf(linkname, sizeof(linkname),
1012 "/proc/%d/fd/%d", thread->pid_, fd);
1014 scnprintf(linkname, sizeof(linkname),
1015 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1018 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1021 ret = readlink(linkname, pathname, sizeof(pathname));
1023 if (ret < 0 || ret > st.st_size)
1026 pathname[ret] = '\0';
1027 return trace__set_fd_pathname(thread, fd, pathname);
1030 static const char *thread__fd_path(struct thread *thread, int fd,
1031 struct trace *trace)
1033 struct thread_trace *ttrace = thread__priv(thread);
1041 if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
1044 ++trace->stats.proc_getname;
1045 if (thread__read_fd_path(thread, fd))
1049 return ttrace->paths.table[fd];
1052 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
1053 struct syscall_arg *arg)
1056 size_t printed = scnprintf(bf, size, "%d", fd);
1057 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1060 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1065 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1066 struct syscall_arg *arg)
1069 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1070 struct thread_trace *ttrace = thread__priv(arg->thread);
1072 if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1073 zfree(&ttrace->paths.table[fd]);
1078 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1081 struct thread_trace *ttrace = thread__priv(thread);
1083 ttrace->filename.ptr = ptr;
1084 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1087 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1088 struct syscall_arg *arg)
1090 unsigned long ptr = arg->val;
1092 if (!arg->trace->vfs_getname)
1093 return scnprintf(bf, size, "%#x", ptr);
1095 thread__set_filename_pos(arg->thread, bf, ptr);
1099 static bool trace__filter_duration(struct trace *trace, double t)
1101 return t < (trace->duration_filter * NSEC_PER_MSEC);
1104 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1106 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1108 return fprintf(fp, "%10.3f ", ts);
1111 static bool done = false;
1112 static bool interrupted = false;
1114 static void sig_handler(int sig)
1117 interrupted = sig == SIGINT;
1120 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1121 u64 duration, u64 tstamp, FILE *fp)
1123 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1124 printed += fprintf_duration(duration, fp);
1126 if (trace->multiple_threads) {
1127 if (trace->show_comm)
1128 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1129 printed += fprintf(fp, "%d ", thread->tid);
1135 static int trace__process_event(struct trace *trace, struct machine *machine,
1136 union perf_event *event, struct perf_sample *sample)
1140 switch (event->header.type) {
1141 case PERF_RECORD_LOST:
1142 color_fprintf(trace->output, PERF_COLOR_RED,
1143 "LOST %" PRIu64 " events!\n", event->lost.lost);
1144 ret = machine__process_lost_event(machine, event, sample);
1147 ret = machine__process_event(machine, event, sample);
1154 static int trace__tool_process(struct perf_tool *tool,
1155 union perf_event *event,
1156 struct perf_sample *sample,
1157 struct machine *machine)
1159 struct trace *trace = container_of(tool, struct trace, tool);
1160 return trace__process_event(trace, machine, event, sample);
1163 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1165 int err = symbol__init(NULL);
1170 trace->host = machine__new_host();
1171 if (trace->host == NULL)
1174 if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
1177 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1178 evlist->threads, trace__tool_process, false,
1179 trace->opts.proc_map_timeout);
1186 static int syscall__set_arg_fmts(struct syscall *sc)
1188 struct format_field *field;
1191 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
1192 if (sc->arg_scnprintf == NULL)
1196 sc->arg_parm = sc->fmt->arg_parm;
1198 for (field = sc->args; field; field = field->next) {
1199 if (sc->fmt && sc->fmt->arg_scnprintf[idx])
1200 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
1201 else if (field->flags & FIELD_IS_POINTER)
1202 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
1203 else if (strcmp(field->type, "pid_t") == 0)
1204 sc->arg_scnprintf[idx] = SCA_PID;
1205 else if (strcmp(field->type, "umode_t") == 0)
1206 sc->arg_scnprintf[idx] = SCA_MODE_T;
1213 static int trace__read_syscall_info(struct trace *trace, int id)
1217 const char *name = syscalltbl__name(trace->sctbl, id);
1222 if (id > trace->syscalls.max) {
1223 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1225 if (nsyscalls == NULL)
1228 if (trace->syscalls.max != -1) {
1229 memset(nsyscalls + trace->syscalls.max + 1, 0,
1230 (id - trace->syscalls.max) * sizeof(*sc));
1232 memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
1235 trace->syscalls.table = nsyscalls;
1236 trace->syscalls.max = id;
1239 sc = trace->syscalls.table + id;
1242 sc->fmt = syscall_fmt__find(sc->name);
1244 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1245 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1247 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1248 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1249 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1252 if (IS_ERR(sc->tp_format))
1255 sc->args = sc->tp_format->format.fields;
1256 sc->nr_args = sc->tp_format->format.nr_fields;
1258 * The first field, '__syscall_nr' ('nr' on older kernels), just repeats
1259 * the syscall number, which we already have, so skip that field when it
1260 * is present.
1262 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1263 sc->args = sc->args->next;
1267 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1269 return syscall__set_arg_fmts(sc);
1272 static int trace__validate_ev_qualifier(struct trace *trace)
1275 struct str_node *pos;
1277 trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
1278 trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
1279 sizeof(trace->ev_qualifier_ids.entries[0]));
1281 if (trace->ev_qualifier_ids.entries == NULL) {
1282 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1290 strlist__for_each(pos, trace->ev_qualifier) {
1291 const char *sc = pos->s;
1292 int id = syscalltbl__id(trace->sctbl, sc);
1296 fputs("Error:\tInvalid syscall ", trace->output);
1299 fputs(", ", trace->output);
1302 fputs(sc, trace->output);
1305 trace->ev_qualifier_ids.entries[i++] = id;
1309 fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1310 "\nHint:\tand: 'man syscalls'\n", trace->output);
1311 zfree(&trace->ev_qualifier_ids.entries);
1312 trace->ev_qualifier_ids.nr = 0;
1319 * args is to be interpreted as a series of longs but we need to handle
1320 * 8-byte unaligned accesses. args points to raw_data within the event
1321 * and raw_data is guaranteed to be 8-byte unaligned because it is
1322 * preceded by raw_size which is a u32. So we need to copy args to a temp
1323 * variable to read it. Most notably this avoids extended load instructions
1324 * on unaligned addresses
1327 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1328 unsigned char *args, struct trace *trace,
1329 struct thread *thread)
1335 if (sc->args != NULL) {
1336 struct format_field *field;
1338 struct syscall_arg arg = {
1345 for (field = sc->args; field;
1346 field = field->next, ++arg.idx, bit <<= 1) {
1350 /* special care for unaligned accesses */
1351 p = args + sizeof(unsigned long) * arg.idx;
1352 memcpy(&val, p, sizeof(val));
1355 * Suppress this argument if its value is zero and we don't have a
1356 * string associated with it in a strarray.
1360 !(sc->arg_scnprintf &&
1361 sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
1362 sc->arg_parm[arg.idx]))
1365 printed += scnprintf(bf + printed, size - printed,
1366 "%s%s: ", printed ? ", " : "", field->name);
1367 if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
1370 arg.parm = sc->arg_parm[arg.idx];
1371 printed += sc->arg_scnprintf[arg.idx](bf + printed,
1372 size - printed, &arg);
1374 printed += scnprintf(bf + printed, size - printed,
1378 } else if (IS_ERR(sc->tp_format)) {
1380 * If we managed to read the tracepoint /format file, then we
1381 * may end up not having any args, like with gettid(), so only
1382 * print the raw args when we didn't manage to read it.
1387 /* special care for unaligned accesses */
1388 p = args + sizeof(unsigned long) * i;
1389 memcpy(&val, p, sizeof(val));
1390 printed += scnprintf(bf + printed, size - printed,
1392 printed ? ", " : "", i, val);
1400 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
1401 union perf_event *event,
1402 struct perf_sample *sample);
1404 static struct syscall *trace__syscall_info(struct trace *trace,
1405 struct perf_evsel *evsel, int id)
1411 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1412 * before that, leaving at a higher verbosity level till that is
1413 * explained. Reproduced with plain ftrace with:
1415 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1416 * grep "NR -1 " /t/trace_pipe
1418 * After generating some load on the machine.
1422 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1423 id, perf_evsel__name(evsel), ++n);
1428 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
1429 trace__read_syscall_info(trace, id))
1432 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
1435 return &trace->syscalls.table[id];
1439 fprintf(trace->output, "Problems reading syscall %d", id);
1440 if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
1441 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1442 fputs(" information\n", trace->output);
1447 static void thread__update_stats(struct thread_trace *ttrace,
1448 int id, struct perf_sample *sample)
1450 struct int_node *inode;
1451 struct stats *stats;
1454 inode = intlist__findnew(ttrace->syscall_stats, id);
1458 stats = inode->priv;
1459 if (stats == NULL) {
1460 stats = malloc(sizeof(struct stats));
1464 inode->priv = stats;
1467 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1468 duration = sample->time - ttrace->entry_time;
1470 update_stats(stats, duration);
1473 static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
1475 struct thread_trace *ttrace;
1479 if (trace->current == NULL)
1482 ttrace = thread__priv(trace->current);
1484 if (!ttrace->entry_pending)
1487 duration = sample->time - ttrace->entry_time;
1489 printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
1490 printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
1491 ttrace->entry_pending = false;
1496 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
1497 union perf_event *event __maybe_unused,
1498 struct perf_sample *sample)
1503 struct thread *thread;
1504 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1505 struct syscall *sc = trace__syscall_info(trace, evsel, id);
1506 struct thread_trace *ttrace;
1511 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1512 ttrace = thread__trace(thread, trace->output);
1516 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1518 if (ttrace->entry_str == NULL) {
1519 ttrace->entry_str = malloc(trace__entry_str_size);
1520 if (!ttrace->entry_str)
1524 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
1525 trace__printf_interrupted_entry(trace, sample);
1527 ttrace->entry_time = sample->time;
1528 msg = ttrace->entry_str;
1529 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
1531 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
1532 args, trace, thread);
1535 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
1536 trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
1537 fprintf(trace->output, "%-70s\n", ttrace->entry_str);
1540 ttrace->entry_pending = true;
1541 /* See trace__vfs_getname & trace__sys_exit */
1542 ttrace->filename.pending_open = false;
1545 if (trace->current != thread) {
1546 thread__put(trace->current);
1547 trace->current = thread__get(thread);
1551 thread__put(thread);
1555 static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
1556 struct perf_sample *sample,
1557 struct callchain_cursor *cursor)
1559 struct addr_location al;
1561 if (machine__resolve(trace->host, &al, sample) < 0 ||
1562 thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
1568 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
1570 /* TODO: user-configurable print_opts */
1571 const unsigned int print_opts = EVSEL__PRINT_SYM |
1573 EVSEL__PRINT_UNKNOWN_AS_ADDR;
1575 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
1578 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
1579 union perf_event *event __maybe_unused,
1580 struct perf_sample *sample)
1584 struct thread *thread;
1585 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
1586 struct syscall *sc = trace__syscall_info(trace, evsel, id);
1587 struct thread_trace *ttrace;
1592 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1593 ttrace = thread__trace(thread, trace->output);
1598 thread__update_stats(ttrace, id, sample);
1600 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
1602 if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
1603 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
1604 ttrace->filename.pending_open = false;
1605 ++trace->stats.vfs_getname;
1608 ttrace->exit_time = sample->time;
1610 if (ttrace->entry_time) {
1611 duration = sample->time - ttrace->entry_time;
1612 if (trace__filter_duration(trace, duration))
1614 } else if (trace->duration_filter)
1617 if (sample->callchain) {
1618 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1619 if (callchain_ret == 0) {
1620 if (callchain_cursor.nr < trace->min_stack)
1626 if (trace->summary_only)
1629 trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
1631 if (ttrace->entry_pending) {
1632 fprintf(trace->output, "%-70s", ttrace->entry_str);
1634 fprintf(trace->output, " ... [");
1635 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
1636 fprintf(trace->output, "]: %s()", sc->name);
1639 if (sc->fmt == NULL) {
1641 fprintf(trace->output, ") = %ld", ret);
1642 } else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
1643 char bf[STRERR_BUFSIZE];
1644 const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
1645 *e = audit_errno_to_name(-ret);
1647 fprintf(trace->output, ") = -1 %s %s", e, emsg);
1648 } else if (ret == 0 && sc->fmt->timeout)
1649 fprintf(trace->output, ") = 0 Timeout");
1650 else if (sc->fmt->hexret)
1651 fprintf(trace->output, ") = %#lx", ret);
1652 else if (sc->fmt->errpid) {
1653 struct thread *child = machine__find_thread(trace->host, ret, ret);
1655 if (child != NULL) {
1656 fprintf(trace->output, ") = %ld", ret);
1657 if (child->comm_set)
1658 fprintf(trace->output, " (%s)", thread__comm_str(child));
1664 fputc('\n', trace->output);
1666 if (callchain_ret > 0)
1667 trace__fprintf_callchain(trace, sample);
1668 else if (callchain_ret < 0)
1669 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1671 ttrace->entry_pending = false;
1674 thread__put(thread);
1678 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
1679 union perf_event *event __maybe_unused,
1680 struct perf_sample *sample)
1682 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1683 struct thread_trace *ttrace;
1684 size_t filename_len, entry_str_len, to_move;
1685 ssize_t remaining_space;
1687 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
1692 ttrace = thread__priv(thread);
1696 filename_len = strlen(filename);
1698 if (ttrace->filename.namelen < filename_len) {
1699 char *f = realloc(ttrace->filename.name, filename_len + 1);
1704 ttrace->filename.namelen = filename_len;
1705 ttrace->filename.name = f;
1708 strcpy(ttrace->filename.name, filename);
1709 ttrace->filename.pending_open = true;
1711 if (!ttrace->filename.ptr)
1714 entry_str_len = strlen(ttrace->entry_str);
1715 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
1716 if (remaining_space <= 0)
1719 if (filename_len > (size_t)remaining_space) {
1720 filename += filename_len - remaining_space;
1721 filename_len = remaining_space;
1724 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
1725 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
1726 memmove(pos + filename_len, pos, to_move);
1727 memcpy(pos, filename, filename_len);
1729 ttrace->filename.ptr = 0;
1730 ttrace->filename.entry_str_pos = 0;
1735 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
1736 union perf_event *event __maybe_unused,
1737 struct perf_sample *sample)
1739 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1740 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
1741 struct thread *thread = machine__findnew_thread(trace->host,
1744 struct thread_trace *ttrace = thread__trace(thread, trace->output);
1749 ttrace->runtime_ms += runtime_ms;
1750 trace->runtime_ms += runtime_ms;
1751 thread__put(thread);
1755 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
1757 perf_evsel__strval(evsel, sample, "comm"),
1758 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
1760 perf_evsel__intval(evsel, sample, "vruntime"));
1761 thread__put(thread);
1765 static void bpf_output__printer(enum binary_printer_ops op,
1766 unsigned int val, void *extra)
1768 FILE *output = extra;
1769 unsigned char ch = (unsigned char)val;
1772 case BINARY_PRINT_CHAR_DATA:
1773 fprintf(output, "%c", isprint(ch) ? ch : '.');
1775 case BINARY_PRINT_DATA_BEGIN:
1776 case BINARY_PRINT_LINE_BEGIN:
1777 case BINARY_PRINT_ADDR:
1778 case BINARY_PRINT_NUM_DATA:
1779 case BINARY_PRINT_NUM_PAD:
1780 case BINARY_PRINT_SEP:
1781 case BINARY_PRINT_CHAR_PAD:
1782 case BINARY_PRINT_LINE_END:
1783 case BINARY_PRINT_DATA_END:
1789 static void bpf_output__fprintf(struct trace *trace,
1790 struct perf_sample *sample)
1792 print_binary(sample->raw_data, sample->raw_size, 8,
1793 bpf_output__printer, trace->output);
1796 static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
1797 union perf_event *event __maybe_unused,
1798 struct perf_sample *sample)
1800 int callchain_ret = 0;
1802 if (sample->callchain) {
1803 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1804 if (callchain_ret == 0) {
1805 if (callchain_cursor.nr < trace->min_stack)
1811 trace__printf_interrupted_entry(trace, sample);
1812 trace__fprintf_tstamp(trace, sample->time, trace->output);
1814 if (trace->trace_syscalls)
1815 fprintf(trace->output, "( ): ");
1817 fprintf(trace->output, "%s:", evsel->name);
1819 if (perf_evsel__is_bpf_output(evsel)) {
1820 bpf_output__fprintf(trace, sample);
1821 } else if (evsel->tp_format) {
1822 event_format__fprintf(evsel->tp_format, sample->cpu,
1823 sample->raw_data, sample->raw_size,
1827 fprintf(trace->output, ")\n");
1829 if (callchain_ret > 0)
1830 trace__fprintf_callchain(trace, sample);
1831 else if (callchain_ret < 0)
1832 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1837 static void print_location(FILE *f, struct perf_sample *sample,
1838 struct addr_location *al,
1839 bool print_dso, bool print_sym)
1842 if ((verbose || print_dso) && al->map)
1843 fprintf(f, "%s@", al->map->dso->long_name);
1845 if ((verbose || print_sym) && al->sym)
1846 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
1847 al->addr - al->sym->start);
1849 fprintf(f, "0x%" PRIx64, al->addr);
1851 fprintf(f, "0x%" PRIx64, sample->addr);
1854 static int trace__pgfault(struct trace *trace,
1855 struct perf_evsel *evsel,
1856 union perf_event *event __maybe_unused,
1857 struct perf_sample *sample)
1859 struct thread *thread;
1860 struct addr_location al;
1861 char map_type = 'd';
1862 struct thread_trace *ttrace;
1864 int callchain_ret = 0;
1866 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1868 if (sample->callchain) {
1869 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1870 if (callchain_ret == 0) {
1871 if (callchain_cursor.nr < trace->min_stack)
1877 ttrace = thread__trace(thread, trace->output);
1881 if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
1886 if (trace->summary_only)
1889 thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
1892 trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
1894 fprintf(trace->output, "%sfault [",
1895 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
1898 print_location(trace->output, sample, &al, false, true);
1900 fprintf(trace->output, "] => ");
1902 thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
1906 thread__find_addr_location(thread, sample->cpumode,
1907 MAP__FUNCTION, sample->addr, &al);
1915 print_location(trace->output, sample, &al, true, false);
1917 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
1919 if (callchain_ret > 0)
1920 trace__fprintf_callchain(trace, sample);
1921 else if (callchain_ret < 0)
1922 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1926 thread__put(thread);
1930 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
1932 if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
1933 (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
1936 if (trace->pid_list || trace->tid_list)
1942 static void trace__set_base_time(struct trace *trace,
1943 struct perf_evsel *evsel,
1944 struct perf_sample *sample)
1947 * BPF events were not setting PERF_SAMPLE_TIME, so be robust and don't
1948 * use sample->time unconditionally: we may end up with some other event
1949 * in the future that lacks PERF_SAMPLE_TIME for a good reason, i.e. we
1950 * may not be interested in its timestamps, just in the fact that it took
1951 * place, picking up some piece of information when it appears in our
1952 * event stream (vfs_getname comes to mind).
1954 if (trace->base_time == 0 && !trace->full_time &&
1955 (evsel->attr.sample_type & PERF_SAMPLE_TIME))
1956 trace->base_time = sample->time;
1959 static int trace__process_sample(struct perf_tool *tool,
1960 union perf_event *event,
1961 struct perf_sample *sample,
1962 struct perf_evsel *evsel,
1963 struct machine *machine __maybe_unused)
1965 struct trace *trace = container_of(tool, struct trace, tool);
1968 tracepoint_handler handler = evsel->handler;
1970 if (skip_sample(trace, sample))
1973 trace__set_base_time(trace, evsel, sample);
1977 handler(trace, evsel, event, sample);
1983 static int parse_target_str(struct trace *trace)
1985 if (trace->opts.target.pid) {
1986 trace->pid_list = intlist__new(trace->opts.target.pid);
1987 if (trace->pid_list == NULL) {
1988 pr_err("Error parsing process id string\n");
1993 if (trace->opts.target.tid) {
1994 trace->tid_list = intlist__new(trace->opts.target.tid);
1995 if (trace->tid_list == NULL) {
1996 pr_err("Error parsing thread id string\n");
2004 static int trace__record(struct trace *trace, int argc, const char **argv)
2006 unsigned int rec_argc, i, j;
2007 const char **rec_argv;
2008 const char * const record_args[] = {
2015 const char * const sc_args[] = { "-e", };
2016 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2017 const char * const majpf_args[] = { "-e", "major-faults" };
2018 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2019 const char * const minpf_args[] = { "-e", "minor-faults" };
2020 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2022 /* +1 is for the event string below */
2023 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2024 majpf_args_nr + minpf_args_nr + argc;
2025 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2027 if (rec_argv == NULL)
2031 for (i = 0; i < ARRAY_SIZE(record_args); i++)
2032 rec_argv[j++] = record_args[i];
2034 if (trace->trace_syscalls) {
2035 for (i = 0; i < sc_args_nr; i++)
2036 rec_argv[j++] = sc_args[i];
2038 /* event string may be different for older kernels - e.g., RHEL6 */
2039 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2040 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2041 else if (is_valid_tracepoint("syscalls:sys_enter"))
2042 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2044 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2049 if (trace->trace_pgfaults & TRACE_PFMAJ)
2050 for (i = 0; i < majpf_args_nr; i++)
2051 rec_argv[j++] = majpf_args[i];
2053 if (trace->trace_pgfaults & TRACE_PFMIN)
2054 for (i = 0; i < minpf_args_nr; i++)
2055 rec_argv[j++] = minpf_args[i];
2057 for (i = 0; i < (unsigned int)argc; i++)
2058 rec_argv[j++] = argv[i];
2060 return cmd_record(j, rec_argv, NULL);
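/*
 * A rough sketch of the command line built above, assuming both syscall and
 * page-fault tracing are enabled (the base record arguments are elided here):
 *
 *	perf record <record_args...> \
 *		-e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *		-e major-faults -e minor-faults <workload args>
 */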
2063 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
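/*
 * Only succeeds if a "probe:vfs_getname" probe exposing a "pathname" field
 * has been set up beforehand (e.g. via 'perf probe'); without it, filename
 * pointer arguments fall back to being printed as raw hex.
 */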
2065 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2067 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2072 if (perf_evsel__field(evsel, "pathname") == NULL) {
2073 perf_evsel__delete(evsel);
2077 evsel->handler = trace__vfs_getname;
2078 perf_evlist__add(evlist, evsel);
2082 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2084 struct perf_evsel *evsel;
2085 struct perf_event_attr attr = {
2086 .type = PERF_TYPE_SOFTWARE,
2090 attr.config = config;
2091 attr.sample_period = 1;
2093 event_attr_init(&attr);
2095 evsel = perf_evsel__new(&attr);
2097 evsel->handler = trace__pgfault;
2102 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2104 const u32 type = event->header.type;
2105 struct perf_evsel *evsel;
2107 if (type != PERF_RECORD_SAMPLE) {
2108 trace__process_event(trace, trace->host, event, sample);
2112 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2113 if (evsel == NULL) {
2114 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2118 trace__set_base_time(trace, evsel, sample);
2120 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2121 sample->raw_data == NULL) {
2122 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2123 perf_evsel__name(evsel), sample->tid,
2124 sample->cpu, sample->raw_size);
2126 tracepoint_handler handler = evsel->handler;
2127 handler(trace, evsel, event, sample);
2131 static int trace__add_syscall_newtp(struct trace *trace)
2134 struct perf_evlist *evlist = trace->evlist;
2135 struct perf_evsel *sys_enter, *sys_exit;
2137 sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
2138 if (sys_enter == NULL)
2141 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2142 goto out_delete_sys_enter;
2144 sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
2145 if (sys_exit == NULL)
2146 goto out_delete_sys_enter;
2148 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2149 goto out_delete_sys_exit;
2151 perf_evlist__add(evlist, sys_enter);
2152 perf_evlist__add(evlist, sys_exit);
2154 if (callchain_param.enabled && !trace->kernel_syscallchains) {
2156 * We're interested only in the user space callchain
2157 * leading to the syscall, allow overriding that for
2158 * debugging reasons using --kernel-syscall-graph
2160 sys_exit->attr.exclude_callchain_kernel = 1;
2163 trace->syscalls.events.sys_enter = sys_enter;
2164 trace->syscalls.events.sys_exit = sys_exit;
2170 out_delete_sys_exit:
2171 perf_evsel__delete_priv(sys_exit);
2172 out_delete_sys_enter:
2173 perf_evsel__delete_priv(sys_enter);
2177 static int trace__set_ev_qualifier_filter(struct trace *trace)
2180 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2181 trace->ev_qualifier_ids.nr,
2182 trace->ev_qualifier_ids.entries);
2187 if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
2188 err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);
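/*
 * For reference, the expression built by asprintf_expr_inout_ints() has
 * roughly the shape "id == 2 || id == 3" for a plain qualifier and
 * "id != 2 && id != 3" for a negated one ("!..."); the same string is
 * appended to the filters of both the sys_enter and sys_exit tracepoints.
 */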
2198 static int trace__run(struct trace *trace, int argc, const char **argv)
2200 struct perf_evlist *evlist = trace->evlist;
2201 struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
2203 unsigned long before;
2204 const bool forks = argc > 0;
2205 bool draining = false;
2209 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
2210 goto out_error_raw_syscalls;
2212 if (trace->trace_syscalls)
2213 trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
2215 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
2216 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
2217 if (pgfault_maj == NULL)
2219 perf_evlist__add(evlist, pgfault_maj);
2222 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
2223 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
2224 if (pgfault_min == NULL)
2226 perf_evlist__add(evlist, pgfault_min);
2230 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2231 trace__sched_stat_runtime))
2232 goto out_error_sched_stat_runtime;
2234 err = perf_evlist__create_maps(evlist, &trace->opts.target);
2236 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
2237 goto out_delete_evlist;
2240 err = trace__symbols_init(trace, evlist);
2242 fprintf(trace->output, "Problems initializing symbol libraries!\n");
2243 goto out_delete_evlist;
2246 perf_evlist__config(evlist, &trace->opts, NULL);
2248 if (callchain_param.enabled) {
2249 bool use_identifier = false;
2251 if (trace->syscalls.events.sys_exit) {
2252 perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
2253 &trace->opts, &callchain_param);
2254 use_identifier = true;
2258 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
2259 use_identifier = true;
2263 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
2264 use_identifier = true;
2267 if (use_identifier) {
2269 * Now we have evsels with different sample_ids, use
2270 * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
2271 * from a fixed position in each ring buffer record.
2273 * As of the changeset introducing this comment, this
2274 * isn't strictly needed, as the fields that can come before
2275 * PERF_SAMPLE_ID are all used, but we'll probably disable
2276 * some of those for things like copying the payload of
2277 * pointer syscall arguments, and for vfs_getname we don't
2278 * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
2279 * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
2281 perf_evlist__set_sample_bit(evlist, IDENTIFIER);
2282 perf_evlist__reset_sample_bit(evlist, ID);
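/*
 * With PERF_SAMPLE_IDENTIFIER the id sits at a fixed position in every
 * record, so perf_evlist__id2evsel() can map a sample back to its evsel
 * without knowing which other PERF_SAMPLE_* bits that evsel requested,
 * which is not guaranteed with plain PERF_SAMPLE_ID.
 */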
2286 signal(SIGCHLD, sig_handler);
2287 signal(SIGINT, sig_handler);
2290 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
2293 fprintf(trace->output, "Couldn't run the workload!\n");
2294 goto out_delete_evlist;
2298 err = perf_evlist__open(evlist);
2300 goto out_error_open;
2302 err = bpf__apply_obj_config();
2304 char errbuf[BUFSIZ];
2306 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2307 pr_err("ERROR: Apply config to BPF failed: %s\n",
2309 goto out_error_open;
2313 * Better not use !target__has_task() here because we need to cover the
2314 * case where no threads were specified in the command line, but a
2315 * workload was, and in that case we will fill in the thread_map when
2316 * we fork the workload in perf_evlist__prepare_workload.
2318 if (trace->filter_pids.nr > 0)
2319 err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
2320 else if (thread_map__pid(evlist->threads, 0) == -1)
2321 err = perf_evlist__set_filter_pid(evlist, getpid());
2326 if (trace->ev_qualifier_ids.nr > 0) {
2327 err = trace__set_ev_qualifier_filter(trace);
2331 pr_debug("event qualifier tracepoint filter: %s\n",
2332 trace->syscalls.events.sys_exit->filter);
2335 err = perf_evlist__apply_filters(evlist, &evsel);
2337 goto out_error_apply_filters;
2339 err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
2341 goto out_error_mmap;
2343 if (!target__none(&trace->opts.target))
2344 perf_evlist__enable(evlist);
2347 perf_evlist__start_workload(evlist);
2349 trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
2350 evlist->threads->nr > 1 ||
2351 perf_evlist__first(evlist)->attr.inherit;
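/*
 * multiple_threads is what gates printing the comm/tid prefix on each event
 * line; trace__replay() sets it unconditionally for the same reason, see the
 * "add tid to output" comment there.
 */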
2353 before = trace->nr_events;
2355 for (i = 0; i < evlist->nr_mmaps; i++) {
2356 union perf_event *event;
2358 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
2359 struct perf_sample sample;
2363 err = perf_evlist__parse_sample(evlist, event, &sample);
2365 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2369 trace__handle_event(trace, event, &sample);
2371 perf_evlist__mmap_consume(evlist, i);
2376 if (done && !draining) {
2377 perf_evlist__disable(evlist);
2383 if (trace->nr_events == before) {
2384 int timeout = done ? 100 : -1;
2386 if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
2387 if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
2397 thread__zput(trace->current);
2399 perf_evlist__disable(evlist);
2403 trace__fprintf_thread_summary(trace, trace->output);
2405 if (trace->show_tool_stats) {
2406 fprintf(trace->output, "Stats:\n "
2407 " vfs_getname : %" PRIu64 "\n"
2408 " proc_getname: %" PRIu64 "\n",
2409 trace->stats.vfs_getname,
2410 trace->stats.proc_getname);
2415 perf_evlist__delete(evlist);
2416 trace->evlist = NULL;
2417 trace->live = false;
2420 char errbuf[BUFSIZ];
2422 out_error_sched_stat_runtime:
2423 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2426 out_error_raw_syscalls:
2427 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2431 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
2435 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
2438 fprintf(trace->output, "%s\n", errbuf);
2439 goto out_delete_evlist;
2441 out_error_apply_filters:
2442 fprintf(trace->output,
2443 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
2444 evsel->filter, perf_evsel__name(evsel), errno,
2445 strerror_r(errno, errbuf, sizeof(errbuf)));
2446 goto out_delete_evlist;
2449 fprintf(trace->output, "Not enough memory to run!\n");
2450 goto out_delete_evlist;
2453 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
2454 goto out_delete_evlist;
2457 static int trace__replay(struct trace *trace)
2459 const struct perf_evsel_str_handler handlers[] = {
2460 { "probe:vfs_getname", trace__vfs_getname, },
2462 struct perf_data_file file = {
2464 .mode = PERF_DATA_MODE_READ,
2465 .force = trace->force,
2467 struct perf_session *session;
2468 struct perf_evsel *evsel;
2471 trace->tool.sample = trace__process_sample;
2472 trace->tool.mmap = perf_event__process_mmap;
2473 trace->tool.mmap2 = perf_event__process_mmap2;
2474 trace->tool.comm = perf_event__process_comm;
2475 trace->tool.exit = perf_event__process_exit;
2476 trace->tool.fork = perf_event__process_fork;
2477 trace->tool.attr = perf_event__process_attr;
2478 trace->tool.tracing_data = perf_event__process_tracing_data;
2479 trace->tool.build_id = perf_event__process_build_id;
2481 trace->tool.ordered_events = true;
2482 trace->tool.ordering_requires_timestamps = true;
2484 /* add tid to output */
2485 trace->multiple_threads = true;
2487 session = perf_session__new(&file, false, &trace->tool);
2488 if (session == NULL)
2491 if (symbol__init(&session->header.env) < 0)
2494 trace->host = &session->machines.host;
2496 err = perf_session__set_tracepoints_handlers(session, handlers);
2500 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2501 "raw_syscalls:sys_enter");
2502 /* older kernels have the syscalls:* tracepoints instead of raw_syscalls:* */
2504 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2505 "syscalls:sys_enter");
2508 (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
2509 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2510 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
2514 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2515 "raw_syscalls:sys_exit");
2517 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2518 "syscalls:sys_exit");
2520 (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
2521 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2522 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
2526 evlist__for_each(session->evlist, evsel) {
2527 if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
2528 (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
2529 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
2530 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
2531 evsel->handler = trace__pgfault;
2534 err = parse_target_str(trace);
2540 err = perf_session__process_events(session);
2542 pr_err("Failed to process events, error %d", err);
2544 else if (trace->summary)
2545 trace__fprintf_thread_summary(trace, trace->output);
2548 perf_session__delete(session);
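/*
 * The replay path is taken when an input file is given instead of running a
 * live workload, e.g.:
 *
 *	perf trace record -- sleep 1
 *	perf trace -i perf.data -s
 */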
2553 static size_t trace__fprintf_threads_header(FILE *fp)
2557 printed = fprintf(fp, "\n Summary of events:\n\n");
2562 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
2563 struct stats *stats;
2568 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
2569 struct stats *stats = source->priv;
2571 entry->syscall = source->i;
2572 entry->stats = stats;
2573 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
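/*
 * The resort key is the total time spent in each syscall:
 * msecs = n * (avg(ns) / NSEC_PER_MSEC), i.e. number of calls times the
 * average duration converted to milliseconds, so thread__dump_stats() below
 * walks the syscalls ordered by that total rather than by syscall id.
 */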
2576 static size_t thread__dump_stats(struct thread_trace *ttrace,
2577 struct trace *trace, FILE *fp)
2582 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
2584 if (syscall_stats == NULL)
2587 printed += fprintf(fp, "\n");
2589 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
2590 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
2591 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
2593 resort_rb__for_each(nd, syscall_stats) {
2594 struct stats *stats = syscall_stats_entry->stats;
2596 double min = (double)(stats->min) / NSEC_PER_MSEC;
2597 double max = (double)(stats->max) / NSEC_PER_MSEC;
2598 double avg = avg_stats(stats);
2600 u64 n = (u64) stats->n;
2602 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
2603 avg /= NSEC_PER_MSEC;
2605 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
2606 printed += fprintf(fp, " %-15s", sc->name);
2607 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
2608 n, syscall_stats_entry->msecs, min, avg);
2609 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
2613 resort_rb__delete(syscall_stats);
2614 printed += fprintf(fp, "\n\n");
2619 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2622 struct thread_trace *ttrace = thread__priv(thread);
2628 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2630 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2631 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2632 printed += fprintf(fp, "%.1f%%", ratio);
2634 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2636 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2638 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2639 else if (fputc('\n', fp) != EOF)
2642 printed += thread__dump_stats(ttrace, trace, fp);
2647 static unsigned long thread__nr_events(struct thread_trace *ttrace)
2649 return ttrace ? ttrace->nr_events : 0;
2652 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
2653 struct thread *thread;
2656 entry->thread = rb_entry(nd, struct thread, rb_node);
2659 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2661 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
2662 size_t printed = trace__fprintf_threads_header(fp);
2665 if (threads == NULL) {
2666 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2670 resort_rb__for_each(nd, threads)
2671 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2673 resort_rb__delete(threads);
2678 static int trace__set_duration(const struct option *opt, const char *str,
2679 int unset __maybe_unused)
2681 struct trace *trace = opt->value;
2683 trace->duration_filter = atof(str);
2687 static int trace__set_filter_pids(const struct option *opt, const char *str,
2688 int unset __maybe_unused)
2692 struct trace *trace = opt->value;
2694 * FIXME: introduce an intarray class, plainly parse the CSV and create a
2695 * { int nr, int entries[] } struct...
2697 struct intlist *list = intlist__new(str);
2702 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2703 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2705 if (trace->filter_pids.entries == NULL)
2708 trace->filter_pids.entries[0] = getpid();
2710 for (i = 1; i < trace->filter_pids.nr; ++i)
2711 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2713 intlist__delete(list);
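/*
 * Hedged sketch of the intarray idea in the FIXME above; nothing in this
 * file implements it, the name and layout are assumptions:
 */
struct intarray {
	int	nr;		/* number of valid entries */
	pid_t	entries[];	/* entries[0] is reserved for getpid() by the caller above */
};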
2719 static int trace__open_output(struct trace *trace, const char *filename)
2723 if (!stat(filename, &st) && st.st_size) {
2724 char oldname[PATH_MAX];
2726 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
2728 rename(filename, oldname);
2731 trace->output = fopen(filename, "w");
2733 return trace->output == NULL ? -errno : 0;
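/*
 * trace__open_output() above preserves an existing, non-empty -o/--output
 * file by renaming it to "<name>.old" before opening the new one, e.g. an
 * existing trace.txt becomes trace.txt.old.
 */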
2736 static int parse_pagefaults(const struct option *opt, const char *str,
2737 int unset __maybe_unused)
2739 int *trace_pgfaults = opt->value;
2741 if (strcmp(str, "all") == 0)
2742 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2743 else if (strcmp(str, "maj") == 0)
2744 *trace_pgfaults |= TRACE_PFMAJ;
2745 else if (strcmp(str, "min") == 0)
2746 *trace_pgfaults |= TRACE_PFMIN;
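/*
 * parse_pagefaults() above handles the argument to -F/--pf: "all" selects
 * both fault kinds, while "maj"/"min" OR in just one of them, e.g.:
 *
 *	perf trace -F maj -- find /      # only major faults
 *	perf trace --pf=all -- make      # major and minor faults
 */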
2753 static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
2755 struct perf_evsel *evsel;
2757 evlist__for_each(evlist, evsel)
2758 evsel->handler = handler;
2761 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
2763 const char *trace_usage[] = {
2764 "perf trace [<options>] [<command>]",
2765 "perf trace [<options>] -- <command> [<options>]",
2766 "perf trace record [<options>] [<command>]",
2767 "perf trace record [<options>] -- <command> [<options>]",
2770 struct trace trace = {
2779 .user_freq = UINT_MAX,
2780 .user_interval = ULLONG_MAX,
2781 .no_buffering = true,
2782 .mmap_pages = UINT_MAX,
2783 .proc_map_timeout = 500,
2787 .trace_syscalls = true,
2788 .kernel_syscallchains = false,
2789 .max_stack = UINT_MAX,
2791 const char *output_name = NULL;
2792 const char *ev_qualifier_str = NULL;
2793 const struct option trace_options[] = {
2794 OPT_CALLBACK(0, "event", &trace.evlist, "event",
2795 "event selector. use 'perf list' to list available events",
2796 parse_events_option),
2797 OPT_BOOLEAN(0, "comm", &trace.show_comm,
2798 "show the thread COMM next to its id"),
2799 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
2800 OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
2801 OPT_STRING('o', "output", &output_name, "file", "output file name"),
2802 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
2803 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
2804 "trace events on existing process id"),
2805 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
2806 "trace events on existing thread id"),
2807 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
2808 "pids to filter (by the kernel)", trace__set_filter_pids),
2809 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
2810 "system-wide collection from all CPUs"),
2811 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
2812 "list of cpus to monitor"),
2813 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
2814 "child tasks do not inherit counters"),
2815 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
2816 "number of mmap data pages",
2817 perf_evlist__parse_mmap_pages),
2818 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
2820 OPT_CALLBACK(0, "duration", &trace, "float",
2821 "show only events with duration > N.M ms",
2822 trace__set_duration),
2823 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
2824 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
2825 OPT_BOOLEAN('T', "time", &trace.full_time,
2826 "Show full timestamp, not time relative to first start"),
2827 OPT_BOOLEAN('s', "summary", &trace.summary_only,
2828 "Show only syscall summary with statistics"),
2829 OPT_BOOLEAN('S', "with-summary", &trace.summary,
2830 "Show all syscalls and summary with statistics"),
2831 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
2832 "Trace pagefaults", parse_pagefaults, "maj"),
2833 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
2834 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
2835 OPT_CALLBACK(0, "call-graph", &trace.opts,
2836 "record_mode[,record_size]", record_callchain_help,
2837 &record_parse_callchain_opt),
2838 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
2839 "Show the kernel callchains on the syscall exit path"),
2840 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
2841 "Set the minimum stack depth when parsing the callchain, "
2842 "anything below the specified depth will be ignored."),
2843 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
2844 "Set the maximum stack depth when parsing the callchain, "
2845 "anything beyond the specified depth will be ignored. "
2846 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
2847 OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
2848 "per thread proc mmap processing timeout in ms"),
2851 bool __maybe_unused max_stack_user_set = true;
2852 bool mmap_pages_user_set = true;
2853 const char * const trace_subcommands[] = { "record", NULL };
2857 signal(SIGSEGV, sighandler_dump_stack);
2858 signal(SIGFPE, sighandler_dump_stack);
2860 trace.evlist = perf_evlist__new();
2861 trace.sctbl = syscalltbl__new();
2863 if (trace.evlist == NULL || trace.sctbl == NULL) {
2864 pr_err("Not enough memory to run!\n");
2869 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
2870 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2872 err = bpf__setup_stdout(trace.evlist);
2874 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
2875 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
2881 if (trace.trace_pgfaults) {
2882 trace.opts.sample_address = true;
2883 trace.opts.sample_time = true;
2886 if (trace.opts.mmap_pages == UINT_MAX)
2887 mmap_pages_user_set = false;
2889 if (trace.max_stack == UINT_MAX) {
2890 trace.max_stack = sysctl_perf_event_max_stack;
2891 max_stack_user_set = false;
2894 #ifdef HAVE_DWARF_UNWIND_SUPPORT
2895 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
2896 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
2899 if (callchain_param.enabled) {
2900 if (!mmap_pages_user_set && geteuid() == 0)
2901 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
2903 symbol_conf.use_callchain = true;
2906 if (trace.evlist->nr_entries > 0)
2907 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
2909 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
2910 return trace__record(&trace, argc-1, &argv[1]);
2912 /* summary_only implies summary option, but don't overwrite summary if set */
2913 if (trace.summary_only)
2914 trace.summary = trace.summary_only;
2916 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
2917 trace.evlist->nr_entries == 0 /* Was --events used? */) {
2918 pr_err("Please specify something to trace.\n");
2922 if (!trace.trace_syscalls && ev_qualifier_str) {
2923 pr_err("The -e option can't be used with --no-syscalls.\n");
2927 if (output_name != NULL) {
2928 err = trace__open_output(&trace, output_name);
2930 perror("failed to create output file");
2935 trace.open_id = syscalltbl__id(trace.sctbl, "open");
2937 if (ev_qualifier_str != NULL) {
2938 const char *s = ev_qualifier_str;
2939 struct strlist_config slist_config = {
2940 .dirname = system_path(STRACE_GROUPS_DIR),
2943 trace.not_ev_qualifier = *s == '!';
2944 if (trace.not_ev_qualifier)
2946 trace.ev_qualifier = strlist__new(s, &slist_config);
2947 if (trace.ev_qualifier == NULL) {
2948 fputs("Not enough memory to parse event qualifier",
2954 err = trace__validate_ev_qualifier(&trace);
2959 err = target__validate(&trace.opts.target);
2961 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2962 fprintf(trace.output, "%s", bf);
2966 err = target__parse_uid(&trace.opts.target);
2968 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2969 fprintf(trace.output, "%s", bf);
2973 if (!argc && target__none(&trace.opts.target))
2974 trace.opts.target.system_wide = true;
2977 err = trace__replay(&trace);
2979 err = trace__run(&trace, argc, argv);
2982 if (output_name != NULL)
2983 fclose(trace.output);