Git Repo - linux.git/commitdiff
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Mon, 16 May 2016 21:08:43 +0000 (14:08 -0700)
committer Linus Torvalds <[email protected]>
Mon, 16 May 2016 21:08:43 +0000 (14:08 -0700)
Pull perf updates from Ingo Molnar:
 "Bigger kernel side changes:

   - Add backwards writing capability to the perf ring-buffer code,
     which is preparation for future advanced features like robust
     'overwrite support' and snapshot mode.  (Wang Nan)

   - Add pause and resume ioctls for the perf ringbuffer (Wang Nan)

   - x86 Intel cstate code cleanups and reorganization (Thomas Gleixner)

   - x86 Intel uncore and CPU PMU driver updates (Kan Liang, Peter
     Zijlstra)

   - x86 AUX (Intel PT) related enhancements and updates (Alexander
     Shishkin)

   - x86 MSR PMU driver enhancements and updates (Huang Rui)

   - ... and lots of other changes spread out over 40+ commits.

  Biggest tooling side changes:

   - 'perf trace' features and enhancements.  (Arnaldo Carvalho de Melo)

   - BPF tooling updates (Wang Nan)

   - 'perf sched' updates (Jiri Olsa)

   - 'perf probe' updates (Masami Hiramatsu)

   - ... plus 200+ other enhancements, fixes and cleanups to tools/

  The merge commits, the shortlog and the changelogs contain a lot more
  details"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (249 commits)
  perf/core: Disable the event on a truncated AUX record
  perf/x86/intel/pt: Generate PMI in the STOP region as well
  perf buildid-cache: Use lsdir() for looking up buildid caches
  perf symbols: Use lsdir() for the search in kcore cache directory
  perf tools: Use SBUILD_ID_SIZE where applicable
  perf tools: Fix lsdir to set errno correctly
  perf trace: Move seccomp args beautifiers to tools/perf/trace/beauty/
  perf trace: Move flock op beautifier to tools/perf/trace/beauty/
  perf build: Add build-test for debug-frame on arm/arm64
  perf build: Add build-test for libunwind cross-platforms support
  perf script: Fix export of callchains with recursion in db-export
  perf script: Fix callchain addresses in db-export
  perf script: Fix symbol insertion behavior in db-export
  perf symbols: Add dso__insert_symbol function
  perf scripting python: Use Py_FatalError instead of die()
  perf tools: Remove xrealloc and ALLOC_GROW
  perf help: Do not use ALLOC_GROW in add_cmd_list
  perf pmu: Make pmu_formats_string to check return value of strbuf
  perf header: Make topology checkers to check return value of strbuf
  perf tools: Make alias handler to check return value of strbuf
  ...

1  2 
tools/perf/builtin-stat.c
tools/perf/util/evsel.c

index 307e8a1a003c5ebfd7eefe6a8770a868cd3b55e9,5645a8361de6f494b1f9b56475dec3a5dd959d9d..e459b685a4e999817ff06e01994b3f30f233c528
@@@ -298,6 -298,14 +298,14 @@@ static int read_counter(struct perf_evs
                                        return -1;
                                }
                        }
+                       if (verbose > 1) {
+                               fprintf(stat_config.output,
+                                       "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+                                               perf_evsel__name(counter),
+                                               cpu,
+                                               count->val, count->ena, count->run);
+                       }
                }
        }
  
@@@ -528,7 -536,6 +536,7 @@@ static int __run_perf_stat(int argc, co
                perf_evlist__set_leader(evsel_list);
  
        evlist__for_each(evsel_list, counter) {
 +try_again:
                if (create_perf_stat_counter(counter) < 0) {
                        /*
                         * PPC returns ENXIO for HW counters until 2.6.37
                                if ((counter->leader != counter) ||
                                    !(counter->leader->nr_members > 1))
                                        continue;
 -                      }
 +                      } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
 +                                if (verbose)
 +                                        ui__warning("%s\n", msg);
 +                                goto try_again;
 +                        }
  
                        perf_evsel__open_strerror(counter, &target,
                                                  errno, msg, sizeof(msg));
diff --combined tools/perf/util/evsel.c
index 645dc18288367733567b760f136b81657a5937ef,a23f54793e51c83a5bb5d9c50d2c6d3c7a3df32e..964c7c3602c04eed37b26cf123c6f96ce9a2f5a0
@@@ -226,7 -226,8 +226,8 @@@ struct perf_evsel *perf_evsel__new_idx(
                perf_evsel__init(evsel, attr, idx);
  
        if (perf_evsel__is_bpf_output(evsel)) {
-               evsel->attr.sample_type |= PERF_SAMPLE_RAW;
+               evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+                                           PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
                evsel->attr.sample_period = 1;
        }
  
@@@ -561,10 -562,9 +562,9 @@@ int perf_evsel__group_desc(struct perf_
        return ret;
  }
  
- static void
- perf_evsel__config_callgraph(struct perf_evsel *evsel,
-                            struct record_opts *opts,
-                            struct callchain_param *param)
+ void perf_evsel__config_callchain(struct perf_evsel *evsel,
+                                 struct record_opts *opts,
+                                 struct callchain_param *param)
  {
        bool function = perf_evsel__is_function_event(evsel);
        struct perf_event_attr *attr = &evsel->attr;
@@@ -704,7 -704,7 +704,7 @@@ static void apply_config_terms(struct p
  
                /* set perf-event callgraph */
                if (param.enabled)
-                       perf_evsel__config_callgraph(evsel, opts, &param);
+                       perf_evsel__config_callchain(evsel, opts, &param);
        }
  }
  
   *     enable/disable events specifically, as there's no
   *     initial traced exec call.
   */
- void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
+                       struct callchain_param *callchain)
  {
        struct perf_evsel *leader = evsel->leader;
        struct perf_event_attr *attr = &evsel->attr;
        if (perf_evsel__is_function_event(evsel))
                evsel->attr.exclude_callchain_user = 1;
  
-       if (callchain_param.enabled && !evsel->no_aux_samples)
-               perf_evsel__config_callgraph(evsel, opts, &callchain_param);
+       if (callchain && callchain->enabled && !evsel->no_aux_samples)
+               perf_evsel__config_callchain(evsel, opts, callchain);
  
        if (opts->sample_intr_regs) {
                attr->sample_regs_intr = opts->sample_intr_regs;
@@@ -1230,6 -1231,21 +1231,21 @@@ static void __p_sample_type(char *buf, 
        __p_bits(buf, size, value, bits);
  }
  
+ static void __p_branch_sample_type(char *buf, size_t size, u64 value)
+ {
+ #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
+       struct bit_names bits[] = {
+               bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
+               bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
+               bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
+               bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
+               bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+               { .name = NULL, }
+       };
+ #undef bit_name
+       __p_bits(buf, size, value, bits);
+ }
  static void __p_read_format(char *buf, size_t size, u64 value)
  {
  #define bit_name(n) { PERF_FORMAT_##n, #n }
  #define p_unsigned(val)               snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
  #define p_signed(val)         snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
  #define p_sample_type(val)    __p_sample_type(buf, BUF_SIZE, val)
+ #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
  #define p_read_format(val)    __p_read_format(buf, BUF_SIZE, val)
  
  #define PRINT_ATTRn(_n, _f, _p)                               \
@@@ -1299,12 -1316,13 +1316,13 @@@ int perf_event_attr__fprintf(FILE *fp, 
        PRINT_ATTRf(comm_exec, p_unsigned);
        PRINT_ATTRf(use_clockid, p_unsigned);
        PRINT_ATTRf(context_switch, p_unsigned);
+       PRINT_ATTRf(write_backward, p_unsigned);
  
        PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
        PRINT_ATTRf(bp_type, p_unsigned);
        PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
        PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
-       PRINT_ATTRf(branch_sample_type, p_unsigned);
+       PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
        PRINT_ATTRf(sample_regs_user, p_hex);
        PRINT_ATTRf(sample_stack_user, p_unsigned);
        PRINT_ATTRf(clockid, p_signed);
@@@ -2253,100 -2271,9 +2271,11 @@@ u64 perf_evsel__intval(struct perf_evse
        return 0;
  }
  
- static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
- {
-       va_list args;
-       int ret = 0;
-       if (!*first) {
-               ret += fprintf(fp, ",");
-       } else {
-               ret += fprintf(fp, ":");
-               *first = false;
-       }
-       va_start(args, fmt);
-       ret += vfprintf(fp, fmt, args);
-       va_end(args);
-       return ret;
- }
- static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
- {
-       return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
- }
- int perf_evsel__fprintf(struct perf_evsel *evsel,
-                       struct perf_attr_details *details, FILE *fp)
- {
-       bool first = true;
-       int printed = 0;
-       if (details->event_group) {
-               struct perf_evsel *pos;
-               if (!perf_evsel__is_group_leader(evsel))
-                       return 0;
-               if (evsel->nr_members > 1)
-                       printed += fprintf(fp, "%s{", evsel->group_name ?: "");
-               printed += fprintf(fp, "%s", perf_evsel__name(evsel));
-               for_each_group_member(pos, evsel)
-                       printed += fprintf(fp, ",%s", perf_evsel__name(pos));
-               if (evsel->nr_members > 1)
-                       printed += fprintf(fp, "}");
-               goto out;
-       }
-       printed += fprintf(fp, "%s", perf_evsel__name(evsel));
-       if (details->verbose) {
-               printed += perf_event_attr__fprintf(fp, &evsel->attr,
-                                                   __print_attr__fprintf, &first);
-       } else if (details->freq) {
-               const char *term = "sample_freq";
-               if (!evsel->attr.freq)
-                       term = "sample_period";
-               printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
-                                        term, (u64)evsel->attr.sample_freq);
-       }
-       if (details->trace_fields) {
-               struct format_field *field;
-               if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
-                       printed += comma_fprintf(fp, &first, " (not a tracepoint)");
-                       goto out;
-               }
-               field = evsel->tp_format->format.fields;
-               if (field == NULL) {
-                       printed += comma_fprintf(fp, &first, " (no trace field)");
-                       goto out;
-               }
-               printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
-               field = field->next;
-               while (field) {
-                       printed += comma_fprintf(fp, &first, "%s", field->name);
-                       field = field->next;
-               }
-       }
- out:
-       fputc('\n', fp);
-       return ++printed;
- }
  bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
  {
 +      int paranoid;
 +
        if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
            evsel->attr.type   == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
  
                zfree(&evsel->name);
 +              return true;
 +      } else if (err == EACCES && !evsel->attr.exclude_kernel &&
 +                 (paranoid = perf_event_paranoid()) > 1) {
 +              const char *name = perf_evsel__name(evsel);
 +              char *new_name;
 +
 +              if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
 +                      return false;
 +
 +              if (evsel->name)
 +                      free(evsel->name);
 +              evsel->name = new_name;
 +              scnprintf(msg, msgsize,
 +"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
 +              evsel->attr.exclude_kernel = 1;
 +
                return true;
        }
  
@@@ -2400,13 -2311,12 +2329,13 @@@ int perf_evsel__open_strerror(struct pe
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
                 "which controls use of the performance events system by\n"
                 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
 -               "The default value is 1:\n\n"
 +               "The current value is %d:\n\n"
                 "  -1: Allow use of (almost) all events by all users\n"
                 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
                 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
                 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
 -                               target->system_wide ? "system-wide " : "");
 +                               target->system_wide ? "system-wide " : "",
 +                               perf_event_paranoid());
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
                                 perf_evsel__name(evsel));
                         "Probably the maximum number of open file descriptors has been reached.\n"
                         "Hint: Try again after reducing the number of events.\n"
                         "Hint: Try increasing the limit with 'ulimit -n <limit>'");
+       case ENOMEM:
+               if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
+                   access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
+                       return scnprintf(msg, size,
+                                        "Not enough memory to setup event with callchain.\n"
+                                        "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
+                                        "Hint: Current value: %d", sysctl_perf_event_max_stack);
+               break;
        case ENODEV:
                if (target->cpu_list)
                        return scnprintf(msg, size, "%s",
-        "No such device - did you specify an out-of-range profile CPU?\n");
+        "No such device - did you specify an out-of-range profile CPU?");
                break;
        case EOPNOTSUPP:
                if (evsel->attr.precise_ip)
        return scnprintf(msg, size,
        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
        "/bin/dmesg may provide additional information.\n"
-       "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+       "No CONFIG_PERF_EVENTS=y kernel support configured?",
                         err, strerror_r(err, sbuf, sizeof(sbuf)),
                         perf_evsel__name(evsel));
  }
This page took 0.128994 seconds and 4 git commands to generate.