Git Repo - linux.git/commitdiff
Merge remote-tracking branch 'torvalds/master' into perf/core
author: Arnaldo Carvalho de Melo <[email protected]>
Thu, 17 Feb 2022 21:40:54 +0000 (18:40 -0300)
committer: Arnaldo Carvalho de Melo <[email protected]>
Thu, 17 Feb 2022 21:40:54 +0000 (18:40 -0300)
To pick up fixes from perf/urgent that recently got merged.

Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1  2 
tools/perf/builtin-trace.c
tools/perf/util/bpf-loader.c

index d8deecc836b3ad8391f431fd46f9a23c5361d72a,52b137a184a66a4c8312d7485ebe2584262c4673..897fc504918b9120ded4cefd33c371f5e51c90fe
@@@ -1536,13 -1536,20 +1536,20 @@@ static size_t trace__fprintf_tstamp(str
        return fprintf(fp, "         ? ");
  }
  
+ static pid_t workload_pid = -1;
  static bool done = false;
  static bool interrupted = false;
  
- static void sig_handler(int sig)
+ static void sighandler_interrupt(int sig __maybe_unused)
  {
-       done = true;
-       interrupted = sig == SIGINT;
+       done = interrupted = true;
+ }
+ static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+                           void *context __maybe_unused)
+ {
+       if (info->si_pid == workload_pid)
+               done = true;
  }
  
  static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
@@@ -3782,7 -3789,7 +3789,7 @@@ static int trace__deliver_event(struct 
        if (err && err != -1)
                return err;
  
 -      err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
 +      err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
        if (err)
                return err;
  
@@@ -3938,7 -3945,6 +3945,6 @@@ static int trace__run(struct trace *tra
        bool draining = false;
  
        trace->live = true;
-       signal(SIGCHLD, sig_handler);
  
        if (!trace->raw_augmented_syscalls) {
                if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
                        fprintf(trace->output, "Couldn't run the workload!\n");
                        goto out_delete_evlist;
                }
+               workload_pid = evlist->workload.pid;
        }
  
        err = evlist__open(evlist);
@@@ -4887,10 -4894,16 +4894,16 @@@ int cmd_trace(int argc, const char **ar
        const char * const trace_subcommands[] = { "record", NULL };
        int err = -1;
        char bf[BUFSIZ];
+       struct sigaction sigchld_act;
  
        signal(SIGSEGV, sighandler_dump_stack);
        signal(SIGFPE, sighandler_dump_stack);
-       signal(SIGINT, sig_handler);
+       signal(SIGINT, sighandler_interrupt);
+       memset(&sigchld_act, 0, sizeof(sigchld_act));
+       sigchld_act.sa_flags = SA_SIGINFO;
+       sigchld_act.sa_sigaction = sighandler_chld;
+       sigaction(SIGCHLD, &sigchld_act, NULL);
  
        trace.evlist = evlist__new();
        trace.sctbl = syscalltbl__new();
index db61e09be585dcd6cddc530f5a3e8001ee9544c9,16ec605a9fe46a649da1cd18b5091180dfa15448..efd9c703b5ccb71668692886b3badb780b71f186
@@@ -49,52 -49,8 +49,52 @@@ struct bpf_prog_priv 
        int *type_mapping;
  };
  
 +struct bpf_perf_object {
 +      struct list_head list;
 +      struct bpf_object *obj;
 +};
 +
 +static LIST_HEAD(bpf_objects_list);
 +
 +static struct bpf_perf_object *
 +bpf_perf_object__next(struct bpf_perf_object *prev)
 +{
 +      struct bpf_perf_object *next;
 +
 +      if (!prev)
 +              next = list_first_entry(&bpf_objects_list,
 +                                      struct bpf_perf_object,
 +                                      list);
 +      else
 +              next = list_next_entry(prev, list);
 +
 +      /* Empty list is noticed here so don't need checking on entry. */
 +      if (&next->list == &bpf_objects_list)
 +              return NULL;
 +
 +      return next;
 +}
 +
 +#define bpf_perf_object__for_each(perf_obj, tmp)      \
 +      for ((perf_obj) = bpf_perf_object__next(NULL),  \
 +           (tmp) = bpf_perf_object__next(perf_obj);   \
 +           (perf_obj) != NULL;                        \
 +           (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
 +
  static bool libbpf_initialized;
  
 +static int bpf_perf_object__add(struct bpf_object *obj)
 +{
 +      struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
 +
 +      if (perf_obj) {
 +              INIT_LIST_HEAD(&perf_obj->list);
 +              perf_obj->obj = obj;
 +              list_add_tail(&perf_obj->list, &bpf_objects_list);
 +      }
 +      return perf_obj ? 0 : -ENOMEM;
 +}
 +
  struct bpf_object *
  bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
  {
                return ERR_PTR(-EINVAL);
        }
  
 +      if (bpf_perf_object__add(obj)) {
 +              bpf_object__close(obj);
 +              return ERR_PTR(-ENOMEM);
 +      }
 +
        return obj;
  }
  
 +static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
 +{
 +      list_del(&perf_obj->list);
 +      bpf_object__close(perf_obj->obj);
 +      free(perf_obj);
 +}
 +
  struct bpf_object *bpf__prepare_load(const char *filename, bool source)
  {
        struct bpf_object *obj;
                        llvm__dump_obj(filename, obj_buf, obj_buf_sz);
  
                free(obj_buf);
 -      } else
 +      } else {
                obj = bpf_object__open(filename);
 +      }
  
        if (IS_ERR_OR_NULL(obj)) {
                pr_debug("bpf: failed to load %s\n", filename);
                return obj;
        }
  
 +      if (bpf_perf_object__add(obj)) {
 +              bpf_object__close(obj);
 +              return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
 +      }
 +
        return obj;
  }
  
  void bpf__clear(void)
  {
 -      struct bpf_object *obj, *tmp;
 +      struct bpf_perf_object *perf_obj, *tmp;
  
 -      bpf_object__for_each_safe(obj, tmp) {
 -              bpf__unprobe(obj);
 -              bpf_object__close(obj);
 +      bpf_perf_object__for_each(perf_obj, tmp) {
 +              bpf__unprobe(perf_obj->obj);
 +              bpf_perf_object__close(perf_obj);
        }
  }
  
@@@ -1282,9 -1220,10 +1282,10 @@@ bpf__obj_config_map(struct bpf_object *
        pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
        err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
  out:
-       free(map_name);
        if (!err)
                *key_scan_pos += strlen(map_opt);
+       free(map_name);
        return err;
  }
  
@@@ -1563,11 -1502,11 +1564,11 @@@ apply_obj_config_object(struct bpf_obje
  
  int bpf__apply_obj_config(void)
  {
 -      struct bpf_object *obj, *tmp;
 +      struct bpf_perf_object *perf_obj, *tmp;
        int err;
  
 -      bpf_object__for_each_safe(obj, tmp) {
 -              err = apply_obj_config_object(obj);
 +      bpf_perf_object__for_each(perf_obj, tmp) {
 +              err = apply_obj_config_object(perf_obj->obj);
                if (err)
                        return err;
        }
        return 0;
  }
  
 -#define bpf__for_each_map(pos, obj, objtmp)   \
 -      bpf_object__for_each_safe(obj, objtmp)  \
 -              bpf_object__for_each_map(pos, obj)
 +#define bpf__perf_for_each_map(map, pobj, tmp)                        \
 +      bpf_perf_object__for_each(pobj, tmp)                    \
 +              bpf_object__for_each_map(map, pobj->obj)
  
 -#define bpf__for_each_map_named(pos, obj, objtmp, name)       \
 -      bpf__for_each_map(pos, obj, objtmp)             \
 -              if (bpf_map__name(pos) &&               \
 -                      (strcmp(name,                   \
 -                              bpf_map__name(pos)) == 0))
 +#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)        \
 +      bpf__perf_for_each_map(map, pobj, pobjtmp)              \
 +              if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
  
  struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
  {
        struct bpf_map_priv *tmpl_priv = NULL;
 -      struct bpf_object *obj, *tmp;
 +      struct bpf_perf_object *perf_obj, *tmp;
        struct evsel *evsel = NULL;
        struct bpf_map *map;
        int err;
        bool need_init = false;
  
 -      bpf__for_each_map_named(map, obj, tmp, name) {
 +      bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
                struct bpf_map_priv *priv = bpf_map__priv(map);
  
                if (IS_ERR(priv))
                evsel = evlist__last(evlist);
        }
  
 -      bpf__for_each_map_named(map, obj, tmp, name) {
 +      bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
                struct bpf_map_priv *priv = bpf_map__priv(map);
  
                if (IS_ERR(priv))
This page took 0.086753 seconds and 4 git commands to generate.