Merge tag 'perf-tools-for-v5.18-2022-03-26' of git://git.kernel.org/pub/scm/linux...
author Linus Torvalds <[email protected]>
Sun, 27 Mar 2022 20:42:32 +0000 (13:42 -0700)
committer Linus Torvalds <[email protected]>
Sun, 27 Mar 2022 20:42:32 +0000 (13:42 -0700)
Pull perf tools updates from Arnaldo Carvalho de Melo:
 "New features:

  perf ftrace:

   - Add -n/--use-nsec option to the 'latency' subcommand.

     Default: usecs:

     $ sudo perf ftrace latency -T dput -a sleep 1
     #   DURATION     |      COUNT | GRAPH                          |
          0 - 1    us |    2098375 | #############################  |
          1 - 2    us |         61 |                                |
          2 - 4    us |         33 |                                |
          4 - 8    us |         13 |                                |
          8 - 16   us |        124 |                                |
         16 - 32   us |        123 |                                |
         32 - 64   us |          1 |                                |
         64 - 128  us |          0 |                                |
        128 - 256  us |          1 |                                |
        256 - 512  us |          0 |                                |

     Better granularity with nsec:

     $ sudo perf ftrace latency -T dput -a -n sleep 1
     #   DURATION     |      COUNT | GRAPH                          |
          0 - 1    us |          0 |                                |
          1 - 2    ns |          0 |                                |
          2 - 4    ns |          0 |                                |
          4 - 8    ns |          0 |                                |
          8 - 16   ns |          0 |                                |
         16 - 32   ns |          0 |                                |
         32 - 64   ns |          0 |                                |
         64 - 128  ns |    1163434 | ##############                 |
        128 - 256  ns |     914102 | #############                  |
        256 - 512  ns |        884 |                                |
        512 - 1024 ns |        613 |                                |
          1 - 2    us |         31 |                                |
          2 - 4    us |         17 |                                |
          4 - 8    us |          7 |                                |
          8 - 16   us |        123 |                                |
         16 - 32   us |         83 |                                |

  perf lock:

   - Add -c/--combine-locks option to merge lock instances in the same
     class into a single entry.

     # perf lock report -c
                    Name acquired contended avg wait(ns) total wait(ns) max wait(ns) min wait(ns)

           rcu_read_lock   251225         0            0              0            0            0
      hrtimer_bases.lock    39450         0            0              0            0            0
     &sb->s_type->i_l...    10301         1          662            662          662          662
        ptlock_ptr(page)    10173         2          701           1402          760          642
     &(ei->i_block_re...     8732         0            0              0            0            0
            &xa->xa_lock     8088         0            0              0            0            0
             &base->lock     6705         0            0              0            0            0
             &p->pi_lock     5549         0            0              0            0            0
     &dentry->d_lockr...     5010         4         1274           5097         1844          789
               &ep->lock     3958         0            0              0            0            0

   - Add -F/--field option to customize the list of fields to output:

     $ perf lock report -F contended,wait_max -k avg_wait
                     Name contended max wait(ns) avg wait(ns)

           slock-AF_INET6         1        23543        23543
        &lruvec->lru_lock         5        18317        11254
           slock-AF_INET6         1        10379        10379
               rcu_node_1         1         2104         2104
      &dentry->d_lockr...         1         1844         1844
      &dentry->d_lockr...         1         1672         1672
         &newf->file_lock        15         2279         1025
      &dentry->d_lockr...         1          792          792

   - Add --synth=no option for record, as there is no need to symbolize:
     lock names come from the tracepoints.
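
     A minimal usage sketch (the workload is illustrative):

     # perf lock record --synth=no -- sleep 1
     # perf lock report -c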

  perf record:

   - Threaded recording, opt-in, via the new --threads command line
     option.
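
     A minimal usage sketch (the workload is illustrative):

     $ perf record --threads -- sleep 1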

   - Improve AMD IBS (Instruction-Based Sampling) error handling
     messages.

  perf script:

   - Add 'brstackinsnlen' field (use it with -F) for branch stacks.
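
     A usage sketch, on data recorded with branch stacks (the workload
     is illustrative):

     $ perf record -b -- sleep 1
     $ perf script -F +brstackinsnlen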

   - Output branch sample type in 'perf script'.

  perf report:

   - Add "addr_from" and "addr_to" sort dimensions.

   - Print branch stack entry type in 'perf report --dump-raw-trace'.

   - Fix symbolization for chrooted workloads.

  Hardware tracing:

  Intel PT:

   - Add CFE (Control Flow Event) and EVD (Event Data) packets support.

   - Add MODE.Exec IFLAG bit support.

     An explanation of these features from the "Intel® 64 and IA-32
     architectures software developer’s manual combined volumes: 1, 2A,
     2B, 2C, 2D, 3A, 3B, 3C, 3D, and 4" PDF at:

        https://cdrdv2.intel.com/v1/dl/getContent/671200

     On page 3951:
      "32.2.4

       Event Trace is a capability that exposes details about the
       asynchronous events, when they are generated, and when their
       corresponding software event handler completes execution. These
       include:

        o Interrupts, including NMI and SMI, including the interrupt
          vector when defined.

        o Faults, exceptions including the fault vector.

           - Page faults additionally include the page fault address,
             when in context.

        o Event handler returns, including IRET and RSM.

        o VM exits and VM entries.¹

           - VM exits include the values written to the “exit reason”
             and “exit qualification” VMCS fields. INIT and SIPI events.

        o TSX aborts, including the abort status returned for the RTM
          instructions.

        o Shutdown.

       Additionally, it provides indication of the status of the
       Interrupt Flag (IF), to indicate when interrupts are masked"

  ARM CoreSight:

   - Use advertised caps/min_interval as default sample_period on ARM
     SPE.
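
     A usage sketch (assuming a system exposing an arm_spe_0 PMU; with
     no explicit period, the advertised minimum interval is now used):

     # perf record -e arm_spe_0// -a -- sleep 1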

   - Update deduction of TRCCONFIGR register for branch broadcast on
     ARM's CoreSight ETM.

  Vendor Events (JSON):

  Intel:

   - Update events and metrics for: Alderlake, Broadwell, Broadwell DE,
     BroadwellX, CascadelakeX, Elkhartlake, Bonnell, Goldmont,
     GoldmontPlus, Westmere EP-DP, Haswell, HaswellX, Icelake, IcelakeX,
     Ivybridge, Ivytown, Jaketown, Knights Landing, Nehalem EP,
     Sandybridge, Silvermont, Skylake, Skylake Server, SkylakeX,
     Tigerlake, TremontX, Westmere EP-SP, and Westmere EX.

  ARM:

   - Add support for HiSilicon CPA PMU aliasing.

  perf stat:

   - Fix forked applications enablement of counters.

   - The 'slots' event should only be printed in a different order than
     the one specified on the command line when 'topdown' events are
     present; fix it.

  Miscellaneous:

   - Sync msr-index, cpufeatures header files with the kernel sources.

   - Stop using some deprecated libbpf APIs in 'perf trace'.

   - Fix some spelling mistakes.

   - Refactor the maps pointers usage to pave the way for using refcount
     debugging.

   - Only offer the --tui option on perf top, report and annotate when
     perf was built with libslang.

   - Don't mention --to-ctf in 'perf data --help' when not linking with
     the required library, libbabeltrace.

   - Use ARRAY_SIZE() instead of ad hoc equivalent, spotted by
     array_size.cocci.

   - Enhance the matching of sub-command abbreviations:

       'perf c2c rec'     -> 'perf c2c record'
       'perf c2c recport' -> error

   - Set build-id using build-id header on new mmap records.

   - Fix generation of 'perf --version' string.

  perf test:

   - Add test for the arm_spe event.

   - Add test to check unwinding using frame-pointer (fp) mode on arm64.

   - Make metric testing more robust in 'perf test'.

   - Add error message for unsupported branch stack cases.

  libperf:

   - Add API for allocating new thread map array.

   - Fix typo in perf_evlist__open() failure error messages in libperf
     tests.

  perf c2c:

   - Replace bitmap_weight() with bitmap_empty() where appropriate"

* tag 'perf-tools-for-v5.18-2022-03-26' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (143 commits)
  perf evsel: Improve AMD IBS (Instruction-Based Sampling) error handling messages
  perf python: Add perf_env stubs that will be needed in evsel__open_strerror()
  perf tools: Enhance the matching of sub-commands abbreviations
  libperf tests: Fix typo in perf_evlist__open() failure error messages
  tools arm64: Import cputype.h
  perf lock: Add -F/--field option to control output
  perf lock: Extend struct lock_key to have print function
  perf lock: Add --synth=no option for record
  tools headers cpufeatures: Sync with the kernel sources
  tools headers cpufeatures: Sync with the kernel sources
  perf stat: Fix forked applications enablement of counters
  tools arch x86: Sync the msr-index.h copy with the kernel sources
  perf evsel: Make evsel__env() always return a valid env
  perf build-id: Fix spelling mistake "Cant" -> "Can't"
  perf header: Fix spelling mistake "could't" -> "couldn't"
  perf script: Add 'brstackinsnlen' for branch stacks
  perf parse-events: Move slots only with topdown
  perf ftrace latency: Update documentation
  perf ftrace latency: Add -n/--use-nsec option
  perf tools: Fix version kernel tag
  ...

tools/perf/util/bpf-loader.c

index ec6d9e7b446de5bb3c966751e80dc4324fd5ef85,4f6173756a9d481e0b44da62dd214b4de3b5492f..b72cef1ae959cdbd32e2f968ca6212d17797f095
@@@ -26,6 -26,8 +26,8 @@@
  #include "util.h"
  #include "llvm-utils.h"
  #include "c++/clang-c.h"
+ #include "hashmap.h"
+ #include "asm/bug.h"
  
  #include <internal/xyarray.h>
  
@@@ -49,12 -51,57 +51,58 @@@ struct bpf_prog_priv 
        int *type_mapping;
  };
  
+ struct bpf_perf_object {
+       struct list_head list;
+       struct bpf_object *obj;
+ };
+ static LIST_HEAD(bpf_objects_list);
+ static struct hashmap *bpf_program_hash;
+ static struct hashmap *bpf_map_hash;
+ static struct bpf_perf_object *
+ bpf_perf_object__next(struct bpf_perf_object *prev)
+ {
+       struct bpf_perf_object *next;
+       if (!prev)
+               next = list_first_entry(&bpf_objects_list,
+                                       struct bpf_perf_object,
+                                       list);
+       else
+               next = list_next_entry(prev, list);
+       /* Empty list is noticed here so don't need checking on entry. */
+       if (&next->list == &bpf_objects_list)
+               return NULL;
+       return next;
+ }
+ #define bpf_perf_object__for_each(perf_obj, tmp)      \
+       for ((perf_obj) = bpf_perf_object__next(NULL),  \
+            (tmp) = bpf_perf_object__next(perf_obj);   \
+            (perf_obj) != NULL;                        \
+            (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
  static bool libbpf_initialized;
  
+ static int bpf_perf_object__add(struct bpf_object *obj)
+ {
+       struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
+       if (perf_obj) {
+               INIT_LIST_HEAD(&perf_obj->list);
+               perf_obj->obj = obj;
+               list_add_tail(&perf_obj->list, &bpf_objects_list);
+       }
+       return perf_obj ? 0 : -ENOMEM;
+ }
  struct bpf_object *
  bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
  {
 +      LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
        struct bpf_object *obj;
  
        if (!libbpf_initialized) {
                libbpf_initialized = true;
        }
  
 -      obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
 +      obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
        if (IS_ERR_OR_NULL(obj)) {
                pr_debug("bpf: failed to load buffer\n");
                return ERR_PTR(-EINVAL);
        }
  
+       if (bpf_perf_object__add(obj)) {
+               bpf_object__close(obj);
+               return ERR_PTR(-ENOMEM);
+       }
        return obj;
  }
  
+ static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
+ {
+       list_del(&perf_obj->list);
+       bpf_object__close(perf_obj->obj);
+       free(perf_obj);
+ }
  struct bpf_object *bpf__prepare_load(const char *filename, bool source)
  {
 +      LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
        struct bpf_object *obj;
  
        if (!libbpf_initialized) {
                                return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
                } else
                        pr_debug("bpf: successful builtin compilation\n");
 -              obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
 +              obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
  
                if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
                        llvm__dump_obj(filename, obj_buf, obj_buf_sz);
  
                free(obj_buf);
-       } else
+       } else {
                obj = bpf_object__open(filename);
+       }
  
        if (IS_ERR_OR_NULL(obj)) {
                pr_debug("bpf: failed to load %s\n", filename);
                return obj;
        }
  
-       return obj;
- }
- void bpf__clear(void)
- {
-       struct bpf_object *obj, *tmp;
-       bpf_object__for_each_safe(obj, tmp) {
-               bpf__unprobe(obj);
+       if (bpf_perf_object__add(obj)) {
                bpf_object__close(obj);
+               return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
        }
+       return obj;
  }
  
  static void
- clear_prog_priv(struct bpf_program *prog __maybe_unused,
+ clear_prog_priv(const struct bpf_program *prog __maybe_unused,
                void *_priv)
  {
        struct bpf_prog_priv *priv = _priv;
        free(priv);
  }
  
+ static void bpf_program_hash_free(void)
+ {
+       struct hashmap_entry *cur;
+       size_t bkt;
+       if (IS_ERR_OR_NULL(bpf_program_hash))
+               return;
+       hashmap__for_each_entry(bpf_program_hash, cur, bkt)
+               clear_prog_priv(cur->key, cur->value);
+       hashmap__free(bpf_program_hash);
+       bpf_program_hash = NULL;
+ }
+ static void bpf_map_hash_free(void);
+ void bpf__clear(void)
+ {
+       struct bpf_perf_object *perf_obj, *tmp;
+       bpf_perf_object__for_each(perf_obj, tmp) {
+               bpf__unprobe(perf_obj->obj);
+               bpf_perf_object__close(perf_obj);
+       }
+       bpf_program_hash_free();
+       bpf_map_hash_free();
+ }
+ static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
+ {
+       return (size_t) __key;
+ }
+ static bool ptr_equal(const void *key1, const void *key2,
+                         void *ctx __maybe_unused)
+ {
+       return key1 == key2;
+ }
+ static void *program_priv(const struct bpf_program *prog)
+ {
+       void *priv;
+       if (IS_ERR_OR_NULL(bpf_program_hash))
+               return NULL;
+       if (!hashmap__find(bpf_program_hash, prog, &priv))
+               return NULL;
+       return priv;
+ }
+ static int program_set_priv(struct bpf_program *prog, void *priv)
+ {
+       void *old_priv;
+       /*
+        * Should not happen, we warn about it in the
+        * caller function - config_bpf_program
+        */
+       if (IS_ERR(bpf_program_hash))
+               return PTR_ERR(bpf_program_hash);
+       if (!bpf_program_hash) {
+               bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
+               if (IS_ERR(bpf_program_hash))
+                       return PTR_ERR(bpf_program_hash);
+       }
+       old_priv = program_priv(prog);
+       if (old_priv) {
+               clear_prog_priv(prog, old_priv);
+               return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
+       }
+       return hashmap__add(bpf_program_hash, prog, priv);
+ }
  static int
  prog_config__exec(const char *value, struct perf_probe_event *pev)
  {
@@@ -378,7 -509,7 +511,7 @@@ config_bpf_program(struct bpf_program *
        pr_debug("bpf: config '%s' is ok\n", config_str);
  
  set_priv:
-       err = bpf_program__set_priv(prog, priv, clear_prog_priv);
+       err = program_set_priv(prog, priv);
        if (err) {
                pr_debug("Failed to set priv for program '%s'\n", config_str);
                goto errout;
@@@ -419,7 -550,7 +552,7 @@@ preproc_gen_prologue(struct bpf_progra
                     struct bpf_insn *orig_insns, int orig_insns_cnt,
                     struct bpf_prog_prep_result *res)
  {
-       struct bpf_prog_priv *priv = bpf_program__priv(prog);
+       struct bpf_prog_priv *priv = program_priv(prog);
        struct probe_trace_event *tev;
        struct perf_probe_event *pev;
        struct bpf_insn *buf;
@@@ -570,7 -701,7 +703,7 @@@ static int map_prologue(struct perf_pro
  
  static int hook_load_preprocessor(struct bpf_program *prog)
  {
-       struct bpf_prog_priv *priv = bpf_program__priv(prog);
+       struct bpf_prog_priv *priv = program_priv(prog);
        struct perf_probe_event *pev;
        bool need_prologue = false;
        int err, i;
@@@ -646,7 -777,7 +779,7 @@@ int bpf__probe(struct bpf_object *obj
                if (err)
                        goto out;
  
-               priv = bpf_program__priv(prog);
+               priv = program_priv(prog);
                if (IS_ERR_OR_NULL(priv)) {
                        if (!priv)
                                err = -BPF_LOADER_ERRNO__INTERNAL;
                }
  
                if (priv->is_tp) {
 -                      bpf_program__set_tracepoint(prog);
 +                      bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
                        continue;
                }
  
 -              bpf_program__set_kprobe(prog);
 +              bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
                pev = &priv->pev;
  
                err = convert_perf_probe_events(pev, 1);
@@@ -698,7 -829,7 +831,7 @@@ int bpf__unprobe(struct bpf_object *obj
        struct bpf_program *prog;
  
        bpf_object__for_each_program(prog, obj) {
-               struct bpf_prog_priv *priv = bpf_program__priv(prog);
+               struct bpf_prog_priv *priv = program_priv(prog);
                int i;
  
                if (IS_ERR_OR_NULL(priv) || priv->is_tp)
@@@ -754,7 -885,7 +887,7 @@@ int bpf__foreach_event(struct bpf_objec
        int err;
  
        bpf_object__for_each_program(prog, obj) {
-               struct bpf_prog_priv *priv = bpf_program__priv(prog);
+               struct bpf_prog_priv *priv = program_priv(prog);
                struct probe_trace_event *tev;
                struct perf_probe_event *pev;
                int i, fd;
@@@ -850,7 -981,7 +983,7 @@@ bpf_map_priv__purge(struct bpf_map_pri
  }
  
  static void
- bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
+ bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
                    void *_priv)
  {
        struct bpf_map_priv *priv = _priv;
        free(priv);
  }
  
+ static void *map_priv(const struct bpf_map *map)
+ {
+       void *priv;
+       if (IS_ERR_OR_NULL(bpf_map_hash))
+               return NULL;
+       if (!hashmap__find(bpf_map_hash, map, &priv))
+               return NULL;
+       return priv;
+ }
+ static void bpf_map_hash_free(void)
+ {
+       struct hashmap_entry *cur;
+       size_t bkt;
+       if (IS_ERR_OR_NULL(bpf_map_hash))
+               return;
+       hashmap__for_each_entry(bpf_map_hash, cur, bkt)
+               bpf_map_priv__clear(cur->key, cur->value);
+       hashmap__free(bpf_map_hash);
+       bpf_map_hash = NULL;
+ }
+ static int map_set_priv(struct bpf_map *map, void *priv)
+ {
+       void *old_priv;
+       if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
+               return PTR_ERR(bpf_program_hash);
+       if (!bpf_map_hash) {
+               bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
+               if (IS_ERR(bpf_map_hash))
+                       return PTR_ERR(bpf_map_hash);
+       }
+       old_priv = map_priv(map);
+       if (old_priv) {
+               bpf_map_priv__clear(map, old_priv);
+               return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
+       }
+       return hashmap__add(bpf_map_hash, map, priv);
+ }
  static int
  bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
  {
@@@ -958,7 -1136,7 +1138,7 @@@ static in
  bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
  {
        const char *map_name = bpf_map__name(map);
-       struct bpf_map_priv *priv = bpf_map__priv(map);
+       struct bpf_map_priv *priv = map_priv(map);
  
        if (IS_ERR(priv)) {
                pr_debug("Failed to get private from map %s\n", map_name);
                }
                INIT_LIST_HEAD(&priv->ops_list);
  
-               if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
+               if (map_set_priv(map, priv)) {
                        free(priv);
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }
@@@ -1007,22 -1185,24 +1187,22 @@@ __bpf_map__config_value(struct bpf_map 
  {
        struct bpf_map_op *op;
        const char *map_name = bpf_map__name(map);
 -      const struct bpf_map_def *def = bpf_map__def(map);
  
 -      if (IS_ERR(def)) {
 -              pr_debug("Unable to get map definition from '%s'\n",
 -                       map_name);
 +      if (!map) {
 +              pr_debug("Map '%s' is invalid\n", map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
  
 -      if (def->type != BPF_MAP_TYPE_ARRAY) {
 +      if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
        }
 -      if (def->key_size < sizeof(unsigned int)) {
 +      if (bpf_map__key_size(map) < sizeof(unsigned int)) {
                pr_debug("Map %s has incorrect key size\n", map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
        }
 -      switch (def->value_size) {
 +      switch (bpf_map__value_size(map)) {
        case 1:
        case 2:
        case 4:
@@@ -1064,6 -1244,7 +1244,6 @@@ __bpf_map__config_event(struct bpf_map 
                        struct parse_events_term *term,
                        struct evlist *evlist)
  {
 -      const struct bpf_map_def *def;
        struct bpf_map_op *op;
        const char *map_name = bpf_map__name(map);
        struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
        }
  
 -      def = bpf_map__def(map);
 -      if (IS_ERR(def)) {
 -              pr_debug("Unable to get map definition from '%s'\n",
 -                       map_name);
 -              return PTR_ERR(def);
 +      if (!map) {
 +              pr_debug("Map '%s' is invalid\n", map_name);
 +              return PTR_ERR(map);
        }
  
        /*
         * No need to check key_size and value_size:
         * kernel has already checked them.
         */
 -      if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
 +      if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@@ -1132,6 -1315,7 +1312,6 @@@ config_map_indices_range_check(struct p
                               const char *map_name)
  {
        struct parse_events_array *array = &term->array;
 -      const struct bpf_map_def *def;
        unsigned int i;
  
        if (!array->nr_ranges)
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
  
 -      def = bpf_map__def(map);
 -      if (IS_ERR(def)) {
 -              pr_debug("ERROR: Unable to get map definition from '%s'\n",
 -                       map_name);
 +      if (!map) {
 +              pr_debug("Map '%s' is invalid\n", map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
  
                size_t length = array->ranges[i].length;
                unsigned int idx = start + length - 1;
  
 -              if (idx >= def->max_entries) {
 +              if (idx >= bpf_map__max_entries(map)) {
                        pr_debug("ERROR: index %d too large\n", idx);
                        return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
                }
  }
  
  typedef int (*map_config_func_t)(const char *name, int map_fd,
 -                               const struct bpf_map_def *pdef,
 +                               const struct bpf_map *map,
                                 struct bpf_map_op *op,
                                 void *pkey, void *arg);
  
  static int
  foreach_key_array_all(map_config_func_t func,
                      void *arg, const char *name,
 -                    int map_fd, const struct bpf_map_def *pdef,
 +                    int map_fd, const struct bpf_map *map,
                      struct bpf_map_op *op)
  {
        unsigned int i;
        int err;
  
 -      for (i = 0; i < pdef->max_entries; i++) {
 -              err = func(name, map_fd, pdef, op, &i, arg);
 +      for (i = 0; i < bpf_map__max_entries(map); i++) {
 +              err = func(name, map_fd, map, op, &i, arg);
                if (err) {
                        pr_debug("ERROR: failed to insert value to %s[%u]\n",
                                 name, i);
  static int
  foreach_key_array_ranges(map_config_func_t func, void *arg,
                         const char *name, int map_fd,
 -                       const struct bpf_map_def *pdef,
 +                       const struct bpf_map *map,
                         struct bpf_map_op *op)
  {
        unsigned int i, j;
                for (j = 0; j < length; j++) {
                        unsigned int idx = start + j;
  
 -                      err = func(name, map_fd, pdef, op, &idx, arg);
 +                      err = func(name, map_fd, map, op, &idx, arg);
                        if (err) {
                                pr_debug("ERROR: failed to insert value to %s[%u]\n",
                                         name, idx);
@@@ -1302,10 -1488,11 +1482,10 @@@ bpf_map_config_foreach_key(struct bpf_m
                           map_config_func_t func,
                           void *arg)
  {
 -      int err, map_fd;
 +      int err, map_fd, type;
        struct bpf_map_op *op;
 -      const struct bpf_map_def *def;
        const char *name = bpf_map__name(map);
-       struct bpf_map_priv *priv = bpf_map__priv(map);
+       struct bpf_map_priv *priv = map_priv(map);
  
        if (IS_ERR(priv)) {
                pr_debug("ERROR: failed to get private from map %s\n", name);
                return 0;
        }
  
 -      def = bpf_map__def(map);
 -      if (IS_ERR(def)) {
 -              pr_debug("ERROR: failed to get definition from map %s\n", name);
 +      if (!map) {
 +              pr_debug("Map '%s' is invalid\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
        map_fd = bpf_map__fd(map);
                return map_fd;
        }
  
 +      type = bpf_map__type(map);
        list_for_each_entry(op, &priv->ops_list, list) {
 -              switch (def->type) {
 +              switch (type) {
                case BPF_MAP_TYPE_ARRAY:
                case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
                        switch (op->key_type) {
                        case BPF_MAP_KEY_ALL:
                                err = foreach_key_array_all(func, arg, name,
 -                                                          map_fd, def, op);
 +                                                          map_fd, map, op);
                                break;
                        case BPF_MAP_KEY_RANGES:
                                err = foreach_key_array_ranges(func, arg, name,
 -                                                             map_fd, def,
 -                                                             op);
 +                                                             map_fd, map, op);
                                break;
                        default:
                                pr_debug("ERROR: keytype for map '%s' invalid\n",
@@@ -1447,7 -1635,7 +1627,7 @@@ apply_config_evsel_for_key(const char *
  
  static int
  apply_obj_config_map_for_key(const char *name, int map_fd,
 -                           const struct bpf_map_def *pdef,
 +                           const struct bpf_map *map,
                             struct bpf_map_op *op,
                             void *pkey, void *arg __maybe_unused)
  {
        switch (op->op_type) {
        case BPF_MAP_OP_SET_VALUE:
                err = apply_config_value_for_key(map_fd, pkey,
 -                                               pdef->value_size,
 +                                               bpf_map__value_size(map),
                                                 op->v.value);
                break;
        case BPF_MAP_OP_SET_EVSEL:
@@@ -1494,11 -1682,11 +1674,11 @@@ apply_obj_config_object(struct bpf_obje
  
  int bpf__apply_obj_config(void)
  {
-       struct bpf_object *obj, *tmp;
+       struct bpf_perf_object *perf_obj, *tmp;
        int err;
  
-       bpf_object__for_each_safe(obj, tmp) {
-               err = apply_obj_config_object(obj);
+       bpf_perf_object__for_each(perf_obj, tmp) {
+               err = apply_obj_config_object(perf_obj->obj);
                if (err)
                        return err;
        }
        return 0;
  }
  
- #define bpf__for_each_map(pos, obj, objtmp)   \
-       bpf_object__for_each_safe(obj, objtmp)  \
-               bpf_object__for_each_map(pos, obj)
+ #define bpf__perf_for_each_map(map, pobj, tmp)                        \
+       bpf_perf_object__for_each(pobj, tmp)                    \
+               bpf_object__for_each_map(map, pobj->obj)
  
- #define bpf__for_each_map_named(pos, obj, objtmp, name)       \
-       bpf__for_each_map(pos, obj, objtmp)             \
-               if (bpf_map__name(pos) &&               \
-                       (strcmp(name,                   \
-                               bpf_map__name(pos)) == 0))
+ #define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)        \
+       bpf__perf_for_each_map(map, pobj, pobjtmp)              \
+               if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
  
  struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
  {
        struct bpf_map_priv *tmpl_priv = NULL;
-       struct bpf_object *obj, *tmp;
+       struct bpf_perf_object *perf_obj, *tmp;
        struct evsel *evsel = NULL;
        struct bpf_map *map;
        int err;
        bool need_init = false;
  
-       bpf__for_each_map_named(map, obj, tmp, name) {
-               struct bpf_map_priv *priv = bpf_map__priv(map);
+       bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
+               struct bpf_map_priv *priv = map_priv(map);
  
                if (IS_ERR(priv))
                        return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
                evsel = evlist__last(evlist);
        }
  
-       bpf__for_each_map_named(map, obj, tmp, name) {
-               struct bpf_map_priv *priv = bpf_map__priv(map);
+       bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
+               struct bpf_map_priv *priv = map_priv(map);
  
                if (IS_ERR(priv))
                        return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
                        if (!priv)
                                return ERR_PTR(-ENOMEM);
  
-                       err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
+                       err = map_set_priv(map, priv);
                        if (err) {
                                bpf_map_priv__clear(map, priv);
                                return ERR_PTR(err);