Merge tag 'perf-core-for-mingo-5.1-20190225' of git://git.kernel.org/pub/scm/linux...
author Ingo Molnar <[email protected]>
Thu, 28 Feb 2019 07:29:50 +0000 (08:29 +0100)
committer Ingo Molnar <[email protected]>
Thu, 28 Feb 2019 07:29:50 +0000 (08:29 +0100)
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf annotate:

  Wei Li:

  - Fix getting source line failure

perf script:

  Andi Kleen:

  - Handle missing fields with -F +...

perf data:

  Jiri Olsa:

  - Prep work to support per-cpu files in a directory.

Intel PT:

  Adrian Hunter:

  - Improve thread_stack__no_call_return()

  - Hide x86 retpolines in thread stacks.

  - Exported SQL viewer refactorings and a new 'top calls' report.

  Alexander Shishkin:

  - Copy parent's address filter offsets on clone

  - Fix address filters for vmas with non-zero offset. Applies to
    ARM's CoreSight as well. (A sketch of the new range arithmetic
    appears below.)
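
A minimal standalone sketch of that range arithmetic, mirroring
perf_addr_filter_vma_adjust() from the diff below. The struct and
function names here are simplified stand-ins for illustration, not
the kernel's own types:

  #include <stdio.h>

  #define PAGE_SHIFT 12

  struct filter { unsigned long offset, size; };      /* file offset + length */
  struct vma    { unsigned long start, end, pgoff; }; /* mapping + page offset */
  struct range  { unsigned long start, size; };       /* resulting trace range */

  static unsigned long min_ul(unsigned long a, unsigned long b)
  {
          return a < b ? a : b;
  }

  /* Map a file-based filter onto one vma; assumes the filter matched. */
  static void adjust(const struct filter *f, const struct vma *v,
                     struct range *r)
  {
          unsigned long vma_size = v->end - v->start;
          unsigned long off = v->pgoff << PAGE_SHIFT; /* vma's file offset */

          if (f->offset < off) {
                  /* Filter begins before this vma: clamp to the vma start. */
                  r->start = v->start;
                  r->size  = min_ul(vma_size, f->size - (off - f->offset));
          } else {
                  /* Filter begins inside this vma. */
                  r->start = v->start + f->offset - off;
                  r->size  = min_ul(v->end - r->start, f->size);
          }
  }

  int main(void)
  {
          /* A text segment mapped with a non-zero file offset. */
          struct vma v = { 0x400000, 0x500000, 0x10 }; /* off = 0x10000 */
          struct filter f = { 0x12000, 0x1000 };
          struct range r;

          adjust(&f, &v, &r);
          printf("trace [%#lx, %#lx)\n", r.start, r.start + r.size);
          return 0;
  }

Before the fix, the kernel stored only vma->vm_start and no size, so
filters landing in mappings with non-zero vm_pgoff could be programmed
at the wrong addresses.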

python scripts:

  Tony Jones:

  - Python3 support for several 'perf script' python scripts.

Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
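
The combined diff below also introduces an optional pmu::check_period
hook, called on the PERF_EVENT_IOC_PERIOD path so a PMU can veto period
values its hardware cannot program. A hedged sketch of how a driver
might wire it up (the my_pmu_* names and the minimum-period limit are
hypothetical; only the callback signature comes from this merge):

  /* Hypothetical driver-side validation for PERF_EVENT_IOC_PERIOD. */
  static int my_pmu_check_period(struct perf_event *event, u64 value)
  {
          /* A non-zero return makes the ioctl fail with -EINVAL. */
          if (value && value < MY_PMU_MIN_PERIOD) /* hypothetical limit */
                  return -EINVAL;
          return 0;
  }

  static struct pmu my_pmu = {
          /* ... the usual pmu callbacks ... */
          .check_period = my_pmu_check_period, /* optional */
  };

PMUs that leave the hook NULL get perf_event_nop_int(), which accepts
every value, so existing drivers keep their old behaviour.
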
include/linux/perf_event.h
kernel/events/core.c

index 67e485bfad63be180e6bacbffee4c5881d74f2b7,6ebc72f650172da5506e3b3bb9cd8a5f4cc1aa49..e47ef764f613ed5231121e8347575e37a53d5b1d
@@@ -448,11 -448,6 +448,11 @@@ struct pmu 
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match)             (struct perf_event *event); /* optional */
 +
 +      /*
 +       * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
 +       */
 +      int (*check_period)             (struct perf_event *event, u64 value); /* optional */
  };
  
  enum perf_addr_filter_action_t {
@@@ -495,6 -490,11 +495,11 @@@ struct perf_addr_filters_head 
        unsigned int            nr_file_filters;
  };
  
+ struct perf_addr_filter_range {
+       unsigned long           start;
+       unsigned long           size;
+ };
+ 
  /**
   * enum perf_event_state - the states of an event:
   */
@@@ -671,7 -671,7 +676,7 @@@ struct perf_event 
        /* address range filters */
        struct perf_addr_filters_head   addr_filters;
        /* vma address array for file-based filters */
-       unsigned long                   *addr_filters_offs;
+       struct perf_addr_filter_range   *addr_filter_ranges;
        unsigned long                   addr_filters_gen;
  
        void (*destroy)(struct perf_event *);
diff --combined kernel/events/core.c
index 932babd9e86cbc17a0d07d259a398a7075c66b19,16609f6737da829efb350ac714d728a3eebef7d7..5f59d848171ea587f4cfcc8c8e18e4b5ea01ae85
@@@ -1255,6 -1255,7 +1255,7 @@@ static void put_ctx(struct perf_event_c
   *          perf_event_context::lock
   *        perf_event::mmap_mutex
   *        mmap_sem
+  *          perf_addr_filters_head::lock
   *
   *    cpu_hotplug_lock
   *      pmus_lock
@@@ -2798,7 -2799,7 +2799,7 @@@ static int perf_event_stop(struct perf_
   *
   * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
   *      we update the addresses of corresponding vmas in
-  *    event::addr_filters_offs array and bump the event::addr_filters_gen;
+  *    event::addr_filter_ranges array and bump the event::addr_filters_gen;
   * (p2) when an event is scheduled in (pmu::add), it calls
   *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
   *      if the generation has changed since the previous call.
@@@ -4445,7 -4446,7 +4446,7 @@@ static void _free_event(struct perf_eve
  
        perf_event_free_bpf_prog(event);
        perf_addr_filters_splice(event, NULL);
-       kfree(event->addr_filters_offs);
+       kfree(event->addr_filter_ranges);
  
        if (event->destroy)
                event->destroy(event);
@@@ -4968,11 -4969,6 +4969,11 @@@ static void __perf_event_period(struct 
        }
  }
  
 +static int perf_event_check_period(struct perf_event *event, u64 value)
 +{
 +      return event->pmu->check_period(event, value);
 +}
 +
  static int perf_event_period(struct perf_event *event, u64 __user *arg)
  {
        u64 value;
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
  
 +      if (perf_event_check_period(event, value))
 +              return -EINVAL;
 +
        event_function_call(event, __perf_event_period, &value);
  
        return 0;
@@@ -6694,7 -6687,8 +6695,8 @@@ static void perf_event_addr_filters_exe
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
                if (filter->path.dentry) {
-                       event->addr_filters_offs[count] = 0;
+                       event->addr_filter_ranges[count].start = 0;
+                       event->addr_filter_ranges[count].size = 0;
                        restart++;
                }
  
@@@ -7374,28 -7368,47 +7376,47 @@@ static bool perf_addr_filter_match(stru
        return true;
  }
  
+ static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
+                                       struct vm_area_struct *vma,
+                                       struct perf_addr_filter_range *fr)
+ {
+       unsigned long vma_size = vma->vm_end - vma->vm_start;
+       unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+       struct file *file = vma->vm_file;
+ 
+       if (!perf_addr_filter_match(filter, file, off, vma_size))
+               return false;
+ 
+       if (filter->offset < off) {
+               fr->start = vma->vm_start;
+               fr->size = min(vma_size, filter->size - (off - filter->offset));
+       } else {
+               fr->start = vma->vm_start + filter->offset - off;
+               fr->size = min(vma->vm_end - fr->start, filter->size);
+       }
+       return true;
+ }
+ 
  static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
  {
        struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
        struct vm_area_struct *vma = data;
-       unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
-       struct file *file = vma->vm_file;
        struct perf_addr_filter *filter;
        unsigned int restart = 0, count = 0;
+       unsigned long flags;
  
        if (!has_addr_filter(event))
                return;
  
-       if (!file)
+       if (!vma->vm_file)
                return;
  
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               if (perf_addr_filter_match(filter, file, off,
-                                            vma->vm_end - vma->vm_start)) {
-                       event->addr_filters_offs[count] = vma->vm_start;
+               if (perf_addr_filter_vma_adjust(filter, vma,
+                                               &event->addr_filter_ranges[count]))
                        restart++;
-               }
  
                count++;
        }
@@@ -8985,26 -8998,19 +9006,19 @@@ static void perf_addr_filters_splice(st
   * @filter; if so, adjust filter's address range.
   * Called with mm::mmap_sem down for reading.
   */
- static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
-                                           struct mm_struct *mm)
+ static void perf_addr_filter_apply(struct perf_addr_filter *filter,
+                                  struct mm_struct *mm,
+                                  struct perf_addr_filter_range *fr)
  {
        struct vm_area_struct *vma;
  
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               struct file *file = vma->vm_file;
-               unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
-               unsigned long vma_size = vma->vm_end - vma->vm_start;
-               if (!file)
+               if (!vma->vm_file)
                        continue;
  
-               if (!perf_addr_filter_match(filter, file, off, vma_size))
-                       continue;
-               return vma->vm_start;
+               if (perf_addr_filter_vma_adjust(filter, vma, fr))
+                       return;
        }
-       return 0;
  }
  
  /*
@@@ -9038,15 -9044,15 +9052,15 @@@ static void perf_event_addr_filters_app
  
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               event->addr_filters_offs[count] = 0;
+               event->addr_filter_ranges[count].start = 0;
+               event->addr_filter_ranges[count].size = 0;
  
                /*
                 * Adjust base offset if the filter is associated to a binary
                 * that needs to be mapped:
                 */
                if (filter->path.dentry)
-                       event->addr_filters_offs[count] =
-                               perf_addr_filter_apply(filter, mm);
+                       perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
  
                count++;
        }
@@@ -9608,11 -9614,6 +9622,11 @@@ static int perf_pmu_nop_int(struct pmu 
        return 0;
  }
  
 +static int perf_event_nop_int(struct perf_event *event, u64 value)
 +{
 +      return 0;
 +}
 +
  static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
  
  static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@@ -9913,9 -9914,6 +9927,9 @@@ got_cpu_context
                pmu->pmu_disable = perf_pmu_nop_void;
        }
  
 +      if (!pmu->check_period)
 +              pmu->check_period = perf_event_nop_int;
 +
        if (!pmu->event_idx)
                pmu->event_idx = perf_event_idx_default;
  
@@@ -10320,14 -10318,28 +10334,28 @@@ perf_event_alloc(struct perf_event_att
                goto err_pmu;
  
        if (has_addr_filter(event)) {
-               event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
-                                                  sizeof(unsigned long),
-                                                  GFP_KERNEL);
-               if (!event->addr_filters_offs) {
+               event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+                                                   sizeof(struct perf_addr_filter_range),
+                                                   GFP_KERNEL);
+               if (!event->addr_filter_ranges) {
                        err = -ENOMEM;
                        goto err_per_task;
                }
  
+               /*
+                * Clone the parent's vma offsets: they are valid until exec()
+                * even if the mm is not shared with the parent.
+                */
+               if (event->parent) {
+                       struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+ 
+                       raw_spin_lock_irq(&ifh->lock);
+                       memcpy(event->addr_filter_ranges,
+                              event->parent->addr_filter_ranges,
+                              pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
+                       raw_spin_unlock_irq(&ifh->lock);
+               }
+ 
                /* force hw sync on the address filters */
                event->addr_filters_gen = 1;
        }
        return event;
  
  err_addr_filters:
-       kfree(event->addr_filters_offs);
+       kfree(event->addr_filter_ranges);
  
  err_per_task:
        exclusive_event_destroy(event);
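
From userspace, the path the new hook guards is the existing
PERF_EVENT_IOC_PERIOD ioctl. A minimal usage sketch (the event choice
is arbitrary; any sampling event exercises the same path):

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  int main(void)
  {
          struct perf_event_attr attr;
          __u64 period = 4096;
          long fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.sample_period = 100000;

          /* No glibc wrapper exists, so invoke perf_event_open directly. */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          /* With this merge, the PMU may reject the new value via
           * pmu::check_period, in which case the ioctl returns -EINVAL. */
          if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
                  perror("PERF_EVENT_IOC_PERIOD");

          close(fd);
          return 0;
  }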