Git Repo - linux.git/commitdiff
Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/roste...
author Ingo Molnar <[email protected]>
Thu, 19 Aug 2010 10:48:09 +0000 (12:48 +0200)
committer Ingo Molnar <[email protected]>
Thu, 19 Aug 2010 10:48:09 +0000 (12:48 +0200)
include/linux/perf_event.h
kernel/perf_event.c
kernel/trace/trace_functions_graph.c

diff --combined include/linux/perf_event.h
index ae6fa6050925d5b73ea0640f1f2559c0dd086d5e,716f99b682c1a57fb3b6f1f72e90aec3982ca5fd..000610c4de710b1b55779025be349a09b76aeedb
@@@ -808,12 -808,6 +808,12 @@@ struct perf_event_context 
        struct rcu_head                 rcu_head;
  };
  
 +/*
 + * Number of contexts where an event can trigger:
 + *    task, softirq, hardirq, nmi.
 + */
 +#define PERF_NR_CONTEXTS      4
 +
  /**
   * struct perf_event_cpu_context - per cpu event context structure
   */
@@@ -827,8 -821,12 +827,8 @@@ struct perf_cpu_context 
        struct mutex                    hlist_mutex;
        int                             hlist_refcount;
  
 -      /*
 -       * Recursion avoidance:
 -       *
 -       * task, softirq, irq, nmi context
 -       */
 -      int                             recursion[4];
 +      /* Recursion avoidance in each contexts */
 +      int                             recursion[PERF_NR_CONTEXTS];
  };
  
  struct perf_output_handle {
@@@ -978,21 -976,7 +978,21 @@@ extern int perf_unregister_guest_info_c
  extern void perf_event_comm(struct task_struct *tsk);
  extern void perf_event_fork(struct task_struct *tsk);
  
 -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 +/* Callchains */
 +DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 +
 +extern void perf_callchain_user(struct perf_callchain_entry *entry,
 +                              struct pt_regs *regs);
 +extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
 +                                struct pt_regs *regs);
 +
 +
 +static inline void
 +perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 +{
 +      if (entry->nr < PERF_MAX_STACK_DEPTH)
 +              entry->ip[entry->nr++] = ip;
 +}
  
  extern int sysctl_perf_event_paranoid;
  extern int sysctl_perf_event_mlock;
@@@ -1083,7 -1067,7 +1083,7 @@@ static inline void perf_event_disable(s
  #define perf_cpu_notifier(fn)                                 \
  do {                                                          \
        static struct notifier_block fn##_nb __cpuinitdata =    \
-               { .notifier_call = fn, .priority = 20 };        \
+               { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
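
Note: the header changes above turn perf_callchain_kernel()/perf_callchain_user() into per-architecture hooks and add the perf_callchain_store() helper. Below is a minimal sketch, not taken from this merge, of how an architecture back-end might override the __weak kernel stub using that helper; arch_next_return_address() is a hypothetical stand-in for the architecture's real stack unwinder.

/*
 * Minimal sketch (assumption, not part of this merge): an architecture
 * provides a strong perf_callchain_kernel() that records return
 * addresses with perf_callchain_store(), which already enforces the
 * PERF_MAX_STACK_DEPTH limit.  arch_next_return_address() is
 * hypothetical and only illustrates the unwind loop.
 */
#include <linux/perf_event.h>
#include <linux/ptrace.h>

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);

	perf_callchain_store(entry, ip);

	/* walk the kernel stack via the (hypothetical) arch unwinder */
	while (arch_next_return_address(regs, &ip))
		perf_callchain_store(entry, ip);
}
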
diff --combined kernel/perf_event.c
index f416aef242c3325146c285da73f204d3f3018b4d,403d1804b198140e4f1355c70c0b25e6efa9e5d8..0d38f27ad8852cfabb050d05567c67b4c3df47eb
@@@ -214,7 -214,7 +214,7 @@@ static void perf_unpin_context(struct p
  
  static inline u64 perf_clock(void)
  {
-       return cpu_clock(raw_smp_processor_id());
+       return local_clock();
  }
  
  /*
@@@ -1763,216 -1763,6 +1763,216 @@@ static u64 perf_event_read(struct perf_
        return perf_event_count(event);
  }
  
 +/*
 + * Callchain support
 + */
 +
 +struct callchain_cpus_entries {
 +      struct rcu_head                 rcu_head;
 +      struct perf_callchain_entry     *cpu_entries[0];
 +};
 +
 +static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 +static atomic_t nr_callchain_events;
 +static DEFINE_MUTEX(callchain_mutex);
 +struct callchain_cpus_entries *callchain_cpus_entries;
 +
 +
 +__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
 +                                struct pt_regs *regs)
 +{
 +}
 +
 +__weak void perf_callchain_user(struct perf_callchain_entry *entry,
 +                              struct pt_regs *regs)
 +{
 +}
 +
 +static void release_callchain_buffers_rcu(struct rcu_head *head)
 +{
 +      struct callchain_cpus_entries *entries;
 +      int cpu;
 +
 +      entries = container_of(head, struct callchain_cpus_entries, rcu_head);
 +
 +      for_each_possible_cpu(cpu)
 +              kfree(entries->cpu_entries[cpu]);
 +
 +      kfree(entries);
 +}
 +
 +static void release_callchain_buffers(void)
 +{
 +      struct callchain_cpus_entries *entries;
 +
 +      entries = callchain_cpus_entries;
 +      rcu_assign_pointer(callchain_cpus_entries, NULL);
 +      call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
 +}
 +
 +static int alloc_callchain_buffers(void)
 +{
 +      int cpu;
 +      int size;
 +      struct callchain_cpus_entries *entries;
 +
 +      /*
 +       * We can't use the percpu allocation API for data that can be
 +       * accessed from NMI. Use a temporary manual per cpu allocation
 +       * until that gets sorted out.
 +       */
 +      size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
 +              num_possible_cpus();
 +
 +      entries = kzalloc(size, GFP_KERNEL);
 +      if (!entries)
 +              return -ENOMEM;
 +
 +      size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 +
 +      for_each_possible_cpu(cpu) {
 +              entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
 +                                                       cpu_to_node(cpu));
 +              if (!entries->cpu_entries[cpu])
 +                      goto fail;
 +      }
 +
 +      rcu_assign_pointer(callchain_cpus_entries, entries);
 +
 +      return 0;
 +
 +fail:
 +      for_each_possible_cpu(cpu)
 +              kfree(entries->cpu_entries[cpu]);
 +      kfree(entries);
 +
 +      return -ENOMEM;
 +}
 +
 +static int get_callchain_buffers(void)
 +{
 +      int err = 0;
 +      int count;
 +
 +      mutex_lock(&callchain_mutex);
 +
 +      count = atomic_inc_return(&nr_callchain_events);
 +      if (WARN_ON_ONCE(count < 1)) {
 +              err = -EINVAL;
 +              goto exit;
 +      }
 +
 +      if (count > 1) {
 +              /* If the allocation failed, give up */
 +              if (!callchain_cpus_entries)
 +                      err = -ENOMEM;
 +              goto exit;
 +      }
 +
 +      err = alloc_callchain_buffers();
 +      if (err)
 +              release_callchain_buffers();
 +exit:
 +      mutex_unlock(&callchain_mutex);
 +
 +      return err;
 +}
 +
 +static void put_callchain_buffers(void)
 +{
 +      if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
 +              release_callchain_buffers();
 +              mutex_unlock(&callchain_mutex);
 +      }
 +}
 +
 +static int get_recursion_context(int *recursion)
 +{
 +      int rctx;
 +
 +      if (in_nmi())
 +              rctx = 3;
 +      else if (in_irq())
 +              rctx = 2;
 +      else if (in_softirq())
 +              rctx = 1;
 +      else
 +              rctx = 0;
 +
 +      if (recursion[rctx])
 +              return -1;
 +
 +      recursion[rctx]++;
 +      barrier();
 +
 +      return rctx;
 +}
 +
 +static inline void put_recursion_context(int *recursion, int rctx)
 +{
 +      barrier();
 +      recursion[rctx]--;
 +}
 +
 +static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 +{
 +      int cpu;
 +      struct callchain_cpus_entries *entries;
 +
 +      *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
 +      if (*rctx == -1)
 +              return NULL;
 +
 +      entries = rcu_dereference(callchain_cpus_entries);
 +      if (!entries)
 +              return NULL;
 +
 +      cpu = smp_processor_id();
 +
 +      return &entries->cpu_entries[cpu][*rctx];
 +}
 +
 +static void
 +put_callchain_entry(int rctx)
 +{
 +      put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
 +}
 +
 +static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 +{
 +      int rctx;
 +      struct perf_callchain_entry *entry;
 +
 +
 +      entry = get_callchain_entry(&rctx);
 +      if (rctx == -1)
 +              return NULL;
 +
 +      if (!entry)
 +              goto exit_put;
 +
 +      entry->nr = 0;
 +
 +      if (!user_mode(regs)) {
 +              perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 +              perf_callchain_kernel(entry, regs);
 +              if (current->mm)
 +                      regs = task_pt_regs(current);
 +              else
 +                      regs = NULL;
 +      }
 +
 +      if (regs) {
 +              perf_callchain_store(entry, PERF_CONTEXT_USER);
 +              perf_callchain_user(entry, regs);
 +      }
 +
 +exit_put:
 +      put_callchain_entry(rctx);
 +
 +      return entry;
 +}
 +
  /*
   * Initialize the perf_event context in a task_struct:
   */
@@@ -2105,8 -1895,6 +2105,8 @@@ static void free_event(struct perf_even
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
 +              if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
 +                      put_callchain_buffers();
        }
  
        if (event->buffer) {
@@@ -3149,6 -2937,16 +3149,6 @@@ void perf_event_do_pending(void
        __perf_pending_run();
  }
  
 -/*
 - * Callchain support -- arch specific
 - */
 -
 -__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 -{
 -      return NULL;
 -}
 -
 -
  /*
   * We assume there is only KVM supporting the callbacks.
   * Later on, we might change it to a list if there is
@@@ -3643,20 -3441,14 +3643,20 @@@ static void perf_event_output(struct pe
        struct perf_output_handle handle;
        struct perf_event_header header;
  
 +      /* protect the callchain buffers */
 +      rcu_read_lock();
 +
        perf_prepare_sample(&header, data, event, regs);
  
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
 -              return;
 +              goto exit;
  
        perf_output_sample(&handle, &header, data, event);
  
        perf_output_end(&handle);
 +
 +exit:
 +      rcu_read_unlock();
  }
  
  /*
  int perf_swevent_get_recursion_context(void)
  {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 -      int rctx;
  
 -      if (in_nmi())
 -              rctx = 3;
 -      else if (in_irq())
 -              rctx = 2;
 -      else if (in_softirq())
 -              rctx = 1;
 -      else
 -              rctx = 0;
 -
 -      if (cpuctx->recursion[rctx])
 -              return -1;
 -
 -      cpuctx->recursion[rctx]++;
 -      barrier();
 -
 -      return rctx;
 +      return get_recursion_context(cpuctx->recursion);
  }
  EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  
  void inline perf_swevent_put_recursion_context(int rctx)
  {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 -      barrier();
 -      cpuctx->recursion[rctx]--;
 +
 +      put_recursion_context(cpuctx->recursion, rctx);
  }
  
  void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@@ -5121,13 -4929,6 +5121,13 @@@ done
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
 +              if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 +                      err = get_callchain_buffers();
 +                      if (err) {
 +                              free_event(event);
 +                              return ERR_PTR(err);
 +                      }
 +              }
        }
  
        return event;
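
Note: the recursion test in kernel/perf_event.c is now factored into get_recursion_context()/put_recursion_context() and shared by the software-event path and the new callchain code. A rough usage sketch, not from the tree, of the exported wrapper pair; the caller shown here is illustrative only.

/*
 * Rough usage sketch (assumption, not part of this merge): callers
 * bracket their work with the exported pair so that at most one
 * software event per context level (task, softirq, hardirq, NMI)
 * is processed on a given CPU at a time.
 */
#include <linux/perf_event.h>

static void example_emit_sw_event(void)
{
	int rctx = perf_swevent_get_recursion_context();

	if (rctx < 0)
		return;		/* already nested at this context level */

	/* ... build and output the sample here ... */

	perf_swevent_put_recursion_context(rctx);
}
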
diff --combined kernel/trace/trace_functions_graph.c
index fcb5a542cd21f3a415eb012044b3ea9ec48c89ab,6f233698518ede15cc9302e889de9f108aa0f1cb..c93bcb248638976849d9b513caede7f9a049e83b
@@@ -23,7 -23,7 +23,7 @@@ struct fgraph_cpu_data 
  };
  
  struct fgraph_data {
 -      struct fgraph_cpu_data          *cpu_data;
 +      struct fgraph_cpu_data __percpu *cpu_data;
  
        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
@@@ -507,7 -507,15 +507,15 @@@ get_return_for_leaf(struct trace_iterat
                         * if the output fails.
                         */
                        data->ent = *curr;
-                       data->ret = *next;
+                       /*
+                        * If the next event is not a return type, then
+                        * we only care about what type it is. Otherwise we can
+                        * safely copy the entire event.
+                        */
+                       if (next->ent.type == TRACE_GRAPH_RET)
+                               data->ret = *next;
+                       else
+                               data->ret.ent.type = next->ent.type;
                }
        }
  
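
Note: the get_return_for_leaf() hunk above copies the peeked ring-buffer event in full only when it really is a TRACE_GRAPH_RET record; for any other record only the type is preserved for the later comparison. A simplified sketch of that guarded-copy pattern follows; example_ret_entry merely mirrors the layout of the real ftrace_graph_ret_entry and is not a type from the tree.

/*
 * Simplified sketch (assumption, not part of this merge): copy a peeked
 * event whole only when it is the expected return record; otherwise the
 * payload may belong to a different, smaller event, so keep just the
 * type field that later checks rely on.  struct trace_entry,
 * struct ftrace_graph_ret and TRACE_GRAPH_RET are assumed to come from
 * the ftrace internals (linux/ftrace.h, kernel/trace/trace.h).
 */
struct example_ret_entry {
	struct trace_entry	ent;	/* common header, carries ->type */
	struct ftrace_graph_ret	ret;	/* meaningful only for TRACE_GRAPH_RET */
};

static void save_peeked_entry(struct example_ret_entry *dst,
			      const struct example_ret_entry *next)
{
	if (next->ent.type == TRACE_GRAPH_RET)
		*dst = *next;			/* full return event */
	else
		dst->ent.type = next->ent.type;	/* only the type is valid */
}
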