Merge remote-tracking branch 'tip/perf/urgent' into perf/core
author Arnaldo Carvalho de Melo <[email protected]>
Fri, 18 May 2012 16:13:33 +0000 (13:13 -0300)
committer Arnaldo Carvalho de Melo <[email protected]>
Fri, 18 May 2012 16:13:33 +0000 (13:13 -0300)
Merge reason: We are going to queue up a dependent patch:

"perf tools: Move parse event automated tests to separated object"

That depends on:

commit e7c72d8
perf tools: Add 'G' and 'H' modifiers to event parsing

Conflicts:
tools/perf/builtin-stat.c

The conflict was with the recent 'perf_target' patches, in the code that
checks the result of the perf_evsel open routines to see whether a retry
is needed to cope with older kernels where the exclude guest/host
perf_event_attr bits were not used.

Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
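
As an aside, the retry logic the resolution keeps (see the
create_perf_stat_counter() hunk in the builtin-stat.c diff below) boils
down to: open the counter with the exclude_guest/exclude_host bits set,
and if that fails with EINVAL assume an older kernel, clear the bits and
retry once. The following is a minimal, self-contained C sketch of that
pattern only; struct fake_attr, open_counter() and open_with_fallback()
are hypothetical stand-ins (not perf code) so the sketch builds and runs
on its own.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for perf_event_attr, just the two bits of interest. */
struct fake_attr {
	bool exclude_guest;
	bool exclude_host;
};

/* Stub open routine: pretend the running kernel rejects the guest/host bits. */
static int open_counter(struct fake_attr *attr)
{
	if (attr->exclude_guest || attr->exclude_host) {
		errno = EINVAL;
		return -1;
	}
	return 0;	/* counter "opened" fine */
}

/* The retry pattern: on EINVAL, drop the unsupported bits and try once more. */
static int open_with_fallback(struct fake_attr *attr)
{
	bool exclude_guest_missing = false;
	int ret;
retry:
	if (exclude_guest_missing)
		attr->exclude_guest = attr->exclude_host = false;

	ret = open_counter(attr);
	if (ret && errno == EINVAL && !exclude_guest_missing &&
	    (attr->exclude_guest || attr->exclude_host)) {
		fprintf(stderr, "Old kernel, cannot exclude guest or host samples.\n");
		exclude_guest_missing = true;
		goto retry;
	}
	return ret;
}

int main(void)
{
	struct fake_attr attr = { .exclude_guest = true, .exclude_host = false };

	return open_with_fallback(&attr) ? 1 : 0;
}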
Makefile
kernel/events/core.c
kernel/sched/core.c
kernel/trace/trace.c
kernel/trace/trace.h
tools/perf/Makefile
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/util/header.c

diff --combined Makefile
index 2280dff31b62fab631d1ca9e6407a1b5137e0be8,a06ee9fa8022ca6e102a3b649afc8dade403bc9f..606048ac9d2a55822f729dccf600047a2e9bf0aa
+++ b/Makefile
@@@ -1,7 -1,7 +1,7 @@@
  VERSION = 3
  PATCHLEVEL = 4
  SUBLEVEL = 0
- EXTRAVERSION = -rc4
+ EXTRAVERSION = -rc5
  NAME = Saber-toothed Squirrel
  
  # *DOCUMENTATION*
@@@ -1468,13 -1468,6 +1468,13 @@@ kernelrelease
  kernelversion:
        @echo $(KERNELVERSION)
  
 +# Clear a bunch of variables before executing the submake
 +tools/: FORCE
 +      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
 +
 +tools/%: FORCE
 +      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
 +
  # Single targets
  # ---------------------------------------------------------------------------
  # Single targets are compatible with:
diff --combined kernel/events/core.c
index e82c7a1face9c285c5a806c98b7a7d2f0509830e,fd126f82b57cc77db01c5fd42e3e30b230d98a30..91a4459258558a5bc9deef879884b827c777edab
@@@ -2039,8 -2039,8 +2039,8 @@@ static void perf_event_context_sched_ou
   * accessing the event control register. If a NMI hits, then it will
   * not restart the event.
   */
 -void __perf_event_task_sched_out(struct task_struct *task,
 -                               struct task_struct *next)
 +static void __perf_event_task_sched_out(struct task_struct *task,
 +                                      struct task_struct *next)
  {
        int ctxn;
  
@@@ -2279,8 -2279,8 +2279,8 @@@ static void perf_branch_stack_sched_in(
   * accessing the event control register. If a NMI hits, then it will
   * keep the event running.
   */
 -void __perf_event_task_sched_in(struct task_struct *prev,
 -                              struct task_struct *task)
 +static void __perf_event_task_sched_in(struct task_struct *prev,
 +                                     struct task_struct *task)
  {
        struct perf_event_context *ctx;
        int ctxn;
                perf_branch_stack_sched_in(prev, task);
  }
  
 +void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
 +{
 +      __perf_event_task_sched_out(prev, next);
 +      __perf_event_task_sched_in(prev, next);
 +}
 +
  static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
  {
        u64 frequency = event->attr.sample_freq;
@@@ -3189,7 -3183,7 +3189,7 @@@ static void perf_event_for_each(struct 
        perf_event_for_each_child(event, func);
        func(event);
        list_for_each_entry(sibling, &event->sibling_list, group_entry)
-               perf_event_for_each_child(event, func);
+               perf_event_for_each_child(sibling, func);
        mutex_unlock(&ctx->mutex);
  }
  
@@@ -4963,7 -4957,7 +4963,7 @@@ void __perf_sw_event(u32 event_id, u64 
        if (rctx < 0)
                return;
  
 -      perf_sample_data_init(&data, addr);
 +      perf_sample_data_init(&data, addr, 0);
  
        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
  
@@@ -5221,7 -5215,7 +5221,7 @@@ void perf_tp_event(u64 addr, u64 count
                .data = record,
        };
  
 -      perf_sample_data_init(&data, addr);
 +      perf_sample_data_init(&data, addr, 0);
        data.raw = &raw;
  
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@@ -5324,7 -5318,7 +5324,7 @@@ void perf_bp_event(struct perf_event *b
        struct perf_sample_data sample;
        struct pt_regs *regs = data;
  
 -      perf_sample_data_init(&sample, bp->attr.bp_addr);
 +      perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
  
        if (!bp->hw.state && !perf_exclude_event(bp, regs))
                perf_swevent_event(bp, 1, &sample, regs);
@@@ -5350,12 -5344,13 +5350,12 @@@ static enum hrtimer_restart perf_sweven
  
        event->pmu->read(event);
  
 -      perf_sample_data_init(&data, 0);
 -      data.period = event->hw.last_period;
 +      perf_sample_data_init(&data, 0, event->hw.last_period);
        regs = get_irq_regs();
  
        if (regs && !perf_exclude_event(event, regs)) {
                if (!(event->attr.exclude_idle && is_idle_task(current)))
 -                      if (perf_event_overflow(event, &data, regs))
 +                      if (__perf_event_overflow(event, 1, &data, regs))
                                ret = HRTIMER_NORESTART;
        }
  
diff --combined kernel/sched/core.c
index 5c692a0a555d01c2e869002021dd39df978e5682,0533a688ce22fc378dc66a02e901132049ee8efd..13c38837f2cdce4956d3612795c61705d9cf59d9
@@@ -1913,7 -1913,7 +1913,7 @@@ prepare_task_switch(struct rq *rq, stru
                    struct task_struct *next)
  {
        sched_info_switch(prev, next);
 -      perf_event_task_sched_out(prev, next);
 +      perf_event_task_sched(prev, next);
        fire_sched_out_preempt_notifiers(prev, next);
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
@@@ -1956,6 -1956,13 +1956,6 @@@ static void finish_task_switch(struct r
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
 -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 -      local_irq_disable();
 -#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 -      perf_event_task_sched_in(prev, current);
 -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 -      local_irq_enable();
 -#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
        finish_arch_post_lock_switch();
  
@@@ -6398,16 -6405,26 +6398,26 @@@ static void __sdt_free(const struct cpu
                struct sd_data *sdd = &tl->data;
  
                for_each_cpu(j, cpu_map) {
-                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-                       if (sd && (sd->flags & SD_OVERLAP))
-                               free_sched_groups(sd->groups, 0);
-                       kfree(*per_cpu_ptr(sdd->sd, j));
-                       kfree(*per_cpu_ptr(sdd->sg, j));
-                       kfree(*per_cpu_ptr(sdd->sgp, j));
+                       struct sched_domain *sd;
+                       if (sdd->sd) {
+                               sd = *per_cpu_ptr(sdd->sd, j);
+                               if (sd && (sd->flags & SD_OVERLAP))
+                                       free_sched_groups(sd->groups, 0);
+                               kfree(*per_cpu_ptr(sdd->sd, j));
+                       }
+                       if (sdd->sg)
+                               kfree(*per_cpu_ptr(sdd->sg, j));
+                       if (sdd->sgp)
+                               kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
+               sdd->sd = NULL;
                free_percpu(sdd->sg);
+               sdd->sg = NULL;
                free_percpu(sdd->sgp);
+               sdd->sgp = NULL;
        }
  }
  
diff --combined kernel/trace/trace.c
index 48ef4960ec90f60b2352e1f232b677e1c5e01718,2a22255c10101c7a55939955a04a9834bbbb940a..509e8615f5049beae545947c45391a393d432a57
@@@ -87,6 -87,18 +87,6 @@@ static int tracing_disabled = 1
  
  DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  
 -static inline void ftrace_disable_cpu(void)
 -{
 -      preempt_disable();
 -      __this_cpu_inc(ftrace_cpu_disabled);
 -}
 -
 -static inline void ftrace_enable_cpu(void)
 -{
 -      __this_cpu_dec(ftrace_cpu_disabled);
 -      preempt_enable();
 -}
 -
  cpumask_var_t __read_mostly   tracing_buffer_mask;
  
  /*
@@@ -617,6 -629,7 +617,6 @@@ ssize_t trace_seq_to_user(struct trace_
  static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  {
        int len;
 -      void *ret;
  
        if (s->len <= s->readpos)
                return -EBUSY;
        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
 -      ret = memcpy(buf, s->buffer + s->readpos, cnt);
 -      if (!ret)
 -              return -EFAULT;
 +      memcpy(buf, s->buffer + s->readpos, cnt);
  
        s->readpos += cnt;
        return cnt;
@@@ -736,6 -751,8 +736,6 @@@ update_max_tr_single(struct trace_arra
  
        arch_spin_lock(&ftrace_max_lock);
  
 -      ftrace_disable_cpu();
 -
        ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
  
        if (ret == -EBUSY) {
                        "Failed to swap buffers due to commit in progress\n");
        }
  
 -      ftrace_enable_cpu();
 -
        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
  
        __update_max_tr(tr, tsk, cpu);
@@@ -822,8 -841,7 +822,8 @@@ __acquires(kernel_lock
  
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded && type->use_max_tr)
 -                      ring_buffer_resize(max_tr.buffer, trace_buf_size);
 +                      ring_buffer_resize(max_tr.buffer, trace_buf_size,
 +                                              RING_BUFFER_ALL_CPUS);
  
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
  
                /* Shrink the max buffer again */
                if (ring_buffer_expanded && type->use_max_tr)
 -                      ring_buffer_resize(max_tr.buffer, 1);
 +                      ring_buffer_resize(max_tr.buffer, 1,
 +                                              RING_BUFFER_ALL_CPUS);
  
                printk(KERN_CONT "PASSED\n");
        }
@@@ -900,6 -917,13 +900,6 @@@ out
        mutex_unlock(&trace_types_lock);
  }
  
 -static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 -{
 -      ftrace_disable_cpu();
 -      ring_buffer_reset_cpu(buffer, cpu);
 -      ftrace_enable_cpu();
 -}
 -
  void tracing_reset(struct trace_array *tr, int cpu)
  {
        struct ring_buffer *buffer = tr->buffer;
  
        /* Make sure all commits have finished */
        synchronize_sched();
 -      __tracing_reset(buffer, cpu);
 +      ring_buffer_reset_cpu(buffer, cpu);
  
        ring_buffer_record_enable(buffer);
  }
@@@ -926,7 -950,7 +926,7 @@@ void tracing_reset_online_cpus(struct t
        tr->time_start = ftrace_now(tr->cpu);
  
        for_each_online_cpu(cpu)
 -              __tracing_reset(buffer, cpu);
 +              ring_buffer_reset_cpu(buffer, cpu);
  
        ring_buffer_record_enable(buffer);
  }
@@@ -1474,119 -1498,25 +1474,119 @@@ static void __trace_userstack(struct tr
  
  #endif /* CONFIG_STACKTRACE */
  
 +/* created for use with alloc_percpu */
 +struct trace_buffer_struct {
 +      char buffer[TRACE_BUF_SIZE];
 +};
 +
 +static struct trace_buffer_struct *trace_percpu_buffer;
 +static struct trace_buffer_struct *trace_percpu_sirq_buffer;
 +static struct trace_buffer_struct *trace_percpu_irq_buffer;
 +static struct trace_buffer_struct *trace_percpu_nmi_buffer;
 +
 +/*
 + * The buffer used is dependent on the context. There is a per cpu
 + * buffer for normal context, softirq context, hard irq context and
 + * for NMI context. This allows for lockless recording.
 + *
 + * Note, if the buffers failed to be allocated, then this returns NULL
 + */
 +static char *get_trace_buf(void)
 +{
 +      struct trace_buffer_struct *percpu_buffer;
 +      struct trace_buffer_struct *buffer;
 +
 +      /*
 +       * If we have allocated per cpu buffers, then we do not
 +       * need to do any locking.
 +       */
 +      if (in_nmi())
 +              percpu_buffer = trace_percpu_nmi_buffer;
 +      else if (in_irq())
 +              percpu_buffer = trace_percpu_irq_buffer;
 +      else if (in_softirq())
 +              percpu_buffer = trace_percpu_sirq_buffer;
 +      else
 +              percpu_buffer = trace_percpu_buffer;
 +
 +      if (!percpu_buffer)
 +              return NULL;
 +
 +      buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
 +
 +      return buffer->buffer;
 +}
 +
 +static int alloc_percpu_trace_buffer(void)
 +{
 +      struct trace_buffer_struct *buffers;
 +      struct trace_buffer_struct *sirq_buffers;
 +      struct trace_buffer_struct *irq_buffers;
 +      struct trace_buffer_struct *nmi_buffers;
 +
 +      buffers = alloc_percpu(struct trace_buffer_struct);
 +      if (!buffers)
 +              goto err_warn;
 +
 +      sirq_buffers = alloc_percpu(struct trace_buffer_struct);
 +      if (!sirq_buffers)
 +              goto err_sirq;
 +
 +      irq_buffers = alloc_percpu(struct trace_buffer_struct);
 +      if (!irq_buffers)
 +              goto err_irq;
 +
 +      nmi_buffers = alloc_percpu(struct trace_buffer_struct);
 +      if (!nmi_buffers)
 +              goto err_nmi;
 +
 +      trace_percpu_buffer = buffers;
 +      trace_percpu_sirq_buffer = sirq_buffers;
 +      trace_percpu_irq_buffer = irq_buffers;
 +      trace_percpu_nmi_buffer = nmi_buffers;
 +
 +      return 0;
 +
 + err_nmi:
 +      free_percpu(irq_buffers);
 + err_irq:
 +      free_percpu(sirq_buffers);
 + err_sirq:
 +      free_percpu(buffers);
 + err_warn:
 +      WARN(1, "Could not allocate percpu trace_printk buffer");
 +      return -ENOMEM;
 +}
 +
 +void trace_printk_init_buffers(void)
 +{
 +      static int buffers_allocated;
 +
 +      if (buffers_allocated)
 +              return;
 +
 +      if (alloc_percpu_trace_buffer())
 +              return;
 +
 +      pr_info("ftrace: Allocated trace_printk buffers\n");
 +
 +      buffers_allocated = 1;
 +}
 +
  /**
   * trace_vbprintk - write binary msg to tracing buffer
   *
   */
  int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  {
 -      static arch_spinlock_t trace_buf_lock =
 -              (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 -      static u32 trace_buf[TRACE_BUF_SIZE];
 -
        struct ftrace_event_call *call = &event_bprint;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_array *tr = &global_trace;
 -      struct trace_array_cpu *data;
        struct bprint_entry *entry;
        unsigned long flags;
 -      int disable;
 -      int cpu, len = 0, size, pc;
 +      char *tbuffer;
 +      int len = 0, size, pc;
  
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
  
        pc = preempt_count();
        preempt_disable_notrace();
 -      cpu = raw_smp_processor_id();
 -      data = tr->data[cpu];
  
 -      disable = atomic_inc_return(&data->disabled);
 -      if (unlikely(disable != 1))
 +      tbuffer = get_trace_buf();
 +      if (!tbuffer) {
 +              len = 0;
                goto out;
 +      }
  
 -      /* Lockdep uses trace_printk for lock tracing */
 -      local_irq_save(flags);
 -      arch_spin_lock(&trace_buf_lock);
 -      len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 +      len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
  
 -      if (len > TRACE_BUF_SIZE || len < 0)
 -              goto out_unlock;
 +      if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
 +              goto out;
  
 +      local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                          flags, pc);
        if (!event)
 -              goto out_unlock;
 +              goto out;
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->fmt                      = fmt;
  
 -      memcpy(entry->buf, trace_buf, sizeof(u32) * len);
 +      memcpy(entry->buf, tbuffer, sizeof(u32) * len);
        if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(buffer, flags, 6, pc);
        }
  
 -out_unlock:
 -      arch_spin_unlock(&trace_buf_lock);
 -      local_irq_restore(flags);
 -
  out:
 -      atomic_dec_return(&data->disabled);
        preempt_enable_notrace();
        unpause_graph_tracing();
  
@@@ -1651,53 -1588,58 +1651,53 @@@ int trace_array_printk(struct trace_arr
  int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
  {
 -      static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 -      static char trace_buf[TRACE_BUF_SIZE];
 -
        struct ftrace_event_call *call = &event_print;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
 -      struct trace_array_cpu *data;
 -      int cpu, len = 0, size, pc;
 +      int len = 0, size, pc;
        struct print_entry *entry;
 -      unsigned long irq_flags;
 -      int disable;
 +      unsigned long flags;
 +      char *tbuffer;
  
        if (tracing_disabled || tracing_selftest_running)
                return 0;
  
 +      /* Don't pollute graph traces with trace_vprintk internals */
 +      pause_graph_tracing();
 +
        pc = preempt_count();
        preempt_disable_notrace();
 -      cpu = raw_smp_processor_id();
 -      data = tr->data[cpu];
  
 -      disable = atomic_inc_return(&data->disabled);
 -      if (unlikely(disable != 1))
 +
 +      tbuffer = get_trace_buf();
 +      if (!tbuffer) {
 +              len = 0;
                goto out;
 +      }
  
 -      pause_graph_tracing();
 -      raw_local_irq_save(irq_flags);
 -      arch_spin_lock(&trace_buf_lock);
 -      len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 +      len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 +      if (len > TRACE_BUF_SIZE)
 +              goto out;
  
 +      local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 -                                        irq_flags, pc);
 +                                        flags, pc);
        if (!event)
 -              goto out_unlock;
 +              goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
  
 -      memcpy(&entry->buf, trace_buf, len);
 +      memcpy(&entry->buf, tbuffer, len);
        entry->buf[len] = '\0';
        if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
 -              ftrace_trace_stack(buffer, irq_flags, 6, pc);
 +              ftrace_trace_stack(buffer, flags, 6, pc);
        }
 -
 - out_unlock:
 -      arch_spin_unlock(&trace_buf_lock);
 -      raw_local_irq_restore(irq_flags);
 -      unpause_graph_tracing();
   out:
 -      atomic_dec_return(&data->disabled);
        preempt_enable_notrace();
 +      unpause_graph_tracing();
  
        return len;
  }
@@@ -1710,9 -1652,14 +1710,9 @@@ EXPORT_SYMBOL_GPL(trace_vprintk)
  
  static void trace_iterator_increment(struct trace_iterator *iter)
  {
 -      /* Don't allow ftrace to trace into the ring buffers */
 -      ftrace_disable_cpu();
 -
        iter->idx++;
        if (iter->buffer_iter[iter->cpu])
                ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
 -
 -      ftrace_enable_cpu();
  }
  
  static struct trace_entry *
@@@ -1722,12 -1669,17 +1722,12 @@@ peek_next_entry(struct trace_iterator *
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
  
 -      /* Don't allow ftrace to trace into the ring buffers */
 -      ftrace_disable_cpu();
 -
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
                event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
                                         lost_events);
  
 -      ftrace_enable_cpu();
 -
        if (event) {
                iter->ent_size = ring_buffer_event_length(event);
                return ring_buffer_event_data(event);
@@@ -1817,8 -1769,11 +1817,8 @@@ void *trace_find_next_entry_inc(struct 
  
  static void trace_consume(struct trace_iterator *iter)
  {
 -      /* Don't allow ftrace to trace into the ring buffers */
 -      ftrace_disable_cpu();
        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
                            &iter->lost_events);
 -      ftrace_enable_cpu();
  }
  
  static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@@ -1907,12 -1862,16 +1907,12 @@@ static void *s_start(struct seq_file *m
                iter->cpu = 0;
                iter->idx = -1;
  
 -              ftrace_disable_cpu();
 -
                if (cpu_file == TRACE_PIPE_ALL_CPU) {
                        for_each_tracing_cpu(cpu)
                                tracing_iter_reset(iter, cpu);
                } else
                        tracing_iter_reset(iter, cpu_file);
  
 -              ftrace_enable_cpu();
 -
                iter->leftover = 0;
                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;
@@@ -2373,13 -2332,15 +2373,13 @@@ static struct trace_iterator 
  __tracing_open(struct inode *inode, struct file *file)
  {
        long cpu_file = (long) inode->i_private;
 -      void *fail_ret = ERR_PTR(-ENOMEM);
        struct trace_iterator *iter;
 -      struct seq_file *m;
 -      int cpu, ret;
 +      int cpu;
  
        if (tracing_disabled)
                return ERR_PTR(-ENODEV);
  
 -      iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 +      iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
        if (!iter)
                return ERR_PTR(-ENOMEM);
  
                tracing_iter_reset(iter, cpu);
        }
  
 -      ret = seq_open(file, &tracer_seq_ops);
 -      if (ret < 0) {
 -              fail_ret = ERR_PTR(ret);
 -              goto fail_buffer;
 -      }
 -
 -      m = file->private_data;
 -      m->private = iter;
 -
        mutex_unlock(&trace_types_lock);
  
        return iter;
  
 - fail_buffer:
 -      for_each_tracing_cpu(cpu) {
 -              if (iter->buffer_iter[cpu])
 -                      ring_buffer_read_finish(iter->buffer_iter[cpu]);
 -      }
 -      free_cpumask_var(iter->started);
 -      tracing_start();
   fail:
        mutex_unlock(&trace_types_lock);
        kfree(iter->trace);
 -      kfree(iter);
 -
 -      return fail_ret;
 +      seq_release_private(inode, file);
 +      return ERR_PTR(-ENOMEM);
  }
  
  int tracing_open_generic(struct inode *inode, struct file *filp)
@@@ -2480,10 -2458,11 +2480,10 @@@ static int tracing_release(struct inod
        tracing_start();
        mutex_unlock(&trace_types_lock);
  
 -      seq_release(inode, file);
        mutex_destroy(&iter->mutex);
        free_cpumask_var(iter->started);
        kfree(iter->trace);
 -      kfree(iter);
 +      seq_release_private(inode, file);
        return 0;
  }
  
@@@ -2995,14 -2974,7 +2995,14 @@@ int tracer_init(struct tracer *t, struc
        return t->init(tr);
  }
  
 -static int __tracing_resize_ring_buffer(unsigned long size)
 +static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 +{
 +      int cpu;
 +      for_each_tracing_cpu(cpu)
 +              tr->data[cpu]->entries = val;
 +}
 +
 +static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
  {
        int ret;
  
         */
        ring_buffer_expanded = 1;
  
 -      ret = ring_buffer_resize(global_trace.buffer, size);
 +      ret = ring_buffer_resize(global_trace.buffer, size, cpu);
        if (ret < 0)
                return ret;
  
        if (!current_trace->use_max_tr)
                goto out;
  
 -      ret = ring_buffer_resize(max_tr.buffer, size);
 +      ret = ring_buffer_resize(max_tr.buffer, size, cpu);
        if (ret < 0) {
 -              int r;
 +              int r = 0;
 +
 +              if (cpu == RING_BUFFER_ALL_CPUS) {
 +                      int i;
 +                      for_each_tracing_cpu(i) {
 +                              r = ring_buffer_resize(global_trace.buffer,
 +                                              global_trace.data[i]->entries,
 +                                              i);
 +                              if (r < 0)
 +                                      break;
 +                      }
 +              } else {
 +                      r = ring_buffer_resize(global_trace.buffer,
 +                                              global_trace.data[cpu]->entries,
 +                                              cpu);
 +              }
  
 -              r = ring_buffer_resize(global_trace.buffer,
 -                                     global_trace.entries);
                if (r < 0) {
                        /*
                         * AARGH! We are left with different
                return ret;
        }
  
 -      max_tr.entries = size;
 +      if (cpu == RING_BUFFER_ALL_CPUS)
 +              set_buffer_entries(&max_tr, size);
 +      else
 +              max_tr.data[cpu]->entries = size;
 +
   out:
 -      global_trace.entries = size;
 +      if (cpu == RING_BUFFER_ALL_CPUS)
 +              set_buffer_entries(&global_trace, size);
 +      else
 +              global_trace.data[cpu]->entries = size;
  
        return ret;
  }
  
 -static ssize_t tracing_resize_ring_buffer(unsigned long size)
 +static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
  {
        int cpu, ret = size;
  
                        atomic_inc(&max_tr.data[cpu]->disabled);
        }
  
 -      if (size != global_trace.entries)
 -              ret = __tracing_resize_ring_buffer(size);
 +      if (cpu_id != RING_BUFFER_ALL_CPUS) {
 +              /* make sure, this cpu is enabled in the mask */
 +              if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
 +                      ret = -EINVAL;
 +                      goto out;
 +              }
 +      }
  
 +      ret = __tracing_resize_ring_buffer(size, cpu_id);
        if (ret < 0)
                ret = -ENOMEM;
  
 +out:
        for_each_tracing_cpu(cpu) {
                if (global_trace.data[cpu])
                        atomic_dec(&global_trace.data[cpu]->disabled);
@@@ -3133,8 -3078,7 +3133,8 @@@ int tracing_update_buffers(void
  
        mutex_lock(&trace_types_lock);
        if (!ring_buffer_expanded)
 -              ret = __tracing_resize_ring_buffer(trace_buf_size);
 +              ret = __tracing_resize_ring_buffer(trace_buf_size,
 +                                              RING_BUFFER_ALL_CPUS);
        mutex_unlock(&trace_types_lock);
  
        return ret;
@@@ -3158,8 -3102,7 +3158,8 @@@ static int tracing_set_tracer(const cha
        mutex_lock(&trace_types_lock);
  
        if (!ring_buffer_expanded) {
 -              ret = __tracing_resize_ring_buffer(trace_buf_size);
 +              ret = __tracing_resize_ring_buffer(trace_buf_size,
 +                                              RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        goto out;
                ret = 0;
                 * The max_tr ring buffer has some state (e.g. ring->clock) and
                 * we want preserve it.
                 */
 -              ring_buffer_resize(max_tr.buffer, 1);
 -              max_tr.entries = 1;
 +              ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 +              set_buffer_entries(&max_tr, 1);
        }
        destroy_trace_option_files(topts);
  
  
        topts = create_trace_option_files(current_trace);
        if (current_trace->use_max_tr) {
 -              ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
 -              if (ret < 0)
 -                      goto out;
 -              max_tr.entries = global_trace.entries;
 +              int cpu;
 +              /* we need to make per cpu buffer sizes equivalent */
 +              for_each_tracing_cpu(cpu) {
 +                      ret = ring_buffer_resize(max_tr.buffer,
 +                                              global_trace.data[cpu]->entries,
 +                                              cpu);
 +                      if (ret < 0)
 +                              goto out;
 +                      max_tr.data[cpu]->entries =
 +                                      global_trace.data[cpu]->entries;
 +              }
        }
  
        if (t->init) {
@@@ -3706,82 -3642,30 +3706,82 @@@ out_err
        goto out;
  }
  
 +struct ftrace_entries_info {
 +      struct trace_array      *tr;
 +      int                     cpu;
 +};
 +
 +static int tracing_entries_open(struct inode *inode, struct file *filp)
 +{
 +      struct ftrace_entries_info *info;
 +
 +      if (tracing_disabled)
 +              return -ENODEV;
 +
 +      info = kzalloc(sizeof(*info), GFP_KERNEL);
 +      if (!info)
 +              return -ENOMEM;
 +
 +      info->tr = &global_trace;
 +      info->cpu = (unsigned long)inode->i_private;
 +
 +      filp->private_data = info;
 +
 +      return 0;
 +}
 +
  static ssize_t
  tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
  {
 -      struct trace_array *tr = filp->private_data;
 -      char buf[96];
 -      int r;
 +      struct ftrace_entries_info *info = filp->private_data;
 +      struct trace_array *tr = info->tr;
 +      char buf[64];
 +      int r = 0;
 +      ssize_t ret;
  
        mutex_lock(&trace_types_lock);
 -      if (!ring_buffer_expanded)
 -              r = sprintf(buf, "%lu (expanded: %lu)\n",
 -                          tr->entries >> 10,
 -                          trace_buf_size >> 10);
 -      else
 -              r = sprintf(buf, "%lu\n", tr->entries >> 10);
 +
 +      if (info->cpu == RING_BUFFER_ALL_CPUS) {
 +              int cpu, buf_size_same;
 +              unsigned long size;
 +
 +              size = 0;
 +              buf_size_same = 1;
 +              /* check if all cpu sizes are same */
 +              for_each_tracing_cpu(cpu) {
 +                      /* fill in the size from first enabled cpu */
 +                      if (size == 0)
 +                              size = tr->data[cpu]->entries;
 +                      if (size != tr->data[cpu]->entries) {
 +                              buf_size_same = 0;
 +                              break;
 +                      }
 +              }
 +
 +              if (buf_size_same) {
 +                      if (!ring_buffer_expanded)
 +                              r = sprintf(buf, "%lu (expanded: %lu)\n",
 +                                          size >> 10,
 +                                          trace_buf_size >> 10);
 +                      else
 +                              r = sprintf(buf, "%lu\n", size >> 10);
 +              } else
 +                      r = sprintf(buf, "X\n");
 +      } else
 +              r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
 +
        mutex_unlock(&trace_types_lock);
  
 -      return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 +      ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 +      return ret;
  }
  
  static ssize_t
  tracing_entries_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
  {
 +      struct ftrace_entries_info *info = filp->private_data;
        unsigned long val;
        int ret;
  
        /* value is in KB */
        val <<= 10;
  
 -      ret = tracing_resize_ring_buffer(val);
 +      ret = tracing_resize_ring_buffer(val, info->cpu);
        if (ret < 0)
                return ret;
  
        return cnt;
  }
  
 +static int
 +tracing_entries_release(struct inode *inode, struct file *filp)
 +{
 +      struct ftrace_entries_info *info = filp->private_data;
 +
 +      kfree(info);
 +
 +      return 0;
 +}
 +
  static ssize_t
  tracing_total_entries_read(struct file *filp, char __user *ubuf,
                                size_t cnt, loff_t *ppos)
  
        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
 -              size += tr->entries >> 10;
 +              size += tr->data[cpu]->entries >> 10;
                if (!ring_buffer_expanded)
                        expanded_size += trace_buf_size >> 10;
        }
@@@ -3860,7 -3734,7 +3860,7 @@@ tracing_free_buffer_release(struct inod
        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
                tracing_off();
        /* resize the ring buffer to 0 */
 -      tracing_resize_ring_buffer(0);
 +      tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
  
        return 0;
  }
@@@ -4059,10 -3933,9 +4059,10 @@@ static const struct file_operations tra
  };
  
  static const struct file_operations tracing_entries_fops = {
 -      .open           = tracing_open_generic,
 +      .open           = tracing_entries_open,
        .read           = tracing_entries_read,
        .write          = tracing_entries_write,
 +      .release        = tracing_entries_release,
        .llseek         = generic_file_llseek,
  };
  
@@@ -4514,9 -4387,6 +4514,9 @@@ static void tracing_init_debugfs_percpu
  
        trace_create_file("stats", 0444, d_cpu,
                        (void *) cpu, &tracing_stats_fops);
 +
 +      trace_create_file("buffer_size_kb", 0444, d_cpu,
 +                      (void *) cpu, &tracing_entries_fops);
  }
  
  #ifdef CONFIG_FTRACE_SELFTEST
@@@ -4759,7 -4629,8 +4759,8 @@@ static ssize_
  rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
  {
-       struct ring_buffer *buffer = filp->private_data;
+       struct trace_array *tr = filp->private_data;
+       struct ring_buffer *buffer = tr->buffer;
        char buf[64];
        int r;
  
@@@ -4777,7 -4648,8 +4778,8 @@@ static ssize_
  rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
  {
-       struct ring_buffer *buffer = filp->private_data;
+       struct trace_array *tr = filp->private_data;
+       struct ring_buffer *buffer = tr->buffer;
        unsigned long val;
        int ret;
  
@@@ -4846,7 -4718,7 +4848,7 @@@ static __init int tracer_init_debugfs(v
                        (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
  
        trace_create_file("buffer_size_kb", 0644, d_tracer,
 -                      &global_trace, &tracing_entries_fops);
 +                      (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
  
        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                        &global_trace, &tracing_total_entries_fops);
                          &trace_clock_fops);
  
        trace_create_file("tracing_on", 0644, d_tracer,
-                           global_trace.buffer, &rb_simple_fops);
+                           &global_trace, &rb_simple_fops);
  
  #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@@ -5085,10 -4957,6 +5087,10 @@@ __init static int tracer_alloc_buffers(
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
  
 +      /* Only allocate trace_printk buffers if a trace_printk exists */
 +      if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
 +              trace_printk_init_buffers();
 +
        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
                WARN_ON(1);
                goto out_free_cpumask;
        }
 -      global_trace.entries = ring_buffer_size(global_trace.buffer);
        if (global_trace.buffer_disabled)
                tracing_off();
  
                ring_buffer_free(global_trace.buffer);
                goto out_free_cpumask;
        }
 -      max_tr.entries = 1;
  #endif
  
        /* Allocate the first page for all buffers */
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_tr_data, i);
        }
 +
 +      set_buffer_entries(&global_trace, ring_buf_size);
 +#ifdef CONFIG_TRACER_MAX_TRACE
 +      set_buffer_entries(&max_tr, 1);
 +#endif
  
        trace_init_cmdlines();
  
diff --combined kernel/trace/trace.h
index 1c8b7c6f7b3b11bb978362a30bfaf5698ba4f9b8,f95d65da6db8acaba3498616bbac09643d48ed47..6c6f7933eede3ec1437f0ed1775d903b85ccb44f
@@@ -131,7 -131,6 +131,7 @@@ struct trace_array_cpu 
        atomic_t                disabled;
        void                    *buffer_page;   /* ring buffer spare */
  
 +      unsigned long           entries;
        unsigned long           saved_latency;
        unsigned long           critical_start;
        unsigned long           critical_end;
   */
  struct trace_array {
        struct ring_buffer      *buffer;
 -      unsigned long           entries;
        int                     cpu;
        int                     buffer_disabled;
        cycle_t                 time_start;
@@@ -826,8 -826,6 +826,8 @@@ extern struct list_head ftrace_events
  extern const char *__start___trace_bprintk_fmt[];
  extern const char *__stop___trace_bprintk_fmt[];
  
 +void trace_printk_init_buffers(void);
 +
  #undef FTRACE_ENTRY
  #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)   \
        extern struct ftrace_event_call                                 \
                     filter)
  #include "trace_entries.h"
  
- #ifdef CONFIG_FUNCTION_TRACER
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
  int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data);
  #else
  #define perf_ftrace_event_register NULL
- #endif /* CONFIG_FUNCTION_TRACER */
+ #endif
  
  #endif /* _LINUX_KERNEL_TRACE_H */
diff --combined tools/perf/Makefile
index 4734f41f801d608f3b13cf04f1966bc87c6efdcc,92271d32bc30b2056c472f273b1af6bed9847b31..398094c3d3c997f51d91083d4a4b6ee9e192f785
@@@ -1,10 -1,18 +1,10 @@@
 -ifeq ("$(origin O)", "command line")
 -      OUTPUT := $(O)/
 -endif
 +include ../scripts/Makefile.include
  
  # The default target of this Makefile is...
  all:
  
  include config/utilities.mak
  
 -ifneq ($(OUTPUT),)
 -# check that the output directory actually exists
 -OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
 -$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 -endif
 -
  # Define V to have a more verbose compile.
  #
  # Define O to save output files in a separate directory.
@@@ -76,6 -84,31 +76,6 @@@ ifneq ($(WERROR),0
        CFLAGS_WERROR := -Werror
  endif
  
 -#
 -# Include saner warnings here, which can catch bugs:
 -#
 -
 -EXTRA_WARNINGS := -Wformat
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
 -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
 -
  ifeq ("$(origin DEBUG)", "command line")
    PERF_DEBUG = $(DEBUG)
  endif
@@@ -201,8 -234,8 +201,8 @@@ endi
  
  export PERL_PATH
  
- FLEX = $(CROSS_COMPILE)flex
- BISON= $(CROSS_COMPILE)bison
+ FLEX = flex
+ BISON= bison
  
  $(OUTPUT)util/parse-events-flex.c: util/parse-events.l
        $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
@@@ -300,7 -333,6 +300,7 @@@ LIB_H += util/cpumap.
  LIB_H += util/top.h
  LIB_H += $(ARCH_INCLUDE)
  LIB_H += util/cgroup.h
 +LIB_H += util/target.h
  
  LIB_OBJS += $(OUTPUT)util/abspath.o
  LIB_OBJS += $(OUTPUT)util/alias.o
@@@ -362,7 -394,6 +362,7 @@@ LIB_OBJS += $(OUTPUT)util/util.
  LIB_OBJS += $(OUTPUT)util/xyarray.o
  LIB_OBJS += $(OUTPUT)util/cpumap.o
  LIB_OBJS += $(OUTPUT)util/cgroup.o
 +LIB_OBJS += $(OUTPUT)util/target.o
  
  BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
  
@@@ -475,23 -506,22 +475,23 @@@ els
                # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
                BASIC_CFLAGS += -I/usr/include/slang
                EXTLIBS += -lnewt -lslang
 -              LIB_OBJS += $(OUTPUT)util/ui/setup.o
 -              LIB_OBJS += $(OUTPUT)util/ui/browser.o
 -              LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
 -              LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
 -              LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
 -              LIB_OBJS += $(OUTPUT)util/ui/helpline.o
 -              LIB_OBJS += $(OUTPUT)util/ui/progress.o
 -              LIB_OBJS += $(OUTPUT)util/ui/util.o
 -              LIB_H += util/ui/browser.h
 -              LIB_H += util/ui/browsers/map.h
 -              LIB_H += util/ui/helpline.h
 -              LIB_H += util/ui/keysyms.h
 -              LIB_H += util/ui/libslang.h
 -              LIB_H += util/ui/progress.h
 -              LIB_H += util/ui/util.h
 -              LIB_H += util/ui/ui.h
 +              LIB_OBJS += $(OUTPUT)ui/setup.o
 +              LIB_OBJS += $(OUTPUT)ui/browser.o
 +              LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
 +              LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
 +              LIB_OBJS += $(OUTPUT)ui/browsers/map.o
 +              LIB_OBJS += $(OUTPUT)ui/helpline.o
 +              LIB_OBJS += $(OUTPUT)ui/progress.o
 +              LIB_OBJS += $(OUTPUT)ui/util.o
 +              LIB_OBJS += $(OUTPUT)ui/tui/setup.o
 +              LIB_H += ui/browser.h
 +              LIB_H += ui/browsers/map.h
 +              LIB_H += ui/helpline.h
 +              LIB_H += ui/keysyms.h
 +              LIB_H += ui/libslang.h
 +              LIB_H += ui/progress.h
 +              LIB_H += ui/util.h
 +              LIB_H += ui/ui.h
        endif
  endif
  
@@@ -505,12 -535,7 +505,12 @@@ els
        else
                BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
                EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
 -              LIB_OBJS += $(OUTPUT)util/gtk/browser.o
 +              LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
 +              LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
 +              # Make sure that it'd be included only once.
 +              ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
 +                      LIB_OBJS += $(OUTPUT)ui/setup.o
 +              endif
        endif
  endif
  
@@@ -653,6 -678,18 +653,6 @@@ els
        endif
  endif
  
 -ifneq ($(findstring $(MAKEFLAGS),s),s)
 -ifndef V
 -      QUIET_CC       = @echo '   ' CC $@;
 -      QUIET_AR       = @echo '   ' AR $@;
 -      QUIET_LINK     = @echo '   ' LINK $@;
 -      QUIET_MKDIR    = @echo '   ' MKDIR $@;
 -      QUIET_GEN      = @echo '   ' GEN $@;
 -      QUIET_FLEX     = @echo '   ' FLEX $@;
 -      QUIET_BISON    = @echo '   ' BISON $@;
 -endif
 -endif
 -
  ifdef ASCIIDOC8
        export ASCIIDOC8
  endif
@@@ -737,10 -774,10 +737,10 @@@ $(OUTPUT)perf.o perf.spec 
  # over the general rule for .o
  
  $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
-       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
  
  $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
-       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
  
  $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
@@@ -763,16 -800,16 +763,16 @@@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd
  $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
  
 -$(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
 +$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
  
 -$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
 +$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
  
 -$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
 +$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
  
 -$(OUTPUT)util/ui/browsers/map.o: util/ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
 +$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
  
  $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
diff --combined tools/perf/builtin-report.c
index 74776558ddfb705bce1e5d48582df2d08e1dc527,cdae9b2db1cc0ed270e536e6c3b368a353cd780a..d58e41445d0d6dcfb4ca26fbe36eb9b7e02c596b
@@@ -296,15 -296,12 +296,15 @@@ static size_t hists__fprintf_nr_sample_
  {
        size_t ret;
        char unit;
 -      unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
 +      unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
 +      u64 nr_events = self->stats.total_period;
  
 -      nr_events = convert_unit(nr_events, &unit);
 -      ret = fprintf(fp, "# Events: %lu%c", nr_events, unit);
 +      nr_samples = convert_unit(nr_samples, &unit);
 +      ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
        if (evname != NULL)
 -              ret += fprintf(fp, " %s", evname);
 +              ret += fprintf(fp, " of event '%s'", evname);
 +
 +      ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
        return ret + fprintf(fp, "\n#\n");
  }
  
@@@ -377,16 -374,23 +377,23 @@@ static int __cmd_report(struct perf_rep
            (kernel_map->dso->hit &&
             (kernel_kmap->ref_reloc_sym == NULL ||
              kernel_kmap->ref_reloc_sym->addr == 0))) {
-               const struct dso *kdso = kernel_map->dso;
+               const char *desc =
+                   "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+                   "can't be resolved.";
+               if (kernel_map) {
+                       const struct dso *kdso = kernel_map->dso;
+                       if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
+                               desc = "If some relocation was applied (e.g. "
+                                      "kexec) symbols may be misresolved.";
+                       }
+               }
  
                ui__warning(
  "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
  "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
  "Samples in kernel modules can't be resolved as well.\n\n",
-                           RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
- "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
- "can't be resolved." :
- "If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+               desc);
        }
  
        if (dump_trace) {
@@@ -676,10 -680,14 +683,10 @@@ int cmd_report(int argc, const char **a
  
        }
  
 -      if (strcmp(report.input_name, "-") != 0) {
 -              if (report.use_gtk)
 -                      perf_gtk_setup_browser(argc, argv, true);
 -              else
 -                      setup_browser(true);
 -      } else {
 +      if (strcmp(report.input_name, "-") != 0)
 +              setup_browser(true);
 +      else
                use_browser = 0;
 -      }
  
        /*
         * Only in the newt browser we are doing integrated annotation,
diff --combined tools/perf/builtin-stat.c
index 0f4b51ae4be749c70748d0b4e13e8bd96d448665,1e5e9b270f5e75197880175065ed6fba681a8de4..62ae30d34fa6c14734a5f171b52e7b3bb0979257
@@@ -173,23 -173,24 +173,23 @@@ static struct perf_event_attr very_very
  
  
  
 -struct perf_evlist            *evsel_list;
 +static struct perf_evlist     *evsel_list;
  
 -static bool                   system_wide                     =  false;
 -static int                    run_idx                         =  0;
 +static struct perf_target     target = {
 +      .uid    = UINT_MAX,
 +};
  
 +static int                    run_idx                         =  0;
  static int                    run_count                       =  1;
  static bool                   no_inherit                      = false;
  static bool                   scale                           =  true;
  static bool                   no_aggr                         = false;
 -static const char             *target_pid;
 -static const char             *target_tid;
  static pid_t                  child_pid                       = -1;
  static bool                   null_run                        =  false;
  static int                    detailed_run                    =  0;
  static bool                   sync_run                        =  false;
  static bool                   big_num                         =  true;
  static int                    big_num_opt                     =  -1;
 -static const char             *cpu_list;
  static const char             *csv_sep                        = NULL;
  static bool                   csv_output                      = false;
  static bool                   group                           = false;
@@@ -264,24 -265,26 +264,26 @@@ static double stddev_stats(struct stat
        return sqrt(variance_mean);
  }
  
 -struct stats                  runtime_nsecs_stats[MAX_NR_CPUS];
 -struct stats                  runtime_cycles_stats[MAX_NR_CPUS];
 -struct stats                  runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
 -struct stats                  runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
 -struct stats                  runtime_branches_stats[MAX_NR_CPUS];
 -struct stats                  runtime_cacherefs_stats[MAX_NR_CPUS];
 -struct stats                  runtime_l1_dcache_stats[MAX_NR_CPUS];
 -struct stats                  runtime_l1_icache_stats[MAX_NR_CPUS];
 -struct stats                  runtime_ll_cache_stats[MAX_NR_CPUS];
 -struct stats                  runtime_itlb_cache_stats[MAX_NR_CPUS];
 -struct stats                  runtime_dtlb_cache_stats[MAX_NR_CPUS];
 -struct stats                  walltime_nsecs_stats;
 +static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
 +static struct stats runtime_cycles_stats[MAX_NR_CPUS];
 +static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
 +static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
 +static struct stats runtime_branches_stats[MAX_NR_CPUS];
 +static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
 +static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
 +static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
 +static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
 +static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
 +static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
 +static struct stats walltime_nsecs_stats;
  
  static int create_perf_stat_counter(struct perf_evsel *evsel,
                                    struct perf_evsel *first)
  {
        struct perf_event_attr *attr = &evsel->attr;
        struct xyarray *group_fd = NULL;
+       bool exclude_guest_missing = false;
+       int ret;
  
        if (group && evsel != first)
                group_fd = first->fd;
  
        attr->inherit = !no_inherit;
  
-       if (perf_target__has_cpu(&target))
-               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
-                                               group, group_fd);
+ retry:
+       if (exclude_guest_missing)
+               evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
 -      if (system_wide) {
++      if (perf_target__has_cpu(&target)) {
+               ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
 -                                              group, group_fd);
++                                             group, group_fd);
+               if (ret)
+                       goto check_ret;
+               return 0;
+       }
 -      if (!target_pid && !target_tid && (!group || evsel == first)) {
 +      if (!perf_target__has_task(&target) && (!group || evsel == first)) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
  
-       return perf_evsel__open_per_thread(evsel, evsel_list->threads,
-                                          group, group_fd);
+       ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+                                         group, group_fd);
+       if (!ret)
+               return 0;
+       /* fall through */
+ check_ret:
+       if (ret && errno == EINVAL) {
+               if (!exclude_guest_missing &&
+                   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+                       pr_debug("Old kernel, cannot exclude "
+                                "guest or host samples.\n");
+                       exclude_guest_missing = true;
+                       goto retry;
+               }
+       }
+       return ret;
  }
  
  /*
@@@ -445,7 -471,7 +470,7 @@@ static int run_perf_stat(int argc __use
                        exit(-1);
                }
  
 -              if (!target_tid && !target_pid && !system_wide)
 +              if (perf_target__none(&target))
                        evsel_list->threads->map[0] = child_pid;
  
                /*
                                error("You may not have permission to collect %sstats.\n"
                                      "\t Consider tweaking"
                                      " /proc/sys/kernel/perf_event_paranoid or running as root.",
 -                                    system_wide ? "system-wide " : "");
 +                                    target.system_wide ? "system-wide " : "");
                        } else {
                                error("open_counter returned with %d (%s). "
                                      "/bin/dmesg may provide additional information.\n",
@@@ -972,14 -998,14 +997,14 @@@ static void print_stat(int argc, const 
        if (!csv_output) {
                fprintf(output, "\n");
                fprintf(output, " Performance counter stats for ");
 -              if (!target_pid && !target_tid) {
 +              if (!perf_target__has_task(&target)) {
                        fprintf(output, "\'%s", argv[0]);
                        for (i = 1; i < argc; i++)
                                fprintf(output, " %s", argv[i]);
 -              } else if (target_pid)
 -                      fprintf(output, "process id \'%s", target_pid);
 +              } else if (target.pid)
 +                      fprintf(output, "process id \'%s", target.pid);
                else
 -                      fprintf(output, "thread id \'%s", target_tid);
 +                      fprintf(output, "thread id \'%s", target.tid);
  
                fprintf(output, "\'");
                if (run_count > 1)
@@@ -1053,11 -1079,11 +1078,11 @@@ static const struct option options[] = 
                     "event filter", parse_filter),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
 -      OPT_STRING('p', "pid", &target_pid, "pid",
 +      OPT_STRING('p', "pid", &target.pid, "pid",
                   "stat events on existing process id"),
 -      OPT_STRING('t', "tid", &target_tid, "tid",
 +      OPT_STRING('t', "tid", &target.tid, "tid",
                   "stat events on existing thread id"),
 -      OPT_BOOLEAN('a', "all-cpus", &system_wide,
 +      OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
        OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 
                           "print large numbers with thousands\' separators",
                           stat__set_big_num),
 -      OPT_STRING('C', "cpu", &cpu_list, "cpu",
 +      OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
                    "list of cpus to monitor in system-wide"),
        OPT_BOOLEAN('A', "no-aggr", &no_aggr,
                    "disable CPU count aggregation"),
@@@ -1194,13 -1220,13 +1219,13 @@@ int cmd_stat(int argc, const char **arg
        } else if (big_num_opt == 0) /* User passed --no-big-num */
                big_num = false;
  
 -      if (!argc && !target_pid && !target_tid)
 +      if (!argc && !perf_target__has_task(&target))
                usage_with_options(stat_usage, options);
        if (run_count <= 0)
                usage_with_options(stat_usage, options);
  
        /* no_aggr, cgroup are for system-wide only */
 -      if ((no_aggr || nr_cgroups) && !system_wide) {
 +      if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
                fprintf(stderr, "both cgroup and no-aggregation "
                        "modes only available in system-wide mode\n");
  
        if (add_default_attributes())
                goto out;
  
 -      if (target_pid)
 -              target_tid = target_pid;
 +      perf_target__validate(&target);
  
 -      evsel_list->threads = thread_map__new_str(target_pid,
 -                                                target_tid, UINT_MAX);
 -      if (evsel_list->threads == NULL) {
 -              pr_err("Problems finding threads of monitor\n");
 -              usage_with_options(stat_usage, options);
 -      }
 -
 -      if (system_wide)
 -              evsel_list->cpus = cpu_map__new(cpu_list);
 -      else
 -              evsel_list->cpus = cpu_map__dummy_new();
 +      if (perf_evlist__create_maps(evsel_list, &target) < 0) {
 +              if (perf_target__has_task(&target))
 +                      pr_err("Problems finding threads of monitor\n");
 +              if (perf_target__has_cpu(&target))
 +                      perror("failed to parse CPUs map");
  
 -      if (evsel_list->cpus == NULL) {
 -              perror("failed to parse CPUs map");
                usage_with_options(stat_usage, options);
                return -1;
        }
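
The builtin-stat.c hunk above resolves the merge conflict with a one-shot fallback: if opening a counter fails with EINVAL while the event requested exclude_guest or exclude_host, those attr bits are cleared and the open is retried once, on the assumption that the running kernel predates them. A minimal standalone sketch of that pattern follows; fake_attr, fake_open and open_with_fallback are illustrative stand-ins, not the perf code itself.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for perf_event_attr's guest/host exclude bits. */
struct fake_attr {
	unsigned int exclude_guest : 1;
	unsigned int exclude_host  : 1;
};

/* Hypothetical open helper: pretend the running kernel is too old to
 * understand the exclude_guest/exclude_host bits and rejects them. */
static int fake_open(const struct fake_attr *attr)
{
	if (attr->exclude_guest || attr->exclude_host) {
		errno = EINVAL;
		return -1;
	}
	return 3;	/* pretend this is the counter fd */
}

static int open_with_fallback(struct fake_attr *attr)
{
	bool exclude_guest_missing = false;
	int fd;
retry:
	if (exclude_guest_missing) {
		attr->exclude_guest = 0;
		attr->exclude_host = 0;
	}
	fd = fake_open(attr);
	if (fd < 0 && errno == EINVAL && !exclude_guest_missing &&
	    (attr->exclude_guest || attr->exclude_host)) {
		fprintf(stderr, "Old kernel, cannot exclude guest or host samples.\n");
		exclude_guest_missing = true;
		goto retry;	/* retry exactly once, without the new bits */
	}
	return fd;
}

int main(void)
{
	struct fake_attr attr = { .exclude_guest = 1 };

	return open_with_fallback(&attr) < 0 ? 1 : 0;
}
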
diff --combined tools/perf/builtin-test.c
index 4eaa665fd32b9739c2aec448417dc8176fb56495,223ffdcc0fd8a730079f09205c45ecb4fbf5d341..6c47376e29d81b93983ae5fd6d8cbafa47f293f2
@@@ -851,6 -851,28 +851,28 @@@ static int test__checkevent_symbolic_na
        return test__checkevent_symbolic_name(evlist);
  }
  
+ static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist)
+ {
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+
+       return test__checkevent_symbolic_name(evlist);
+ }
+
+ static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist)
+ {
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+
+       return test__checkevent_symbolic_name(evlist);
+ }
+
  static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
  {
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@@ -1091,6 -1113,14 +1113,14 @@@ static struct test__event_st 
                .name  = "r1,syscalls:sys_enter_open:k,1:1:hp",
                .check = test__checkevent_list,
        },
+       {
+               .name  = "instructions:G",
+               .check = test__checkevent_exclude_host_modifier,
+       },
+       {
+               .name  = "instructions:H",
+               .check = test__checkevent_exclude_guest_modifier,
+       },
  };
  
  #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@@ -1165,10 -1195,6 +1195,10 @@@ realloc
  static int test__PERF_RECORD(void)
  {
        struct perf_record_opts opts = {
 +              .target = {
 +                      .uid = UINT_MAX,
 +                      .uses_mmap = true,
 +              },
                .no_delay   = true,
                .freq       = 10,
                .mmap_pages = 256,
         * perf_evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
 -      err = perf_evlist__create_maps(evlist, opts.target_pid,
 -                                     opts.target_tid, UINT_MAX, opts.cpu_list);
 +      err = perf_evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
@@@ -1552,6 -1579,8 +1582,6 @@@ static int __test__rdpmc(void
        sa.sa_sigaction = segfault_handler;
        sigaction(SIGSEGV, &sa, NULL);
  
 -      fprintf(stderr, "\n\n");
 -
        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                die("Error: sys_perf_event_open() syscall returned "
                loops *= 10;
  
                delta = now - stamp;
 -              fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta);
 +              pr_debug("%14d: %14Lu\n", n, (long long)delta);
  
                delta_sum += delta;
        }
        munmap(addr, page_size);
        close(fd);
  
 -      fprintf(stderr, "   ");
 +      pr_debug("   ");
  
        if (!delta_sum)
                return -1;
diff --combined tools/perf/util/header.c
index 6e618ba2138298d62b39dac22c3cdfd909709fdb,c0b70c697a363905d953b0af83202492c431ee80..538598012139cad703b063e7dd75607358c8bb2c
@@@ -31,16 -31,21 +31,16 @@@ static const char **header_argv
  
  int perf_header__push_event(u64 id, const char *name)
  {
 +      struct perf_trace_event_type *nevents;
 +
        if (strlen(name) > MAX_EVENT_NAME)
                pr_warning("Event %s will be truncated\n", name);
  
 -      if (!events) {
 -              events = malloc(sizeof(struct perf_trace_event_type));
 -              if (events == NULL)
 -                      return -ENOMEM;
 -      } else {
 -              struct perf_trace_event_type *nevents;
 +      nevents = realloc(events, (event_count + 1) * sizeof(*events));
 +      if (nevents == NULL)
 +              return -ENOMEM;
 +      events = nevents;
  
 -              nevents = realloc(events, (event_count + 1) * sizeof(*events));
 -              if (nevents == NULL)
 -                      return -ENOMEM;
 -              events = nevents;
 -      }
        memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
        events[event_count].event_id = id;
        strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
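
The perf_header__push_event cleanup above relies on the C library guarantee that realloc(NULL, size) behaves like malloc(size), which makes the separate first-allocation branch redundant. A minimal sketch of the same grow-by-one pattern; struct item and push_item are illustrative names, not perf's perf_trace_event_type handling.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct item {
	int  id;
	char name[32];
};

static struct item *items;	/* starts out NULL, like 'events' */
static size_t nr_items;

/* Grow the array by one slot and append; no separate malloc branch is
 * needed because realloc(NULL, n) is defined to behave like malloc(n). */
static int push_item(int id, const char *name)
{
	struct item *tmp = realloc(items, (nr_items + 1) * sizeof(*items));

	if (tmp == NULL)
		return -ENOMEM;
	items = tmp;

	memset(&items[nr_items], 0, sizeof(*items));
	items[nr_items].id = id;
	strncpy(items[nr_items].name, name, sizeof(items[nr_items].name) - 1);
	nr_items++;
	return 0;
}

int main(void)
{
	return push_item(1, "cycles") ? 1 : 0;
}
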
@@@ -291,7 -296,7 +291,7 @@@ int build_id_cache__add_s(const char *s
        if (mkdir_p(filename, 0755))
                goto out_free;
  
-       snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+       snprintf(filename + len, size - len, "/%s", sbuild_id);
  
        if (access(filename, F_OK)) {
                if (is_kallsyms) {
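
The final header.c hunk fixes a sizeof-on-pointer bug: filename is, as the change implies, a heap-allocated buffer of size bytes, so sizeof(filename) yields only the pointer size and the remaining-space argument passed to snprintf was wrong. A small standalone illustration of the difference; the paths and sizes here are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 256;
	char *filename = malloc(size);
	size_t len;

	if (filename == NULL)
		return 1;

	snprintf(filename, size, "%s", "/tmp/.debug/.build-id/ab");
	len = strlen(filename);

	/* Wrong: sizeof(filename) is the size of the pointer (typically 8),
	 * not the 256 bytes that were allocated, so an append bounded by
	 * sizeof(filename) - len would be silently truncated. */
	printf("sizeof(filename) = %zu, allocated size = %zu\n",
	       sizeof(filename), size);

	/* Right: use the explicitly tracked allocation size. */
	snprintf(filename + len, size - len, "/%s", "0123456789abcdef");
	printf("%s\n", filename);

	free(filename);
	return 0;
}
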