Git Repo - linux.git/commitdiff
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux...
authorIngo Molnar <[email protected]>
Mon, 21 May 2012 07:17:31 +0000 (09:17 +0200)
committerIngo Molnar <[email protected]>
Mon, 21 May 2012 07:17:50 +0000 (09:17 +0200)
Fixes for perf/core:

 - Rename some perf_target methods to avoid double negation, from Namhyung Kim.
 - Revert change to use per task events with inheritance, from Namhyung Kim.
 - Events should start disabled until children start running, from David Ahern.

Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1  2 
kernel/trace/trace.c

diff --combined kernel/trace/trace.c
index 08a08bab57a3f104d658291fd873ade6b786d34b,509e8615f5049beae545947c45391a393d432a57..33ae2f196fa3f820ab03500a6d834bc3b205b5df
@@@ -2669,12 -2669,10 +2669,12 @@@ tracing_cpumask_write(struct file *filp
                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
 +                      ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
 +                      ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
                }
        }
        arch_spin_unlock(&ftrace_max_lock);
@@@ -3078,10 -3076,20 +3078,10 @@@ static int __tracing_resize_ring_buffer
  
  static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
  {
 -      int cpu, ret = size;
 +      int ret = size;
  
        mutex_lock(&trace_types_lock);
  
 -      tracing_stop();
 -
 -      /* disable all cpu buffers */
 -      for_each_tracing_cpu(cpu) {
 -              if (global_trace.data[cpu])
 -                      atomic_inc(&global_trace.data[cpu]->disabled);
 -              if (max_tr.data[cpu])
 -                      atomic_inc(&max_tr.data[cpu]->disabled);
 -      }
 -
        if (cpu_id != RING_BUFFER_ALL_CPUS) {
                /* make sure, this cpu is enabled in the mask */
                if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
                ret = -ENOMEM;
  
  out:
 -      for_each_tracing_cpu(cpu) {
 -              if (global_trace.data[cpu])
 -                      atomic_dec(&global_trace.data[cpu]->disabled);
 -              if (max_tr.data[cpu])
 -                      atomic_dec(&max_tr.data[cpu]->disabled);
 -      }
 -
 -      tracing_start();
        mutex_unlock(&trace_types_lock);
  
        return ret;
@@@ -3859,14 -3875,14 +3859,14 @@@ tracing_mark_write(struct file *filp, c
        struct print_entry *entry;
        unsigned long irq_flags;
        struct page *pages[2];
 +      void *map_page[2];
        int nr_pages = 1;
        ssize_t written;
 -      void *page1;
 -      void *page2;
        int offset;
        int size;
        int len;
        int ret;
 +      int i;
  
        if (tracing_disabled)
                return -EINVAL;
                goto out;
        }
  
 -      page1 = kmap_atomic(pages[0]);
 -      if (nr_pages == 2)
 -              page2 = kmap_atomic(pages[1]);
 +      for (i = 0; i < nr_pages; i++)
 +              map_page[i] = kmap_atomic(pages[i]);
  
        local_save_flags(irq_flags);
        size = sizeof(*entry) + cnt + 2; /* possible \n added */
  
        if (nr_pages == 2) {
                len = PAGE_SIZE - offset;
 -              memcpy(&entry->buf, page1 + offset, len);
 -              memcpy(&entry->buf[len], page2, cnt - len);
 +              memcpy(&entry->buf, map_page[0] + offset, len);
 +              memcpy(&entry->buf[len], map_page[1], cnt - len);
        } else
 -              memcpy(&entry->buf, page1 + offset, cnt);
 +              memcpy(&entry->buf, map_page[0] + offset, cnt);
  
        if (entry->buf[cnt - 1] != '\n') {
                entry->buf[cnt] = '\n';
        *fpos += written;
  
   out_unlock:
 -      if (nr_pages == 2)
 -              kunmap_atomic(page2);
 -      kunmap_atomic(page1);
 -      while (nr_pages > 0)
 -              put_page(pages[--nr_pages]);
 +      for (i = 0; i < nr_pages; i++){
 +              kunmap_atomic(map_page[i]);
 +              put_page(pages[i]);
 +      }
   out:
        return written;
  }
@@@ -4476,9 -4494,6 +4476,9 @@@ static void tracing_init_debugfs_percpu
        struct dentry *d_cpu;
        char cpu_dir[30]; /* 30 characters should be more than enough */
  
 +      if (!d_percpu)
 +              return;
 +
        snprintf(cpu_dir, 30, "cpu%ld", cpu);
        d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
        if (!d_cpu) {
@@@ -4744,7 -4759,8 +4744,8 @@@ static ssize_
  rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
  {
-       struct ring_buffer *buffer = filp->private_data;
+       struct trace_array *tr = filp->private_data;
+       struct ring_buffer *buffer = tr->buffer;
        char buf[64];
        int r;
  
@@@ -4762,7 -4778,8 +4763,8 @@@ static ssize_
  rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
  {
-       struct ring_buffer *buffer = filp->private_data;
+       struct trace_array *tr = filp->private_data;
+       struct ring_buffer *buffer = tr->buffer;
        unsigned long val;
        int ret;
  
@@@ -4849,7 -4866,7 +4851,7 @@@ static __init int tracer_init_debugfs(v
                          &trace_clock_fops);
  
        trace_create_file("tracing_on", 0644, d_tracer,
-                           global_trace.buffer, &rb_simple_fops);
+                           &global_trace, &rb_simple_fops);
  
  #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
This page took 0.075127 seconds and 4 git commands to generate.