Git Repo - linux.git/commitdiff
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Tue, 18 May 2010 15:19:03 +0000 (08:19 -0700)
committer Linus Torvalds <[email protected]>
Tue, 18 May 2010 15:19:03 +0000 (08:19 -0700)
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (311 commits)
  perf tools: Add mode to build without newt support
  perf symbols: symbol inconsistency message should be done only at verbose=1
  perf tui: Add explicit -lslang option
  perf options: Type check all the remaining OPT_ variants
  perf options: Type check OPT_BOOLEAN and fix the offenders
  perf options: Check v type in OPT_U?INTEGER
  perf options: Introduce OPT_UINTEGER
  perf tui: Add workaround for slang < 2.1.4
  perf record: Fix bug mismatch with -c option definition
  perf options: Introduce OPT_U64
  perf tui: Add help window to show key associations
  perf tui: Make <- exit menus too
  perf newt: Add single key shortcuts for zoom into DSO and threads
  perf newt: Exit browser unconditionally when CTRL+C, q or Q is pressed
  perf newt: Fix the 'A'/'a' shortcut for annotate
  perf newt: Make <- exit the ui_browser
  x86, perf: P4 PMU - fix counters management logic
  perf newt: Make <- zoom out filters
  perf report: Report number of events, not samples
  perf hist: Clarify events_stats fields usage
  ...

Fix up trivial conflicts in kernel/fork.c and tools/perf/builtin-record.c

14 files changed:
MAINTAINERS
arch/x86/kernel/kprobes.c
arch/x86/kernel/process.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/oprofile/op_model_ppro.c
include/linux/sched.h
include/trace/ftrace.h
kernel/fork.c
kernel/lockdep.c
kernel/ptrace.c
kernel/sched.c
kernel/trace/trace_selftest.c
tools/perf/util/trace-event-parse.c

diff --combined MAINTAINERS
index 033161805587c85ad62e4f5578662883d50fc6b1,5085c90a6ec8d4749962d8331409a2250bc7291e..28332e1b0863c2b19e86c913f3508b20db97a502
@@@ -4165,7 -4165,6 +4165,7 @@@ OPROFIL
  M:    Robert Richter <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    arch/*/include/asm/oprofile*.h
  F:    arch/*/oprofile/
  F:    drivers/oprofile/
  F:    include/linux/oprofile.h
@@@ -4354,13 -4353,13 +4354,13 @@@ M:   Paul Mackerras <[email protected]
  M:    Ingo Molnar <[email protected]>
  M:    Arnaldo Carvalho de Melo <[email protected]>
  S:    Supported
- F:    kernel/perf_event.c
+ F:    kernel/perf_event*.c
  F:    include/linux/perf_event.h
- F:    arch/*/kernel/perf_event.c
- F:    arch/*/kernel/*/perf_event.c
- F:    arch/*/kernel/*/*/perf_event.c
+ F:    arch/*/kernel/perf_event*.c
+ F:    arch/*/kernel/*/perf_event*.c
+ F:    arch/*/kernel/*/*/perf_event*.c
  F:    arch/*/include/asm/perf_event.h
- F:    arch/*/lib/perf_event.c
+ F:    arch/*/lib/perf_event*.c
  F:    arch/*/kernel/perf_callchain.c
  F:    tools/perf/
  
@@@ -5493,7 -5492,7 +5493,7 @@@ S:      Maintaine
  F:    drivers/mmc/host/tmio_mmc.*
  
  TMPFS (SHMEM FILESYSTEM)
 -M:    Hugh Dickins <hugh[email protected]>
 +M:    Hugh Dickins <hugh[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    include/linux/shmem_fs.h
diff --combined arch/x86/kernel/kprobes.c
index 1658efdfb4e5a617b8919de37ca7ca61c4a4306f,f2f56c0967b66e5723b293d41946084281fba223..345a4b1fe1446812d65e25fd424886d05aeb1fe4
@@@ -422,14 -422,22 +422,22 @@@ static void __kprobes set_current_kprob
  
  static void __kprobes clear_btf(void)
  {
-       if (test_thread_flag(TIF_DEBUGCTLMSR))
-               update_debugctlmsr(0);
+       if (test_thread_flag(TIF_BLOCKSTEP)) {
+               unsigned long debugctl = get_debugctlmsr();
+               debugctl &= ~DEBUGCTLMSR_BTF;
+               update_debugctlmsr(debugctl);
+       }
  }
  
  static void __kprobes restore_btf(void)
  {
-       if (test_thread_flag(TIF_DEBUGCTLMSR))
-               update_debugctlmsr(current->thread.debugctlmsr);
+       if (test_thread_flag(TIF_BLOCKSTEP)) {
+               unsigned long debugctl = get_debugctlmsr();
+               debugctl |= DEBUGCTLMSR_BTF;
+               update_debugctlmsr(debugctl);
+       }
  }
  
  void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@@ -534,6 -542,20 +542,6 @@@ static int __kprobes kprobe_handler(str
        struct kprobe_ctlblk *kcb;
  
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
 -      if (*addr != BREAKPOINT_INSTRUCTION) {
 -              /*
 -               * The breakpoint instruction was removed right
 -               * after we hit it.  Another cpu has removed
 -               * either a probepoint or a debugger breakpoint
 -               * at this address.  In either case, no further
 -               * handling of this interrupt is appropriate.
 -               * Back up over the (now missing) int3 and run
 -               * the original instruction.
 -               */
 -              regs->ip = (unsigned long)addr;
 -              return 1;
 -      }
 -
        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing. We conditionally
                                setup_singlestep(p, regs, kcb, 0);
                        return 1;
                }
 +      } else if (*addr != BREAKPOINT_INSTRUCTION) {
 +              /*
 +               * The breakpoint instruction was removed right
 +               * after we hit it.  Another cpu has removed
 +               * either a probepoint or a debugger breakpoint
 +               * at this address.  In either case, no further
 +               * handling of this interrupt is appropriate.
 +               * Back up over the (now missing) int3 and run
 +               * the original instruction.
 +               */
 +              regs->ip = (unsigned long)addr;
 +              preempt_enable_no_resched();
 +              return 1;
        } else if (kprobe_running()) {
                p = __get_cpu_var(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
diff --combined arch/x86/kernel/process.c
index 0415c3ef91b575e3fbef8bc0870ad459769a5010,eccdb57094e39e5ccd4dfb3b657c7fba273ba348..cc6877535ef49919f7f8d207d68f6e57d73489ea
@@@ -20,7 -20,6 +20,6 @@@
  #include <asm/idle.h>
  #include <asm/uaccess.h>
  #include <asm/i387.h>
- #include <asm/ds.h>
  #include <asm/debugreg.h>
  
  unsigned long idle_halt;
@@@ -50,8 -49,6 +49,6 @@@ void free_thread_xstate(struct task_str
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
-       WARN(tsk->thread.ds_ctx, "leaking DS context\n");
  }
  
  void free_thread_info(struct thread_info *ti)
@@@ -198,11 -195,16 +195,16 @@@ void __switch_to_xtra(struct task_struc
        prev = &prev_p->thread;
        next = &next_p->thread;
  
-       if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-           test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-               ds_switch_to(prev_p, next_p);
-       else if (next->debugctlmsr != prev->debugctlmsr)
-               update_debugctlmsr(next->debugctlmsr);
+       if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+           test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+               unsigned long debugctl = get_debugctlmsr();
+               debugctl &= ~DEBUGCTLMSR_BTF;
+               if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+                       debugctl |= DEBUGCTLMSR_BTF;
+               update_debugctlmsr(debugctl);
+       }
  
        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
@@@ -546,13 -548,11 +548,13 @@@ static int __cpuinit check_c1e_idle(con
                 * check OSVW bit for CPUs that are not affected
                 * by erratum #400
                 */
 -              rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
 -              if (val >= 2) {
 -                      rdmsrl(MSR_AMD64_OSVW_STATUS, val);
 -                      if (!(val & BIT(1)))
 -                              goto no_c1e_idle;
 +              if (cpu_has(c, X86_FEATURE_OSVW)) {
 +                      rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
 +                      if (val >= 2) {
 +                              rdmsrl(MSR_AMD64_OSVW_STATUS, val);
 +                              if (!(val & BIT(1)))
 +                                      goto no_c1e_idle;
 +                      }
                }
                return 1;
        }
diff --combined arch/x86/kvm/vmx.c
index 2f8db0ec8ae4ae2f346f35d1cc670571b79b1f1f,32022a8a5c3b342e9a69350e7241c010e3803268..edca080407a541a4ec331fe3a2516b6b8334eb64
@@@ -2703,7 -2703,8 +2703,7 @@@ static int vmx_nmi_allowed(struct kvm_v
                return 0;
  
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 -                      (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
 -                              GUEST_INTR_STATE_NMI));
 +                      (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
  }
  
  static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@@ -3659,8 -3660,11 +3659,11 @@@ static void vmx_complete_interrupts(str
  
        /* We need to handle NMIs before interrupts are enabled */
        if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-           (exit_intr_info & INTR_INFO_VALID_MASK))
+           (exit_intr_info & INTR_INFO_VALID_MASK)) {
+               kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
+               kvm_after_handle_nmi(&vmx->vcpu);
+       }
  
        idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
  
diff --combined arch/x86/kvm/x86.c
index c4f35b545c1d14452fc281d6a7405de55bfecb0b,73d854c36e39ce1a9fbf748b99ade29ecc14489c..dd9bc8fb81abddc4dc633dbc20392d1b5fcf14eb
@@@ -40,6 -40,7 +40,7 @@@
  #include <linux/user-return-notifier.h>
  #include <linux/srcu.h>
  #include <linux/slab.h>
+ #include <linux/perf_event.h>
  #include <trace/events/kvm.h>
  #undef TRACE_INCLUDE_FILE
  #define CREATE_TRACE_POINTS
@@@ -1712,7 -1713,6 +1713,7 @@@ static int kvm_vcpu_ioctl_set_cpuid(str
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
 +      vcpu_load(vcpu);
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
        r = 0;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
 +      vcpu_put(vcpu);
  
  out_free:
        vfree(cpuid_entries);
@@@ -1751,11 -1750,9 +1752,11 @@@ static int kvm_vcpu_ioctl_set_cpuid2(st
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
 +      vcpu_load(vcpu);
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
 +      vcpu_put(vcpu);
        return 0;
  
  out:
@@@ -3747,6 -3744,51 +3748,51 @@@ static void kvm_timer_init(void
        }
  }
  
+ static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+ static int kvm_is_in_guest(void)
+ {
+       return percpu_read(current_vcpu) != NULL;
+ }
+ static int kvm_is_user_mode(void)
+ {
+       int user_mode = 3;
+       if (percpu_read(current_vcpu))
+               user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+       return user_mode != 0;
+ }
+ static unsigned long kvm_get_guest_ip(void)
+ {
+       unsigned long ip = 0;
+       if (percpu_read(current_vcpu))
+               ip = kvm_rip_read(percpu_read(current_vcpu));
+       return ip;
+ }
+ static struct perf_guest_info_callbacks kvm_guest_cbs = {
+       .is_in_guest            = kvm_is_in_guest,
+       .is_user_mode           = kvm_is_user_mode,
+       .get_guest_ip           = kvm_get_guest_ip,
+ };
+ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+ {
+       percpu_write(current_vcpu, vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+ {
+       percpu_write(current_vcpu, NULL);
+ }
+ EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  int kvm_arch_init(void *opaque)
  {
        int r;
  
        kvm_timer_init();
  
+       perf_register_guest_info_callbacks(&kvm_guest_cbs);
        return 0;
  
  out:
  
  void kvm_arch_exit(void)
  {
+       perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
                                            CPUFREQ_TRANSITION_NOTIFIER);
diff --combined arch/x86/oprofile/op_model_ppro.c
index 1fd17cfb956bf65385c5a26bb861cf92fdd87296,c8abc4d1bf35e991dad321f0db21388892076959..d769cda540823e12a0dbfbdba923ec481f07abfd
@@@ -30,46 -30,19 +30,46 @@@ static int counter_width = 32
  
  static u64 *reset_value;
  
 -static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 +static void ppro_shutdown(struct op_msrs const * const msrs)
  {
        int i;
  
 -      for (i = 0; i < num_counters; i++) {
 -              if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 -                      msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 +      for (i = 0; i < num_counters; ++i) {
 +              if (!msrs->counters[i].addr)
 +                      continue;
 +              release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 +              release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 +      }
 +      if (reset_value) {
 +              kfree(reset_value);
 +              reset_value = NULL;
        }
 +}
 +
 +static int ppro_fill_in_addresses(struct op_msrs * const msrs)
 +{
 +      int i;
  
        for (i = 0; i < num_counters; i++) {
 -              if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 -                      msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 +              if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 +                      goto fail;
 +              if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
 +                      release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 +                      goto fail;
 +              }
 +              /* both registers must be reserved */
 +              msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 +              msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 +              continue;
 +      fail:
 +              if (!counter_config[i].enabled)
 +                      continue;
 +              op_x86_warn_reserved(i);
 +              ppro_shutdown(msrs);
 +              return -EBUSY;
        }
 +
 +      return 0;
  }
  
  
@@@ -105,17 -78,26 +105,17 @@@ static void ppro_setup_ctrs(struct op_x
  
        /* clear all counters */
        for (i = 0; i < num_counters; ++i) {
 -              if (unlikely(!msrs->controls[i].addr)) {
 -                      if (counter_config[i].enabled && !smp_processor_id())
 -                              /*
 -                               * counter is reserved, this is on all
 -                               * cpus, so report only for cpu #0
 -                               */
 -                              op_x86_warn_reserved(i);
 +              if (!msrs->controls[i].addr)
                        continue;
 -              }
                rdmsrl(msrs->controls[i].addr, val);
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
 -      }
 -
 -      /* avoid a false detection of ctr overflows in NMI handler */
 -      for (i = 0; i < num_counters; ++i) {
 -              if (unlikely(!msrs->counters[i].addr))
 -                      continue;
 +              /*
 +               * avoid a false detection of ctr overflows in NMI *
 +               * handler
 +               */
                wrmsrl(msrs->counters[i].addr, -1LL);
        }
  
@@@ -207,6 -189,25 +207,6 @@@ static void ppro_stop(struct op_msrs co
        }
  }
  
 -static void ppro_shutdown(struct op_msrs const * const msrs)
 -{
 -      int i;
 -
 -      for (i = 0; i < num_counters; ++i) {
 -              if (msrs->counters[i].addr)
 -                      release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 -      }
 -      for (i = 0; i < num_counters; ++i) {
 -              if (msrs->controls[i].addr)
 -                      release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 -      }
 -      if (reset_value) {
 -              kfree(reset_value);
 -              reset_value = NULL;
 -      }
 -}
 -
 -
  struct op_x86_model_spec op_ppro_spec = {
        .num_counters           = 2,
        .num_controls           = 2,
@@@ -238,11 -239,11 +238,11 @@@ static void arch_perfmon_setup_counters
        if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
                current_cpu_data.x86_model == 15) {
                eax.split.version_id = 2;
-               eax.split.num_events = 2;
+               eax.split.num_counters = 2;
                eax.split.bit_width = 40;
        }
  
-       num_counters = eax.split.num_events;
+       num_counters = eax.split.num_counters;
  
        op_arch_perfmon_spec.num_counters = num_counters;
        op_arch_perfmon_spec.num_controls = num_counters;
diff --combined include/linux/sched.h
index 2b7b81df78b307e432d9132db78277e0a3960ef7,e0447c64af6ad4bd84cc33e852809183ab449dc5..28b71ee133f02aafa2462384512337a628990ec8
@@@ -99,7 -99,6 +99,6 @@@ struct futex_pi_state
  struct robust_list_head;
  struct bio_list;
  struct fs_struct;
- struct bts_context;
  struct perf_event_context;
  
  /*
@@@ -1272,12 -1271,6 +1271,6 @@@ struct task_struct 
        struct list_head ptraced;
        struct list_head ptrace_entry;
  
-       /*
-        * This is the tracer handle for the ptrace BTS extension.
-        * This field actually belongs to the ptracer task.
-        */
-       struct bts_context *bts;
        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
        /* bitmask of trace recursion */
        unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
 -      unsigned long stack_start;
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
        struct memcg_batch_info {
                int do_batch;   /* incremented when batch uncharge started */
@@@ -2122,10 -2116,8 +2115,8 @@@ extern void set_task_comm(struct task_s
  extern char *get_task_comm(char *to, struct task_struct *tsk);
  
  #ifdef CONFIG_SMP
- extern void wait_task_context_switch(struct task_struct *p);
  extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
  #else
- static inline void wait_task_context_switch(struct task_struct *p) {}
  static inline unsigned long wait_task_inactive(struct task_struct *p,
                                               long match_state)
  {
diff --combined include/trace/ftrace.h
index 75dd7787fb373acaec6151f95c398e0ce069c6ad,882c64832ffe07b774df0d8932eadc8b919ea51a..16253db38d73274e329a20519023b342db7e7bd1
   *
   *    field = (typeof(field))entry;
   *
 - *    p = get_cpu_var(ftrace_event_seq);
 + *    p = &get_cpu_var(ftrace_event_seq);
   *    trace_seq_init(p);
 - *    ret = trace_seq_printf(s, <TP_printk> "\n");
 + *    ret = trace_seq_printf(s, "%s: ", <call>);
 + *    if (ret)
 + *            ret = trace_seq_printf(s, <TP_printk> "\n");
   *    put_cpu();
   *    if (!ret)
   *            return TRACE_TYPE_PARTIAL_LINE;
@@@ -452,38 -450,38 +452,38 @@@ perf_trace_disable_##name(struct ftrace
   *
   * static void ftrace_raw_event_<call>(proto)
   * {
 + *    struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
   *    struct ring_buffer_event *event;
   *    struct ftrace_raw_<call> *entry; <-- defined in stage 1
   *    struct ring_buffer *buffer;
   *    unsigned long irq_flags;
 + *    int __data_size;
   *    int pc;
   *
   *    local_save_flags(irq_flags);
   *    pc = preempt_count();
   *
 + *    __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 + *
   *    event = trace_current_buffer_lock_reserve(&buffer,
   *                              event_<call>.id,
 - *                              sizeof(struct ftrace_raw_<call>),
 + *                              sizeof(*entry) + __data_size,
   *                              irq_flags, pc);
   *    if (!event)
   *            return;
   *    entry   = ring_buffer_event_data(event);
   *
 - *    <assign>;  <-- Here we assign the entries by the __field and
 - *                    __array macros.
 + *    { <assign>; }  <-- Here we assign the entries by the __field and
 + *                       __array macros.
   *
 - *    trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 + *    if (!filter_current_check_discard(buffer, event_call, entry, event))
 + *            trace_current_buffer_unlock_commit(buffer,
 + *                                               event, irq_flags, pc);
   * }
   *
   * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
   * {
 - *    int ret;
 - *
 - *    ret = register_trace_<call>(ftrace_raw_event_<call>);
 - *    if (!ret)
 - *            pr_info("event trace: Could not activate trace point "
 - *                    "probe to <call>");
 - *    return ret;
 + *    return register_trace_<call>(ftrace_raw_event_<call>);
   * }
   *
   * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
   *    .trace                  = ftrace_raw_output_<call>, <-- stage 2
   * };
   *
 + * static const char print_fmt_<call>[] = <TP_printk>;
 + *
   * static struct ftrace_event_call __used
   * __attribute__((__aligned__(4)))
   * __attribute__((section("_ftrace_events"))) event_<call> = {
   *    .raw_init               = trace_event_raw_init,
   *    .regfunc                = ftrace_reg_event_<call>,
   *    .unregfunc              = ftrace_unreg_event_<call>,
 + *    .print_fmt              = print_fmt_<call>,
 + *    .define_fields          = ftrace_define_fields_<call>,
   * }
   *
   */
@@@ -575,6 -569,7 +575,6 @@@ ftrace_raw_event_id_##call(struct ftrac
                return;                                                 \
        entry   = ring_buffer_event_data(event);                        \
                                                                        \
 -                                                                      \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
@@@ -763,13 -758,12 +763,12 @@@ __attribute__((section("_ftrace_events"
  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)        \
  static notrace void                                                   \
  perf_trace_templ_##call(struct ftrace_event_call *event_call,         \
-                           proto)                                      \
+                       struct pt_regs *__regs, proto)                  \
  {                                                                     \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
        struct ftrace_raw_##call *entry;                                \
        u64 __addr = 0, __count = 1;                                    \
        unsigned long irq_flags;                                        \
-       struct pt_regs *__regs;                                         \
        int __entry_size;                                               \
        int __data_size;                                                \
        int rctx;                                                       \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       __regs = &__get_cpu_var(perf_trace_regs);                       \
-       perf_fetch_caller_regs(__regs, 2);                              \
-                                                                       \
        perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
                               __count, irq_flags, __regs);             \
  }
  
  #undef DEFINE_EVENT
- #define DEFINE_EVENT(template, call, proto, args)             \
- static notrace void perf_trace_##call(proto)                  \
- {                                                             \
-       struct ftrace_event_call *event_call = &event_##call;   \
-                                                               \
-       perf_trace_templ_##template(event_call, args);          \
+ #define DEFINE_EVENT(template, call, proto, args)                     \
+ static notrace void perf_trace_##call(proto)                          \
+ {                                                                     \
+       struct ftrace_event_call *event_call = &event_##call;           \
+       struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);         \
+                                                                       \
+       perf_fetch_caller_regs(__regs, 1);                              \
+                                                                       \
+       perf_trace_templ_##template(event_call, __regs, args);          \
+                                                                       \
+       put_cpu_var(perf_trace_regs);                                   \
  }
  
  #undef DEFINE_EVENT_PRINT
diff --combined kernel/fork.c
index 4c14942a0ee36fa994abdf91fa218d68b9e4052d,5d3592deaf71cab3a6dc47acaa38317738e15fee..4d57d9e3a6e992a9d0a3514138008a0d8aefaeff
@@@ -1111,9 -1111,8 +1111,7 @@@ static struct task_struct *copy_process
        p->memcg_batch.do_batch = 0;
        p->memcg_batch.memcg = NULL;
  #endif
 -      p->stack_start = stack_start;
  
-       p->bts = NULL;
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
  
diff --combined kernel/lockdep.c
index 4349e9793419d74d130cb30a2d2be44741395840,e9c759f06c1d114d5ab3d1bda286b5f30c6b194b..ec21304856d102814a796ef511f02d728f58a380
@@@ -431,7 -431,20 +431,7 @@@ static struct stack_trace lockdep_init_
  /*
   * Various lockdep statistics:
   */
 -atomic_t chain_lookup_hits;
 -atomic_t chain_lookup_misses;
 -atomic_t hardirqs_on_events;
 -atomic_t hardirqs_off_events;
 -atomic_t redundant_hardirqs_on;
 -atomic_t redundant_hardirqs_off;
 -atomic_t softirqs_on_events;
 -atomic_t softirqs_off_events;
 -atomic_t redundant_softirqs_on;
 -atomic_t redundant_softirqs_off;
 -atomic_t nr_unused_locks;
 -atomic_t nr_cyclic_checks;
 -atomic_t nr_find_usage_forwards_checks;
 -atomic_t nr_find_usage_backwards_checks;
 +DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
  #endif
  
  /*
@@@ -735,7 -748,7 +735,7 @@@ register_lock_class(struct lockdep_map 
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
 -      debug_atomic_inc(&nr_unused_locks);
 +      debug_atomic_inc(nr_unused_locks);
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
@@@ -805,8 -818,7 +805,8 @@@ static struct lock_list *alloc_list_ent
   * Add a new dependency to the head of the list:
   */
  static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 -                          struct list_head *head, unsigned long ip, int distance)
 +                          struct list_head *head, unsigned long ip,
 +                          int distance, struct stack_trace *trace)
  {
        struct lock_list *entry;
        /*
        if (!entry)
                return 0;
  
 -      if (!save_trace(&entry->trace))
 -              return 0;
 -
        entry->class = this;
        entry->distance = distance;
 +      entry->trace = *trace;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
@@@ -1191,7 -1205,7 +1191,7 @@@ check_noncircular(struct lock_list *roo
  {
        int result;
  
 -      debug_atomic_inc(&nr_cyclic_checks);
 +      debug_atomic_inc(nr_cyclic_checks);
  
        result = __bfs_forwards(root, target, class_equal, target_entry);
  
@@@ -1228,7 -1242,7 +1228,7 @@@ find_usage_forwards(struct lock_list *r
  {
        int result;
  
 -      debug_atomic_inc(&nr_find_usage_forwards_checks);
 +      debug_atomic_inc(nr_find_usage_forwards_checks);
  
        result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
  
@@@ -1251,7 -1265,7 +1251,7 @@@ find_usage_backwards(struct lock_list *
  {
        int result;
  
 -      debug_atomic_inc(&nr_find_usage_backwards_checks);
 +      debug_atomic_inc(nr_find_usage_backwards_checks);
  
        result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
  
@@@ -1621,20 -1635,12 +1621,20 @@@ check_deadlock(struct task_struct *curr
   */
  static int
  check_prev_add(struct task_struct *curr, struct held_lock *prev,
 -             struct held_lock *next, int distance)
 +             struct held_lock *next, int distance, int trylock_loop)
  {
        struct lock_list *entry;
        int ret;
        struct lock_list this;
        struct lock_list *uninitialized_var(target_entry);
 +      /*
 +       * Static variable, serialized by the graph_lock().
 +       *
 +       * We use this static variable to save the stack trace in case
 +       * we call into this function multiple times due to encountering
 +       * trylocks in the held lock stack.
 +       */
 +      static struct stack_trace trace;
  
        /*
         * Prove that the new <prev> -> <next> dependency would not
                }
        }
  
 +      if (!trylock_loop && !save_trace(&trace))
 +              return 0;
 +
        /*
         * Ok, all validations passed, add the new lock
         * to the previous lock's dependency list:
         */
        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
                               &hlock_class(prev)->locks_after,
 -                             next->acquire_ip, distance);
 +                             next->acquire_ip, distance, &trace);
  
        if (!ret)
                return 0;
  
        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
                               &hlock_class(next)->locks_before,
 -                             next->acquire_ip, distance);
 +                             next->acquire_ip, distance, &trace);
        if (!ret)
                return 0;
  
@@@ -1728,7 -1731,6 +1728,7 @@@ static in
  check_prevs_add(struct task_struct *curr, struct held_lock *next)
  {
        int depth = curr->lockdep_depth;
 +      int trylock_loop = 0;
        struct held_lock *hlock;
  
        /*
                 * added:
                 */
                if (hlock->read != 2) {
 -                      if (!check_prev_add(curr, hlock, next, distance))
 +                      if (!check_prev_add(curr, hlock, next,
 +                                              distance, trylock_loop))
                                return 0;
                        /*
                         * Stop after the first non-trylock entry,
                if (curr->held_locks[depth].irq_context !=
                                curr->held_locks[depth-1].irq_context)
                        break;
 +              trylock_loop = 1;
        }
        return 1;
  out_bug:
@@@ -1825,7 -1825,7 +1825,7 @@@ static inline int lookup_chain_cache(st
        list_for_each_entry(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
  cache_hit:
 -                      debug_atomic_inc(&chain_lookup_hits);
 +                      debug_atomic_inc(chain_lookup_hits);
                        if (very_verbose(class))
                                printk("\nhash chain already cached, key: "
                                        "%016Lx tail class: [%p] %s\n",
                chain_hlocks[chain->base + j] = class - lock_classes;
        }
        list_add_tail_rcu(&chain->entry, hash_head);
 -      debug_atomic_inc(&chain_lookup_misses);
 +      debug_atomic_inc(chain_lookup_misses);
        inc_chains();
  
        return 1;
@@@ -2311,12 -2311,7 +2311,12 @@@ void trace_hardirqs_on_caller(unsigned 
                return;
  
        if (unlikely(curr->hardirqs_enabled)) {
 -              debug_atomic_inc(&redundant_hardirqs_on);
 +              /*
 +               * Neither irq nor preemption are disabled here
 +               * so this is racy by nature but loosing one hit
 +               * in a stat is not a big deal.
 +               */
 +              __debug_atomic_inc(redundant_hardirqs_on);
                return;
        }
        /* we'll do an OFF -> ON transition: */
  
        curr->hardirq_enable_ip = ip;
        curr->hardirq_enable_event = ++curr->irq_events;
 -      debug_atomic_inc(&hardirqs_on_events);
 +      debug_atomic_inc(hardirqs_on_events);
  }
  EXPORT_SYMBOL(trace_hardirqs_on_caller);
  
@@@ -2375,9 -2370,9 +2375,9 @@@ void trace_hardirqs_off_caller(unsigne
                curr->hardirqs_enabled = 0;
                curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
 -              debug_atomic_inc(&hardirqs_off_events);
 +              debug_atomic_inc(hardirqs_off_events);
        } else
 -              debug_atomic_inc(&redundant_hardirqs_off);
 +              debug_atomic_inc(redundant_hardirqs_off);
  }
  EXPORT_SYMBOL(trace_hardirqs_off_caller);
  
@@@ -2401,7 -2396,7 +2401,7 @@@ void trace_softirqs_on(unsigned long ip
                return;
  
        if (curr->softirqs_enabled) {
 -              debug_atomic_inc(&redundant_softirqs_on);
 +              debug_atomic_inc(redundant_softirqs_on);
                return;
        }
  
        curr->softirqs_enabled = 1;
        curr->softirq_enable_ip = ip;
        curr->softirq_enable_event = ++curr->irq_events;
 -      debug_atomic_inc(&softirqs_on_events);
 +      debug_atomic_inc(softirqs_on_events);
        /*
         * We are going to turn softirqs on, so set the
         * usage bit for all held locks, if hardirqs are
@@@ -2441,10 -2436,10 +2441,10 @@@ void trace_softirqs_off(unsigned long i
                curr->softirqs_enabled = 0;
                curr->softirq_disable_ip = ip;
                curr->softirq_disable_event = ++curr->irq_events;
 -              debug_atomic_inc(&softirqs_off_events);
 +              debug_atomic_inc(softirqs_off_events);
                DEBUG_LOCKS_WARN_ON(!softirq_count());
        } else
 -              debug_atomic_inc(&redundant_softirqs_off);
 +              debug_atomic_inc(redundant_softirqs_off);
  }
  
  static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
@@@ -2649,7 -2644,7 +2649,7 @@@ static int mark_lock(struct task_struc
                        return 0;
                break;
        case LOCK_USED:
 -              debug_atomic_dec(&nr_unused_locks);
 +              debug_atomic_dec(nr_unused_locks);
                break;
        default:
                if (!debug_locks_off_graph_unlock())
@@@ -2755,7 -2750,7 +2755,7 @@@ static int __lock_acquire(struct lockde
                if (!class)
                        return 0;
        }
 -      debug_atomic_inc((atomic_t *)&class->ops);
 +      atomic_inc((atomic_t *)&class->ops);
        if (very_verbose(class)) {
                printk("\nacquire class [%p] %s", class->key, class->name);
                if (class->name_version > 1)
@@@ -3232,7 -3227,7 +3232,7 @@@ void lock_release(struct lockdep_map *l
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
-       trace_lock_release(lock, nested, ip);
+       trace_lock_release(lock, ip);
        __lock_release(lock, nested, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
@@@ -3385,7 -3380,7 +3385,7 @@@ found_it
                hlock->holdtime_stamp = now;
        }
  
-       trace_lock_acquired(lock, ip, waittime);
+       trace_lock_acquired(lock, ip);
  
        stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
@@@ -3806,11 -3801,8 +3806,11 @@@ void lockdep_rcu_dereference(const cha
  {
        struct task_struct *curr = current;
  
 +#ifndef CONFIG_PROVE_RCU_REPEATEDLY
        if (!debug_locks_off())
                return;
 +#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
 +      /* Note: the following can be executed concurrently, so be careful. */
        printk("\n===================================================\n");
        printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
        printk(  "---------------------------------------------------\n");
diff --combined kernel/ptrace.c
index 2f0f50b450a336351b7d8ca547dab63b648a264e,9fb51237b18c3a03d2bf0561d13608f9a0a8452c..6af9cdd558b7b28b5d7e44cc78e3e7cced8582d5
@@@ -14,6 -14,7 +14,6 @@@
  #include <linux/mm.h>
  #include <linux/highmem.h>
  #include <linux/pagemap.h>
 -#include <linux/smp_lock.h>
  #include <linux/ptrace.h>
  #include <linux/security.h>
  #include <linux/signal.h>
@@@ -75,7 -76,6 +75,6 @@@ void __ptrace_unlink(struct task_struc
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
  
-       arch_ptrace_untrace(child);
        if (task_is_traced(child))
                ptrace_untrace(child);
  }
@@@ -665,6 -665,10 +664,6 @@@ SYSCALL_DEFINE4(ptrace, long, request, 
        struct task_struct *child;
        long ret;
  
 -      /*
 -       * This lock_kernel fixes a subtle race with suid exec
 -       */
 -      lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
   out_put_task_struct:
        put_task_struct(child);
   out:
 -      unlock_kernel();
        return ret;
  }
  
@@@ -807,6 -812,10 +806,6 @@@ asmlinkage long compat_sys_ptrace(compa
        struct task_struct *child;
        long ret;
  
 -      /*
 -       * This lock_kernel fixes a subtle race with suid exec
 -       */
 -      lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
   out_put_task_struct:
        put_task_struct(child);
   out:
 -      unlock_kernel();
        return ret;
  }
  #endif        /* CONFIG_COMPAT */
diff --combined kernel/sched.c
index d8a213ccdc3b21f52669a8da7030071c38976e8d,b11b80a3eed36335c5a9ae24ffd8e9e2540114b8..5cd607ec8405f6869ef1a4e153b97d790fa3d0e1
@@@ -2087,49 -2087,6 +2087,6 @@@ migrate_task(struct task_struct *p, in
        return 1;
  }
  
- /*
-  * wait_task_context_switch - wait for a thread to complete at least one
-  *                            context switch.
-  *
-  * @p must not be current.
-  */
- void wait_task_context_switch(struct task_struct *p)
- {
-       unsigned long nvcsw, nivcsw, flags;
-       int running;
-       struct rq *rq;
-       nvcsw   = p->nvcsw;
-       nivcsw  = p->nivcsw;
-       for (;;) {
-               /*
-                * The runqueue is assigned before the actual context
-                * switch. We need to take the runqueue lock.
-                *
-                * We could check initially without the lock but it is
-                * very likely that we need to take the lock in every
-                * iteration.
-                */
-               rq = task_rq_lock(p, &flags);
-               running = task_running(rq, p);
-               task_rq_unlock(rq, &flags);
-               if (likely(!running))
-                       break;
-               /*
-                * The switch count is incremented before the actual
-                * context switch. We thus wait for two switches to be
-                * sure at least one completed.
-                */
-               if ((p->nvcsw - nvcsw) > 1)
-                       break;
-               if ((p->nivcsw - nivcsw) > 1)
-                       break;
-               cpu_relax();
-       }
- }
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
@@@ -3706,7 -3663,7 +3663,7 @@@ need_resched
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
 -      rcu_sched_qs(cpu);
 +      rcu_note_context_switch(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;
  
diff --combined kernel/trace/trace_selftest.c
index 9398034f814aa964652acfc6b92f56ee2224144d,1cc9858258b33468627f0c9c787d8401480a152e..71fa771ee4d77f12347a2cbaffa6b085aefb0659
@@@ -17,7 -17,6 +17,6 @@@ static inline int trace_valid_entry(str
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
-       case TRACE_HW_BRANCHES:
        case TRACE_KSYM:
                return 1;
        }
@@@ -30,7 -29,7 +29,7 @@@ static int trace_test_buffer_cpu(struc
        struct trace_entry *entry;
        unsigned int loops = 0;
  
 -      while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 +      while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);
  
                /*
@@@ -755,62 -754,6 +754,6 @@@ trace_selftest_startup_branch(struct tr
  }
  #endif /* CONFIG_BRANCH_TRACER */
  
- #ifdef CONFIG_HW_BRANCH_TRACER
- int
- trace_selftest_startup_hw_branches(struct tracer *trace,
-                                  struct trace_array *tr)
- {
-       struct trace_iterator *iter;
-       struct tracer tracer;
-       unsigned long count;
-       int ret;
-       if (!trace->open) {
-               printk(KERN_CONT "missing open function...");
-               return -1;
-       }
-       ret = tracer_init(trace, tr);
-       if (ret) {
-               warn_failed_init_tracer(trace, ret);
-               return ret;
-       }
-       /*
-        * The hw-branch tracer needs to collect the trace from the various
-        * cpu trace buffers - before tracing is stopped.
-        */
-       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-       if (!iter)
-               return -ENOMEM;
-       memcpy(&tracer, trace, sizeof(tracer));
-       iter->trace = &tracer;
-       iter->tr = tr;
-       iter->pos = -1;
-       mutex_init(&iter->mutex);
-       trace->open(iter);
-       mutex_destroy(&iter->mutex);
-       kfree(iter);
-       tracing_stop();
-       ret = trace_test_buffer(tr, &count);
-       trace->reset(tr);
-       tracing_start();
-       if (!ret && !count) {
-               printk(KERN_CONT "no entries found..");
-               ret = -1;
-       }
-       return ret;
- }
- #endif /* CONFIG_HW_BRANCH_TRACER */
  #ifdef CONFIG_KSYM_TRACER
  static int ksym_selftest_dummy;
  
diff --combined tools/perf/util/trace-event-parse.c
index 613c9cc9057020675667756580623588fbc4dc43,069f261b225c3d58f3426257a665cd07a52ea495..73a02223c62922125b0dddb97e8a26099d8fdcd8
@@@ -37,10 -37,12 +37,12 @@@ int header_page_ts_offset
  int header_page_ts_size;
  int header_page_size_offset;
  int header_page_size_size;
+ int header_page_overwrite_offset;
+ int header_page_overwrite_size;
  int header_page_data_offset;
  int header_page_data_size;
  
- int latency_format;
+ bool latency_format;
  
  static char *input_buf;
  static unsigned long long input_buf_ptr;
@@@ -628,23 -630,32 +630,32 @@@ static int test_type(enum event_type ty
        return 0;
  }
  
- static int test_type_token(enum event_type type, char *token,
-                   enum event_type expect, const char *expect_tok)
+ static int __test_type_token(enum event_type type, char *token,
+                            enum event_type expect, const char *expect_tok,
+                            bool warn)
  {
        if (type != expect) {
-               warning("Error: expected type %d but read %d",
-                   expect, type);
+               if (warn)
+                       warning("Error: expected type %d but read %d",
+                               expect, type);
                return -1;
        }
  
        if (strcmp(token, expect_tok) != 0) {
-               warning("Error: expected '%s' but read '%s'",
-                   expect_tok, token);
+               if (warn)
+                       warning("Error: expected '%s' but read '%s'",
+                               expect_tok, token);
                return -1;
        }
        return 0;
  }
  
+ static int test_type_token(enum event_type type, char *token,
+                          enum event_type expect, const char *expect_tok)
+ {
+       return __test_type_token(type, token, expect, expect_tok, true);
+ }
  static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
  {
        enum event_type type;
@@@ -661,7 -672,8 +672,8 @@@ static int read_expect_type(enum event_
        return __read_expect_type(expect, tok, 1);
  }
  
- static int __read_expected(enum event_type expect, const char *str, int newline_ok)
+ static int __read_expected(enum event_type expect, const char *str,
+                          int newline_ok, bool warn)
  {
        enum event_type type;
        char *token;
        else
                type = read_token_item(&token);
  
-       ret = test_type_token(type, token, expect, str);
+       ret = __test_type_token(type, token, expect, str, warn);
  
        free_token(token);
  
  
  static int read_expected(enum event_type expect, const char *str)
  {
-       return __read_expected(expect, str, 1);
+       return __read_expected(expect, str, 1, true);
  }
  
  static int read_expected_item(enum event_type expect, const char *str)
  {
-       return __read_expected(expect, str, 0);
+       return __read_expected(expect, str, 0, true);
  }
  
  static char *event_read_name(void)
@@@ -744,7 -756,7 +756,7 @@@ static int field_is_string(struct forma
  
  static int field_is_dynamic(struct format_field *field)
  {
-       if (!strcmp(field->type, "__data_loc"))
+       if (!strncmp(field->type, "__data_loc", 10))
                return 1;
  
        return 0;
@@@ -1925,7 -1937,7 +1937,7 @@@ void *raw_field_ptr(struct event *event
        if (!field)
                return NULL;
  
 -      if (field->flags & FIELD_IS_STRING) {
 +      if (field->flags & FIELD_IS_DYNAMIC) {
                int offset;
  
                offset = *(int *)(data + field->offset);
@@@ -3087,88 -3099,6 +3099,6 @@@ static void print_args(struct print_ar
        }
  }
  
- static void parse_header_field(const char *field,
-                              int *offset, int *size)
- {
-       char *token;
-       int type;
-       if (read_expected(EVENT_ITEM, "field") < 0)
-               return;
-       if (read_expected(EVENT_OP, ":") < 0)
-               return;
-       /* type */
-       if (read_expect_type(EVENT_ITEM, &token) < 0)
-               goto fail;
-       free_token(token);
-       if (read_expected(EVENT_ITEM, field) < 0)
-               return;
-       if (read_expected(EVENT_OP, ";") < 0)
-               return;
-       if (read_expected(EVENT_ITEM, "offset") < 0)
-               return;
-       if (read_expected(EVENT_OP, ":") < 0)
-               return;
-       if (read_expect_type(EVENT_ITEM, &token) < 0)
-               goto fail;
-       *offset = atoi(token);
-       free_token(token);
-       if (read_expected(EVENT_OP, ";") < 0)
-               return;
-       if (read_expected(EVENT_ITEM, "size") < 0)
-               return;
-       if (read_expected(EVENT_OP, ":") < 0)
-               return;
-       if (read_expect_type(EVENT_ITEM, &token) < 0)
-               goto fail;
-       *size = atoi(token);
-       free_token(token);
-       if (read_expected(EVENT_OP, ";") < 0)
-               return;
-       type = read_token(&token);
-       if (type != EVENT_NEWLINE) {
-               /* newer versions of the kernel have a "signed" type */
-               if (type != EVENT_ITEM)
-                       goto fail;
-               if (strcmp(token, "signed") != 0)
-                       goto fail;
-               free_token(token);
-               if (read_expected(EVENT_OP, ":") < 0)
-                       return;
-               if (read_expect_type(EVENT_ITEM, &token))
-                       goto fail;
-               free_token(token);
-               if (read_expected(EVENT_OP, ";") < 0)
-                       return;
-               if (read_expect_type(EVENT_NEWLINE, &token))
-                       goto fail;
-       }
-  fail:
-       free_token(token);
- }
- int parse_header_page(char *buf, unsigned long size)
- {
-       init_input_buf(buf, size);
-       parse_header_field("timestamp", &header_page_ts_offset,
-                          &header_page_ts_size);
-       parse_header_field("commit", &header_page_size_offset,
-                          &header_page_size_size);
-       parse_header_field("data", &header_page_data_offset,
-                          &header_page_data_size);
-       return 0;
- }
  int parse_ftrace_file(char *buf, unsigned long size)
  {
        struct format_field *field;
This page took 0.192751 seconds and 4 git commands to generate.