Merge tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
author Linus Torvalds <[email protected]>
Thu, 15 Dec 2016 21:49:34 +0000 (13:49 -0800)
committer Linus Torvalds <[email protected]>
Thu, 15 Dec 2016 21:49:34 +0000 (13:49 -0800)
Pull tracing updates from Steven Rostedt:
 "This release has a few updates:

   - STM can hook into the function tracer
   - Function filtering now supports more advanced glob matching
   - Ftrace selftests updates and added tests
   - Softirq tag in traces now shows only softirqs
   - ARM nop added to non-traced locations at compile time
   - New trace_marker_raw file that allows for binary input (see the sketch after this list)
   - Optimizations to the ring buffer
   - Removal of kmap in trace_marker
   - Wakeup and irqsoff tracers now adhere to the set_graph_notrace file
   - Other various fixes and clean ups"
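
The new trace_marker_raw file mentioned above accepts opaque binary records from user space, which a tool can later read back out of trace_pipe_raw. A minimal user-space sketch; the record layout and the debugfs mount point here are assumptions for illustration, not a kernel-defined format:

  #include <fcntl.h>
  #include <stdint.h>
  #include <unistd.h>

  /* Arbitrary example record; the kernel stores the bytes verbatim. */
  struct my_mark {
          uint32_t id;
          uint64_t value;
  };

  int main(void)
  {
          struct my_mark m = { .id = 42, .value = 1234 };
          int fd = open("/sys/kernel/debug/tracing/trace_marker_raw", O_WRONLY);

          if (fd < 0)
                  return 1;
          write(fd, &m, sizeof(m));       /* one write() per binary record */
          close(fd);
          return 0;
  }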

* tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (42 commits)
  selftests: ftrace: Shift down default message verbosity
  kprobes/trace: Fix kprobe selftest for newer gcc
  tracing/kprobes: Add a helper method to return number of probe hits
  tracing/rb: Init the CPU mask on allocation
  tracing: Use SOFTIRQ_OFFSET for softirq dectection for more accurate results
  tracing/fgraph: Have wakeup and irqsoff tracers ignore graph functions too
  fgraph: Handle a case where a tracer ignores set_graph_notrace
  tracing: Replace kmap with copy_from_user() in trace_marker writing
  ftrace/x86_32: Set ftrace_stub to weak to prevent gcc from using short jumps to it
  tracing: Allow benchmark to be enabled at early_initcall()
  tracing: Have system enable return error if one of the events fail
  tracing: Do not start benchmark on boot up
  tracing: Have the reg function allow to fail
  ring-buffer: Force rb_end_commit() and rb_set_commit_to_write() inline
  ring-buffer: Froce rb_update_write_stamp() to be inlined
  ring-buffer: Force inline of hotpath helper functions
  tracing: Make __buffer_unlock_commit() always_inline
  tracing: Make tracepoint_printk a static_key
  ring-buffer: Always inline rb_event_data()
  ring-buffer: Make rb_reserve_next_event() always inlined
  ...

Documentation/trace/ftrace.txt
arch/powerpc/platforms/pseries/lpar.c
arch/x86/entry/entry_32.S
drivers/hwtracing/coresight/coresight-stm.c
drivers/hwtracing/stm/core.c
drivers/i2c/i2c-core.c
include/linux/ftrace.h
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c

diff --combined Documentation/trace/ftrace.txt
index 5596e2d71d6d836677fd98905ce0f7272b9c78c0,6c374c5fe400b57554189804b98cad917ab749e4..006f47c7d9135530d473ac80df5f83008ee408b0
@@@ -362,26 -362,6 +362,26 @@@ of ftrace. Here is a list of some of th
                  to correlate events across hypervisor/guest if
                  tb_offset is known.
  
 +        mono: This uses the fast monotonic clock (CLOCK_MONOTONIC)
 +              which is monotonic and is subject to NTP rate adjustments.
 +
 +        mono_raw:
 +              This is the raw monotonic clock (CLOCK_MONOTONIC_RAW)
 +              which is monotonic but is not subject to any rate adjustments
 +              and ticks at the same rate as the hardware clocksource.
 +
 +        boot: This is the boot clock (CLOCK_BOOTTIME) and is based on the
 +              fast monotonic clock, but also accounts for time spent in
 +              suspend. Since the clock access is designed for use in
 +              tracing in the suspend path, some side effects are possible
 +              if clock is accessed after the suspend time is accounted before
 +              the fast mono clock is updated. In this case, the clock update
 +              appears to happen slightly sooner than it normally would have.
 +              Also on 32-bit systems, it's possible that the 64-bit boot offset
 +              sees a partial update. These effects are rare and post
 +              processing should be able to handle them. See comments in the
 +              ktime_get_boot_fast_ns() function for more information.
 +
        To set a clock, simply echo the clock name into this file.
  
          echo global > trace_clock
  
                trace_fd = open("trace_marker", WR_ONLY);
  
+   trace_marker_raw:
+       This is similar to trace_marker above, but is meant for binary data
+       to be written to it, where a tool can be used to parse the data
+       from trace_pipe_raw.
    uprobe_events:
   
        Add dynamic tracepoints in programs.
@@@ -2238,16 -2224,13 +2244,13 @@@ hrtimer_interrup
  sys_nanosleep
  
  
- Perhaps this is not enough. The filters also allow simple wild
- cards. Only the following are currently available
+ Perhaps this is not enough. The filters also allow glob(7) matching.
  
    <match>*  - will match functions that begin with <match>
    *<match>  - will match functions that end with <match>
    *<match>* - will match functions that have <match> in it
- These are the only wild cards which are supported.
-   <match>*<match> will not work.
+   <match1>*<match2> - will match functions that begin with
+                       <match1> and end with <match2>
  
  Note: It is better to use quotes to enclose the wild cards,
        otherwise the shell may expand the parameters into names
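
As a quick illustration of the extended matching documented above, a filter of the new <match1>*<match2> form can be written to set_ftrace_filter from user space. A rough sketch, assuming the usual debugfs mount point and a pattern chosen purely as an example:

  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          /* matches functions beginning with "kmem_" and ending with "_alloc" */
          const char *pat = "kmem_*_alloc";
          int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

          if (fd < 0)
                  return 1;
          write(fd, pat, strlen(pat));    /* appends; open with O_TRUNC to reset first */
          close(fd);
          return 0;
  }
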
diff --combined arch/powerpc/platforms/pseries/lpar.c
index f2c98f6c1c9c93afec8a674f0e27c6a5e499767e,c0423ce3955c8a24b8ff9de646fcf1965e3365b0..a78da511ffeb588e632f575c3fe8673aaa9e5b42
@@@ -145,7 -145,7 +145,7 @@@ static long pSeries_lpar_hpte_insert(un
                         hpte_group, vpn,  pa, rflags, vflags, psize);
  
        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
 -      hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
 +      hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
  
        if (!(vflags & HPTE_V_BOLTED))
                pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@@ -661,9 -661,10 +661,10 @@@ EXPORT_SYMBOL(arch_free_page)
  #ifdef HAVE_JUMP_LABEL
  struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
  
- void hcall_tracepoint_regfunc(void)
+ int hcall_tracepoint_regfunc(void)
  {
        static_key_slow_inc(&hcall_tracepoint_key);
+       return 0;
  }
  
  void hcall_tracepoint_unregfunc(void)
  /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
  extern long hcall_tracepoint_refcount;
  
- void hcall_tracepoint_regfunc(void)
+ int hcall_tracepoint_regfunc(void)
  {
        hcall_tracepoint_refcount++;
+       return 0;
  }
  
  void hcall_tracepoint_unregfunc(void)
diff --combined arch/x86/entry/entry_32.S
index acc0c6f36f3f4c3a66f9ea81b92a409f680f1687,edba8606b99a0daa86853e6869553c6854b0efe3..701d29f8e4d300ac01553e5d8317a5e40dbdcc6d
@@@ -45,7 -45,6 +45,7 @@@
  #include <asm/asm.h>
  #include <asm/smap.h>
  #include <asm/export.h>
 +#include <asm/frame.h>
  
        .section .entry.text, "ax"
  
        SET_KERNEL_GS %edx
  .endm
  
 +/*
 + * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 + * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 + * is just setting the LSB, which makes it an invalid stack address and is also
 + * a signal to the unwinder that it's a pt_regs pointer in disguise.
 + *
 + * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 + * original rbp.
 + */
 +.macro ENCODE_FRAME_POINTER
 +#ifdef CONFIG_FRAME_POINTER
 +      mov %esp, %ebp
 +      orl $0x1, %ebp
 +#endif
 +.endm
 +
  .macro RESTORE_INT_REGS
        popl    %ebx
        popl    %ecx
@@@ -254,23 -237,6 +254,23 @@@ ENTRY(__switch_to_asm
        jmp     __switch_to
  END(__switch_to_asm)
  
 +/*
 + * The unwinder expects the last frame on the stack to always be at the same
 + * offset from the end of the page, which allows it to validate the stack.
 + * Calling schedule_tail() directly would break that convention because it's an
 + * asmlinkage function so its argument has to be pushed on the stack.  This
 + * wrapper creates a proper "end of stack" frame header before the call.
 + */
 +ENTRY(schedule_tail_wrapper)
 +      FRAME_BEGIN
 +
 +      pushl   %eax
 +      call    schedule_tail
 +      popl    %eax
 +
 +      FRAME_END
 +      ret
 +ENDPROC(schedule_tail_wrapper)
  /*
   * A newly forked process directly context switches into this address.
   *
   * edi: kernel thread arg
   */
  ENTRY(ret_from_fork)
 -      pushl   %eax
 -      call    schedule_tail
 -      popl    %eax
 +      call    schedule_tail_wrapper
  
        testl   %ebx, %ebx
        jnz     1f              /* kernel threads are uncommon */
@@@ -339,13 -307,13 +339,13 @@@ END(ret_from_exception
  #ifdef CONFIG_PREEMPT
  ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
 -need_resched:
 +.Lneed_resched:
        cmpl    $0, PER_CPU_VAR(__preempt_count)
        jnz     restore_all
        testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
        jz      restore_all
        call    preempt_schedule_irq
 -      jmp     need_resched
 +      jmp     .Lneed_resched
  END(resume_kernel)
  #endif
  
@@@ -366,7 -334,7 +366,7 @@@ GLOBAL(__begin_SYSENTER_singlestep_regi
   */
  ENTRY(xen_sysenter_target)
        addl    $5*4, %esp                      /* remove xen-provided frame */
 -      jmp     sysenter_past_esp
 +      jmp     .Lsysenter_past_esp
  #endif
  
  /*
   */
  ENTRY(entry_SYSENTER_32)
        movl    TSS_sysenter_sp0(%esp), %esp
 -sysenter_past_esp:
 +.Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
        pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
@@@ -536,9 -504,9 +536,9 @@@ ENTRY(entry_INT80_32
  
  restore_all:
        TRACE_IRQS_IRET
 -restore_all_notrace:
 +.Lrestore_all_notrace:
  #ifdef CONFIG_X86_ESPFIX32
 -      ALTERNATIVE     "jmp restore_nocheck", "", X86_BUG_ESPFIX
 +      ALTERNATIVE     "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
  
        movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
        /*
        movb    PT_CS(%esp), %al
        andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
 -      je ldt_ss                               # returning to user-space with LDT SS
 +      je .Lldt_ss                             # returning to user-space with LDT SS
  #endif
 -restore_nocheck:
 +.Lrestore_nocheck:
        RESTORE_REGS 4                          # skip orig_eax/error_code
 -irq_return:
 +.Lirq_return:
        INTERRUPT_RETURN
 +
  .section .fixup, "ax"
  ENTRY(iret_exc        )
        pushl   $0                              # no error code
        pushl   $do_iret_error
 -      jmp     error_code
 +      jmp     common_exception
  .previous
 -      _ASM_EXTABLE(irq_return, iret_exc)
 +      _ASM_EXTABLE(.Lirq_return, iret_exc)
  
  #ifdef CONFIG_X86_ESPFIX32
 -ldt_ss:
 +.Lldt_ss:
  /*
   * Setup and switch to ESPFIX stack
   *
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss     (%esp), %esp                    /* switch to espfix segment */
 -      jmp     restore_nocheck
 +      jmp     .Lrestore_nocheck
  #endif
  ENDPROC(entry_INT80_32)
  
@@@ -657,7 -624,6 +657,7 @@@ common_interrupt
        ASM_CLAC
        addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        TRACE_IRQS_OFF
        movl    %esp, %eax
        call    do_IRQ
@@@ -669,7 -635,6 +669,7 @@@ ENTRY(name)                                
        ASM_CLAC;                       \
        pushl   $~(nr);                 \
        SAVE_ALL;                       \
 +      ENCODE_FRAME_POINTER;           \
        TRACE_IRQS_OFF                  \
        movl    %esp, %eax;             \
        call    fn;                     \
@@@ -694,7 -659,7 +694,7 @@@ ENTRY(coprocessor_error
        ASM_CLAC
        pushl   $0
        pushl   $do_coprocessor_error
 -      jmp     error_code
 +      jmp     common_exception
  END(coprocessor_error)
  
  ENTRY(simd_coprocessor_error)
  #else
        pushl   $do_simd_coprocessor_error
  #endif
 -      jmp     error_code
 +      jmp     common_exception
  END(simd_coprocessor_error)
  
  ENTRY(device_not_available)
        ASM_CLAC
        pushl   $-1                             # mark this as an int
        pushl   $do_device_not_available
 -      jmp     error_code
 +      jmp     common_exception
  END(device_not_available)
  
  #ifdef CONFIG_PARAVIRT
@@@ -729,59 -694,59 +729,59 @@@ ENTRY(overflow
        ASM_CLAC
        pushl   $0
        pushl   $do_overflow
 -      jmp     error_code
 +      jmp     common_exception
  END(overflow)
  
  ENTRY(bounds)
        ASM_CLAC
        pushl   $0
        pushl   $do_bounds
 -      jmp     error_code
 +      jmp     common_exception
  END(bounds)
  
  ENTRY(invalid_op)
        ASM_CLAC
        pushl   $0
        pushl   $do_invalid_op
 -      jmp     error_code
 +      jmp     common_exception
  END(invalid_op)
  
  ENTRY(coprocessor_segment_overrun)
        ASM_CLAC
        pushl   $0
        pushl   $do_coprocessor_segment_overrun
 -      jmp     error_code
 +      jmp     common_exception
  END(coprocessor_segment_overrun)
  
  ENTRY(invalid_TSS)
        ASM_CLAC
        pushl   $do_invalid_TSS
 -      jmp     error_code
 +      jmp     common_exception
  END(invalid_TSS)
  
  ENTRY(segment_not_present)
        ASM_CLAC
        pushl   $do_segment_not_present
 -      jmp     error_code
 +      jmp     common_exception
  END(segment_not_present)
  
  ENTRY(stack_segment)
        ASM_CLAC
        pushl   $do_stack_segment
 -      jmp     error_code
 +      jmp     common_exception
  END(stack_segment)
  
  ENTRY(alignment_check)
        ASM_CLAC
        pushl   $do_alignment_check
 -      jmp     error_code
 +      jmp     common_exception
  END(alignment_check)
  
  ENTRY(divide_error)
        ASM_CLAC
        pushl   $0                              # no error code
        pushl   $do_divide_error
 -      jmp     error_code
 +      jmp     common_exception
  END(divide_error)
  
  #ifdef CONFIG_X86_MCE
@@@ -789,7 -754,7 +789,7 @@@ ENTRY(machine_check
        ASM_CLAC
        pushl   $0
        pushl   machine_check_vector
 -      jmp     error_code
 +      jmp     common_exception
  END(machine_check)
  #endif
  
@@@ -797,14 -762,13 +797,14 @@@ ENTRY(spurious_interrupt_bug
        ASM_CLAC
        pushl   $0
        pushl   $do_spurious_interrupt_bug
 -      jmp     error_code
 +      jmp     common_exception
  END(spurious_interrupt_bug)
  
  #ifdef CONFIG_XEN
  ENTRY(xen_hypervisor_callback)
        pushl   $-1                             /* orig_ax = -1 => not a system call */
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        TRACE_IRQS_OFF
  
        /*
@@@ -859,7 -823,6 +859,7 @@@ ENTRY(xen_failsafe_callback
        jmp     iret_exc
  5:    pushl   $-1                             /* orig_ax = -1 => not a system call */
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        jmp     ret_from_exception
  
  .section .fixup, "ax"
@@@ -919,15 -882,15 +919,15 @@@ ftrace_call
        popl    %edx
        popl    %ecx
        popl    %eax
 -ftrace_ret:
 +.Lftrace_ret:
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  .globl ftrace_graph_call
  ftrace_graph_call:
        jmp     ftrace_stub
  #endif
  
- .globl ftrace_stub
- ftrace_stub:
+ /* This is weak to keep gas from relaxing the jumps */
+ WEAK(ftrace_stub)
        ret
  END(ftrace_caller)
  
@@@ -989,7 -952,7 +989,7 @@@ GLOBAL(ftrace_regs_call
        popl    %gs
        addl    $8, %esp                        /* Skip orig_ax and ip */
        popf                                    /* Pop flags at end (no addl to corrupt flags) */
 -      jmp     ftrace_ret
 +      jmp     .Lftrace_ret
  
        popf
        jmp     ftrace_stub
@@@ -1000,7 -963,7 +1000,7 @@@ ENTRY(mcount
        jb      ftrace_stub                     /* Paging not enabled yet? */
  
        cmpl    $ftrace_stub, ftrace_trace_function
 -      jnz     trace
 +      jnz     .Ltrace
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl    $ftrace_stub, ftrace_graph_return
        jnz     ftrace_graph_caller
@@@ -1013,7 -976,7 +1013,7 @@@ ftrace_stub
        ret
  
        /* taken from glibc */
 -trace:
 +.Ltrace:
        pushl   %eax
        pushl   %ecx
        pushl   %edx
@@@ -1064,7 -1027,7 +1064,7 @@@ return_to_handler
  ENTRY(trace_page_fault)
        ASM_CLAC
        pushl   $trace_do_page_fault
 -      jmp     error_code
 +      jmp     common_exception
  END(trace_page_fault)
  #endif
  
@@@ -1072,10 -1035,7 +1072,10 @@@ ENTRY(page_fault
        ASM_CLAC
        pushl   $do_page_fault
        ALIGN
 -error_code:
 +      jmp common_exception
 +END(page_fault)
 +
 +common_exception:
        /* the function address is in %gs's slot on the stack */
        pushl   %fs
        pushl   %es
        pushl   %edx
        pushl   %ecx
        pushl   %ebx
 +      ENCODE_FRAME_POINTER
        cld
        movl    $(__KERNEL_PERCPU), %ecx
        movl    %ecx, %fs
        movl    %esp, %eax                      # pt_regs pointer
        call    *%edi
        jmp     ret_from_exception
 -END(page_fault)
 +END(common_exception)
  
  ENTRY(debug)
        /*
        ASM_CLAC
        pushl   $-1                             # mark this as an int
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        xorl    %edx, %edx                      # error code 0
        movl    %esp, %eax                      # pt_regs pointer
  
  
  .Ldebug_from_sysenter_stack:
        /* We're on the SYSENTER stack.  Switch off. */
 -      movl    %esp, %ebp
 +      movl    %esp, %ebx
        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
        TRACE_IRQS_OFF
        call    do_debug
 -      movl    %ebp, %esp
 +      movl    %ebx, %esp
        jmp     ret_from_exception
  END(debug)
  
@@@ -1158,12 -1116,11 +1158,12 @@@ ENTRY(nmi
        movl    %ss, %eax
        cmpw    $__ESPFIX_SS, %ax
        popl    %eax
 -      je      nmi_espfix_stack
 +      je      .Lnmi_espfix_stack
  #endif
  
        pushl   %eax                            # pt_regs->orig_ax
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        xorl    %edx, %edx                      # zero error code
        movl    %esp, %eax                      # pt_regs pointer
  
  
        /* Not on SYSENTER stack. */
        call    do_nmi
 -      jmp     restore_all_notrace
 +      jmp     .Lrestore_all_notrace
  
  .Lnmi_from_sysenter_stack:
        /*
         * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
         * is using the thread stack right now, so it's safe for us to use it.
         */
 -      movl    %esp, %ebp
 +      movl    %esp, %ebx
        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
        call    do_nmi
 -      movl    %ebp, %esp
 -      jmp     restore_all_notrace
 +      movl    %ebx, %esp
 +      jmp     .Lrestore_all_notrace
  
  #ifdef CONFIG_X86_ESPFIX32
 -nmi_espfix_stack:
 +.Lnmi_espfix_stack:
        /*
         * create the pointer to lss back
         */
        .endr
        pushl   %eax
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        FIXUP_ESPFIX_STACK                      # %eax == %esp
        xorl    %edx, %edx                      # zero error code
        call    do_nmi
        RESTORE_REGS
        lss     12+4(%esp), %esp                # back to espfix stack
 -      jmp     irq_return
 +      jmp     .Lirq_return
  #endif
  END(nmi)
  
@@@ -1216,7 -1172,6 +1216,7 @@@ ENTRY(int3
        ASM_CLAC
        pushl   $-1                             # mark this as an int
        SAVE_ALL
 +      ENCODE_FRAME_POINTER
        TRACE_IRQS_OFF
        xorl    %edx, %edx                      # zero error code
        movl    %esp, %eax                      # pt_regs pointer
@@@ -1226,14 -1181,14 +1226,14 @@@ END(int3
  
  ENTRY(general_protection)
        pushl   $do_general_protection
 -      jmp     error_code
 +      jmp     common_exception
  END(general_protection)
  
  #ifdef CONFIG_KVM_GUEST
  ENTRY(async_page_fault)
        ASM_CLAC
        pushl   $do_async_page_fault
 -      jmp     error_code
 +      jmp     common_exception
  END(async_page_fault)
  #endif
  
diff --combined drivers/hwtracing/coresight/coresight-stm.c
index 944c17b48d2340bd65f31d64429bba71817cff40,b7543bd19c9e45773289af883bd677f6a41580a0..e4c55c5f998884f24b216861a2bc5c624b2376c8
@@@ -406,7 -406,7 +406,7 @@@ static long stm_generic_set_options(str
        return 0;
  }
  
- static ssize_t stm_generic_packet(struct stm_data *stm_data,
+ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
                                  unsigned int master,
                                  unsigned int channel,
                                  unsigned int packet,
                                                   struct stm_drvdata, stm);
  
        if (!(drvdata && local_read(&drvdata->mode)))
 -              return 0;
 +              return -EACCES;
  
        if (channel >= drvdata->numsp)
 -              return 0;
 +              return -EINVAL;
  
        ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
  
@@@ -920,11 -920,6 +920,11 @@@ static struct amba_id stm_ids[] = 
                .mask   = 0x0003ffff,
                .data   = "STM32",
        },
 +      {
 +              .id     = 0x0003b963,
 +              .mask   = 0x0003ffff,
 +              .data   = "STM500",
 +      },
        { 0, 0},
  };
  
diff --combined drivers/hwtracing/stm/core.c
index a6ea387b5b0038a375e806d97e4c3e8a927aa0e7,37d3bcbd253457c0a15e49258dbb7a472abbfa8f..0e731143f6a489a6c1d3decb227e462265c4329e
@@@ -361,7 -361,7 +361,7 @@@ static int stm_char_open(struct inode *
        struct stm_file *stmf;
        struct device *dev;
        unsigned int major = imajor(inode);
 -      int err = -ENODEV;
 +      int err = -ENOMEM;
  
        dev = class_find_device(&stm_class, NULL, &major, major_match);
        if (!dev)
  
        stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
        if (!stmf)
 -              return -ENOMEM;
 +              goto err_put_device;
  
 +      err = -ENODEV;
        stm_output_init(&stmf->output);
        stmf->stm = to_stm_device(dev);
  
        return nonseekable_open(inode, file);
  
  err_free:
 +      kfree(stmf);
 +err_put_device:
        /* matches class_find_device() above */
        put_device(dev);
 -      kfree(stmf);
  
        return err;
  }
@@@ -427,7 -425,7 +427,7 @@@ static int stm_file_assign(struct stm_f
        return ret;
  }
  
- static ssize_t stm_write(struct stm_data *data, unsigned int master,
+ static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
                          unsigned int channel, const char *buf, size_t count)
  {
        unsigned int flags = STP_PACKET_TIMESTAMPED;
@@@ -1123,8 -1121,9 +1123,9 @@@ void stm_source_unregister_device(struc
  }
  EXPORT_SYMBOL_GPL(stm_source_unregister_device);
  
- int stm_source_write(struct stm_source_data *data, unsigned int chan,
-                    const char *buf, size_t count)
+ int notrace stm_source_write(struct stm_source_data *data,
+                            unsigned int chan,
+                            const char *buf, size_t count)
  {
        struct stm_source_device *src = data->src;
        struct stm_device *stm;
diff --combined drivers/i2c/i2c-core.c
index 3a1bc9c4efc7e8e3552b7c559eea19233a9c199d,6a2b995d7fc446672c177cd4ddda7c1ad868503a..3de95a29024cde47e975918fafd3dd1a718b97a5
@@@ -65,9 -65,6 +65,9 @@@
  #define I2C_ADDR_OFFSET_TEN_BIT       0xa000
  #define I2C_ADDR_OFFSET_SLAVE 0x1000
  
 +#define I2C_ADDR_7BITS_MAX    0x77
 +#define I2C_ADDR_7BITS_COUNT  (I2C_ADDR_7BITS_MAX + 1)
 +
  /* core_lock protects i2c_adapter_idr, and guarantees
     that device detection, deletion of detected devices, and attach_adapter
     calls are serialized */
@@@ -80,9 -77,10 +80,10 @@@ static int i2c_detect(struct i2c_adapte
  static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
  static bool is_registered;
  
- void i2c_transfer_trace_reg(void)
+ int i2c_transfer_trace_reg(void)
  {
        static_key_slow_inc(&i2c_trace_msg);
+       return 0;
  }
  
  void i2c_transfer_trace_unreg(void)
@@@ -679,12 -677,9 +680,12 @@@ static inline int i2c_acpi_install_spac
  
  /* ------------------------------------------------------------------------- */
  
 -static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
 +const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
                                                const struct i2c_client *client)
  {
 +      if (!(id && client))
 +              return NULL;
 +
        while (id->name[0]) {
                if (strcmp(client->name, id->name) == 0)
                        return id;
        }
        return NULL;
  }
 +EXPORT_SYMBOL_GPL(i2c_match_id);
  
  static int i2c_device_match(struct device *dev, struct device_driver *drv)
  {
        struct i2c_client       *client = i2c_verify_client(dev);
        struct i2c_driver       *driver;
  
 -      if (!client)
 -              return 0;
  
        /* Attempt an OF style match */
 -      if (of_driver_match_device(dev, drv))
 +      if (i2c_of_match_device(drv->of_match_table, client))
                return 1;
  
        /* Then ACPI style match */
                return 1;
  
        driver = to_i2c_driver(drv);
 -      /* match on an id table if there is one */
 -      if (driver->id_table)
 -              return i2c_match_id(driver->id_table, client) != NULL;
 +
 +      /* Finally an I2C match */
 +      if (i2c_match_id(driver->id_table, client))
 +              return 1;
  
        return 0;
  }
@@@ -899,25 -894,6 +900,25 @@@ static void i2c_init_recovery(struct i2
        adap->bus_recovery_info = NULL;
  }
  
 +static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
 +{
 +      struct i2c_adapter *adap = client->adapter;
 +      unsigned int irq;
 +
 +      if (!adap->host_notify_domain)
 +              return -ENXIO;
 +
 +      if (client->flags & I2C_CLIENT_TEN)
 +              return -EINVAL;
 +
 +      irq = irq_find_mapping(adap->host_notify_domain, client->addr);
 +      if (!irq)
 +              irq = irq_create_mapping(adap->host_notify_domain,
 +                                       client->addr);
 +
 +      return irq > 0 ? irq : -ENXIO;
 +}
 +
  static int i2c_device_probe(struct device *dev)
  {
        struct i2c_client       *client = i2c_verify_client(dev);
                }
                if (irq == -EPROBE_DEFER)
                        return irq;
 +              /*
 +               * ACPI and OF did not find any useful IRQ, try to see
 +               * if Host Notify can be used.
 +               */
 +              if (irq < 0) {
 +                      dev_dbg(dev, "Using Host Notify IRQ\n");
 +                      irq = i2c_smbus_host_notify_to_irq(client);
 +              }
                if (irq < 0)
                        irq = 0;
  
        }
  
        driver = to_i2c_driver(dev->driver);
 -      if (!driver->probe || !driver->id_table)
 +
 +      /*
 +       * An I2C ID table is not mandatory, if and only if, a suitable Device
 +       * Tree match table entry is supplied for the probing device.
 +       */
 +      if (!driver->id_table &&
 +          !i2c_of_match_device(dev->driver->of_match_table, client))
                return -ENODEV;
  
        if (client->flags & I2C_CLIENT_WAKE) {
        if (status == -EPROBE_DEFER)
                goto err_clear_wakeup_irq;
  
 -      status = driver->probe(client, i2c_match_id(driver->id_table, client));
 +      /*
 +       * When there are no more users of probe(),
 +       * rename probe_new to probe.
 +       */
 +      if (driver->probe_new)
 +              status = driver->probe_new(client);
 +      else if (driver->probe)
 +              status = driver->probe(client,
 +                                     i2c_match_id(driver->id_table, client));
 +      else
 +              status = -EINVAL;
 +
        if (status)
                goto err_detach_pm_domain;
  
@@@ -1817,52 -1768,6 +1818,52 @@@ struct i2c_adapter *of_get_i2c_adapter_
        return adapter;
  }
  EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
 +
 +static const struct of_device_id*
 +i2c_of_match_device_sysfs(const struct of_device_id *matches,
 +                                struct i2c_client *client)
 +{
 +      const char *name;
 +
 +      for (; matches->compatible[0]; matches++) {
 +              /*
 +               * Adding devices through the i2c sysfs interface provides us
 +               * a string to match which may be compatible with the device
 +               * tree compatible strings, however with no actual of_node the
 +               * of_match_device() will not match
 +               */
 +              if (sysfs_streq(client->name, matches->compatible))
 +                      return matches;
 +
 +              name = strchr(matches->compatible, ',');
 +              if (!name)
 +                      name = matches->compatible;
 +              else
 +                      name++;
 +
 +              if (sysfs_streq(client->name, name))
 +                      return matches;
 +      }
 +
 +      return NULL;
 +}
 +
 +const struct of_device_id
 +*i2c_of_match_device(const struct of_device_id *matches,
 +                   struct i2c_client *client)
 +{
 +      const struct of_device_id *match;
 +
 +      if (!(client && matches))
 +              return NULL;
 +
 +      match = of_match_device(matches, &client->dev);
 +      if (match)
 +              return match;
 +
 +      return i2c_of_match_device_sysfs(matches, client);
 +}
 +EXPORT_SYMBOL_GPL(i2c_of_match_device);
  #else
  static void of_i2c_register_devices(struct i2c_adapter *adap) { }
  #endif /* CONFIG_OF */
@@@ -1896,79 -1801,6 +1897,79 @@@ static const struct i2c_lock_operation
        .unlock_bus =  i2c_adapter_unlock_bus,
  };
  
 +static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap)
 +{
 +      struct irq_domain *domain = adap->host_notify_domain;
 +      irq_hw_number_t hwirq;
 +
 +      if (!domain)
 +              return;
 +
 +      for (hwirq = 0 ; hwirq < I2C_ADDR_7BITS_COUNT ; hwirq++)
 +              irq_dispose_mapping(irq_find_mapping(domain, hwirq));
 +
 +      irq_domain_remove(domain);
 +      adap->host_notify_domain = NULL;
 +}
 +
 +static int i2c_host_notify_irq_map(struct irq_domain *h,
 +                                        unsigned int virq,
 +                                        irq_hw_number_t hw_irq_num)
 +{
 +      irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
 +
 +      return 0;
 +}
 +
 +static const struct irq_domain_ops i2c_host_notify_irq_ops = {
 +      .map = i2c_host_notify_irq_map,
 +};
 +
 +static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
 +{
 +      struct irq_domain *domain;
 +
 +      if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
 +              return 0;
 +
 +      domain = irq_domain_create_linear(adap->dev.fwnode,
 +                                        I2C_ADDR_7BITS_COUNT,
 +                                        &i2c_host_notify_irq_ops, adap);
 +      if (!domain)
 +              return -ENOMEM;
 +
 +      adap->host_notify_domain = domain;
 +
 +      return 0;
 +}
 +
 +/**
 + * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
 + * I2C client.
 + * @adap: the adapter
 + * @addr: the I2C address of the notifying device
 + * Context: can't sleep
 + *
 + * Helper function to be called from an I2C bus driver's interrupt
 + * handler. It will schedule the Host Notify IRQ.
 + */
 +int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
 +{
 +      int irq;
 +
 +      if (!adap)
 +              return -EINVAL;
 +
 +      irq = irq_find_mapping(adap->host_notify_domain, addr);
 +      if (irq <= 0)
 +              return -ENXIO;
 +
 +      generic_handle_irq(irq);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
 +
  static int i2c_register_adapter(struct i2c_adapter *adap)
  {
        int res = -EINVAL;
        if (adap->timeout == 0)
                adap->timeout = HZ;
  
 +      /* register soft irqs for Host Notify */
 +      res = i2c_setup_host_notify_irq_domain(adap);
 +      if (res) {
 +              pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n",
 +                     adap->name, res);
 +              goto out_list;
 +      }
 +
        dev_set_name(&adap->dev, "i2c-%d", adap->nr);
        adap->dev.bus = &i2c_bus_type;
        adap->dev.type = &i2c_adapter_type;
@@@ -2245,8 -2069,6 +2246,8 @@@ void i2c_del_adapter(struct i2c_adapte
  
        pm_runtime_disable(&adap->dev);
  
 +      i2c_host_notify_irq_teardown(adap);
 +
        /* wait until all references to the device are gone
         *
         * FIXME: This is old code and should ideally be replaced by an
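
For context on the Host Notify plumbing added above, i2c_handle_smbus_host_notify() is meant to be called from a bus driver's interrupt handler once the hardware reports the address of the notifying client. A hypothetical sketch; my_i2c_dev and my_read_notify_addr() are made-up stand-ins for driver-specific state and register access:

  #include <linux/i2c.h>
  #include <linux/interrupt.h>

  static irqreturn_t my_i2c_isr(int irq, void *dev_id)
  {
          struct my_i2c_dev *dev = dev_id;                /* hypothetical driver data */
          unsigned short addr = my_read_notify_addr(dev); /* hypothetical HW read */

          /* Hand the event to the core; it raises the client's Host Notify IRQ. */
          i2c_handle_smbus_host_notify(&dev->adapter, addr);

          return IRQ_HANDLED;
  }
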
diff --combined include/linux/ftrace.h
index d4a884db16a3dc5a77942e5f435d2f2290a8936a,8700049fd0e5a95ddcce0648dade31774c78eb42..3633e8beff39e3ad67409a133109350128607ddd
@@@ -398,7 -398,6 +398,7 @@@ int ftrace_set_notrace(struct ftrace_op
  void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
  void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
  void ftrace_free_filter(struct ftrace_ops *ops);
 +void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
  
  int register_ftrace_command(struct ftrace_func_command *cmd);
  int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@@ -646,7 -645,6 +646,7 @@@ static inline unsigned long ftrace_loca
  #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
  #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
  #define ftrace_free_filter(ops) do { } while (0)
 +#define ftrace_ops_set_global_filter(ops) do { } while (0)
  
  static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos) { return -ENODEV; }
@@@ -947,6 -945,10 +947,10 @@@ extern int __disable_trace_on_warning
  #define INIT_TRACE_RECURSION          .trace_recursion = 0,
  #endif
  
+ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp,
+                            loff_t *ppos);
  #else /* CONFIG_TRACING */
  static inline void  disable_trace_on_warning(void) { }
  #endif /* CONFIG_TRACING */
diff --combined kernel/sysctl.c
index 1475d2545b7e8ba6ef789ad6276ab5191b1b741f,6ccc60dfbc7ab32e071830a0ecddabf1b57f2854..1a292ebcbbb6aade7d2ac340cd51c92fb282ccac
@@@ -347,6 -347,13 +347,6 @@@ static struct ctl_table kern_table[] = 
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
 -      {
 -              .procname       = "sched_shares_window_ns",
 -              .data           = &sysctl_sched_shares_window,
 -              .maxlen         = sizeof(unsigned int),
 -              .mode           = 0644,
 -              .proc_handler   = proc_dointvec,
 -      },
  #ifdef CONFIG_SCHEDSTATS
        {
                .procname       = "sched_schedstats",
                .data           = &tracepoint_printk,
                .maxlen         = sizeof(tracepoint_printk),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = tracepoint_printk_sysctl,
        },
  #endif
  #ifdef CONFIG_KEXEC_CORE
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
        },
 -      {
 -              .procname       = "kstack_depth_to_print",
 -              .data           = &kstack_depth_to_print,
 -              .maxlen         = sizeof(int),
 -              .mode           = 0644,
 -              .proc_handler   = proc_dointvec,
 -      },
        {
                .procname       = "io_delay_type",
                .data           = &io_delay_type,
@@@ -2389,11 -2403,9 +2389,11 @@@ static void validate_coredump_safety(vo
  #ifdef CONFIG_COREDUMP
        if (suid_dumpable == SUID_DUMP_ROOT &&
            core_pattern[0] != '/' && core_pattern[0] != '|') {
 -              printk(KERN_WARNING "Unsafe core_pattern used with "\
 -                      "suid_dumpable=2. Pipe handler or fully qualified "\
 -                      "core dump path required.\n");
 +              printk(KERN_WARNING
 +"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
 +"Pipe handler or fully qualified core dump path required.\n"
 +"Set kernel.core_pattern before fs.suid_dumpable.\n"
 +              );
        }
  #endif
  }
diff --combined kernel/trace/ftrace.c
index 33dd57f53f88d33c4e55b9c852c7a8b28b198ce1,356bb70d071e0001d2690c1b191dc8b282dd35d0..1f0f547c54da2fee935a89316e6e2e74df7e6f34
@@@ -3511,6 -3511,10 +3511,10 @@@ static int ftrace_match(char *str, stru
                    memcmp(str + slen - g->len, g->search, g->len) == 0)
                        matched = 1;
                break;
+       case MATCH_GLOB:
+               if (glob_match(g->search, str))
+                       matched = 1;
+               break;
        }
  
        return matched;
@@@ -4258,23 -4262,6 +4262,23 @@@ int ftrace_set_filter_ip(struct ftrace_
  }
  EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
  
 +/**
 + * ftrace_ops_set_global_filter - setup ops to use global filters
 + * @ops - the ops which will use the global filters
 + *
 + * ftrace users who need global function trace filtering should call this.
 + * It can set the global filter only if ops were not initialized before.
 + */
 +void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
 +{
 +      if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
 +              return;
 +
 +      ftrace_ops_init(ops);
 +      ops->func_hash = &global_ops.local_hash;
 +}
 +EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
 +
  static int
  ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
                 int reset, int enable)
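
A rough sketch of how a tracer might use the new ftrace_ops_set_global_filter() helper; the ops, callback, and init names are illustrative only, and the call has to happen before the ops are registered, since it only takes effect on ops that have not been initialized yet:

  #include <linux/ftrace.h>

  static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
  {
          /* per-function hook */
  }

  static struct ftrace_ops my_ops = {
          .func = my_trace_func,
  };

  static int __init my_tracer_init(void)
  {
          /* share the filters the user set via set_ftrace_filter (global ops) */
          ftrace_ops_set_global_filter(&my_ops);
          return register_ftrace_function(&my_ops);
  }
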
diff --combined kernel/trace/ring_buffer.c
index 89a2611a16357b87bd4fb3ea35c63f1d9c05744b,7edfd41d506c0e84245ada3e284bdc00d25e701e..a85739efcc304b30b3f2e08fc51aff5e84200628
@@@ -245,7 -245,7 +245,7 @@@ unsigned ring_buffer_event_length(struc
  EXPORT_SYMBOL_GPL(ring_buffer_event_length);
  
  /* inline for ring buffer fast paths */
- static void *
+ static __always_inline void *
  rb_event_data(struct ring_buffer_event *event)
  {
        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
@@@ -479,7 -479,9 +479,7 @@@ struct ring_buffer 
  
        struct ring_buffer_per_cpu      **buffers;
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -      struct notifier_block           cpu_notify;
 -#endif
 +      struct hlist_node               node;
        u64                             (*clock)(void);
  
        struct rb_irq_work              irq_work;
@@@ -1272,6 -1274,11 +1272,6 @@@ static void rb_free_cpu_buffer(struct r
        kfree(cpu_buffer);
  }
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -static int rb_cpu_notify(struct notifier_block *self,
 -                       unsigned long action, void *hcpu);
 -#endif
 -
  /**
   * __ring_buffer_alloc - allocate a new ring_buffer
   * @size: the size in bytes per cpu that is needed.
@@@ -1289,7 -1296,6 +1289,7 @@@ struct ring_buffer *__ring_buffer_alloc
        long nr_pages;
        int bsize;
        int cpu;
 +      int ret;
  
        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
        if (nr_pages < 2)
                nr_pages = 2;
  
 -      /*
 -       * In case of non-hotplug cpu, if the ring-buffer is allocated
 -       * in early initcall, it will not be notified of secondary cpus.
 -       * In that off case, we need to allocate for all possible cpus.
 -       */
 -#ifdef CONFIG_HOTPLUG_CPU
 -      cpu_notifier_register_begin();
 -      cpumask_copy(buffer->cpumask, cpu_online_mask);
 -#else
 -      cpumask_copy(buffer->cpumask, cpu_possible_mask);
 -#endif
        buffer->cpus = nr_cpu_ids;
  
        bsize = sizeof(void *) * nr_cpu_ids;
        if (!buffer->buffers)
                goto fail_free_cpumask;
  
 -      for_each_buffer_cpu(buffer, cpu) {
 -              buffer->buffers[cpu] =
 -                      rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
 -              if (!buffer->buffers[cpu])
 -                      goto fail_free_buffers;
 -      }
 +      cpu = raw_smp_processor_id();
 +      cpumask_set_cpu(cpu, buffer->cpumask);
 +      buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
 +      if (!buffer->buffers[cpu])
 +              goto fail_free_buffers;
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -      buffer->cpu_notify.notifier_call = rb_cpu_notify;
 -      buffer->cpu_notify.priority = 0;
 -      __register_cpu_notifier(&buffer->cpu_notify);
 -      cpu_notifier_register_done();
 -#endif
 +      ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 +      if (ret < 0)
 +              goto fail_free_buffers;
  
        mutex_init(&buffer->mutex);
  
  
   fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
 -#ifdef CONFIG_HOTPLUG_CPU
 -      cpu_notifier_register_done();
 -#endif
  
   fail_free_buffer:
        kfree(buffer);
@@@ -1359,11 -1383,18 +1359,11 @@@ ring_buffer_free(struct ring_buffer *bu
  {
        int cpu;
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -      cpu_notifier_register_begin();
 -      __unregister_cpu_notifier(&buffer->cpu_notify);
 -#endif
 +      cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
  
        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -      cpu_notifier_register_done();
 -#endif
 -
        kfree(buffer->buffers);
        free_cpumask_var(buffer->cpumask);
  
@@@ -1798,48 -1829,48 +1798,48 @@@ void ring_buffer_change_overwrite(struc
  }
  EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
  
- static inline void *
+ static __always_inline void *
  __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
  {
        return bpage->data + index;
  }
  
- static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
+ static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
  {
        return bpage->page->data + index;
  }
  
- static inline struct ring_buffer_event *
+ static __always_inline struct ring_buffer_event *
  rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  {
        return __rb_page_index(cpu_buffer->reader_page,
                               cpu_buffer->reader_page->read);
  }
  
- static inline struct ring_buffer_event *
+ static __always_inline struct ring_buffer_event *
  rb_iter_head_event(struct ring_buffer_iter *iter)
  {
        return __rb_page_index(iter->head_page, iter->head);
  }
  
- static inline unsigned rb_page_commit(struct buffer_page *bpage)
+ static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
  {
        return local_read(&bpage->page->commit);
  }
  
  /* Size is determined by what has been committed */
- static inline unsigned rb_page_size(struct buffer_page *bpage)
+ static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
  {
        return rb_page_commit(bpage);
  }
  
- static inline unsigned
+ static __always_inline unsigned
  rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  {
        return rb_page_commit(cpu_buffer->commit_page);
  }
  
- static inline unsigned
+ static __always_inline unsigned
  rb_event_index(struct ring_buffer_event *event)
  {
        unsigned long addr = (unsigned long)event;
@@@ -2355,7 -2386,7 +2355,7 @@@ static void rb_start_commit(struct ring
        local_inc(&cpu_buffer->commits);
  }
  
- static void
+ static __always_inline void
  rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  {
        unsigned long max_count;
                goto again;
  }
  
- static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+ static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
  {
        unsigned long commits;
  
@@@ -2455,7 -2486,7 +2455,7 @@@ static inline void rb_event_discard(str
                event->time_delta = 1;
  }
  
- static inline bool
+ static __always_inline bool
  rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
                   struct ring_buffer_event *event)
  {
                rb_commit_index(cpu_buffer) == index;
  }
  
- static void
+ static __always_inline void
  rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                      struct ring_buffer_event *event)
  {
@@@ -2702,7 -2733,7 +2702,7 @@@ __rb_reserve_next(struct ring_buffer_pe
        return event;
  }
  
- static struct ring_buffer_event *
+ static __always_inline struct ring_buffer_event *
  rb_reserve_next_event(struct ring_buffer *buffer,
                      struct ring_buffer_per_cpu *cpu_buffer,
                      unsigned long length)
@@@ -4602,48 -4633,62 +4602,48 @@@ int ring_buffer_read_page(struct ring_b
  }
  EXPORT_SYMBOL_GPL(ring_buffer_read_page);
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -static int rb_cpu_notify(struct notifier_block *self,
 -                       unsigned long action, void *hcpu)
 +/*
 + * We only allocate new buffers, never free them if the CPU goes down.
 + * If we were to free the buffer, then the user would lose any trace that was in
 + * the buffer.
 + */
 +int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
  {
 -      struct ring_buffer *buffer =
 -              container_of(self, struct ring_buffer, cpu_notify);
 -      long cpu = (long)hcpu;
 +      struct ring_buffer *buffer;
        long nr_pages_same;
        int cpu_i;
        unsigned long nr_pages;
  
 -      switch (action) {
 -      case CPU_UP_PREPARE:
 -      case CPU_UP_PREPARE_FROZEN:
 -              if (cpumask_test_cpu(cpu, buffer->cpumask))
 -                      return NOTIFY_OK;
 -
 -              nr_pages = 0;
 -              nr_pages_same = 1;
 -              /* check if all cpu sizes are same */
 -              for_each_buffer_cpu(buffer, cpu_i) {
 -                      /* fill in the size from first enabled cpu */
 -                      if (nr_pages == 0)
 -                              nr_pages = buffer->buffers[cpu_i]->nr_pages;
 -                      if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
 -                              nr_pages_same = 0;
 -                              break;
 -                      }
 -              }
 -              /* allocate minimum pages, user can later expand it */
 -              if (!nr_pages_same)
 -                      nr_pages = 2;
 -              buffer->buffers[cpu] =
 -                      rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
 -              if (!buffer->buffers[cpu]) {
 -                      WARN(1, "failed to allocate ring buffer on CPU %ld\n",
 -                           cpu);
 -                      return NOTIFY_OK;
 +      buffer = container_of(node, struct ring_buffer, node);
 +      if (cpumask_test_cpu(cpu, buffer->cpumask))
 +              return 0;
 +
 +      nr_pages = 0;
 +      nr_pages_same = 1;
 +      /* check if all cpu sizes are same */
 +      for_each_buffer_cpu(buffer, cpu_i) {
 +              /* fill in the size from first enabled cpu */
 +              if (nr_pages == 0)
 +                      nr_pages = buffer->buffers[cpu_i]->nr_pages;
 +              if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
 +                      nr_pages_same = 0;
 +                      break;
                }
 -              smp_wmb();
 -              cpumask_set_cpu(cpu, buffer->cpumask);
 -              break;
 -      case CPU_DOWN_PREPARE:
 -      case CPU_DOWN_PREPARE_FROZEN:
 -              /*
 -               * Do nothing.
 -               *  If we were to free the buffer, then the user would
 -               *  lose any trace that was in the buffer.
 -               */
 -              break;
 -      default:
 -              break;
        }
 -      return NOTIFY_OK;
 +      /* allocate minimum pages, user can later expand it */
 +      if (!nr_pages_same)
 +              nr_pages = 2;
 +      buffer->buffers[cpu] =
 +              rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
 +      if (!buffer->buffers[cpu]) {
 +              WARN(1, "failed to allocate ring buffer on CPU %u\n",
 +                   cpu);
 +              return -ENOMEM;
 +      }
 +      smp_wmb();
 +      cpumask_set_cpu(cpu, buffer->cpumask);
 +      return 0;
  }
 -#endif
  
  #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
  /*
diff --combined kernel/trace/trace.c
index 54d5270a50422ff9634e146639d0194e06afc0a7,970aafe80b494a6eee976d9f1553f48a3a6348f9..66f829c47bec77c0dbfc1b9d45c92f303e21e5b3
@@@ -40,6 -40,7 +40,7 @@@
  #include <linux/poll.h>
  #include <linux/nmi.h>
  #include <linux/fs.h>
+ #include <linux/trace.h>
  #include <linux/sched/rt.h>
  
  #include "trace.h"
@@@ -68,6 -69,7 +69,7 @@@ bool __read_mostly tracing_selftest_dis
  /* Pipe tracepoints to printk */
  struct trace_iterator *tracepoint_print_iter;
  int tracepoint_printk;
+ static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
  
  /* For tracers that don't implement custom flags */
  static struct tracer_opt dummy_tracer_opt[] = {
@@@ -738,6 -740,31 +740,31 @@@ static inline void ftrace_trace_stack(s
  
  #endif
  
+ static __always_inline void
+ trace_event_setup(struct ring_buffer_event *event,
+                 int type, unsigned long flags, int pc)
+ {
+       struct trace_entry *ent = ring_buffer_event_data(event);
+       tracing_generic_entry_update(ent, flags, pc);
+       ent->type = type;
+ }
+ static __always_inline struct ring_buffer_event *
+ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
+                         int type,
+                         unsigned long len,
+                         unsigned long flags, int pc)
+ {
+       struct ring_buffer_event *event;
+       event = ring_buffer_lock_reserve(buffer, len);
+       if (event != NULL)
+               trace_event_setup(event, type, flags, pc);
+       return event;
+ }
  static void tracer_tracing_on(struct trace_array *tr)
  {
        if (tr->trace_buffer.buffer)
@@@ -767,6 -794,22 +794,22 @@@ void tracing_on(void
  }
  EXPORT_SYMBOL_GPL(tracing_on);
  
+ static __always_inline void
+ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+ {
+       __this_cpu_write(trace_cmdline_save, true);
+       /* If this is the temp buffer, we need to commit fully */
+       if (this_cpu_read(trace_buffered_event) == event) {
+               /* Length is in event->array[0] */
+               ring_buffer_write(buffer, event->array[0], &event->array[1]);
+               /* Release the temp buffer */
+               this_cpu_dec(trace_buffered_event_cnt);
+       } else
+               ring_buffer_unlock_commit(buffer, event);
+ }
  /**
   * __trace_puts - write a constant string into the trace buffer.
   * @ip:          The address of the caller
@@@ -794,8 -837,8 +837,8 @@@ int __trace_puts(unsigned long ip, cons
  
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                         irq_flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
+                                           irq_flags, pc);
        if (!event)
                return 0;
  
@@@ -842,8 -885,8 +885,8 @@@ int __trace_bputs(unsigned long ip, con
  
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                         irq_flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+                                           irq_flags, pc);
        if (!event)
                return 0;
  
@@@ -1125,7 -1168,6 +1168,7 @@@ static struct 
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
 +      { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
  };
  
@@@ -1907,35 -1949,19 +1950,19 @@@ tracing_generic_entry_update(struct tra
  #endif
                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-               ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+               ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
  }
  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
  
- static __always_inline void
- trace_event_setup(struct ring_buffer_event *event,
-                 int type, unsigned long flags, int pc)
- {
-       struct trace_entry *ent = ring_buffer_event_data(event);
-       tracing_generic_entry_update(ent, flags, pc);
-       ent->type = type;
- }
  struct ring_buffer_event *
  trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
  {
-       struct ring_buffer_event *event;
-       event = ring_buffer_lock_reserve(buffer, len);
-       if (event != NULL)
-               trace_event_setup(event, type, flags, pc);
-       return event;
+       return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
  }
  
  DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@@ -2049,21 -2075,6 +2076,6 @@@ void trace_buffered_event_disable(void
        preempt_enable();
  }
  
- void
- __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
- {
-       __this_cpu_write(trace_cmdline_save, true);
-       /* If this is the temp buffer, we need to commit fully */
-       if (this_cpu_read(trace_buffered_event) == event) {
-               /* Length is in event->array[0] */
-               ring_buffer_write(buffer, event->array[0], &event->array[1]);
-               /* Release the temp buffer */
-               this_cpu_dec(trace_buffered_event_cnt);
-       } else
-               ring_buffer_unlock_commit(buffer, event);
- }
  static struct ring_buffer *temp_buffer;
  
  struct ring_buffer_event *
@@@ -2090,8 -2101,8 +2102,8 @@@ trace_event_buffer_lock_reserve(struct 
                this_cpu_dec(trace_buffered_event_cnt);
        }
  
-       entry = trace_buffer_lock_reserve(*current_rb,
-                                        type, len, flags, pc);
+       entry = __trace_buffer_lock_reserve(*current_rb,
+                                           type, len, flags, pc);
        /*
         * If tracing is off, but we have triggers enabled
         * we still need to look at the event data. Use the temp_buffer
         */
        if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
                *current_rb = temp_buffer;
-               entry = trace_buffer_lock_reserve(*current_rb,
-                                                 type, len, flags, pc);
+               entry = __trace_buffer_lock_reserve(*current_rb,
+                                                   type, len, flags, pc);
        }
        return entry;
  }
  EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
  
+ static DEFINE_SPINLOCK(tracepoint_iter_lock);
+ static DEFINE_MUTEX(tracepoint_printk_mutex);
+ static void output_printk(struct trace_event_buffer *fbuffer)
+ {
+       struct trace_event_call *event_call;
+       struct trace_event *event;
+       unsigned long flags;
+       struct trace_iterator *iter = tracepoint_print_iter;
+       /* We should never get here if iter is NULL */
+       if (WARN_ON_ONCE(!iter))
+               return;
+       event_call = fbuffer->trace_file->event_call;
+       if (!event_call || !event_call->event.funcs ||
+           !event_call->event.funcs->trace)
+               return;
+       event = &fbuffer->trace_file->event_call->event;
+       spin_lock_irqsave(&tracepoint_iter_lock, flags);
+       trace_seq_init(&iter->seq);
+       iter->ent = fbuffer->entry;
+       event_call->event.funcs->trace(iter, 0, event);
+       trace_seq_putc(&iter->seq, 0);
+       printk("%s", iter->seq.buffer);
+       spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+ }
+ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp,
+                            loff_t *ppos)
+ {
+       int save_tracepoint_printk;
+       int ret;
+       mutex_lock(&tracepoint_printk_mutex);
+       save_tracepoint_printk = tracepoint_printk;
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       /*
+        * This will force exiting early, as tracepoint_printk
+        * is always zero when tracepoint_printk_iter is not allocated
+        */
+       if (!tracepoint_print_iter)
+               tracepoint_printk = 0;
+       if (save_tracepoint_printk == tracepoint_printk)
+               goto out;
+       if (tracepoint_printk)
+               static_key_enable(&tracepoint_printk_key.key);
+       else
+               static_key_disable(&tracepoint_printk_key.key);
+  out:
+       mutex_unlock(&tracepoint_printk_mutex);
+       return ret;
+ }
+ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+ {
+       if (static_key_false(&tracepoint_printk_key.key))
+               output_printk(fbuffer);
+       event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+                                   fbuffer->event, fbuffer->entry,
+                                   fbuffer->flags, fbuffer->pc);
+ }
+ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
  void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
        ftrace_trace_userstack(buffer, flags, pc);
  }
  
+ /*
+  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
+  */
+ void
+ trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+                                  struct ring_buffer_event *event)
+ {
+       __buffer_unlock_commit(buffer, event);
+ }
+ static void
+ trace_process_export(struct trace_export *export,
+              struct ring_buffer_event *event)
+ {
+       struct trace_entry *entry;
+       unsigned int size = 0;
+       entry = ring_buffer_event_data(event);
+       size = ring_buffer_event_length(event);
+       export->write(entry, size);
+ }
+ static DEFINE_MUTEX(ftrace_export_lock);
+ static struct trace_export __rcu *ftrace_exports_list __read_mostly;
+ static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
+ static inline void ftrace_exports_enable(void)
+ {
+       static_branch_enable(&ftrace_exports_enabled);
+ }
+ static inline void ftrace_exports_disable(void)
+ {
+       static_branch_disable(&ftrace_exports_enabled);
+ }
+ void ftrace_exports(struct ring_buffer_event *event)
+ {
+       struct trace_export *export;
+       preempt_disable_notrace();
+       export = rcu_dereference_raw_notrace(ftrace_exports_list);
+       while (export) {
+               trace_process_export(export, event);
+               export = rcu_dereference_raw_notrace(export->next);
+       }
+       preempt_enable_notrace();
+ }
+ static inline void
+ add_trace_export(struct trace_export **list, struct trace_export *export)
+ {
+       rcu_assign_pointer(export->next, *list);
+       /*
+        * We are adding export to the list, but another
+        * CPU might be walking that list. We need to make sure
+        * the export->next pointer is valid before another CPU sees
+        * the export pointer included in the list.
+        */
+       rcu_assign_pointer(*list, export);
+ }
+ static inline int
+ rm_trace_export(struct trace_export **list, struct trace_export *export)
+ {
+       struct trace_export **p;
+       for (p = list; *p != NULL; p = &(*p)->next)
+               if (*p == export)
+                       break;
+       if (*p != export)
+               return -1;
+       rcu_assign_pointer(*p, (*p)->next);
+       return 0;
+ }
+ static inline void
+ add_ftrace_export(struct trace_export **list, struct trace_export *export)
+ {
+       if (*list == NULL)
+               ftrace_exports_enable();
+       add_trace_export(list, export);
+ }
+ static inline int
+ rm_ftrace_export(struct trace_export **list, struct trace_export *export)
+ {
+       int ret;
+       ret = rm_trace_export(list, export);
+       if (*list == NULL)
+               ftrace_exports_disable();
+       return ret;
+ }
+ int register_ftrace_export(struct trace_export *export)
+ {
+       if (WARN_ON_ONCE(!export->write))
+               return -1;
+       mutex_lock(&ftrace_export_lock);
+       add_ftrace_export(&ftrace_exports_list, export);
+       mutex_unlock(&ftrace_export_lock);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(register_ftrace_export);
+ int unregister_ftrace_export(struct trace_export *export)
+ {
+       int ret;
+       mutex_lock(&ftrace_export_lock);
+       ret = rm_ftrace_export(&ftrace_exports_list, export);
+       mutex_unlock(&ftrace_export_lock);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
  void
  trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
  
-       event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-                                         flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+                                           flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
  
-       if (!call_filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event)) {
+               if (static_branch_unlikely(&ftrace_exports_enabled))
+                       ftrace_exports(event);
                __buffer_unlock_commit(buffer, event);
+       }
  }
  
  #ifdef CONFIG_STACKTRACE
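With the export hook above, once a trace_export is registered, trace_function() hands every committed function-trace event to ftrace_exports(), which walks the export list and calls each ->write() with the raw entry and its length. As a hedged illustration only, a hypothetical module could hook in roughly as below; the <linux/trace.h> header and the exact callback prototype are assumptions inferred from trace_process_export() above, not taken verbatim from this merge.

/* Hypothetical sketch (not part of this merge): a module that registers a
 * trace_export so every function-trace event is also copied to its own
 * sink, e.g. an STM channel or another device. */
#include <linux/module.h>
#include <linux/trace.h>        /* assumed home of struct trace_export */

static void my_sink_write(const void *entry, unsigned int size)
{
        /* push 'size' bytes of the raw trace entry to the external sink */
}

static struct trace_export my_export = {
        .write = my_sink_write,
};

static int __init my_export_init(void)
{
        /* register_ftrace_export() returns 0 on success, -1 if .write is missing */
        return register_ftrace_export(&my_export);
}

static void __exit my_export_exit(void)
{
        unregister_ftrace_export(&my_export);
}

module_init(my_export_init);
module_exit(my_export_exit);
MODULE_LICENSE("GPL");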
@@@ -2216,8 -2438,8 +2439,8 @@@ static void __ftrace_trace_stack(struc
  
        size *= sizeof(unsigned long);
  
-       event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                         sizeof(*entry) + size, flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+                                           sizeof(*entry) + size, flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@@ -2318,8 -2540,8 +2541,8 @@@ ftrace_trace_userstack(struct ring_buff
  
        __this_cpu_inc(user_stack_count);
  
-       event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-                                         sizeof(*entry), flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
+                                           sizeof(*entry), flags, pc);
        if (!event)
                goto out_drop_count;
        entry   = ring_buffer_event_data(event);
@@@ -2489,8 -2711,8 +2712,8 @@@ int trace_vbprintk(unsigned long ip, co
        local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->trace_buffer.buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-                                         flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+                                           flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@@ -2545,8 -2767,8 +2768,8 @@@ __trace_array_vprintk(struct ring_buffe
  
        local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
-       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                         flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                           flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@@ -4055,6 -4277,7 +4278,7 @@@ static const char readme_msg[] 
        "     x86-tsc:   TSC cycle counter\n"
  #endif
        "\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
+       "\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
        "  tracing_cpumask\t- Limit which CPUs to trace\n"
        "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
        "\t\t\t  Remove sub-buffer with rmdir\n"
        "\n  available_filter_functions - list of functions that can be filtered on\n"
        "  set_ftrace_filter\t- echo function name in here to only trace these\n"
        "\t\t\t  functions\n"
-       "\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+       "\t     accepts: func_full_name or glob-matching-pattern\n"
        "\t     modules: Can select a group via module\n"
        "\t      Format: :mod:<module-name>\n"
        "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
@@@ -5519,21 -5742,18 +5743,18 @@@ static ssize_
  tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
  {
-       unsigned long addr = (unsigned long)ubuf;
        struct trace_array *tr = filp->private_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
-       struct page *pages[2];
-       void *map_page[2];
-       int nr_pages = 1;
+       const char faulted[] = "<faulted>";
        ssize_t written;
-       int offset;
        int size;
        int len;
-       int ret;
-       int i;
+ /* Used in tracing_mark_raw_write() as well */
+ #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
  
        if (tracing_disabled)
                return -EINVAL;
        if (cnt > TRACE_BUF_SIZE)
                cnt = TRACE_BUF_SIZE;
  
-       /*
-        * Userspace is injecting traces into the kernel trace buffer.
-        * We want to be as non intrusive as possible.
-        * To do so, we do not want to allocate any special buffers
-        * or take any locks, but instead write the userspace data
-        * straight into the ring buffer.
-        *
-        * First we need to pin the userspace buffer into memory,
-        * which, most likely it is, because it just referenced it.
-        * But there's no guarantee that it is. By using get_user_pages_fast()
-        * and kmap_atomic/kunmap_atomic() we can get access to the
-        * pages directly. We then write the data directly into the
-        * ring buffer.
-        */
        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
  
-       /* check if we cross pages */
-       if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
-               nr_pages = 2;
-       offset = addr & (PAGE_SIZE - 1);
-       addr &= PAGE_MASK;
-       ret = get_user_pages_fast(addr, nr_pages, 0, pages);
-       if (ret < nr_pages) {
-               while (--ret >= 0)
-                       put_page(pages[ret]);
-               written = -EFAULT;
-               goto out;
-       }
+       local_save_flags(irq_flags);
+       size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
  
-       for (i = 0; i < nr_pages; i++)
-               map_page[i] = kmap_atomic(pages[i]);
+       /* If less than "<faulted>", then make sure we can still add that */
+       if (cnt < FAULTED_SIZE)
+               size += FAULTED_SIZE - cnt;
  
-       local_save_flags(irq_flags);
-       size = sizeof(*entry) + cnt + 2; /* possible \n added */
        buffer = tr->trace_buffer.buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                         irq_flags, preempt_count());
-       if (!event) {
+       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                           irq_flags, preempt_count());
+       if (unlikely(!event))
                /* Ring buffer disabled, return as if not open for write */
-               written = -EBADF;
-               goto out_unlock;
-       }
+               return -EBADF;
  
        entry = ring_buffer_event_data(event);
        entry->ip = _THIS_IP_;
  
-       if (nr_pages == 2) {
-               len = PAGE_SIZE - offset;
-               memcpy(&entry->buf, map_page[0] + offset, len);
-               memcpy(&entry->buf[len], map_page[1], cnt - len);
+       len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
+       if (len) {
+               memcpy(&entry->buf, faulted, FAULTED_SIZE);
+               cnt = FAULTED_SIZE;
+               written = -EFAULT;
        } else
-               memcpy(&entry->buf, map_page[0] + offset, cnt);
+               written = cnt;
+       len = cnt;
  
        if (entry->buf[cnt - 1] != '\n') {
                entry->buf[cnt] = '\n';
  
        __buffer_unlock_commit(buffer, event);
  
-       written = cnt;
+       if (written > 0)
+               *fpos += written;
  
-       *fpos += written;
+       return written;
+ }
+ /* Limit it for now to 3K (including tag) */
+ #define RAW_DATA_MAX_SIZE (1024*3)
+ static ssize_t
+ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+                                       size_t cnt, loff_t *fpos)
+ {
+       struct trace_array *tr = filp->private_data;
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct raw_data_entry *entry;
+       const char faulted[] = "<faulted>";
+       unsigned long irq_flags;
+       ssize_t written;
+       int size;
+       int len;
+ #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+       if (tracing_disabled)
+               return -EINVAL;
+       if (!(tr->trace_flags & TRACE_ITER_MARKERS))
+               return -EINVAL;
+       /* The marker must at least have a tag id */
+       if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
+               return -EINVAL;
+       if (cnt > TRACE_BUF_SIZE)
+               cnt = TRACE_BUF_SIZE;
+       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+       local_save_flags(irq_flags);
+       size = sizeof(*entry) + cnt;
+       if (cnt < FAULT_SIZE_ID)
+               size += FAULT_SIZE_ID - cnt;
+       buffer = tr->trace_buffer.buffer;
+       event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
+                                           irq_flags, preempt_count());
+       if (!event)
+               /* Ring buffer disabled, return as if not open for write */
+               return -EBADF;
+       entry = ring_buffer_event_data(event);
+       len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
+       if (len) {
+               entry->id = -1;
+               memcpy(&entry->buf, faulted, FAULTED_SIZE);
+               written = -EFAULT;
+       } else
+               written = cnt;
+       __buffer_unlock_commit(buffer, event);
+       if (written > 0)
+               *fpos += written;
  
-  out_unlock:
-       for (i = nr_pages - 1; i >= 0; i--) {
-               kunmap_atomic(map_page[i]);
-               put_page(pages[i]);
-       }
-  out:
        return written;
  }
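tracing_mark_raw_write() expects the user buffer to start with an integer tag id, which lands in entry->id, followed by up to RAW_DATA_MAX_SIZE bytes of opaque payload; on a fault the record is replaced with the "<faulted>" marker and -EFAULT is returned. A minimal userspace sketch of feeding it one record might look as follows; the tracefs path and the tag value are illustrative assumptions, not part of the patch.

/* Hypothetical sketch: write one binary record into trace_marker_raw.
 * The first sizeof(unsigned int) bytes are the tag id, the rest is
 * opaque payload, mirroring the raw_data_entry handling above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/trace_marker_raw"; /* assumed mount point */
        unsigned char buf[64];
        unsigned int tag = 0x1234;              /* example tag id */
        const char payload[] = "binary blob";   /* example payload */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memcpy(buf, &tag, sizeof(tag));
        memcpy(buf + sizeof(tag), payload, sizeof(payload));
        if (write(fd, buf, sizeof(tag) + sizeof(payload)) < 0)
                perror("write");
        close(fd);
        return 0;
}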
  
@@@ -5946,6 -6196,13 +6197,13 @@@ static const struct file_operations tra
        .release        = tracing_release_generic_tr,
  };
  
+ static const struct file_operations tracing_mark_raw_fops = {
+       .open           = tracing_open_generic_tr,
+       .write          = tracing_mark_raw_write,
+       .llseek         = generic_file_llseek,
+       .release        = tracing_release_generic_tr,
+ };
  static const struct file_operations trace_clock_fops = {
        .open           = tracing_clock_open,
        .read           = seq_read,
@@@ -7215,6 -7472,9 +7473,9 @@@ init_tracer_tracefs(struct trace_array 
        trace_create_file("trace_marker", 0220, d_tracer,
                          tr, &tracing_mark_fops);
  
+       trace_create_file("trace_marker_raw", 0220, d_tracer,
+                         tr, &tracing_mark_raw_fops);
        trace_create_file("trace_clock", 0644, d_tracer, tr,
                          &trace_clock_fops);
  
@@@ -7660,21 -7920,10 +7921,21 @@@ __init static int tracer_alloc_buffers(
  
        raw_spin_lock_init(&global_trace.start_lock);
  
 +      /*
 +       * The prepare callbacks allocate some memory for the ring buffer. We
 +       * don't free the buffer if the CPU goes down. If we were to free
 +       * the buffer, then the user would lose any trace that was in the
 +       * buffer. The memory will be removed once the "instance" is removed.
 +       */
 +      ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 +                                    "trace/RB:prepare", trace_rb_cpu_prepare,
 +                                    NULL);
 +      if (ret < 0)
 +              goto out_free_cpumask;
        /* Used for event triggers */
        temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
        if (!temp_buffer)
 -              goto out_free_cpumask;
 +              goto out_rm_hp_state;
  
        if (trace_create_savedcmd() < 0)
                goto out_free_temp_buffer;
@@@ -7735,8 -7984,6 +7996,8 @@@ out_free_savedcmd
        free_saved_cmdlines_buffer(savedcmd);
  out_free_temp_buffer:
        ring_buffer_free(temp_buffer);
 +out_rm_hp_state:
 +      cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
  out_free_cpumask:
        free_cpumask_var(global_trace.tracing_cpumask);
  out_free_buffer_mask:
@@@ -7752,6 -7999,8 +8013,8 @@@ void __init trace_init(void
                        kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
                if (WARN_ON(!tracepoint_print_iter))
                        tracepoint_printk = 0;
+               else
+                       static_key_enable(&tracepoint_printk_key.key);
        }
        tracer_alloc_buffers();
        trace_event_init();
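With tracepoint_printk now behind a static key, trace_init() only enables the key when the print iterator was successfully allocated, and tracepoint_printk_sysctl() flips it at runtime when the value actually changes. Assuming the knob is exposed through the usual procfs sysctl path (an assumption here, not something this hunk shows), it can be toggled from userspace roughly like this:

/* Hypothetical sketch: toggle tracepoint_printk at runtime. The write ends
 * up in tracepoint_printk_sysctl() above, which enables or disables the
 * static key. The /proc path is assumed, not taken from this merge. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = "/proc/sys/kernel/tracepoint_printk";
        const char *val = (argc > 1 && argv[1][0] == '0') ? "0\n" : "1\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, val, 2) < 0)
                perror("write");
        close(fd);
        return 0;
}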