Git Repo - linux.git/commitdiff
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author: Linus Torvalds <[email protected]>
        Tue, 28 Jan 2020 17:44:15 +0000 (09:44 -0800)
committer: Linus Torvalds <[email protected]>
        Tue, 28 Jan 2020 17:44:15 +0000 (09:44 -0800)
Pull perf updates from Ingo Molnar:
 "Kernel side changes:

   - Ftrace is one of the last W^X violators (after this only KLP is
     left). These patches move it over to the generic text_poke()
     interface and thereby get rid of this oddity. This requires a
     surprising amount of surgery, by Peter Zijlstra.

   - x86/AMD PMUs: add support for 'Large Increment per Cycle Events' to
     count certain types of events that have a special, quirky hw ABI
     (by Kim Phillips)

   - kprobes fixes by Masami Hiramatsu

  Lots of tooling updates as well, the following subcommands were
  updated: annotate/report/top, c2c, clang, record, report/top TUI,
  sched timehist, tests; plus updates were done to the gtk ui, libperf,
  headers and the parser"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
  perf/x86/amd: Add support for Large Increment per Cycle Events
  perf/x86/amd: Constrain Large Increment per Cycle events
  perf/x86/intel/rapl: Add Comet Lake support
  tracing: Initialize ret in syscall_enter_define_fields()
  perf header: Use last modification time for timestamp
  perf c2c: Fix return type for histogram sorting comparision functions
  perf beauty sockaddr: Fix augmented syscall format warning
  perf/ui/gtk: Fix gtk2 build
  perf ui gtk: Add missing zalloc object
  perf tools: Use %define api.pure full instead of %pure-parser
  libperf: Setup initial evlist::all_cpus value
  perf report: Fix no libunwind compiled warning break s390 issue
  perf tools: Support --prefix/--prefix-strip
  perf report: Clarify in help that --children is default
  tools build: Fix test-clang.cpp with Clang 8+
  perf clang: Fix build with Clang 9
  kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic
  tools lib: Fix builds when glibc contains strlcpy()
  perf report/top: Make 'e' visible in the help and make it toggle showing callchains
  perf report/top: Do not offer annotation for symbols without samples
  ...

1  2 
arch/x86/kernel/ftrace.c
arch/x86/kernel/kprobes/core.c
arch/x86/mm/init_32.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_uprobe.c

diff --combined arch/x86/kernel/ftrace.c
index 2009047bb0156ece34ff93e3a3b86d9700a32724,108ee96f8b6632723f122a4741d0eeef40394373..37a0aeaf89e771b63bccbeab92979469cbd96ecb
@@@ -23,7 -23,6 +23,7 @@@
  #include <linux/list.h>
  #include <linux/module.h>
  #include <linux/memory.h>
 +#include <linux/vmalloc.h>
  
  #include <trace/syscall.h>
  
@@@ -35,6 -34,8 +35,8 @@@
  
  #ifdef CONFIG_DYNAMIC_FTRACE
  
+ static int ftrace_poke_late = 0;
  int ftrace_arch_code_modify_prepare(void)
      __acquires(&text_mutex)
  {
         * ftrace has it set to "read/write".
         */
        mutex_lock(&text_mutex);
-       set_kernel_text_rw();
-       set_all_modules_text_rw();
+       ftrace_poke_late = 1;
        return 0;
  }
  
  int ftrace_arch_code_modify_post_process(void)
      __releases(&text_mutex)
  {
-       set_all_modules_text_ro();
-       set_kernel_text_ro();
+       /*
+        * ftrace_make_{call,nop}() may be called during
+        * module load, and we need to finish the text_poke_queue()
+        * that they do, here.
+        */
+       text_poke_finish();
+       ftrace_poke_late = 0;
        mutex_unlock(&text_mutex);
        return 0;
  }
  
- union ftrace_code_union {
-       char code[MCOUNT_INSN_SIZE];
-       struct {
-               unsigned char op;
-               int offset;
-       } __attribute__((packed));
- };
- static int ftrace_calc_offset(long ip, long addr)
- {
-       return (int)(addr - ip);
- }
- static unsigned char *
- ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
- {
-       static union ftrace_code_union calc;
-       calc.op         = op;
-       calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-       return calc.code;
- }
- static unsigned char *
- ftrace_call_replace(unsigned long ip, unsigned long addr)
- {
-       return ftrace_text_replace(0xe8, ip, addr);
- }
- static inline int
- within(unsigned long addr, unsigned long start, unsigned long end)
- {
-       return addr >= start && addr < end;
- }
- static unsigned long text_ip_addr(unsigned long ip)
+ static const char *ftrace_nop_replace(void)
  {
-       /*
-        * On x86_64, kernel text mappings are mapped read-only, so we use
-        * the kernel identity mapping instead of the kernel text mapping
-        * to modify the kernel text.
-        *
-        * For 32bit kernels, these mappings are same and we can use
-        * kernel identity mapping to modify code.
-        */
-       if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-               ip = (unsigned long)__va(__pa_symbol(ip));
-       return ip;
+       return ideal_nops[NOP_ATOMIC5];
  }
  
- static const unsigned char *ftrace_nop_replace(void)
+ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  {
-       return ideal_nops[NOP_ATOMIC5];
+       return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
  }
  
- static int
- ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code)
+ static int ftrace_verify_code(unsigned long ip, const char *old_code)
  {
-       unsigned char replaced[MCOUNT_INSN_SIZE];
-       ftrace_expected = old_code;
+       char cur_code[MCOUNT_INSN_SIZE];
  
        /*
         * Note:
         * Carefully read and modify the code with probe_kernel_*(), and make
         * sure what we read is what we expected it to be before modifying it.
         */
        /* read the text we want to modify */
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
+               WARN_ON(1);
                return -EFAULT;
+       }
  
        /* Make sure it is what we expect it to be */
-       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+       if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
+               WARN_ON(1);
                return -EINVAL;
+       }
  
-       ip = text_ip_addr(ip);
-       /* replace the text with the new text */
-       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
-               return -EPERM;
+       return 0;
+ }
  
-       sync_core();
+ /*
+  * Marked __ref because it calls text_poke_early() which is .init.text. That is
+  * ok because that call will happen early, during boot, when .init sections are
+  * still present.
+  */
+ static int __ref
+ ftrace_modify_code_direct(unsigned long ip, const char *old_code,
+                         const char *new_code)
+ {
+       int ret = ftrace_verify_code(ip, old_code);
+       if (ret)
+               return ret;
  
+       /* replace the text with the new text */
+       if (ftrace_poke_late)
+               text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
+       else
+               text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
        return 0;
  }
  
- int ftrace_make_nop(struct module *mod,
-                   struct dyn_ftrace *rec, unsigned long addr)
+ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
  {
-       unsigned const char *new, *old;
        unsigned long ip = rec->ip;
+       const char *new, *old;
  
        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
-               return ftrace_modify_code_direct(rec->ip, old, new);
-       ftrace_expected = NULL;
+               return ftrace_modify_code_direct(ip, old, new);
  
-       /* Normal cases use add_brk_on_nop */
+       /*
+        * x86 overrides ftrace_replace_code -- this function will never be used
+        * in this case.
+        */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
  }
  
  int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  {
-       unsigned const char *new, *old;
        unsigned long ip = rec->ip;
+       const char *new, *old;
  
        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);
        return ftrace_modify_code_direct(rec->ip, old, new);
  }
  
- /*
-  * The modifying_ftrace_code is used to tell the breakpoint
-  * handler to call ftrace_int3_handler(). If it fails to
-  * call this handler for a breakpoint added by ftrace, then
-  * the kernel may crash.
-  *
-  * As atomic_writes on x86 do not need a barrier, we do not
-  * need to add smp_mb()s for this to work. It is also considered
-  * that we can not read the modifying_ftrace_code before
-  * executing the breakpoint. That would be quite remarkable if
-  * it could do that. Here's the flow that is required:
-  *
-  *   CPU-0                          CPU-1
-  *
-  * atomic_inc(mfc);
-  * write int3s
-  *                            <trap-int3> // implicit (r)mb
-  *                            if (atomic_read(mfc))
-  *                                    call ftrace_int3_handler()
-  *
-  * Then when we are finished:
-  *
-  * atomic_dec(mfc);
-  *
-  * If we hit a breakpoint that was not set by ftrace, it does not
-  * matter if ftrace_int3_handler() is called or not. It will
-  * simply be ignored. But it is crucial that a ftrace nop/caller
-  * breakpoint is handled. No other user should ever place a
-  * breakpoint on an ftrace nop/caller location. It must only
-  * be done by this code.
-  */
- atomic_t modifying_ftrace_code __read_mostly;
- static int
- ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code);
  /*
   * Should never be called:
   *  As it is only called by __ftrace_replace_code() which is called by
@@@ -238,452 -171,84 +172,84 @@@ int ftrace_modify_call(struct dyn_ftrac
                                 unsigned long addr)
  {
        WARN_ON(1);
-       ftrace_expected = NULL;
        return -EINVAL;
  }
  
- static unsigned long ftrace_update_func;
- static unsigned long ftrace_update_func_call;
- static int update_ftrace_func(unsigned long ip, void *new)
- {
-       unsigned char old[MCOUNT_INSN_SIZE];
-       int ret;
-       memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
-       ftrace_update_func = ip;
-       /* Make sure the breakpoints see the ftrace_update_func update */
-       smp_wmb();
-       /* See comment above by declaration of modifying_ftrace_code */
-       atomic_inc(&modifying_ftrace_code);
-       ret = ftrace_modify_code(ip, old, new);
-       atomic_dec(&modifying_ftrace_code);
-       return ret;
- }
  int ftrace_update_ftrace_func(ftrace_func_t func)
- {
-       unsigned long ip = (unsigned long)(&ftrace_call);
-       unsigned char *new;
-       int ret;
-       ftrace_update_func_call = (unsigned long)func;
-       new = ftrace_call_replace(ip, (unsigned long)func);
-       ret = update_ftrace_func(ip, new);
-       /* Also update the regs callback function */
-       if (!ret) {
-               ip = (unsigned long)(&ftrace_regs_call);
-               new = ftrace_call_replace(ip, (unsigned long)func);
-               ret = update_ftrace_func(ip, new);
-       }
-       return ret;
- }
- static nokprobe_inline int is_ftrace_caller(unsigned long ip)
- {
-       if (ip == ftrace_update_func)
-               return 1;
-       return 0;
- }
- /*
-  * A breakpoint was added to the code address we are about to
-  * modify, and this is the handle that will just skip over it.
-  * We are either changing a nop into a trace call, or a trace
-  * call to a nop. While the change is taking place, we treat
-  * it just like it was a nop.
-  */
- int ftrace_int3_handler(struct pt_regs *regs)
  {
        unsigned long ip;
+       const char *new;
  
-       if (WARN_ON_ONCE(!regs))
-               return 0;
-       ip = regs->ip - INT3_INSN_SIZE;
-       if (ftrace_location(ip)) {
-               int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
-               return 1;
-       } else if (is_ftrace_caller(ip)) {
-               if (!ftrace_update_func_call) {
-                       int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
-                       return 1;
-               }
-               int3_emulate_call(regs, ftrace_update_func_call);
-               return 1;
-       }
-       return 0;
- }
- NOKPROBE_SYMBOL(ftrace_int3_handler);
- static int ftrace_write(unsigned long ip, const char *val, int size)
- {
-       ip = text_ip_addr(ip);
-       if (probe_kernel_write((void *)ip, val, size))
-               return -EPERM;
-       return 0;
- }
- static int add_break(unsigned long ip, const char *old)
- {
-       unsigned char replaced[MCOUNT_INSN_SIZE];
-       unsigned char brk = BREAKPOINT_INSTRUCTION;
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
-               return -EFAULT;
-       ftrace_expected = old;
-       /* Make sure it is what we expect it to be */
-       if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
-               return -EINVAL;
-       return ftrace_write(ip, &brk, 1);
- }
- static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
- {
-       unsigned const char *old;
-       unsigned long ip = rec->ip;
-       old = ftrace_call_replace(ip, addr);
-       return add_break(rec->ip, old);
- }
- static int add_brk_on_nop(struct dyn_ftrace *rec)
- {
-       unsigned const char *old;
-       old = ftrace_nop_replace();
-       return add_break(rec->ip, old);
- }
- static int add_breakpoints(struct dyn_ftrace *rec, bool enable)
- {
-       unsigned long ftrace_addr;
-       int ret;
-       ftrace_addr = ftrace_get_addr_curr(rec);
-       ret = ftrace_test_record(rec, enable);
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return add_brk_on_nop(rec);
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return add_brk_on_call(rec, ftrace_addr);
-       }
-       return 0;
- }
- /*
-  * On error, we need to remove breakpoints. This needs to
-  * be done caefully. If the address does not currently have a
-  * breakpoint, we know we are done. Otherwise, we look at the
-  * remaining 4 bytes of the instruction. If it matches a nop
-  * we replace the breakpoint with the nop. Otherwise we replace
-  * it with the call instruction.
-  */
- static int remove_breakpoint(struct dyn_ftrace *rec)
- {
-       unsigned char ins[MCOUNT_INSN_SIZE];
-       unsigned char brk = BREAKPOINT_INSTRUCTION;
-       const unsigned char *nop;
-       unsigned long ftrace_addr;
-       unsigned long ip = rec->ip;
-       /* If we fail the read, just give up */
-       if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
-               return -EFAULT;
-       /* If this does not have a breakpoint, we are done */
-       if (ins[0] != brk)
-               return 0;
-       nop = ftrace_nop_replace();
-       /*
-        * If the last 4 bytes of the instruction do not match
-        * a nop, then we assume that this is a call to ftrace_addr.
-        */
-       if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
-               /*
-                * For extra paranoidism, we check if the breakpoint is on
-                * a call that would actually jump to the ftrace_addr.
-                * If not, don't touch the breakpoint, we make just create
-                * a disaster.
-                */
-               ftrace_addr = ftrace_get_addr_new(rec);
-               nop = ftrace_call_replace(ip, ftrace_addr);
-               if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
-                       goto update;
-               /* Check both ftrace_addr and ftrace_old_addr */
-               ftrace_addr = ftrace_get_addr_curr(rec);
-               nop = ftrace_call_replace(ip, ftrace_addr);
-               ftrace_expected = nop;
-               if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
-                       return -EINVAL;
-       }
-  update:
-       return ftrace_write(ip, nop, 1);
- }
- static int add_update_code(unsigned long ip, unsigned const char *new)
- {
-       /* skip breakpoint */
-       ip++;
-       new++;
-       return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
- }
- static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
- {
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-       new = ftrace_call_replace(ip, addr);
-       return add_update_code(ip, new);
- }
- static int add_update_nop(struct dyn_ftrace *rec)
- {
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-       new = ftrace_nop_replace();
-       return add_update_code(ip, new);
- }
- static int add_update(struct dyn_ftrace *rec, bool enable)
- {
-       unsigned long ftrace_addr;
-       int ret;
-       ret = ftrace_test_record(rec, enable);
-       ftrace_addr  = ftrace_get_addr_new(rec);
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return add_update_call(rec, ftrace_addr);
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return add_update_nop(rec);
-       }
-       return 0;
- }
- static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
- {
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-       new = ftrace_call_replace(ip, addr);
-       return ftrace_write(ip, new, 1);
- }
- static int finish_update_nop(struct dyn_ftrace *rec)
- {
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-       new = ftrace_nop_replace();
-       return ftrace_write(ip, new, 1);
- }
- static int finish_update(struct dyn_ftrace *rec, bool enable)
- {
-       unsigned long ftrace_addr;
-       int ret;
-       ret = ftrace_update_record(rec, enable);
-       ftrace_addr = ftrace_get_addr_new(rec);
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return finish_update_call(rec, ftrace_addr);
+       ip = (unsigned long)(&ftrace_call);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
  
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return finish_update_nop(rec);
-       }
+       ip = (unsigned long)(&ftrace_regs_call);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
  
        return 0;
  }
  
- static void do_sync_core(void *data)
- {
-       sync_core();
- }
- static void run_sync(void)
- {
-       int enable_irqs;
-       /* No need to sync if there's only one CPU */
-       if (num_online_cpus() == 1)
-               return;
-       enable_irqs = irqs_disabled();
-       /* We may be called with interrupts disabled (on bootup). */
-       if (enable_irqs)
-               local_irq_enable();
-       on_each_cpu(do_sync_core, NULL, 1);
-       if (enable_irqs)
-               local_irq_disable();
- }
  void ftrace_replace_code(int enable)
  {
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
-       const char *report = "adding breakpoints";
-       int count = 0;
+       const char *new, *old;
        int ret;
  
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
  
-               ret = add_breakpoints(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
-       }
-       run_sync();
+               switch (ftrace_test_record(rec, enable)) {
+               case FTRACE_UPDATE_IGNORE:
+               default:
+                       continue;
  
-       report = "updating code";
-       count = 0;
+               case FTRACE_UPDATE_MAKE_CALL:
+                       old = ftrace_nop_replace();
+                       break;
  
-       for_ftrace_rec_iter(iter) {
-               rec = ftrace_rec_iter_record(iter);
+               case FTRACE_UPDATE_MODIFY_CALL:
+               case FTRACE_UPDATE_MAKE_NOP:
+                       old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
+                       break;
+               }
  
-               ret = add_update(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
+               ret = ftrace_verify_code(rec->ip, old);
+               if (ret) {
+                       ftrace_bug(ret, rec);
+                       return;
+               }
        }
  
-       run_sync();
-       report = "removing breakpoints";
-       count = 0;
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
  
-               ret = finish_update(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
-       }
+               switch (ftrace_test_record(rec, enable)) {
+               case FTRACE_UPDATE_IGNORE:
+               default:
+                       continue;
  
-       run_sync();
+               case FTRACE_UPDATE_MAKE_CALL:
+               case FTRACE_UPDATE_MODIFY_CALL:
+                       new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
+                       break;
  
-       return;
+               case FTRACE_UPDATE_MAKE_NOP:
+                       new = ftrace_nop_replace();
+                       break;
+               }
  
-  remove_breakpoints:
-       pr_warn("Failed on %s (%d):\n", report, count);
-       ftrace_bug(ret, rec);
-       for_ftrace_rec_iter(iter) {
-               rec = ftrace_rec_iter_record(iter);
-               /*
-                * Breakpoints are handled only when this function is in
-                * progress. The system could not work with them.
-                */
-               if (remove_breakpoint(rec))
-                       BUG();
+               text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
+               ftrace_update_record(rec, enable);
        }
-       run_sync();
- }
- static int
- ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code)
- {
-       int ret;
-       ret = add_break(ip, old_code);
-       if (ret)
-               goto out;
-       run_sync();
-       ret = add_update_code(ip, new_code);
-       if (ret)
-               goto fail_update;
-       run_sync();
-       ret = ftrace_write(ip, new_code, 1);
-       /*
-        * The breakpoint is handled only when this function is in progress.
-        * The system could not work if we could not remove it.
-        */
-       BUG_ON(ret);
-  out:
-       run_sync();
-       return ret;
-  fail_update:
-       /* Also here the system could not work with the breakpoint */
-       if (ftrace_write(ip, old_code, 1))
-               BUG();
-       goto out;
+       text_poke_finish();
  }
  
  void arch_ftrace_update_code(int command)
  {
-       /* See comment above by declaration of modifying_ftrace_code */
-       atomic_inc(&modifying_ftrace_code);
        ftrace_modify_all_code(command);
-       atomic_dec(&modifying_ftrace_code);
  }
  
  int __init ftrace_dyn_arch_init(void)
@@@ -748,6 -313,7 +314,7 @@@ create_trampoline(struct ftrace_ops *op
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
+       unsigned long call_offset;
        unsigned long offset;
        unsigned long npages;
        unsigned long size;
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
+               call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_epilogue;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
+               call_offset = (unsigned long)ftrace_call;
        }
  
        size = end_offset - start_offset;
        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
  
+       /* put in the call to the function */
+       mutex_lock(&text_mutex);
+       call_offset -= start_offset;
+       memcpy(trampoline + call_offset,
+              text_gen_insn(CALL_INSN_OPCODE,
+                            trampoline + call_offset,
+                            ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
+       mutex_unlock(&text_mutex);
        /* ALLOC_TRAMP flags lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
  
        set_vm_flush_reset_perms(trampoline);
  
-       /*
-        * Module allocation needs to be completed by making the page
-        * executable. The page is still writable, which is a security hazard,
-        * but anyhow ftrace breaks W^X completely.
-        */
+       set_memory_ro((unsigned long)trampoline, npages);
        set_memory_x((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
  fail:
@@@ -860,62 -433,54 +434,54 @@@ static unsigned long calc_trampoline_ca
  void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
  {
        ftrace_func_t func;
-       unsigned char *new;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
-       int ret, npages;
+       const char *new;
  
-       if (ops->trampoline) {
-               /*
-                * The ftrace_ops caller may set up its own trampoline.
-                * In such a case, this code must not modify it.
-                */
-               if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
-                       return;
-               npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
-               set_memory_rw(ops->trampoline, npages);
-       } else {
+       if (!ops->trampoline) {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
-               npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+               return;
        }
  
+       /*
+        * The ftrace_ops caller may set up its own trampoline.
+        * In such a case, this code must not modify it.
+        */
+       if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+               return;
        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;
        func = ftrace_ops_get_func(ops);
  
-       ftrace_update_func_call = (unsigned long)func;
+       mutex_lock(&text_mutex);
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
-       ret = update_ftrace_func(ip, new);
-       set_memory_ro(ops->trampoline, npages);
-       /* The update should never fail */
-       WARN_ON(ret);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+       mutex_unlock(&text_mutex);
  }
  
  /* Return the address of the function the trampoline calls */
  static void *addr_from_call(void *ptr)
  {
-       union ftrace_code_union calc;
+       union text_poke_insn call;
        int ret;
  
-       ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
+       ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;
  
        /* Make sure this is a call */
-       if (WARN_ON_ONCE(calc.op != 0xe8)) {
-               pr_warn("Expected e8, got %x\n", calc.op);
+       if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
+               pr_warn("Expected E8, got %x\n", call.opcode);
                return NULL;
        }
  
-       return ptr + MCOUNT_INSN_SIZE + calc.offset;
+       return ptr + CALL_INSN_SIZE + call.disp;
  }
  
  void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
@@@ -982,19 -547,18 +548,18 @@@ void arch_ftrace_trampoline_free(struc
  #ifdef CONFIG_DYNAMIC_FTRACE
  extern void ftrace_graph_call(void);
  
- static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+ static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
  {
-       return ftrace_text_replace(0xe9, ip, addr);
+       return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
  }
  
  static int ftrace_mod_jmp(unsigned long ip, void *func)
  {
-       unsigned char *new;
+       const char *new;
  
-       ftrace_update_func_call = 0UL;
        new = ftrace_jmp_replace(ip, (unsigned long)func);
-       return update_ftrace_func(ip, new);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+       return 0;
  }
  
  int ftrace_enable_ftrace_graph_caller(void)
@@@ -1020,10 -584,9 +585,9 @@@ int ftrace_disable_ftrace_graph_caller(
  void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
  {
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;
        int faulted;
-       unsigned long return_hooker = (unsigned long)
-                               &return_to_handler;
  
        /*
         * When resuming from suspend-to-ram, this function can be indirectly
index a0c223ab72644dac043d96daf3af528996c1ee56,579d30e91a36441420e8db2fad850117ff8e15b6..4d7022a740ab0b3f8dd6e79644e5b4e1080642ff
@@@ -40,7 -40,6 +40,7 @@@
  #include <linux/frame.h>
  #include <linux/kasan.h>
  #include <linux/moduleloader.h>
 +#include <linux/vmalloc.h>
  
  #include <asm/text-patching.h>
  #include <asm/cacheflush.h>
@@@ -120,14 -119,14 +120,14 @@@ __synthesize_relative_insn(void *dest, 
  /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
  void synthesize_reljump(void *dest, void *from, void *to)
  {
-       __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
+       __synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
  }
  NOKPROBE_SYMBOL(synthesize_reljump);
  
  /* Insert a call instruction at address 'from', which calls address 'to'.*/
  void synthesize_relcall(void *dest, void *from, void *to)
  {
-       __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
+       __synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
  }
  NOKPROBE_SYMBOL(synthesize_relcall);
  
@@@ -302,7 -301,7 +302,7 @@@ static int can_probe(unsigned long padd
                 * Another debugging subsystem might insert this breakpoint.
                 * In that case, we can't recover it.
                 */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
                        return 0;
                addr += insn.length;
        }
@@@ -357,7 -356,7 +357,7 @@@ int __copy_instruction(u8 *dest, u8 *sr
                return 0;
  
        /* Another subsystem puts a breakpoint, failed to recover */
-       if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+       if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
                return 0;
  
        /* We should not singlestep on the exception masking instructions */
@@@ -401,14 -400,14 +401,14 @@@ static int prepare_boost(kprobe_opcode_
        int len = insn->length;
  
        if (can_boost(insn, p->addr) &&
-           MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
+           MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
                /*
                 * These instructions can be executed directly if it
                 * jumps back to correct address.
                 */
                synthesize_reljump(buf + len, p->ainsn.insn + len,
                                   p->addr + insn->length);
-               len += RELATIVEJUMP_SIZE;
+               len += JMP32_INSN_SIZE;
                p->ainsn.boostable = true;
        } else {
                p->ainsn.boostable = false;
@@@ -502,12 -501,14 +502,14 @@@ int arch_prepare_kprobe(struct kprobe *
  
  void arch_arm_kprobe(struct kprobe *p)
  {
-       text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+       text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
+       text_poke_sync();
  }
  
  void arch_disarm_kprobe(struct kprobe *p)
  {
        text_poke(p->addr, &p->opcode, 1);
+       text_poke_sync();
  }
  
  void arch_remove_kprobe(struct kprobe *p)
@@@ -610,7 -611,7 +612,7 @@@ static void setup_singlestep(struct kpr
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;
        /* single step inline if the instruction is an int3 */
-       if (p->opcode == BREAKPOINT_INSTRUCTION)
+       if (p->opcode == INT3_INSN_OPCODE)
                regs->ip = (unsigned long)p->addr;
        else
                regs->ip = (unsigned long)p->ainsn.insn;
@@@ -696,7 -697,7 +698,7 @@@ int kprobe_int3_handler(struct pt_regs 
                                reset_current_kprobe();
                        return 1;
                }
-       } else if (*addr != BREAKPOINT_INSTRUCTION) {
+       } else if (*addr != INT3_INSN_OPCODE) {
                /*
                 * The breakpoint instruction was removed right
                 * after we hit it.  Another cpu has removed
diff --combined arch/x86/mm/init_32.c
index 8d29ae8d3eb7e0206cdc84bfb90ec1836da8f579,4d8672f9a208b9fca41d3f553ae5ec6703ba1428..23df4885bbede4a72763e95b5dbab74bcfb1c3a5
@@@ -52,7 -52,6 +52,7 @@@
  #include <asm/page_types.h>
  #include <asm/cpu_entry_area.h>
  #include <asm/init.h>
 +#include <asm/pgtable_areas.h>
  
  #include "mm_internal.h"
  
@@@ -873,34 -872,6 +873,6 @@@ void arch_remove_memory(int nid, u64 st
  
  int kernel_set_to_readonly __read_mostly;
  
- void set_kernel_text_rw(void)
- {
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long size = PFN_ALIGN(_etext) - start;
-       if (!kernel_set_to_readonly)
-               return;
-       pr_debug("Set kernel text: %lx - %lx for read write\n",
-                start, start+size);
-       set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
- }
- void set_kernel_text_ro(void)
- {
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long size = PFN_ALIGN(_etext) - start;
-       if (!kernel_set_to_readonly)
-               return;
-       pr_debug("Set kernel text: %lx - %lx for read only\n",
-                start, start+size);
-       set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- }
  static void mark_nxdata_nx(void)
  {
        /*
index 6ac35b9e195de80491b27a82c7c19a9d5c01b20f,ce376c78439f4cff4b82a62145a873ecd4875668..f2896d13001b81301672379160a512b9d6022263
@@@ -116,7 -116,6 +116,7 @@@ struct hist_field 
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
 +      unsigned int                    ref;
        unsigned int                    size;
        unsigned int                    offset;
        unsigned int                    is_signed;
@@@ -1155,6 -1154,12 +1155,12 @@@ static struct synth_event *find_synth_e
        return NULL;
  }
  
+ static struct trace_event_fields synth_event_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = synth_event_define_fields },
+       {}
+ };
  static int register_synth_event(struct synth_event *event)
  {
        struct trace_event_call *call = &event->call;
  
        INIT_LIST_HEAD(&call->class->fields);
        call->event.funcs = &synth_event_funcs;
-       call->class->define_fields = synth_event_define_fields;
+       call->class->fields_array = synth_event_fields_array;
  
        ret = register_trace_event(&call->event);
        if (!ret) {
@@@ -1767,13 -1772,11 +1773,13 @@@ static struct hist_field *find_var(stru
        struct event_trigger_data *test;
        struct hist_field *hist_field;
  
 +      lockdep_assert_held(&event_mutex);
 +
        hist_field = find_var_field(hist_data, var_name);
        if (hist_field)
                return hist_field;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@@ -1823,9 -1826,7 +1829,9 @@@ static struct hist_field *find_file_var
        struct event_trigger_data *test;
        struct hist_field *hist_field;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      lockdep_assert_held(&event_mutex);
 +
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@@ -2428,16 -2429,8 +2434,16 @@@ static int contains_operator(char *str
        return field_op;
  }
  
 +static void get_hist_field(struct hist_field *hist_field)
 +{
 +      hist_field->ref++;
 +}
 +
  static void __destroy_hist_field(struct hist_field *hist_field)
  {
 +      if (--hist_field->ref > 1)
 +              return;
 +
        kfree(hist_field->var.name);
        kfree(hist_field->name);
        kfree(hist_field->type);
@@@ -2479,8 -2472,6 +2485,8 @@@ static struct hist_field *create_hist_f
        if (!hist_field)
                return NULL;
  
 +      hist_field->ref = 1;
 +
        hist_field->hist_data = hist_data;
  
        if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
@@@ -2676,17 -2667,6 +2682,17 @@@ static struct hist_field *create_var_re
  {
        unsigned long flags = HIST_FIELD_FL_VAR_REF;
        struct hist_field *ref_field;
 +      int i;
 +
 +      /* Check if the variable already exists */
 +      for (i = 0; i < hist_data->n_var_refs; i++) {
 +              ref_field = hist_data->var_refs[i];
 +              if (ref_field->var.idx == var_field->var.idx &&
 +                  ref_field->var.hist_data == var_field->hist_data) {
 +                      get_hist_field(ref_field);
 +                      return ref_field;
 +              }
 +      }
  
        ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
        if (ref_field) {
@@@ -3141,9 -3121,7 +3147,9 @@@ static char *find_trigger_filter(struc
  {
        struct event_trigger_data *test;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      lockdep_assert_held(&event_mutex);
 +
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (test->private_data == hist_data)
                                return test->filter_str;
@@@ -3194,11 -3172,9 +3200,11 @@@ find_compatible_hist(struct hist_trigge
        struct event_trigger_data *test;
        unsigned int n_keys;
  
 +      lockdep_assert_held(&event_mutex);
 +
        n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
  
@@@ -5558,7 -5534,7 +5564,7 @@@ static int hist_show(struct seq_file *m
                goto out_unlock;
        }
  
 -      list_for_each_entry_rcu(data, &event_file->triggers, list) {
 +      list_for_each_entry(data, &event_file->triggers, list) {
                if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
                        hist_trigger_show(m, data, n++);
        }
@@@ -5951,9 -5927,7 +5957,9 @@@ static int hist_register_trigger(char *
        if (hist_data->attrs->name && !named_data)
                goto new;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      lockdep_assert_held(&event_mutex);
 +
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@@ -6037,12 -6011,10 +6043,12 @@@ static bool have_hist_trigger_match(str
        struct event_trigger_data *test, *named_data = NULL;
        bool match = false;
  
 +      lockdep_assert_held(&event_mutex);
 +
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (hist_trigger_match(data, test, named_data, false)) {
                                match = true;
@@@ -6060,12 -6032,10 +6066,12 @@@ static bool hist_trigger_check_refs(str
        struct hist_trigger_data *hist_data = data->private_data;
        struct event_trigger_data *test, *named_data = NULL;
  
 +      lockdep_assert_held(&event_mutex);
 +
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@@ -6087,12 -6057,10 +6093,12 @@@ static void hist_unregister_trigger(cha
        struct event_trigger_data *test, *named_data = NULL;
        bool unregistered = false;
  
 +      lockdep_assert_held(&event_mutex);
 +
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@@ -6118,9 -6086,7 +6124,9 @@@ static bool hist_file_check_refs(struc
        struct hist_trigger_data *hist_data;
        struct event_trigger_data *test;
  
 -      list_for_each_entry_rcu(test, &file->triggers, list) {
 +      lockdep_assert_held(&event_mutex);
 +
 +      list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
                        if (check_var_refs(hist_data))
@@@ -6363,8 -6329,7 +6369,8 @@@ hist_enable_trigger(struct event_trigge
        struct enable_trigger_data *enable_data = data->private_data;
        struct event_trigger_data *test;
  
 -      list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
 +      list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
 +                              lockdep_is_held(&event_mutex)) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (enable_data->enable)
                                test->paused = false;
index 3f54dc2f6e1c6ec5726e6c510277ff8198333a9e,cbdc4f4e64c7511d17b1b692c84aecccf3646d17..aa515d578c5b19638f4122795ab1ee73c930cdb1
@@@ -290,7 -290,7 +290,7 @@@ static struct trace_kprobe *alloc_trace
        INIT_HLIST_NODE(&tk->rp.kp.hlist);
        INIT_LIST_HEAD(&tk->rp.kp.list);
  
 -      ret = trace_probe_init(&tk->tp, event, group);
 +      ret = trace_probe_init(&tk->tp, event, group, false);
        if (ret < 0)
                goto error;
  
@@@ -1555,16 -1555,28 +1555,28 @@@ static struct trace_event_functions kpr
        .trace          = print_kprobe_event
  };
  
+ static struct trace_event_fields kretprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kretprobe_event_define_fields },
+       {}
+ };
+ static struct trace_event_fields kprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kprobe_event_define_fields },
+       {}
+ };
  static inline void init_trace_event_call(struct trace_kprobe *tk)
  {
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
  
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
-               call->class->define_fields = kretprobe_event_define_fields;
+               call->class->fields_array = kretprobe_fields_array;
        } else {
                call->event.funcs = &kprobe_funcs;
-               call->class->define_fields = kprobe_event_define_fields;
+               call->class->fields_array = kprobe_fields_array;
        }
  
        call->flags = TRACE_EVENT_FL_KPROBE;
index 2619bc5ed520c5832ab6e5768088fdf6e33b16a4,476a382f1f1b011e4bfac5107f7fb7597dc4d4f1..7885ebd23d0c10d974abaf6e1a305e4b05974479
@@@ -34,6 -34,12 +34,6 @@@ struct uprobe_trace_entry_head 
  #define DATAOF_TRACE_ENTRY(entry, is_return)          \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
  
 -struct trace_uprobe_filter {
 -      rwlock_t                rwlock;
 -      int                     nr_systemwide;
 -      struct list_head        perf_events;
 -};
 -
  static int trace_uprobe_create(int argc, const char **argv);
  static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
  static int trace_uprobe_release(struct dyn_event *ev);
@@@ -54,6 -60,7 +54,6 @@@ static struct dyn_event_operations trac
   */
  struct trace_uprobe {
        struct dyn_event                devent;
 -      struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
        struct path                     path;
        struct inode                    *inode;
@@@ -344,7 -351,7 +344,7 @@@ alloc_trace_uprobe(const char *group, c
        if (!tu)
                return ERR_PTR(-ENOMEM);
  
 -      ret = trace_probe_init(&tu->tp, event, group);
 +      ret = trace_probe_init(&tu->tp, event, group, true);
        if (ret < 0)
                goto error;
  
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
 -      init_trace_uprobe_filter(&tu->filter);
 +      init_trace_uprobe_filter(tu->tp.event->filter);
        return tu;
  
  error:
@@@ -1060,14 -1067,13 +1060,14 @@@ static void __probe_event_disable(struc
        struct trace_probe *pos;
        struct trace_uprobe *tu;
  
 +      tu = container_of(tp, struct trace_uprobe, tp);
 +      WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 +
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
                if (!tu->inode)
                        continue;
  
 -              WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 -
                uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
                tu->inode = NULL;
        }
@@@ -1102,7 -1108,7 +1102,7 @@@ static int probe_event_enable(struct tr
        }
  
        tu = container_of(tp, struct trace_uprobe, tp);
 -      WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 +      WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
  
        if (enabled)
                return 0;
@@@ -1199,39 -1205,39 +1199,39 @@@ __uprobe_perf_filter(struct trace_uprob
  }
  
  static inline bool
 -uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
 +trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
 +                        struct perf_event *event)
  {
 -      return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
 +      return __uprobe_perf_filter(filter, event->hw.target->mm);
  }
  
 -static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 +static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
 +                                     struct perf_event *event)
  {
        bool done;
  
 -      write_lock(&tu->filter.rwlock);
 +      write_lock(&filter->rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
 -              done = tu->filter.nr_systemwide ||
 +              done = filter->nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
 -                      uprobe_filter_event(tu, event);
 +                      trace_uprobe_filter_event(filter, event);
        } else {
 -              tu->filter.nr_systemwide--;
 -              done = tu->filter.nr_systemwide;
 +              filter->nr_systemwide--;
 +              done = filter->nr_systemwide;
        }
 -      write_unlock(&tu->filter.rwlock);
 +      write_unlock(&filter->rwlock);
  
 -      if (!done)
 -              return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
 -
 -      return 0;
 +      return done;
  }
  
 -static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 +/* This returns true if the filter always covers target mm */
 +static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
 +                                  struct perf_event *event)
  {
        bool done;
 -      int err;
  
 -      write_lock(&tu->filter.rwlock);
 +      write_lock(&filter->rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
 -              done = tu->filter.nr_systemwide ||
 +              done = filter->nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
 -                      uprobe_filter_event(tu, event);
 -              list_add(&event->hw.tp_list, &tu->filter.perf_events);
 +                      trace_uprobe_filter_event(filter, event);
 +              list_add(&event->hw.tp_list, &filter->perf_events);
        } else {
 -              done = tu->filter.nr_systemwide;
 -              tu->filter.nr_systemwide++;
 +              done = filter->nr_systemwide;
 +              filter->nr_systemwide++;
        }
 -      write_unlock(&tu->filter.rwlock);
 +      write_unlock(&filter->rwlock);
  
 -      err = 0;
 -      if (!done) {
 -              err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
 -              if (err)
 -                      uprobe_perf_close(tu, event);
 -      }
 -      return err;
 +      return done;
  }
  
 -static int uprobe_perf_multi_call(struct trace_event_call *call,
 -                                struct perf_event *event,
 -              int (*op)(struct trace_uprobe *tu, struct perf_event *event))
 +static int uprobe_perf_close(struct trace_event_call *call,
 +                           struct perf_event *event)
  {
        struct trace_probe *pos, *tp;
        struct trace_uprobe *tu;
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
  
 +      tu = container_of(tp, struct trace_uprobe, tp);
 +      if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
 +              return 0;
 +
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
 -              ret = op(tu, event);
 +              ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
                if (ret)
                        break;
        }
  
        return ret;
  }
 +
 +static int uprobe_perf_open(struct trace_event_call *call,
 +                          struct perf_event *event)
 +{
 +      struct trace_probe *pos, *tp;
 +      struct trace_uprobe *tu;
 +      int err = 0;
 +
 +      tp = trace_probe_primary_from_call(call);
 +      if (WARN_ON_ONCE(!tp))
 +              return -ENODEV;
 +
 +      tu = container_of(tp, struct trace_uprobe, tp);
 +      if (trace_uprobe_filter_add(tu->tp.event->filter, event))
 +              return 0;
 +
 +      list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
 +              err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
 +              if (err) {
 +                      uprobe_perf_close(call, event);
 +                      break;
 +              }
 +      }
 +
 +      return err;
 +}
 +
  static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                enum uprobe_filter_ctx ctx, struct mm_struct *mm)
  {
 +      struct trace_uprobe_filter *filter;
        struct trace_uprobe *tu;
        int ret;
  
        tu = container_of(uc, struct trace_uprobe, consumer);
 -      read_lock(&tu->filter.rwlock);
 -      ret = __uprobe_perf_filter(&tu->filter, mm);
 -      read_unlock(&tu->filter.rwlock);
 +      filter = tu->tp.event->filter;
 +
 +      read_lock(&filter->rwlock);
 +      ret = __uprobe_perf_filter(filter, mm);
 +      read_unlock(&filter->rwlock);
  
        return ret;
  }
@@@ -1440,10 -1419,10 +1440,10 @@@ trace_uprobe_register(struct trace_even
                return 0;
  
        case TRACE_REG_PERF_OPEN:
 -              return uprobe_perf_multi_call(event, data, uprobe_perf_open);
 +              return uprobe_perf_open(event, data);
  
        case TRACE_REG_PERF_CLOSE:
 -              return uprobe_perf_multi_call(event, data, uprobe_perf_close);
 +              return uprobe_perf_close(event, data);
  
  #endif
        default:
@@@ -1528,12 -1507,17 +1528,17 @@@ static struct trace_event_functions upr
        .trace          = print_uprobe_event
  };
  
+ static struct trace_event_fields uprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = uprobe_event_define_fields },
+       {}
+ };
  static inline void init_trace_event_call(struct trace_uprobe *tu)
  {
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
        call->event.funcs = &uprobe_funcs;
-       call->class->define_fields = uprobe_event_define_fields;
+       call->class->fields_array = uprobe_fields_array;
  
        call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
        call->class->reg = trace_uprobe_register;
This page took 0.124745 seconds and 4 git commands to generate.