kprobes/x86: Check for invalid ftrace location in __recover_probed_insn()
author     Petr Mladek <[email protected]>
           Fri, 20 Feb 2015 14:07:30 +0000 (15:07 +0100)
committer  Ingo Molnar <[email protected]>
           Sat, 21 Feb 2015 09:33:31 +0000 (10:33 +0100)
__recover_probed_insn() should always be called with an address at
which an instruction starts. Checking the address against
ftrace_location() helps to discover such an inconsistency.

This patch adds a WARN_ON() that fires when the inconsistency is
detected. It also adds handling for the situation when the original
code cannot be recovered.
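
For illustration only, here is a minimal sketch of the consistency rule
the new WARN_ON() enforces. It assumes the usual ftrace_location()
semantics: the function returns the start address of the ftrace call
site that contains the queried address, or 0 if the address is not
inside one. The helper name below is made up for the example:

    /* Sketch, not part of the patch: the invariant behind the WARN_ON(). */
    static bool probe_addr_is_consistent(unsigned long addr)
    {
            unsigned long faddr = ftrace_location(addr);

            /*
             * Either addr is not inside any ftrace call site (faddr == 0),
             * or it sits exactly at the start of one (faddr == addr).
             * Anything else means addr points into the middle of the
             * fentry/mcount instruction, which arch_check_ftrace_location()
             * is expected to refuse at kprobe registration time.
             */
            return !faddr || faddr == addr;
    }

When the invariant does not hold, __recover_probed_insn() now warns and
returns 0, and the callers of recover_probed_instruction() treat 0 as
"the original instruction could not be recovered".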

Suggested-by: Masami Hiramatsu <[email protected]>
Signed-off-by: Petr Mladek <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Anil S Keshavamurthy <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Steven Rostedt <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index c3b4b46b479778a2dd59b9b1773e7299fc51f3c1..4e3d5a9621fe0052fac43d5ad6c109b9d3f54447 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -227,6 +227,13 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 
        kp = get_kprobe((void *)addr);
        faddr = ftrace_location(addr);
+       /*
+        * Addresses inside the ftrace location are refused by
+        * arch_check_ftrace_location(). Something went terribly wrong
+        * if such an address is checked here.
+        */
+       if (WARN_ON(faddr && faddr != addr))
+               return 0UL;
        /*
         * Use the current code if it is not modified by Kprobe
         * and it cannot be modified by ftrace.
@@ -265,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction can not get recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -299,6 +307,8 @@ static int can_probe(unsigned long paddr)
                 * normally used, we just go through if there is no kprobe.
                 */
                __addr = recover_probed_instruction(buf, addr);
+               if (!__addr)
+                       return 0;
                kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
                insn_get_length(&insn);
 
@@ -347,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
        unsigned long recovered_insn =
                recover_probed_instruction(buf, (unsigned long)src);
 
+       if (!recovered_insn)
+               return 0;
        kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
        insn_get_length(&insn);
        /* Another subsystem puts a breakpoint, failed to recover */
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7c523bbf3dc8fc3f95acf0a374e9f6f1164c3f47..3aef248ec1ee1d0df58174a8af74eb986249d2e2 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
+               if (!recovered_insn)
+                       return 0;
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */