/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <[email protected]>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <[email protected]>, Jim Keniston
 *		<[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> added function-return probes.
 * 2005-May	Rusty Lynch <[email protected]>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <[email protected]> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <[email protected]> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <[email protected]>, Arjan van de Ven
 *		<[email protected]> and Jim Keniston <[email protected]>
 *		unified x86 kprobes code.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
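
/*
 * Usage sketch (illustrative; not part of this file). A module would
 * typically probe a kernel function like this -- the symbol and handler
 * names below are hypothetical:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %p, ip = %lx\n", p->addr, regs->ip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 *
 * register_kprobe() arms the probe via arch_prepare_kprobe() and
 * arch_arm_kprobe() below; the resulting int3 hit is handled by
 * kprobe_handler().
 */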

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
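
/*
 * Each W() row packs 16 bits and two rows share one u32 (note the
 * "% 32" shift), so the array holds one bit per possible second opcode
 * byte. can_boost() indexes it with test_bit(): e.g. 0x0f 0x05 (syscall)
 * maps to row 0x00, bit 5, which is 0 (not boostable), while 0x0f 0x90
 * (seto) maps to row 0x90, bit 0, which is 1 (boostable).
 */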

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from' that jumps to address 'to'. */
static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) * jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
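
/*
 * Worked example (illustrative numbers, assuming RELATIVEJUMP_INSTRUCTION
 * is the near-jmp opcode 0xe9): with from == 0x1000 and to == 0x2000,
 * raddr = 0x2000 - (0x1000 + 5) = 0xffb, so the five bytes written are
 * e9 fb 0f 00 00 -- a jmp rel32 whose target the CPU computes relative
 * to the end of the 5-byte instruction.
 */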

/*
 * Check for the REX prefix, which can only exist on X86_64;
 * on X86_32 this always returns 0.
 */
static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
{
#ifdef CONFIG_X86_64
	if ((*insn & 0xf0) == 0x40)
		return 1;
#endif
	return 0;
}

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

/* Recover the probed instruction at addr for further analysis. */
static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	kp = get_kprobe((void *)addr);
	if (!kp)
		return -EINVAL;

	/*
	 * Normally, kp->ainsn.insn holds a copy of the original
	 * instruction. However, a RIP-relative instruction cannot be
	 * single-stepped at a different address, so fix_riprel() tweaks
	 * that instruction's displacement; in that case we can't recover
	 * the original instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which is overwritten by int3. And since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return 0;
}
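
/*
 * Example (illustrative): if the probed instruction was "push %rbp"
 * (0x55), the byte at kp->addr now reads 0xcc (BREAKPOINT_INSTRUCTION)
 * while kp->opcode holds 0x55; the memcpy() plus buf[0] = kp->opcode
 * above yields the original byte sequence, starting with 0x55, in buf.
 */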

/* Dummy buffers for kallsyms_lookup */
static char __dummy_buf[KSYM_NAME_LEN];

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint. In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}
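
/*
 * Example (illustrative): with a 1-byte "push %rbp" at func+0 followed
 * by a 4-byte instruction at func+1, probing func+1 decodes the push,
 * addr advances 0 -> 1 == paddr, and can_probe() returns 1. Probing
 * func+2 would land mid-instruction: addr jumps 1 -> 5 != 2, so it
 * returns 0.
 */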

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/*
	 * On X86_64, 0x40-0x4f are REX prefixes, so we need to look at
	 * the next byte instead (but of course not recurse infinitely).
	 */
	if (is_REX_prefix(insn))
		return is_IF_modifier(++insn);

	return 0;
}

/*
 * Adjust the displacement if the copied instruction uses the
 * %rip-relative addressing mode, so that it still references the
 * intended target when single-stepped from the copy's location.
 * Only applicable to 64-bit x86.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
#ifdef CONFIG_X86_64
	struct insn insn;
	kernel_insn_init(&insn, p->ainsn.insn);

	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
			  (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
		disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
}
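
/*
 * Worked example (illustrative addresses): for a "mov 0x1000(%rip),%rax"
 * originally at p->addr == 0xffffffff81000000 with its copy at
 * p->ainsn.insn == 0xffffffffa0002000, the new displacement is
 * 0x1000 + (0xffffffff81000000 - 0xffffffffa0002000); the copy's %rip
 * plus newdisp then points at the same target the original did.
 */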

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	fix_riprel(p);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(0);
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(current->thread.debugctlmsr);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
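
/*
 * Stack view (illustrative): on function entry *sara holds the caller's
 * return address, e.g. 0xffffffff81001234. After arch_prepare_kretprobe()
 * runs, that slot holds &kretprobe_trampoline instead, so the probed
 * function's "ret" lands in the trampoline, and the saved
 * 0xffffffff81001234 is restored by trampoline_handler() below.
 */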

static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
}
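
/*
 * Note on boosting: a boosted probe (boostable == 1) skips the
 * single-step trap. resume_execution() has either appended a jump back
 * after the copied instruction via set_jmp_op(), or determined that the
 * instruction sets ip absolutely (ret, absolute jmp), so pointing
 * regs->ip at the copy and returning executes natively, without a
 * second debug exception.
 */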

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kprobes_inc_nmissed_count(p);
		prepare_singlestep(p, regs);
		kcb->kprobe_status = KPROBE_REENTER;
		break;
	case KPROBE_HIT_SS:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	}

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb);
			return 1;
		}
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			"	pushq %rdi\n"
			"	pushq %rsi\n"
			"	pushq %rdx\n"
			"	pushq %rcx\n"
			"	pushq %rax\n"
			"	pushq %r8\n"
			"	pushq %r9\n"
			"	pushq %r10\n"
			"	pushq %r11\n"
			"	pushq %rbx\n"
			"	pushq %rbp\n"
			"	pushq %r12\n"
			"	pushq %r13\n"
			"	pushq %r14\n"
			"	pushq %r15\n"
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			"	popq %r15\n"
			"	popq %r14\n"
			"	popq %r13\n"
			"	popq %r12\n"
			"	popq %rbp\n"
			"	popq %rbx\n"
			"	popq %r11\n"
			"	popq %r10\n"
			"	popq %r9\n"
			"	popq %r8\n"
			"	popq %rax\n"
			"	popq %rcx\n"
			"	popq %rdx\n"
			"	popq %rsi\n"
			"	popq %rdi\n"
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
#else
			"	pushf\n"
			/*
			 * Skip cs, ip, orig_ax and gs.
			 * trampoline_handler() will plug in these values
			 */
			"	subl $16, %esp\n"
			"	pushl %fs\n"
			"	pushl %es\n"
			"	pushl %ds\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* Skip ds, es, fs, gs, orig_ax and ip */
			"	addl $24, %esp\n"
			"	popf\n"
#endif
			"	ret\n");
}
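
/*
 * Layout note: the pushes above build a struct pt_regs on the stack in
 * field order (r15 lowest). On x86_64, sp is the 20th 8-byte field, so
 * "movq %rax, 152(%rsp)" (19 * 8 == 152) stores trampoline_handler()'s
 * return value -- the real return address -- into the regs->sp slot;
 * after the pops and the "addq $24", the final "ret" consumes exactly
 * that slot and resumes in the original caller.
 */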

/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip the REX prefix */
	if (is_REX_prefix(insn))
		insn++;

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
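
/*
 * Example of the call-relative fixup (illustrative addresses): if a
 * "call" at orig_ip == 0xffffffff81000000 was single-stepped from its
 * copy at copy_ip == 0xffffffffa0002000, the pushed return address is
 * copy_ip + 5; "*tos = orig_ip + (*tos - copy_ip)" rewrites it to
 * orig_ip + 5, the address following the original call instruction.
 */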

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault. This could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"	xchg %%rbx,%%rsp	\n"
#else
			"	xchgl %%ebx,%%esp	\n"
#endif
			"	int3			\n"
			"	.globl jprobe_return_end\n"
			"	jprobe_return_end:	\n"
			"	nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
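
/*
 * How the jprobe exit works: the xchg above restores the stack pointer
 * saved by setjmp_pre_handler() (passed in via the "b" constraint), and
 * the int3 re-enters kprobe_handler(), whose break_handler path invokes
 * longjmp_break_handler() below to restore the saved registers and stack.
 */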

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}