/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <[email protected]> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <[email protected]> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <[email protected]>, Jim Keniston
 *		<[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
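
/*
 * Illustrative sketch (not from the original source): on a
 * function-descriptor ABI such as 64-bit powerpc, kallsyms returns the
 * address of the descriptor, whose first word holds the real entry
 * point, so an arch override might dereference it roughly like this
 * (hypothetical; the real arch header also copes with failed lookups):
 *
 *	#define kprobe_lookup_name(name, addr)				\
 *	{								\
 *		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);	\
 *		if (addr)						\
 *			addr = *(kprobe_opcode_t **)addr;		\
 *	}
 */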

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

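/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096,
 * MAX_INSN_SIZE == 16 and a one-byte kprobe_opcode_t):
 * 4096 / (16 * 1) = 256 instruction slots per page.
 */
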
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if all garbage slots on the page are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted while touching the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

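/*
 * Illustrative sketch (not from the original source): registering two
 * kprobes at the same address exercises the aggregate path above. The
 * names below are hypothetical.
 *
 *	static struct kprobe kp_a = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre,		(hypothetical handler)
 *	};
 *	static struct kprobe kp_b = {
 *		.symbol_name = "do_fork",
 *		.post_handler = my_post,	(hypothetical handler)
 *	};
 *
 * After register_kprobe(&kp_a) and register_kprobe(&kp_b), the hash
 * table entry for the address is a "manager kprobe" whose aggr_*
 * handlers walk the list [kp_a, kp_b] and dispatch to each probe.
 */
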
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address. That way the addr field can either be
	 * global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}

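/*
 * Usage sketch (illustrative only, not from the original source): a
 * minimal client as it might appear in a kernel module. The handler
 * and symbol names are hypothetical; error handling is elided.
 */
#if 0	/* example, never compiled */
static int my_pre(struct kprobe *kp, struct pt_regs *regs)
{
	/* Runs with preemption disabled, just before the probed insn. */
	return 0;	/* 0 = proceed to single-step the original insn */
}

static struct kprobe my_kp = {
	.symbol_name	= "do_fork",	/* resolved via kprobe_lookup_name */
	.pre_handler	= my_pre,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_kp);	/* arms the breakpoint if enabled */
}

static void __exit my_probe_exit(void)
{
	unregister_kprobe(&my_kp);
}
#endif
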
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

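/*
 * Usage sketch (illustrative only, not from the original source): a
 * jprobe handler mirrors the probed function's signature so it can
 * read the arguments, and must end with jprobe_return(). The names are
 * hypothetical and the signature is abbreviated; it must match the
 * probed function exactly on a real kernel.
 */
#if 0	/* example, never compiled */
static long my_fork_entry(unsigned long clone_flags, unsigned long stack_start)
{
	/* inspect arguments here */
	jprobe_return();	/* mandatory: hands control back to do_fork */
	return 0;		/* never reached */
}

static struct jprobe my_jp = {
	.entry		= JPROBE_ENTRY(my_fork_entry),	/* era convention */
	.kp.symbol_name	= "do_fork",
};

/* register_jprobe(&my_jp); ... unregister_jprobe(&my_jp); */
#endif
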
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr = rp->kp.addr;

	if (kretprobe_blacklist_size) {
		if (addr == NULL)
			kprobe_lookup_name(rp->kp.symbol_name, addr);
		addr += rp->kp.offset;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

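/*
 * Usage sketch (illustrative only, not from the original source): a
 * return-probe handler fires when the probed function returns. The
 * names are hypothetical; reading the return value from regs is
 * architecture-specific.
 */
#if 0	/* example, never compiled */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* ri->task is the returning task; regs holds the return state */
	return 0;
}

static struct kretprobe my_rp = {
	.kp.symbol_name	= "do_fork",
	.handler	= my_ret_handler,
	.maxactive	= 20,	/* instances for concurrent activations */
};

/* register_kretprobe(&my_rp); ... unregister_kretprobe(&my_rp); */
#endif
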
#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open = kprobes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
};

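/*
 * Usage note (illustrative only, not from the original source): with
 * CONFIG_DEBUG_FS, the files created below are typically driven from
 * userspace as
 *
 *	cat /sys/kernel/debug/kprobes/list		(dump registered probes)
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all probes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 *
 * assuming debugfs is mounted at /sys/kernel/debug.
 */
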
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif