#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/kdebug.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
-#include <asm/kdebug.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
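For context: these two constants size the global hash table that registered
probes live in. Lookups bucket a probe by its address, roughly as in this
sketch (hash_ptr() is provided by the linux/hash.h include above):

	static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

	/* 64 buckets; the probe address picks the chain to search */
	struct hlist_head *head =
			&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)];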
struct kprobe_insn_page *kip;
struct hlist_node *pos;
- retry:
- hlist_for_each(pos, &kprobe_insn_pages) {
- kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+ retry:
+ hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
}
/* All out of space. Need to allocate a new page. Use slot 0. */
kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
- if (!kip) {
+ if (!kip)
return NULL;
- }
/*
* Use module_alloc so this page is within +/- 2GB of where the
if (check_safety() != 0)
return -EAGAIN;
- hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
+ hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
int i;
- kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
if (kip->ngarbage == 0)
continue;
- kip->ngarbage = 0; /* we will collect all garbages */
+ kip->ngarbage = 0; /* we will collect all garbage slots */
struct kprobe_insn_page *kip;
struct hlist_node *pos;
- hlist_for_each(pos, &kprobe_insn_pages) {
- kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+ hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
break;
}
}
- if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+
+ if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
collect_garbage_slots();
- }
}
#endif
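The slot code above backs the arch-specific single-stepping machinery. A
typical consumer pairs the calls roughly like this (a sketch of an
arch_prepare_kprobe() body, not part of this patch):

	/* reserve an executable slot for the copied instruction */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

On removal the arch code hands the slot back via free_insn_slot(); slots
freed dirty are only counted at first, and collect_garbage_slots() reclaims
whole pages once kprobe_garbage_slots exceeds INSNS_PER_PAGE, as the hunk
above shows.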
reset_kprobe_instance();
}
}
- return;
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
static int __kprobes in_kprobes_functions(unsigned long addr)
{
- if (addr >= (unsigned long)__kprobes_text_start
- && addr < (unsigned long)__kprobes_text_end)
+ if (addr >= (unsigned long)__kprobes_text_start &&
+ addr < (unsigned long)__kprobes_text_end)
return -EINVAL;
return 0;
}
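in_kprobes_functions() rejects addresses that fall in .kprobes.text, the
section the __kprobes marker links code into; __kprobes_text_start and
__kprobes_text_end are the linker-script bounds of that section. A sketch of
how a function opts out of being probed (the function name is a placeholder):

	/* lands in .kprobes.text, so register_kprobe() on it returns -EINVAL */
	static int __kprobes my_unprobeable_helper(void)
	{
		return 0;
	}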
return -EINVAL;
- p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset);
+ p->addr = (kprobe_opcode_t *)((char *)p->addr + p->offset);
- if ((!kernel_text_address((unsigned long) p->addr)) ||
- in_kprobes_functions((unsigned long) p->addr))
+ if (!kernel_text_address((unsigned long) p->addr) ||
+ in_kprobes_functions((unsigned long) p->addr))
return -EINVAL;
p->mod_refcounted = 0;
- /* Check are we probing a module */
- if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+
+ /*
+ * Check if we are probing a module.
+ */
+ probed_mod = module_text_address((unsigned long) p->addr);
+ if (probed_mod) {
struct module *calling_mod = module_text_address(called_from);
- /* We must allow modules to probe themself and
- * in this case avoid incrementing the module refcount,
- * so as to allow unloading of self probing modules.
+ /*
+ * We must allow modules to probe themselves and in this case
+ * avoid incrementing the module refcount, so as to allow
+ * unloading of self-probing modules.
*/
- if (calling_mod && (calling_mod != probed_mod)) {
+ if (calling_mod && calling_mod != probed_mod) {
if (unlikely(!try_module_get(probed_mod)))
return -EINVAL;
p->mod_refcounted = 1;
goto out;
}
- if ((ret = arch_prepare_kprobe(p)) != 0)
+ ret = arch_prepare_kprobe(p);
+ if (ret)
goto out;
INIT_HLIST_NODE(&p->hlist);
int __kprobes register_kprobe(struct kprobe *p)
{
- return __register_kprobe(p,
- (unsigned long)__builtin_return_address(0));
+ return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
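register_kprobe() passes its own return address down so that
__register_kprobe() can detect a module probing itself (the mod_refcounted
logic above). A minimal caller looks roughly like this (handler name and
probed symbol are placeholders):

	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* 0 = go on to single-step the instruction */
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",
		.pre_handler	= handler_pre,
	};

	/* 0 on success; -EINVAL for addresses kprobes refuses to probe */
	int ret = register_kprobe(&kp);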
void __kprobes unregister_kprobe(struct kprobe *p)
return;
}
valid_p:
- if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
- (p->list.next == &old_p->list) &&
- (p->list.prev == &old_p->list))) {
+ if (old_p == p ||
+ (old_p->pre_handler == aggr_pre_handler &&
+ p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
/* Only probe on the hash list */
arch_disarm_kprobe(p);
hlist_del_rcu(&old_p->hlist);
mutex_unlock(&kprobe_mutex);
synchronize_sched();
- if (p->mod_refcounted &&
- (mod = module_text_address((unsigned long)p->addr)))
- module_put(mod);
+ if (p->mod_refcounted) {
+ mod = module_text_address((unsigned long)p->addr);
+ if (mod)
+ module_put(mod);
+ }
if (cleanup_p) {
if (p != old_p) {
struct kprobe *p, *kp;
const char *sym = NULL;
unsigned int i = *(loff_t *) v;
- unsigned long size, offset = 0;
+ unsigned long offset = 0;
char *modname, namebuf[128];
head = &kprobe_table[i];
preempt_disable();
hlist_for_each_entry_rcu(p, node, head, hlist) {
- sym = kallsyms_lookup((unsigned long)p->addr, &size,
+ sym = kallsyms_lookup((unsigned long)p->addr, NULL,
&offset, &modname, namebuf);
if (p->pre_handler == aggr_pre_handler) {
list_for_each_entry_rcu(kp, &p->list, list)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir , 0 ,
+ file = debugfs_create_file("list", 0444, dir, NULL,
&debugfs_kprobes_operations);
if (!file) {
debugfs_remove(dir);