/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the
 * klp_ops struct's func_stack list.  The winner is the klp_func at the top
 * of the func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

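/*
 * Illustrative example (not part of the code below): if two patches both
 * replace the same function, their klp_func structs share one klp_ops.
 * Enabling the second patch pushes its klp_func onto func_stack ahead of
 * the first one's, so the handler redirects to the newer version;
 * disabling it pops that entry and the older version wins again.
 */
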
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of the module coming and going
	 * notifiers. Note that the patch might still be needed before the
	 * going handler is called. Module functions can be called even in
	 * the GOING state until mod->exit() finishes. This is especially
	 * important for patches that modify the semantics of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set.  If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the
	 * same name in the same object.
	 */
	unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};
	int ret;

	mutex_lock(&module_mutex);
	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
	mutex_unlock(&module_mutex);

	if (!ret) {
		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
		       name, addr);
		return -EINVAL;
	}

	return 0;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* If KASLR has been enabled, adjust old_addr accordingly */
	if (kaslr_enabled() && func->old_addr)
		func->old_addr += kaslr_offset();
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}

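/*
 * The ftrace handler below is where the actual redirection happens: it is
 * invoked on entry to a patched function and uses klp_arch_set_pc() to
 * rewrite the saved instruction pointer in pt_regs, so that execution
 * resumes in the replacement function at the top of the func_stack instead
 * of in the original.
 */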
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

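/*
 * Unpatching a function pops its klp_func off the shared func_stack; only
 * when that would leave the stack empty (the list_is_singular() case) is
 * the ftrace_ops unregistered, its filter on old_addr removed, and the
 * klp_ops freed.
 */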
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	WARN_ON(func->state != KLP_ENABLED);
	WARN_ON(!func->old_addr);

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

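/*
 * Patches thus form a strict stack: with P1, P2, P3 registered in that
 * order and all enabled, only P3 can be disabled here; conversely,
 * __klp_enable_patch() below refuses to enable P3 while P2 is still
 * disabled.
 */
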
/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */

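/*
 * For example, a registered patch can be toggled from userspace with:
 *
 *	echo 1 > /sys/kernel/livepatch/<patch>/enabled	(enable)
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	(disable)
 */
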
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);

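/*
 * Minimal usage sketch for a patch module, modeled on
 * samples/livepatch/livepatch-sample.c (the replacement function name is
 * the sample's; a real patch supplies its own):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = NULL,	// NULL name means vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	ret = klp_register_patch(&patch);
 *	if (!ret)
 *		ret = klp_enable_patch(&patch);
 */
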
static int klp_module_notify_coming(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret) {
		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
			pmod->name, mod->name, ret);
		return ret;
	}

	if (patch->state == KLP_DISABLED)
		return 0;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (ret)
		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
			pmod->name, mod->name, ret);
	return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	int ret;
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know what module will get patched by a new patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				ret = klp_module_notify_coming(patch, obj);
				if (ret) {
					obj->mod = NULL;
					pr_warn("patch '%s' is in an inconsistent state!\n",
						patch->mod->name);
				}
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);