// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/tracehook.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

/*
 * Number of unsuccessful attempts to complete the transition; used to pace
 * klp_send_signals() from klp_try_complete_transition().
 */
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead, hard force the sched synchronization.
 *
 * This approach allows the use of RCU functions for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
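
/*
 * Scheduling the empty klp_sync() work on every CPU and waiting for it
 * forces each CPU through at least one context switch, including CPUs that
 * are idling or running in user space where RCU may not be watching.
 */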

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
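
/*
 * The pairing with the smp_wmb() in klp_init_transition() can be pictured
 * as follows:
 *
 *	CPU 0 (init/start transition)	CPU 1 (update patch state)
 *	-----------------------------	--------------------------
 *	klp_target_state = state;
 *	smp_wmb();
 *	set TIF_PATCH_PENDING		test_and_clear TIF_PATCH_PENDING
 *					(acts as smp_rmb)
 *					read klp_target_state
 *
 * If CPU 1 sees TIF_PATCH_PENDING set, it is therefore guaranteed to read
 * the new klp_target_state value rather than KLP_UNDEFINED.
 */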

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}
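
/*
 * As an example of the "previously patched" case: when patch v2 is applied
 * on top of an already-applied patch v1 of the same function, the
 * ops->func_stack looks like
 *
 *	v2 new_func (func, head) -> v1 new_func (prev) -> original function
 *
 * and tasks still in the old state may be sleeping in v1's new_func, which
 * is why prev->new_func is the address range being checked.
 */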

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}
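
/*
 * Note that err_buf here and entries[] in klp_check_stack() are static and
 * thus shared between calls; this is only safe because all callers run with
 * klp_mutex held.
 */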

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}
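
/*
 * The "fake signal" does not deliver anything: set_notify_signal() merely
 * kicks the task out of an interruptible sleep and sends it through the
 * signal-handling path on its return to user space, where the pending
 * TIF_PATCH_PENDING flag gets processed by klp_update_patch_state().
 */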

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is also called from
	 * klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}
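
/*
 * In effect, a stalled transition is retried about once per second
 * (round_jiffies_relative(HZ)), and every SIGNALS_TIMEOUT (15) retries the
 * remaining tasks are nudged by klp_send_signals(), i.e. roughly every 15
 * seconds until the transition completes or is reversed.
 */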

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}
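
/*
 * A typical use: enabling a patch stalls because some task keeps sleeping
 * in a to-be-patched function.  Writing 0 to the patch's
 * /sys/kernel/livepatch/<patch>/enabled attribute ends up here and turns
 * the stuck patching transition into an unpatching one (and vice versa).
 */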

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
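
/*
 * The child inherits both the parent's patch_state (above) and, via
 * setup_thread_stack(), its TIF_PATCH_PENDING flag, so a task forked in
 * the middle of a transition gets migrated like any other task.
 */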

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}
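
/*
 * All patches are marked as forced because, once the consistency model has
 * been bypassed, nothing can be assumed about the tasks' patch states
 * anymore.  Among other things, a forced patch module can never be safely
 * removed.
 */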