// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and thus
 * it can't process this @work. Otherwise @work->func() will be called when the
 * @task goes through one of the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * An illustrative usage sketch follows the function body.
 *
 * RETURNS:
 * 0 if it succeeds, or -ESRCH if the @task is already exiting.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

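	/*
	 * Lock-free push onto the task's LIFO list: retry the cmpxchg()
	 * if another writer got in first.  Once the exit path has
	 * installed work_exited, no further work can be queued.
	 */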
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

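/*
 * Illustrative usage sketch for task_work_add() above; it is not part of
 * task_work.c.  "struct example_ctx", example_twork_func() and
 * example_queue_work() are hypothetical names, and the sketch assumes
 * <linux/slab.h> is available for kzalloc()/kfree().  The usual pattern
 * is to embed the callback_head in the caller's own object and recover
 * it with container_of() inside the callback.
 */
struct example_ctx {
	struct callback_head	twork;
	int			value;
};

static void example_twork_func(struct callback_head *cb)
{
	struct example_ctx *ctx = container_of(cb, struct example_ctx, twork);

	/* Runs in the context of the targeted task. */
	pr_debug("example task_work ran, value=%d\n", ctx->value);
	kfree(ctx);
}

static int example_queue_work(struct task_struct *task)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->value = 42;
	init_task_work(&ctx->twork, example_twork_func);
	if (task_work_add(task, &ctx->twork, TWA_SIGNAL)) {
		/* @task is already exiting; the work was never queued. */
		kfree(ctx);
		return -ESRCH;
	}
	return 0;
}
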
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue. An illustrative usage sketch follows the function
 * body.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work, in which case we will find it
	 * again, or we raced with task_work_run() and *pprev is
	 * NULL or work_exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

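/*
 * Illustrative sketch for task_work_cancel() above; not part of
 * task_work.c.  It reuses the hypothetical example_twork_func() and
 * struct example_ctx from the sketch after task_work_add().  The
 * dequeued callback_head is handed back to the caller, which therefore
 * owns the embedding object again and must release it itself.
 */
static void example_cancel_work(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, example_twork_func);

	if (cb) {
		struct example_ctx *ctx =
			container_of(cb, struct example_ctx, twork);

		kfree(ctx);	/* dequeued before it ran; we own it again */
	}
	/* NULL means the work already ran or was never queued. */
}
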
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns. An illustrative sketch of
 * the calling pattern follows the function body.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(): it cannot remove
		 * the first entry == work, its cmpxchg(task_works) must
		 * fail, but it can still remove another entry from the
		 * ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);
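		/*
		 * Run the batch just detached.  The list is LIFO, so the
		 * works run in the reverse order of queueing.
		 */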
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
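
/*
 * Illustrative sketch for task_work_run() above; not part of task_work.c.
 * The core kernel flushes the pending works on the way back to user mode
 * (the notify-resume/notify-signal paths) and from the exit path via
 * exit_task_work().  This hypothetical helper mirrors that calling
 * pattern: check the list cheaply first, then let task_work_run() sort
 * out any races itself.
 */
static void example_flush_current_works(void)
{
	if (unlikely(current->task_works))
		task_work_run();
}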