// SPDX-License-Identifier: GPL-2.0+
/*
 * Restartable sequences system call
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2018, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

#define RSEQ_CS_PREEMPT_MIGRATE_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE | \
				       RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT)
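
For reference, the user-space ABI that this file manipulates looks roughly as
follows. This is an abridged sketch of the include/uapi/linux/rseq.h layout
from the same era (the 32-bit compat representations of the rseq_cs pointer
are omitted); the UAPI header remains the authoritative definition.

struct rseq_cs {
	__u32 version;			/* must be 0, checked in rseq_get_rseq_cs() */
	__u32 flags;			/* RSEQ_CS_FLAG_NO_RESTART_ON_* */
	__u64 start_ip;			/* first instruction of the critical section */
	__u64 post_commit_offset;	/* length of the critical section */
	__u64 abort_ip;			/* restart target, outside the section */
} __attribute__((aligned(4 * sizeof(__u64))));

struct rseq {
	__u32 cpu_id_start;		/* cpu number hint, always valid */
	__u32 cpu_id;			/* cpu number, or RSEQ_CPU_ID_* sentinel */
	union {
		__u64 ptr64;		/* address of a struct rseq_cs, or 0 */
		/* ... 32-bit representations omitted in this sketch ... */
	} rseq_cs;
	__u32 flags;			/* RSEQ_CS_FLAG_NO_RESTART_ON_* */
} __attribute__((aligned(4 * sizeof(__u64))));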

/*
 *
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * Detailed algorithm of rseq user-space assembly sequences:
 *
 *                     init(rseq_cs)
 *                     cpu = TLS->rseq::cpu_id_start
 *   [1]               TLS->rseq::rseq_cs = rseq_cs
 *   [start_ip]        ----------------------------
 *   [2]               if (cpu != TLS->rseq::cpu_id)
 *                             goto abort_ip;
 *   [3]               <last_instruction_in_cs>
 *   [post_commit_ip]  ----------------------------
 *
 *   The address of jump target abort_ip must be outside the critical
 *   region, i.e.:
 *
 *     [abort_ip] < [start_ip]  || [abort_ip] >= [post_commit_ip]
 *
 *   Steps [2]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being interrupted between any of those
 *   instructions, and then resumed to the abort_ip.
 *
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store within the inline assembly instruction sequence.
 *       [start_ip]
 *
 *   2.  Userspace tests to check whether the current cpu_id field matches
 *       the cpu number loaded before start_ip, branching to abort_ip
 *       in case of a mismatch.
 *
 *       If the sequence is preempted or interrupted by a signal
 *       at or after start_ip and before post_commit_ip, then the kernel
 *       clears TLS->__rseq_abi::rseq_cs, and sets the user-space return
 *       ip to abort_ip before returning to user-space, so the preempted
 *       execution resumes at abort_ip.
 *
 *   3.  Userspace critical section final instruction before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  <success>
 *
 *   On failure at [2], or if interrupted by preempt or signal delivery
 *   between [1] and [3]:
 *
 *       [abort_ip]
 *   F1. <failure>
 */
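
To make the numbered steps concrete, below is a minimal, untested user-space
sketch of this pattern for x86-64 with GCC asm goto, in the style of what the
librseq project generates. The signature value 0x53053053, the __rseq_failure
section name, and all identifiers are illustrative assumptions; the thread is
assumed to have registered rs through sys_rseq with the same signature, and
the caller is expected to re-read cpu_id_start and retry on abort.

#define _GNU_SOURCE
#include <linux/rseq.h>		/* UAPI struct rseq */
#include <stdint.h>

static __thread struct rseq rs;	/* registered via sys_rseq at thread start */

/* Add @count to @percpu[cpu]; returns -1 if the sequence aborted. */
static int rseq_percpu_add(intptr_t *percpu, intptr_t count)
{
	uint32_t cpu = *(volatile uint32_t *)&rs.cpu_id_start;

	__asm__ __volatile__ goto (
		/* struct rseq_cs descriptor: version, flags, start_ip,
		 * post_commit_offset, abort_ip. */
		".pushsection __rseq_cs, \"aw\"\n\t"
		".balign 32\n\t"
		"3:\n\t"
		".long 0x0, 0x0\n\t"
		".quad 1f, (2f - 1f), 4f\n\t"
		".popsection\n\t"
		/* [1] Store the descriptor address to TLS->rseq::rseq_cs. */
		"leaq 3b(%%rip), %%rax\n\t"
		"movq %%rax, %[rseq_cs]\n\t"
		"1:\n\t"
		/* [2] Abort if this thread migrated since cpu was loaded. */
		"cmpl %[cpu], %[cpu_id]\n\t"
		"jnz 4f\n\t"
		/* [3] Commit: single final store before post_commit_ip. */
		"addq %[count], %[v]\n\t"
		"2:\n\t"
		/* Abort trampoline: the signature word sits at abort_ip - 4,
		 * where rseq_get_rseq_cs() expects to find it. */
		".pushsection __rseq_failure, \"ax\"\n\t"
		".long 0x53053053\n\t"
		"4:\n\t"
		"jmp %l[abort]\n\t"
		".popsection\n\t"
		:
		: [rseq_cs] "m" (rs.rseq_cs.ptr64),
		  [cpu_id]  "m" (rs.cpu_id),
		  [cpu]     "r" (cpu),
		  [v]       "m" (percpu[cpu]),
		  [count]   "er" (count)
		: "memory", "cc", "rax"
		: abort);
	return 0;
abort:
	return -1;	/* caller re-reads cpu_id_start and retries */
}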

static int rseq_update_cpu_id(struct task_struct *t)
{
	u32 cpu_id = raw_smp_processor_id();

	if (put_user(cpu_id, &t->rseq->cpu_id_start))
		return -EFAULT;
	if (put_user(cpu_id, &t->rseq->cpu_id))
		return -EFAULT;
	trace_rseq_update(t);
	return 0;
}

static int rseq_reset_rseq_cpu_id(struct task_struct *t)
{
	u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED;

	/*
	 * Reset cpu_id_start to its initial state (0).
	 */
	if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
		return -EFAULT;
	/*
	 * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
	 * in after unregistration can figure out that rseq needs to be
	 * registered again.
	 */
	if (put_user(cpu_id, &t->rseq->cpu_id))
		return -EFAULT;
	return 0;
}

static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{
	struct rseq_cs __user *urseq_cs;
	u64 ptr;
	u32 __user *usig;
	u32 sig;
	int ret;

	if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
		return -EFAULT;
	if (!ptr) {
		memset(rseq_cs, 0, sizeof(*rseq_cs));
		return 0;
	}
	if (ptr >= TASK_SIZE)
		return -EINVAL;
	urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
	if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
		return -EFAULT;

	if (rseq_cs->start_ip >= TASK_SIZE ||
	    rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
	    rseq_cs->abort_ip >= TASK_SIZE ||
	    rseq_cs->version > 0)
		return -EINVAL;
	/* Check for overflow. */
	if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
		return -EINVAL;
	/* Ensure that abort_ip is not in the critical section. */
	if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
		return -EINVAL;

	usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
	ret = get_user(sig, usig);
	if (ret)
		return ret;

	if (current->rseq_sig != sig) {
		printk_ratelimited(KERN_WARNING
			"Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
			sig, current->rseq_sig, current->pid, usig);
		return -EINVAL;
	}
	return 0;
}

static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
{
	u32 flags, event_mask;
	int ret;

	/* Get thread flags. */
	ret = get_user(flags, &t->rseq->flags);
	if (ret)
		return ret;

	/* Take critical section flags into account. */
	flags |= cs_flags;

	/*
	 * Restart on signal can only be inhibited when restart on
	 * preempt and restart on migrate are inhibited too. Otherwise,
	 * a preempted signal handler could fail to restart the prior
	 * execution context on sigreturn.
	 */
	if (unlikely((flags & RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL) &&
		     (flags & RSEQ_CS_PREEMPT_MIGRATE_FLAGS) !=
		     RSEQ_CS_PREEMPT_MIGRATE_FLAGS))
		return -EINVAL;

	/*
	 * Load and clear event mask atomically with respect to
	 * scheduler preemption.
	 */
	preempt_disable();
	event_mask = t->rseq_event_mask;
	t->rseq_event_mask = 0;
	preempt_enable();

	return !!(event_mask & ~flags);
}

static int clear_rseq_cs(struct task_struct *t)
{
	/*
	 * The rseq_cs field is set to NULL on preemption or signal
	 * delivery on top of an rseq assembly block, as well as on top
	 * of code outside of the rseq assembly block. This performs
	 * a lazy clear of the rseq_cs field.
	 *
	 * Set rseq_cs to NULL.
	 */
	if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
		return -EFAULT;
	return 0;
}

/*
 * Unsigned comparison will be true when ip >= start_ip, and when
 * ip < start_ip + post_commit_offset: if ip is below start_ip, the
 * unsigned subtraction wraps around to a huge value that can never
 * be smaller than post_commit_offset, so a single compare checks
 * both bounds.
 */
static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs)
{
	return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset;
}

static int rseq_ip_fixup(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);
	struct task_struct *t = current;
	struct rseq_cs rseq_cs;
	int ret;

	ret = rseq_get_rseq_cs(t, &rseq_cs);
	if (ret)
		return ret;

	/*
	 * Handle potentially not being within a critical section.
	 * If not nested over an rseq critical section, restart is useless.
	 * Clear the rseq_cs pointer and return.
	 */
	if (!in_rseq_cs(ip, &rseq_cs))
		return clear_rseq_cs(t);
	ret = rseq_need_restart(t, rseq_cs.flags);
	if (ret <= 0)
		return ret;
	ret = clear_rseq_cs(t);
	if (ret)
		return ret;
	trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
			    rseq_cs.abort_ip);
	instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip);
	return 0;
}

/*
 * This resume handler must always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
	struct task_struct *t = current;
	int ret, sig;

	if (unlikely(t->flags & PF_EXITING))
		return;
	if (unlikely(!access_ok(t->rseq, sizeof(*t->rseq))))
		goto error;
	ret = rseq_ip_fixup(regs);
	if (unlikely(ret < 0))
		goto error;
	if (unlikely(rseq_update_cpu_id(t)))
		goto error;
	return;

error:
	sig = ksig ? ksig->sig : 0;
	force_sigsegv(sig);
}
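
For context, architectures reach this handler from their return-to-user path
via the rseq_handle_notify_resume() and rseq_signal_deliver() wrappers
declared in <linux/sched.h>. The shape of such a caller is roughly as below;
this is an illustrative sketch, not any particular architecture's actual
code.

/* Illustrative sketch of an arch return-to-user path (not real code). */
static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		/* ksig == NULL: not invoked from signal delivery. */
		rseq_handle_notify_resume(NULL, regs);
	}
}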

#ifdef CONFIG_DEBUG_RSEQ

/*
 * Terminate the process if a syscall is issued within a restartable
 * sequence.
 */
void rseq_syscall(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);
	struct task_struct *t = current;
	struct rseq_cs rseq_cs;

	if (!t->rseq)
		return;
	if (!access_ok(t->rseq, sizeof(*t->rseq)) ||
	    rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
		force_sig(SIGSEGV);
}

#endif

/*
 * sys_rseq - set up restartable sequences for the caller thread.
 */
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
		int, flags, u32, sig)
{
	int ret;

	if (flags & RSEQ_FLAG_UNREGISTER) {
		/* Unregister rseq for current thread. */
		if (current->rseq != rseq || !current->rseq)
			return -EINVAL;
		if (rseq_len != sizeof(*rseq))
			return -EINVAL;
		if (current->rseq_sig != sig)
			return -EPERM;
		ret = rseq_reset_rseq_cpu_id(current);
		if (ret)
			return ret;
		current->rseq = NULL;
		current->rseq_sig = 0;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		if (current->rseq != rseq || rseq_len != sizeof(*rseq))
			return -EINVAL;
		if (current->rseq_sig != sig)
			return -EPERM;
		/* Already registered. */
		return -EBUSY;
	}

	/*
	 * If there was no rseq previously registered,
	 * ensure the provided rseq is properly aligned and valid.
	 */
	if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
	    rseq_len != sizeof(*rseq))
		return -EINVAL;
	if (!access_ok(rseq, rseq_len))
		return -EFAULT;
	current->rseq = rseq;
	current->rseq_sig = sig;
	/*
	 * If rseq was previously inactive, and has just been
	 * registered, ensure the cpu_id_start and cpu_id fields
	 * are updated before returning to user-space.
	 */
	rseq_set_notify_resume(current);

	return 0;
}
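
For completeness, a minimal user-space sketch of registration and
unregistration against this syscall. MY_RSEQ_SIG is an arbitrary
application-chosen value that must match the signature word placed before
each abort handler; __NR_rseq is assumed to come from the kernel headers,
and on a libc that registers rseq itself (glibc >= 2.35) a second
registration attempt would fail with EBUSY per the code above.

#define _GNU_SOURCE
#include <linux/rseq.h>		/* UAPI struct rseq, RSEQ_FLAG_UNREGISTER */
#include <sys/syscall.h>	/* __NR_rseq */
#include <unistd.h>
#include <stdio.h>

#define MY_RSEQ_SIG	0x53053053	/* application-chosen signature */

static __thread struct rseq rs;	/* alignment comes from the UAPI type */

int main(void)
{
	/* Register: flags == 0, rseq_len must equal sizeof(struct rseq). */
	if (syscall(__NR_rseq, &rs, sizeof(rs), 0, MY_RSEQ_SIG)) {
		perror("rseq register");
		return 1;
	}
	/* The kernel now keeps cpu_id up to date across migrations. */
	printf("registered; running on cpu %u\n", rs.cpu_id);

	/* Unregister with the same address, length and signature. */
	if (syscall(__NR_rseq, &rs, sizeof(rs), RSEQ_FLAG_UNREGISTER,
		    MY_RSEQ_SIG)) {
		perror("rseq unregister");
		return 1;
	}
	return 0;
}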