// SPDX-License-Identifier: GPL-2.0+
/*
 * Restartable sequences system call
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <[email protected]> and Andrew Hunter <[email protected]>
 * Copyright (C) 2015-2018, EfficiOS Inc.,
 * Mathieu Desnoyers <[email protected]>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

/* The original rseq structure size (including padding) is 32 bytes. */
#define ORIG_RSEQ_SIZE		32

#define RSEQ_CS_NO_RESTART_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT | \
				  RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL | \
				  RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE)

/*
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * Detailed algorithm of rseq user-space assembly sequences:
 *
 *                     init(rseq_cs)
 *                     cpu = TLS->rseq::cpu_id_start
 *   [1]               TLS->rseq::rseq_cs = rseq_cs
 *   [start_ip]        ----------------------------
 *   [2]               if (cpu != TLS->rseq::cpu_id)
 *                             goto abort_ip;
 *   [3]               <last_instruction_in_cs>
 *   [post_commit_ip]  ----------------------------
 *
 *   The address of jump target abort_ip must be outside the critical
 *   region, i.e.:
 *
 *     [abort_ip] < [start_ip]  || [abort_ip] >= [post_commit_ip]
 *
 *   Steps [2]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being interrupted between any of those
 *   instructions, and then resumed to the abort_ip.
 *
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store within the inline assembly instruction sequence.
 *       [start_ip]
 *
 *   2.  Userspace tests to check whether the current cpu_id field
 *       matches the cpu number loaded before start_ip, branching to
 *       abort_ip in case of a mismatch.
 *
 *       If the sequence is preempted or interrupted by a signal
 *       at or after start_ip and before post_commit_ip, then the kernel
 *       clears TLS->__rseq_abi::rseq_cs, and sets the user-space return
 *       ip to abort_ip before returning to user-space, so the preempted
 *       execution resumes at abort_ip.
 *
 *   3.  Userspace critical section final instruction before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  <success>
 *
 *   On failure at [2], or if interrupted by preempt or signal delivery
 *   between [1] and [3]:
 *
 *       [abort_ip]
 *   F1. <failure>
 */
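
/*
 * To make the steps above concrete, here is an illustrative user-space
 * sketch of one critical section (a per-cpu counter increment). It is
 * written as C-like pseudocode for readability only: real
 * implementations (e.g. librseq or the kernel selftests) emit steps
 * [1]-[3] as a single inline assembly block, since the compiler must
 * not reorder, spill, or split the sequence. rseq_area, RSEQ_SIG and
 * percpu_count are assumed names, not part of this file:
 *
 *	static const struct rseq_cs cs = {
 *		.version		= 0,
 *		.flags			= 0,
 *		.start_ip		= (u64)start_ip,
 *		.post_commit_offset	= (u64)(post_commit_ip - start_ip),
 *		.abort_ip		= (u64)abort_ip,
 *	};
 *
 *	cpu = load(rseq_area->cpu_id_start);
 *	store(rseq_area->rseq_cs, &cs);			// [1]
 *  start_ip:
 *	if (cpu != load(rseq_area->cpu_id))		// [2]
 *		goto abort_ip;
 *	percpu_count[cpu]++;				// [3] commit
 *  post_commit_ip:
 *	return 0;
 *	// ... a 32-bit RSEQ_SIG word is placed at abort_ip - 4 ...
 *  abort_ip:
 *	return -1;					// caller retries
 */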

static int rseq_update_cpu_node_id(struct task_struct *t)
{
	struct rseq __user *rseq = t->rseq;
	u32 cpu_id = raw_smp_processor_id();
	u32 node_id = cpu_to_node(cpu_id);
	u32 mm_cid = task_mm_cid(t);

	WARN_ON_ONCE((int) mm_cid < 0);
	if (!user_write_access_begin(rseq, t->rseq_len))
		goto efault;
	unsafe_put_user(cpu_id, &rseq->cpu_id_start, efault_end);
	unsafe_put_user(cpu_id, &rseq->cpu_id, efault_end);
	unsafe_put_user(node_id, &rseq->node_id, efault_end);
	unsafe_put_user(mm_cid, &rseq->mm_cid, efault_end);
	/*
	 * Additional feature fields added after ORIG_RSEQ_SIZE
	 * need to be conditionally updated only if
	 * t->rseq_len != ORIG_RSEQ_SIZE.
	 */
	user_write_access_end();
	trace_rseq_update(t);
	return 0;

efault_end:
	user_write_access_end();
efault:
	return -EFAULT;
}
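
/*
 * User-space side of the update above (illustrative): since the kernel
 * only ever writes these fields for the owning thread, the thread may
 * read them from its own rseq area with plain single-copy-atomic
 * loads, e.g.:
 *
 *	cpu = READ_ONCE(rseq_area->cpu_id_start);
 *
 * where rseq_area and the READ_ONCE()-style helper are placeholders
 * for whatever the user-space library (e.g. librseq) provides.
 */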

static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
{
	u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0,
	    mm_cid = 0;

	/*
	 * Reset cpu_id_start to its initial state (0).
	 */
	if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
		return -EFAULT;
	/*
	 * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
	 * in after unregistration can figure out that rseq needs to be
	 * registered again.
	 */
	if (put_user(cpu_id, &t->rseq->cpu_id))
		return -EFAULT;
	/*
	 * Reset node_id to its initial state (0).
	 */
	if (put_user(node_id, &t->rseq->node_id))
		return -EFAULT;
	/*
	 * Reset mm_cid to its initial state (0).
	 */
	if (put_user(mm_cid, &t->rseq->mm_cid))
		return -EFAULT;
	/*
	 * Additional feature fields added after ORIG_RSEQ_SIZE
	 * need to be conditionally reset only if
	 * t->rseq_len != ORIG_RSEQ_SIZE.
	 */
	return 0;
}
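
/*
 * A sketch of how user-space can key off the reset cpu_id value after
 * unregistration (names are illustrative; the negative values are the
 * uapi RSEQ_CPU_ID_UNINITIALIZED (-1) and
 * RSEQ_CPU_ID_REGISTRATION_FAILED (-2)):
 *
 *	if ((s32)READ_ONCE(rseq_area->cpu_id) < 0) {
 *		// rseq is not usable on this thread: fall back to a
 *		// non-rseq slow path, or (re-)invoke sys_rseq() before
 *		// taking the fast path again.
 *	}
 */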

static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{
	struct rseq_cs __user *urseq_cs;
	u64 ptr;
	u32 __user *usig;
	u32 sig;
	int ret;

#ifdef CONFIG_64BIT
	if (get_user(ptr, &t->rseq->rseq_cs))
		return -EFAULT;
#else
	if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))
		return -EFAULT;
#endif
	if (!ptr) {
		memset(rseq_cs, 0, sizeof(*rseq_cs));
		return 0;
	}
	if (ptr >= TASK_SIZE)
		return -EINVAL;
	urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
	if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
		return -EFAULT;

	if (rseq_cs->start_ip >= TASK_SIZE ||
	    rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
	    rseq_cs->abort_ip >= TASK_SIZE ||
	    rseq_cs->version > 0)
		return -EINVAL;
	/* Check for overflow. */
	if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
		return -EINVAL;
	/* Ensure that abort_ip is not in the critical section. */
	if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
		return -EINVAL;

	usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
	ret = get_user(sig, usig);
	if (ret)
		return ret;

	if (current->rseq_sig != sig) {
		printk_ratelimited(KERN_WARNING
			"Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
			sig, current->rseq_sig, current->pid, usig);
		return -EINVAL;
	}
	return 0;
}
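
/*
 * For the signature check above to pass, user-space must place the
 * 32-bit value it registered through sys_rseq() immediately before
 * abort_ip. A minimal x86-64 layout sketch (0x53053053 matches the
 * selftests' RSEQ_SIG and is only an example value):
 *
 *	.long	0x53053053	# signature word, read from abort_ip - 4
 * abort_ip:
 *	jmp	abort_handler	# out-of-line abort path
 *
 * Requiring a signature prevents an attacker who can overwrite
 * rseq_cs from redirecting the return ip to arbitrary code.
 */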

static bool rseq_warn_flags(const char *str, u32 flags)
{
	u32 test_flags;

	if (!flags)
		return false;
	test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
	if (test_flags)
		pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
	test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
	if (test_flags)
		pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
	return true;
}

static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
{
	u32 flags, event_mask;
	int ret;

	if (rseq_warn_flags("rseq_cs", cs_flags))
		return -EINVAL;

	/* Get thread flags. */
	ret = get_user(flags, &t->rseq->flags);
	if (ret)
		return ret;

	if (rseq_warn_flags("rseq", flags))
		return -EINVAL;

	/*
	 * Load and clear event mask atomically with respect to
	 * scheduler preemption.
	 */
	preempt_disable();
	event_mask = t->rseq_event_mask;
	t->rseq_event_mask = 0;
	preempt_enable();

	return !!event_mask;
}

static int clear_rseq_cs(struct task_struct *t)
{
	/*
	 * The rseq_cs field is set to NULL on preemption or signal
	 * delivery on top of rseq assembly block, as well as on top
	 * of code outside of the rseq assembly block. This performs
	 * a lazy clear of the rseq_cs field.
	 *
	 * Set rseq_cs to NULL.
	 */
#ifdef CONFIG_64BIT
	return put_user(0UL, &t->rseq->rseq_cs);
#else
	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
		return -EFAULT;
	return 0;
#endif
}

/*
 * Unsigned comparison will be true when ip >= start_ip, and when
 * ip < start_ip + post_commit_offset.
 */
static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs)
{
	return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset;
}
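
/*
 * Worked example of the single-compare trick above: with
 * start_ip == 0x1000 and post_commit_offset == 0x20, an ip of 0x0fff
 * yields ip - start_ip == ULONG_MAX, which is not below 0x20, so
 * addresses before start_ip are rejected by the same unsigned compare
 * that rejects ip >= 0x1020.
 */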

static int rseq_ip_fixup(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);
	struct task_struct *t = current;
	struct rseq_cs rseq_cs;
	int ret;

	ret = rseq_get_rseq_cs(t, &rseq_cs);
	if (ret)
		return ret;

	/*
	 * Handle potentially not being within a critical section.
	 * If not nested over a rseq critical section, restart is useless.
	 * Clear the rseq_cs pointer and return.
	 */
	if (!in_rseq_cs(ip, &rseq_cs))
		return clear_rseq_cs(t);
	ret = rseq_need_restart(t, rseq_cs.flags);
	if (ret <= 0)
		return ret;
	ret = clear_rseq_cs(t);
	if (ret)
		return ret;
	trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
			    rseq_cs.abort_ip);
	instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip);
	return 0;
}

/*
 * This resume handler must always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
	struct task_struct *t = current;
	int ret, sig;

	if (unlikely(t->flags & PF_EXITING))
		return;

	/*
	 * regs is NULL if and only if the caller is in a syscall path. Skip
	 * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
	 * kill a misbehaving userspace on debug kernels.
	 */
	if (regs) {
		ret = rseq_ip_fixup(regs);
		if (unlikely(ret < 0))
			goto error;
	}
	if (unlikely(rseq_update_cpu_node_id(t)))
		goto error;
	return;

error:
	sig = ksig ? ksig->sig : 0;
	force_sigsegv(sig);
}

#ifdef CONFIG_DEBUG_RSEQ

/*
 * Terminate the process if a syscall is issued within a restartable
 * sequence.
 */
void rseq_syscall(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);
	struct task_struct *t = current;
	struct rseq_cs rseq_cs;

	if (!t->rseq)
		return;
	if (rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
		force_sig(SIGSEGV);
}

#endif

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
		int, flags, u32, sig)
{
	int ret;

	if (flags & RSEQ_FLAG_UNREGISTER) {
		if (flags & ~RSEQ_FLAG_UNREGISTER)
			return -EINVAL;
		/* Unregister rseq for current thread. */
		if (current->rseq != rseq || !current->rseq)
			return -EINVAL;
		if (rseq_len != current->rseq_len)
			return -EINVAL;
		if (current->rseq_sig != sig)
			return -EPERM;
		ret = rseq_reset_rseq_cpu_node_id(current);
		if (ret)
			return ret;
		current->rseq = NULL;
		current->rseq_sig = 0;
		current->rseq_len = 0;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		if (current->rseq != rseq || rseq_len != current->rseq_len)
			return -EINVAL;
		if (current->rseq_sig != sig)
			return -EPERM;
		/* Already registered. */
		return -EBUSY;
	}

	/*
	 * If there was no rseq previously registered, ensure the provided rseq
	 * is properly aligned, as communicated to user-space through the ELF
	 * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
	 * size, the required alignment is the original struct rseq alignment.
	 *
	 * In order to be valid, rseq_len is either the original rseq size, or
	 * large enough to contain all supported fields, as communicated to
	 * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
	 */
	if (rseq_len < ORIG_RSEQ_SIZE ||
	    (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
	    (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
					    rseq_len < offsetof(struct rseq, end))))
		return -EINVAL;
	if (!access_ok(rseq, rseq_len))
		return -EFAULT;
	current->rseq = rseq;
	current->rseq_len = rseq_len;
	current->rseq_sig = sig;
	/*
	 * If rseq was previously inactive, and has just been
	 * registered, ensure the cpu_id_start and cpu_id fields
	 * are updated before returning to user-space.
	 */
	rseq_set_notify_resume(current);

	return 0;
}
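
/*
 * Illustrative user-space registration for the calling thread. Note
 * that glibc >= 2.35 registers rseq on its own (see __rseq_offset and
 * __rseq_size); the TLS placement and signature below are assumptions
 * made for this sketch only:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/rseq.h>
 *
 *	#define MY_RSEQ_SIG	0x53053053
 *
 *	static __thread struct rseq rseq_area
 *		__attribute__((aligned(32)));
 *
 *	static int my_rseq_register(void)
 *	{
 *		// rseq has no glibc wrapper; flags == 0 registers.
 *		return syscall(__NR_rseq, &rseq_area, sizeof(rseq_area),
 *			       0, MY_RSEQ_SIG);
 *	}
 *
 * On success, rseq_set_notify_resume() above guarantees cpu_id_start
 * and cpu_id are populated before the thread returns to user-space;
 * calling sys_rseq() again with the same arguments fails with EBUSY.
 */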