arch/arm64/include/asm/uaccess.h (Linux 6.14-rc3)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

static inline int __access_ok(const void __user *ptr, unsigned long size);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline int access_ok(const void __user *addr, unsigned long size)
{
        /*
         * Asynchronous I/O running in a kernel thread does not have the
         * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
         * the user address before checking.
         */
        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
            (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
                addr = untagged_addr(addr);

        return likely(__access_ok(addr, size));
}
#define access_ok access_ok

#include <asm-generic/access_ok.h>
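
/*
 * Sketch (not defined here): the generic __access_ok() pulled in above
 * realizes the u65 comparison without needing 65-bit arithmetic, by
 * rearranging it into two unsigned checks that cannot overflow:
 *
 *      return (size <= TASK_SIZE_MAX) &&
 *             ((unsigned long)addr <= TASK_SIZE_MAX - size);
 */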

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
        unsigned long flags, ttbr;

        local_irq_save(flags);
        ttbr = read_sysreg(ttbr1_el1);
        ttbr &= ~TTBR_ASID_MASK;
        /* reserved_pg_dir placed before swapper_pg_dir */
        write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
        /* Set reserved ASID */
        write_sysreg(ttbr, ttbr1_el1);
        isb();
        local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
        unsigned long flags, ttbr0, ttbr1;

        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
         * variable and the MSR. A context switch could trigger an ASID
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
        ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

        /* Restore active ASID */
        ttbr1 = read_sysreg(ttbr1_el1);
        ttbr1 &= ~TTBR_ASID_MASK;               /* safety measure */
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;
        write_sysreg(ttbr1, ttbr1_el1);

        /* Restore user page table */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_disable();
        return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_enable();
        return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
        return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
        return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

static inline void uaccess_disable_privileged(void)
{
        mte_disable_tco();

        if (uaccess_ttbr0_disable())
                return;

        __uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
        mte_enable_tco();

        if (uaccess_ttbr0_enable())
                return;

        __uaccess_disable_hw_pan();
}
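
/*
 * Illustrative pairing (sketch only): callers needing privileged
 * loads/stores to user memory bracket the access so that either TTBR0
 * is restored (SW PAN) or PSTATE.PAN is cleared (HW PAN) only for the
 * duration of the access:
 *
 *      uaccess_enable_privileged();
 *      err = do_privileged_user_access();      // hypothetical helper
 *      uaccess_disable_privileged();
 */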

/*
 * Sanitize a uaccess pointer such that it cannot reach any kernel address.
 *
 * Clearing bit 55 ensures the pointer cannot address any portion of the TTBR1
 * address range (i.e. any kernel address), and either the pointer falls within
 * the TTBR0 address range or must cause a fault.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
        void __user *safe_ptr;

        asm volatile(
        "       bic     %0, %1, %2\n"
        : "=r" (safe_ptr)
        : "r" (ptr),
          "i" (BIT(55))
        );

        return safe_ptr;
}
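
/*
 * Worked example (addresses illustrative): a kernel pointer such as
 * 0xffff000012345678 has bit 55 set; BIC with BIT(55) yields
 * 0xff7f000012345678. Bit 55 now selects TTBR0, but the remaining
 * upper bits are non-zero, so the access faults instead of reaching
 * kernel memory.
 */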

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_mem_asm(load, reg, x, addr, label, type)                  \
        asm_goto_output(                                                \
        "1:     " load "        " reg "0, [%1]\n"                       \
        _ASM_EXTABLE_##type##ACCESS(1b, %l2)                            \
        : "=r" (x)                                                      \
        : "r" (addr) : : label)
#else
#define __get_mem_asm(load, reg, x, addr, label, type) do {             \
        int __gma_err = 0;                                              \
        asm volatile(                                                   \
        "1:     " load "        " reg "1, [%2]\n"                       \
        "2:\n"                                                          \
        _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)          \
        : "+r" (__gma_err), "=r" (x)                                    \
        : "r" (addr));                                                  \
        if (__gma_err) goto label; } while (0)
#endif

#define __raw_get_mem(ldr, x, ptr, label, type)                                 \
do {                                                                            \
        unsigned long __gu_val;                                                 \
        switch (sizeof(*(ptr))) {                                               \
        case 1:                                                                 \
                __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);     \
                break;                                                          \
        case 2:                                                                 \
                __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);     \
                break;                                                          \
        case 4:                                                                 \
                __get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);         \
                break;                                                          \
        case 8:                                                                 \
                __get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type);         \
                break;                                                          \
        default:                                                                \
                BUILD_BUG();                                                    \
        }                                                                       \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, label)                                   \
do {                                                                    \
        __typeof__(*(ptr)) __user *__rgu_ptr = (ptr);                   \
        __typeof__(x) __rgu_val;                                        \
        __chk_user_ptr(ptr);                                            \
        do {                                                            \
                __label__ __rgu_failed;                                 \
                uaccess_ttbr0_enable();                                 \
                __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);   \
                uaccess_ttbr0_disable();                                \
                (x) = __rgu_val;                                        \
                break;                                                  \
        __rgu_failed:                                                   \
                uaccess_ttbr0_disable();                                \
                goto label;                                             \
        } while (0);                                                    \
} while (0)

#define __get_user_error(x, ptr, err)                                   \
do {                                                                    \
        __label__ __gu_failed;                                          \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        if (access_ok(__p, sizeof(*__p))) {                             \
                __p = uaccess_mask_ptr(__p);                            \
                __raw_get_user((x), __p, __gu_failed);                  \
        } else {                                                        \
        __gu_failed:                                                    \
                (x) = (__force __typeof__(x))0; (err) = -EFAULT;        \
        }                                                               \
} while (0)

#define __get_user(x, ptr)                                              \
({                                                                      \
        int __gu_err = 0;                                               \
        __get_user_error((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
})

#define get_user        __get_user
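
/*
 * Usage sketch (illustrative): get_user() returns 0 on success and
 * -EFAULT on a bad address, zeroing the destination on failure:
 *
 *      u32 val;
 *
 *      if (get_user(val, (u32 __user *)uptr))
 *              return -EFAULT;
 */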

/*
 * We must not call into the scheduler between __mte_enable_tco_async() and
 * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)                 \
do {                                                                    \
        __typeof__(dst) __gkn_dst = (dst);                              \
        __typeof__(src) __gkn_src = (src);                              \
        do {                                                            \
                __label__ __gkn_label;                                  \
                                                                        \
                __mte_enable_tco_async();                               \
                __raw_get_mem("ldr", *((type *)(__gkn_dst)),            \
                      (__force type *)(__gkn_src), __gkn_label, K);     \
                __mte_disable_tco_async();                              \
                break;                                                  \
        __gkn_label:                                                    \
                __mte_disable_tco_async();                              \
                goto err_label;                                         \
        } while (0);                                                    \
} while (0)
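
/*
 * Sketch (for orientation): this is the arch hook consumed by the
 * generic copy_from_kernel_nofault() machinery in mm/maccess.c, which
 * invokes it per size class, roughly:
 *
 *      __get_kernel_nofault(dst, src, u64, Efault);
 */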

#define __put_mem_asm(store, reg, x, addr, label, type)                 \
        asm goto(                                                       \
        "1:     " store "       " reg "0, [%1]\n"                       \
        "2:\n"                                                          \
        _ASM_EXTABLE_##type##ACCESS(1b, %l2)                            \
        : : "rZ" (x), "r" (addr) : : label)

#define __raw_put_mem(str, x, ptr, label, type)                                 \
do {                                                                            \
        __typeof__(*(ptr)) __pu_val = (x);                                      \
        switch (sizeof(*(ptr))) {                                               \
        case 1:                                                                 \
                __put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type);     \
                break;                                                          \
        case 2:                                                                 \
                __put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type);     \
                break;                                                          \
        case 4:                                                                 \
                __put_mem_asm(str, "%w", __pu_val, (ptr), label, type);         \
                break;                                                          \
        case 8:                                                                 \
                __put_mem_asm(str, "%x", __pu_val, (ptr), label, type);         \
                break;                                                          \
        default:                                                                \
                BUILD_BUG();                                                    \
        }                                                                       \
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, label)                                   \
do {                                                                    \
        __label__ __rpu_failed;                                         \
        __typeof__(*(ptr)) __user *__rpu_ptr = (ptr);                   \
        __typeof__(*(ptr)) __rpu_val = (x);                             \
        __chk_user_ptr(__rpu_ptr);                                      \
                                                                        \
        do {                                                            \
                uaccess_ttbr0_enable();                                 \
                __raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U);   \
                uaccess_ttbr0_disable();                                \
                break;                                                  \
        __rpu_failed:                                                   \
                uaccess_ttbr0_disable();                                \
                goto label;                                             \
        } while (0);                                                    \
} while (0)

#define __put_user_error(x, ptr, err)                                   \
do {                                                                    \
        __label__ __pu_failed;                                          \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        if (access_ok(__p, sizeof(*__p))) {                             \
                __p = uaccess_mask_ptr(__p);                            \
                __raw_put_user((x), __p, __pu_failed);                  \
        } else {                                                        \
        __pu_failed:                                                    \
                (err) = -EFAULT;                                        \
        }                                                               \
} while (0)

#define __put_user(x, ptr)                                              \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_error((x), (ptr), __pu_err);                         \
        __pu_err;                                                       \
})

#define put_user        __put_user
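
/*
 * Usage sketch (illustrative): put_user() mirrors get_user(),
 * returning 0 on success and -EFAULT on a faulting store:
 *
 *      if (put_user(val, (u32 __user *)uptr))
 *              return -EFAULT;
 */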

/*
 * We must not call into the scheduler between __mte_enable_tco_async() and
 * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)                 \
do {                                                                    \
        __typeof__(dst) __pkn_dst = (dst);                              \
        __typeof__(src) __pkn_src = (src);                              \
                                                                        \
        do {                                                            \
                __label__ __pkn_err;                                    \
                __mte_enable_tco_async();                               \
                __raw_put_mem("str", *((type *)(__pkn_src)),            \
                              (__force type *)(__pkn_dst), __pkn_err, K);       \
                __mte_disable_tco_async();                              \
                break;                                                  \
        __pkn_err:                                                      \
                __mte_disable_tco_async();                              \
                goto err_label;                                         \
        } while (0);                                                    \
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)                                 \
({                                                                      \
        unsigned long __acfu_ret;                                       \
        uaccess_ttbr0_enable();                                         \
        __acfu_ret = __arch_copy_from_user((to),                        \
                                      __uaccess_mask_ptr(from), (n));   \
        uaccess_ttbr0_disable();                                        \
        __acfu_ret;                                                     \
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)                                   \
({                                                                      \
        unsigned long __actu_ret;                                       \
        uaccess_ttbr0_enable();                                         \
        __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),        \
                                    (from), (n));                       \
        uaccess_ttbr0_disable();                                        \
        __actu_ret;                                                     \
})
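
/*
 * Note (sketch): these raw helpers return the number of bytes NOT
 * copied; the generic copy_{from,to}_user() wrappers in
 * <linux/uaccess.h> add the access_ok() check and instrumentation
 * around them, e.g.:
 *
 *      if (copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */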

static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        uaccess_ttbr0_enable();
        return true;
}
#define user_access_begin(a, b) user_access_begin(a, b)
#define user_access_end()       uaccess_ttbr0_disable()
#define unsafe_put_user(x, ptr, label) \
        __raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
#define unsafe_get_user(x, ptr, label) \
        __raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
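
/*
 * Usage sketch (illustrative): the unsafe accessors must sit inside a
 * user_access_begin()/user_access_end() pair, with faults routed to a
 * local label:
 *
 *      if (!user_access_begin(uptr, sizeof(u64)))
 *              return -EFAULT;
 *      unsafe_put_user(val, uptr, Efault);
 *      user_access_end();
 *      return 0;
 * Efault:
 *      user_access_end();
 *      return -EFAULT;
 */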

/*
 * KCSAN uses these to save and restore ttbr state.
 * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
 * they are no-ops.
 */
static inline unsigned long user_access_save(void) { return 0; }
static inline void user_access_restore(unsigned long enabled) { }

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)                            \
        while (len >= sizeof(type)) {                                           \
                unsafe_put_user(*(type *)(src), (type __user *)(dst), label);   \
                dst += sizeof(type);                                            \
                src += sizeof(type);                                            \
                len -= sizeof(type);                                            \
        }

#define unsafe_copy_to_user(_dst, _src, _len, label)                    \
do {                                                                    \
        char __user *__ucu_dst = (_dst);                                \
        const char *__ucu_src = (_src);                                 \
        size_t __ucu_len = (_len);                                      \
        unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);  \
        unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);  \
        unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);  \
        unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);   \
} while (0)
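
/*
 * Worked example (illustrative): for a 15-byte copy the descending
 * size classes above emit one u64 store, one u32, one u16 and one u8
 * (8 + 4 + 2 + 1 = 15), each faulting to `label` on error.
 */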

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n)) {
                uaccess_ttbr0_enable();
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
                uaccess_ttbr0_disable();
        }
        return n;
}
#define clear_user      __clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
static inline size_t probe_subpage_writeable(const char __user *uaddr,
                                             size_t size)
{
        if (!system_supports_mte())
                return 0;
        return mte_probe_user_range(uaddr, size);
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
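
/*
 * Note (sketch): probe_subpage_writeable() exists because MTE can
 * fault on sub-page granules that a page-granular fault-in loop would
 * miss; the generic fault_in_subpage_writeable() helper uses it, e.g.:
 *
 *      if (fault_in_subpage_writeable(uaddr, len))
 *              ;       // some bytes could not be faulted in
 */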

#ifdef CONFIG_ARM64_GCS

static inline int gcssttr(unsigned long __user *addr, unsigned long val)
{
        register unsigned long __user *_addr __asm__ ("x0") = addr;
        register unsigned long _val __asm__ ("x1") = val;
        int err = 0;

        /* GCSSTTR x1, x0 */
        asm volatile(
                "1: .inst 0xd91f1c01\n"
                "2: \n"
                _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
                : "+r" (err)
                : "rZ" (_val), "r" (_addr)
                : "memory");

        return err;
}
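
/*
 * Note (sketch): the raw .inst encoding is used so the file assembles
 * with toolchains that do not yet know the GCS extension; the operands
 * are pinned to x0/x1 above to match the hand-encoded instruction.
 */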

static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
                                int *err)
{
        int ret;

        if (!access_ok((char __user *)addr, sizeof(u64))) {
                *err = -EFAULT;
                return;
        }

        uaccess_ttbr0_enable();
        ret = gcssttr(addr, val);
        if (ret != 0)
                *err = ret;
        uaccess_ttbr0_disable();
}

#endif /* CONFIG_ARM64_GCS */

#endif /* __ASM_UACCESS_H */