/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN    0
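
/*
 * Illustrative use only (not part of this header): network drivers
 * typically reserve NET_IP_ALIGN bytes of headroom in front of the
 * Ethernet header so that the IP header ends up 4-byte aligned, e.g.:
 *
 *      skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *      skb_reserve(skb, NET_IP_ALIGN);
 *
 * With the definition above this is a no-op on arm64, keeping receive
 * buffers naturally aligned for DMA.
 */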

#define MTE_CTRL_GCR_USER_EXCL_SHIFT    0
#define MTE_CTRL_GCR_USER_EXCL_MASK     0xffff

#define MTE_CTRL_TCF_SYNC               (1UL << 16)
#define MTE_CTRL_TCF_ASYNC              (1UL << 17)
#define MTE_CTRL_TCF_ASYMM              (1UL << 18)
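
/*
 * Layout sketch, derived from the definitions above (illustration
 * only): thread_struct::mte_ctrl keeps the user GCR_EL1 exclusion mask
 * in bits [15:0] and the requested tag-check-fault modes in bits
 * [18:16], so e.g.:
 *
 *      excl = mte_ctrl & MTE_CTRL_GCR_USER_EXCL_MASK;
 *      sync = !!(mte_ctrl & MTE_CTRL_TCF_SYNC);
 *
 * More than one TCF bit may be set when the task lets the kernel pick
 * its preferred tag-check mode per CPU.
 */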

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <vdso/processor.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/spectre.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64   (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64            (UL(1) << vabits_actual)
#define TASK_SIZE_MAX           (UL(1) << VA_BITS)

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32            UL(0x100000000)
#else
#define TASK_SIZE_32            (UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES && CONFIG_KUSER_HELPERS */
#define TASK_SIZE               (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)       (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW      (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE               TASK_SIZE_64
#define DEFAULT_MAP_WINDOW      DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
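
/*
 * Worked example (assuming the common VA_BITS_MIN = 48 configuration,
 * for illustration only): DEFAULT_MAP_WINDOW_64 is UL(1) << 48 =
 * 0x0001000000000000, i.e. a native 64-bit task gets a 256 TiB address
 * space by default, while a compat (32-bit) task is capped at
 * TASK_SIZE_32, at or just below 4 GiB depending on the page-size
 * configuration above.
 */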

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX           TASK_SIZE_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX           DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE    0xffff0000
#define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP               STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr, len, flags) \
                (((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
                                        base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
                                        base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
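
/*
 * Behavioural sketch (assuming a kernel with 52-bit VA support on
 * hardware that defaults to 48-bit, illustration only): mmap() results
 * stay below DEFAULT_MAP_WINDOW unless userspace passes an address
 * hint above that window, e.g.:
 *
 *      p = mmap((void *)(1UL << 50), len, prot, flags, fd, 0);
 *
 * in which case arch_get_mmap_end()/arch_get_mmap_base() extend the
 * search range up to the full TASK_SIZE.
 */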

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT  (arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Have we suspended stepping by a debugger? */
        int                     suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int                     bps_disabled;
        int                     wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event       *hbp_break[ARM_MAX_BRP];
        struct perf_event       *hbp_watch[ARM_MAX_WRP];
#endif
};

enum vec_type {
        ARM64_VEC_SVE = 0,
        ARM64_VEC_SME,
        ARM64_VEC_MAX,
};

enum fp_type {
        FP_STATE_CURRENT,       /* Save based on current task state. */
        FP_STATE_FPSIMD,
        FP_STATE_SVE,
};

struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */

        /*
         * Whitelisted fields for hardened usercopy:
         * Maintainers must ensure manually that this contains no
         * implicit padding.
         */
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
                u64             fpmr;
                unsigned long   pad;
                struct user_fpsimd_state fpsimd_state;
        } uw;

        enum fp_type            fp_type;        /* registers FPSIMD or SVE? */
        unsigned int            fpsimd_cpu;
        void                    *sve_state;     /* SVE registers, if any */
        void                    *sme_state;     /* ZA and ZT state, if any */
        unsigned int            vl[ARM64_VEC_MAX];      /* vector length */
        unsigned int            vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
        unsigned long           fault_address;  /* fault info */
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */

        struct user_fpsimd_state        kernel_fpsimd_state;
        unsigned int                    kernel_fpsimd_cpu;
#ifdef CONFIG_ARM64_PTR_AUTH
        struct ptrauth_keys_user        keys_user;
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
        struct ptrauth_keys_kernel      keys_kernel;
#endif
#endif
#ifdef CONFIG_ARM64_MTE
        u64                     mte_ctrl;
#endif
        u64                     sctlr_user;
        u64                     svcr;
        u64                     tpidr2_el0;
        u64                     por_el0;
#ifdef CONFIG_ARM64_GCS
        unsigned int            gcs_el0_mode;
        unsigned int            gcs_el0_locked;
        u64                     gcspr_el0;
        u64                     gcs_base;
        u64                     gcs_size;
#endif
};

static inline unsigned int thread_get_vl(struct thread_struct *thread,
                                         enum vec_type type)
{
        return thread->vl[type];
}

static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
{
        return thread_get_vl(thread, ARM64_VEC_SVE);
}

static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
{
        return thread_get_vl(thread, ARM64_VEC_SME);
}

static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
{
        if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
                return thread_get_sme_vl(thread);
        else
                return thread_get_sve_vl(thread);
}
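
/*
 * Illustrative only: when the task is in SME streaming mode (SVCR.SM
 * set) the effective vector length is the SME one, otherwise the SVE
 * one, so e.g.:
 *
 *      unsigned int vl = thread_get_cur_vl(&current->thread);
 *
 * yields the vector length the FP/vector state handling code should
 * use for the current task.
 */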

unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
                 unsigned long vl);
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
                        unsigned long vl);
unsigned int task_get_vl_onexec(const struct task_struct *task,
                                enum vec_type type);

static inline unsigned int task_get_sve_vl(const struct task_struct *task)
{
        return task_get_vl(task, ARM64_VEC_SVE);
}

static inline unsigned int task_get_sme_vl(const struct task_struct *task)
{
        return task_get_vl(task, ARM64_VEC_SME);
}

static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
        task_set_vl(task, ARM64_VEC_SVE, vl);
}

static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
{
        return task_get_vl_onexec(task, ARM64_VEC_SVE);
}

static inline void task_set_sve_vl_onexec(struct task_struct *task,
                                          unsigned long vl)
{
        task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
}

#define SCTLR_USER_MASK                                                        \
        (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB |   \
         SCTLR_EL1_TCF0_MASK)

static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        /* Verify that there is no padding among the whitelisted fields: */
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
                     sizeof_field(struct thread_struct, uw.fpmr) +
                     sizeof_field(struct thread_struct, uw.pad) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));

        *offset = offsetof(struct thread_struct, uw);
        *size = sizeof_field(struct thread_struct, uw);
}
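
/*
 * Usage sketch (illustrative only): with CONFIG_HARDENED_USERCOPY the
 * core fork code uses arch_thread_struct_whitelist() when creating the
 * task_struct slab cache, so that usercopy to or from thread_struct is
 * only permitted within the [offset, offset + size) window returned
 * here, i.e. the uw block holding the TLS values, FPMR and FPSIMD
 * state.
 */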

#ifdef CONFIG_COMPAT
#define task_user_tls(t)                                                \
({                                                                      \
        unsigned long *__tls;                                           \
        if (is_compat_thread(task_thread_info(t)))                      \
                __tls = &(t)->thread.uw.tp2_value;                      \
        else                                                            \
                __tls = &(t)->thread.uw.tp_value;                       \
        __tls;                                                          \
 })
#else
#define task_user_tls(t)        (&(t)->thread.uw.tp_value)
#endif
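
/*
 * Illustrative only: task_user_tls() returns the thread_struct slot
 * that shadows the task's TPIDR_EL0 value (uw.tp2_value for compat
 * tasks, uw.tp_value otherwise), so the declaration below can be
 * thought of as doing:
 *
 *      *task_user_tls(current) = read_sysreg(tpidr_el0);
 */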

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {                           \
        .fpsimd_cpu = NR_CPUS,                  \
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc,
                                       unsigned long pstate)
{
        /*
         * Ensure all GPRs are zeroed, and initialize PC + PSTATE.
         * The SP (or compat SP) will be initialized later.
         */
        regs->user_regs = (struct user_pt_regs) {
                .pc = pc,
                .pstate = pstate,
        };

        /*
         * To allow the syscalls:sys_exit_execve tracepoint we need to preserve
         * syscallno, but do not need orig_x0 or the original GPRs.
         */
        regs->orig_x0 = 0;

        /*
         * An exec from a kernel thread won't have an existing PMR value.
         */
        if (system_uses_irq_prio_masking())
                regs->pmr = GIC_PRIO_IRQON;

        /*
         * The pt_regs::stackframe field must remain valid throughout this
         * function as a stacktrace can be taken at any time. Any user or
         * kernel task should have a valid final frame.
         */
        WARN_ON_ONCE(regs->stackframe.record.fp != 0);
        WARN_ON_ONCE(regs->stackframe.record.lr != 0);
        WARN_ON_ONCE(regs->stackframe.type != FRAME_META_TYPE_FINAL);
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc, PSR_MODE_EL0t);
        spectre_v4_enable_task_mitigation(current);
        regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        unsigned long pstate = PSR_AA32_MODE_USR;
        if (pc & 1)
                pstate |= PSR_AA32_T_BIT;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                pstate |= PSR_AA32_E_BIT;

        start_thread_common(regs, pc, pstate);
        spectre_v4_enable_task_mitigation(current);
        regs->compat_sp = sp;
}
#endif
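
/*
 * Illustrative only: the binfmt loaders are the typical callers, e.g.
 * a simplified ELF loader finishes with something like:
 *
 *      start_thread(regs, elf_entry, stack_pointer);
 *
 * which zeroes the GPRs, sets an EL0 PSTATE and points PC at the new
 * program's entry. compat_start_thread() does the same for 32-bit
 * (AArch32) binaries, additionally selecting Thumb mode when bit 0 of
 * the entry point is set.
 */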

static __always_inline bool is_ttbr0_addr(unsigned long addr)
{
        /* entry assembly clears tags for TTBR0 addrs */
        return addr < TASK_SIZE;
}

static __always_inline bool is_ttbr1_addr(unsigned long addr)
{
        /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
        return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
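
/*
 * Illustrative only (handler names are hypothetical): fault handling
 * code uses these helpers to tell user and kernel pointers apart, e.g.:
 *
 *      if (is_ttbr0_addr(addr))
 *              handle_user_fault(addr);
 *      else if (is_ttbr1_addr(addr))
 *              handle_kernel_fault(addr);
 *
 * Addresses in the gap between TASK_SIZE and PAGE_OFFSET match neither
 * helper.
 */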

/* Forward declaration, a strange C thing */
struct task_struct;

unsigned long __get_wchan(struct task_struct *p);

void update_sctlr_el1(u64 sctlr);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}
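
/*
 * Illustrative only (process() is a stand-in): callers typically
 * prefetch the next element while working on the current one, e.g.
 * when walking a linked list:
 *
 *      list_for_each_entry(pos, head, list) {
 *              prefetch(pos->list.next);
 *              process(pos);
 *      }
 *
 * prefetch() hints a read (PLDL1KEEP), prefetchw() an intent to write
 * (PSTL1KEEP).
 */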

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL()    sve_get_current_vl()
#define SME_SET_VL(arg) sme_set_current_vl(arg)
#define SME_GET_VL()    sme_get_current_vl()
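
/*
 * Illustrative only, seen from the userspace side of this interface: a
 * thread requests a 256-bit SVE vector length and reads back the
 * result with:
 *
 *      prctl(PR_SVE_SET_VL, 32);                       // VL in bytes
 *      vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 *
 * The kernel clamps the request to a supported vector length, so the
 * value read back may differ from the one requested.
 */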

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)        ptrauth_prctl_reset_keys(tsk, arg)

/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled)                                \
        ptrauth_set_enabled_keys(tsk, keys, enabled)
#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)       set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()          get_tagged_addr_ctrl(current)
#endif
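
/*
 * Illustrative only, again from userspace: a thread opts into the
 * tagged address ABI with:
 *
 *      prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * after which syscalls accept user pointers with a non-zero top byte.
 * The same prctl also carries the MTE tag-check-fault settings that
 * set_tagged_addr_ctrl() consumes.
 */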

int get_tsc_mode(unsigned long adr);
int set_tsc_mode(unsigned int val);
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */