/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif
#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif
#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif
/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
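
/*
 * Reference note (a hedged sketch, not part of the original file): MAIR_EL1
 * holds eight 8-bit attribute fields, one per memory-type index, so the
 * helper amounts to a shift into the slot selected by the index:
 *
 *	#define MAIR_ATTRIDX(attr, idx)	((attr) << ((idx) * 8))
 *
 * e.g. with MAIR_ATTR_NORMAL == 0xff, the MT_NORMAL slot ends up holding
 * 0xff at bits [MT_NORMAL * 8 + 7 : MT_NORMAL * 8].
 */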
/*
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
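/*
 * For orientation, a hedged sketch of that structure (check <asm/suspend.h>
 * for the authoritative definition; NR_CTX_REGS may differ across versions):
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[NR_CTX_REGS];	// filled by the stp pairs below
 *		u64 sp;
 *	} __aligned(16);
 *
 * The stp offsets below (#64, #80, ...) index successive 16-byte pairs of
 * ctx_regs.
 */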
SYM_FUNC_START(cpu_do_suspend)
	mrs	x4, contextidr_el1
	get_this_cpu_offset x12
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
/*
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	contextidr_el1, x4
	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10
	set_this_cpu_offset x13
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
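
/*
 * Hedged usage note (not part of the original file): cpu_do_suspend and
 * cpu_do_resume are the two halves of the arm64 suspend path. A platform
 * driver enters through cpu_suspend(), which has the context above saved
 * before power-down; on wake-up the early resume code (cpu_resume in
 * arch/arm64/kernel/sleep.S) reaches cpu_do_resume with x0 pointing at the
 * saved struct cpu_suspend_ctx. A caller looks roughly like:
 *
 *	int ret = cpu_suspend(state, platform_finisher);  // names illustrative
 */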
	.pushsection ".idmap.text", "awx"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm
/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection
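
/*
 * A hedged sketch of the C-side call pattern (see cpu_replace_ttbr1() in
 * <asm/mmu_context.h> for the authoritative version): the caller jumps to
 * this routine through its physical (idmap) alias while TTBR1 is in flux,
 * roughly:
 *
 *	typedef void (ttbr_replace_func)(phys_addr_t);
 *	ttbr_replace_func *replace_phys =
 *		(void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 *
 *	cpu_install_idmap();
 *	replace_phys(phys_to_ttbr(virt_to_phys(pgdp)));
 *	cpu_uninstall_idmap();
 */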
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
	.pushsection ".idmap.text", "awx"

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type	// Descend into table entries
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm
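
/*
 * Hedged background note (architectural, not from the original file): in a
 * stage 1 descriptor, bit 0 is the valid bit, bit 1 distinguishes table
 * descriptors from blocks, and bit 11 is nG (not-global), which is what
 * PTE_NG sets. A non-global mapping is tagged in the TLB with the current
 * ASID, which is how KPTI keeps kernel translations out of userspace reach.
 */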
	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm
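
/*
 * Hedged note on the break-before-make sequence above (background, not from
 * the original file): the architecture forbids replacing one valid mapping
 * with another directly, so the macro first zaps the fixmap slot, completes
 * that store (dsb), invalidates any cached translation for the slot's VA
 * (tlbi vaae1), and only then installs the new entry pointing at the
 * next-level table.
 */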
/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection	".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection
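
/*
 * A hedged sketch of the rendezvous protocol built on __idmap_kpti_flag
 * (C-style pseudocode, not part of the original file; the flag starts at 1
 * for the boot CPU):
 *
 *	secondary:	atomic_inc(&flag);		// "swapper uninstalled"
 *			while (READ_ONCE(flag) != 0)
 *				;			// wait for release
 *
 *	boot CPU:	while (atomic_read(&flag) != num_cpus)
 *				;			// gather all CPUs
 *			// ... rewrite swapper with nG set ...
 *			WRITE_ONCE(flag, 0);		// release everyone
 */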
SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	temp_pgd_phys	.req	x2

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb
	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

	/* PGD */
	adrp	cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret
.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd
	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
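
/*
 * Hedged sketch of the C-side invocation (see kpti_install_ng_mappings() in
 * arch/arm64/kernel/cpufeature.c for the authoritative version): each CPU
 * calls in here via the idmap alias from stop_machine() context, roughly:
 *
 *	typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
 *	kpti_remap_fn *remap_fn =
 *		(void *)__pa_symbol(idmap_kpti_install_ng_mappings);
 *
 *	cpu_install_idmap();
 *	remap_fn(smp_processor_id(), num_online_cpus() - 1,
 *		 temp_pgd_phys, temp_pte_va);	// argument names illustrative
 *	cpu_uninstall_idmap();
 */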
/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
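
/*
 * Hedged arithmetic note (not part of the original file): TxSZ encodes the
 * input address size as 64 - VA_BITS, placed in both halves of TCR_EL1:
 *
 *	TCR_TxSZ(va) == ((64 - (va)) << TCR_T0SZ_OFFSET) |
 *			((64 - (va)) << TCR_T1SZ_OFFSET)
 *
 * so VA_BITS == 48 gives T0SZ == T1SZ == 16, and VA_BITS == 39 gives 25.
 */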
	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0
	add		x9, x9, #64
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz	x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
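
/*
 * Hedged architectural note (not from the original file): the field tested
 * above is ID_AA64MMFR1_EL1.HAFDBS, bits [3:0]; 0b0001 means hardware can
 * update the Access flag, 0b0010 means it can also manage the dirty state,
 * which is why dirty bit handling is enabled separately via capabilities.
 */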
	msr	mair_el1, mair
	msr	tcr_el1, tcr
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)