/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;
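
/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, mirror the incoming task's PID
 * into CONTEXTIDR_EL1 so external debug and trace tools can identify the
 * task currently running on this CPU.
 */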
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
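
/*
 * swapper_pg_dir must only ever live in TTBR1_EL1, so reject it here; the
 * switch goes via the reserved tables so the old translations are
 * unreachable while the new pgd and ASID are being installed.
 */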
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
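
/*
 * Install @idmap in TTBR0_EL1: go via the reserved tables and a local TLB
 * invalidation, and switch TCR.T0SZ to the idmap value, which may cover a
 * larger VA range than the default user setting.
 */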
static inline void __cpu_install_idmap(pgd_t *idmap)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap), &init_mm);
}

static inline void cpu_install_idmap(void)
{
	__cpu_install_idmap(idmap_pg_dir);
}

/*
 * Load our new page tables. A strict BBM approach requires that we ensure that
 * TLBs are free of any entries that may overlap with the global mappings we are
 * about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);

	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	unsigned long daif;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used both when only the boot CPU is
		 * up (i.e. the cpufeature framework is not up yet) and later
		 * when CNP is enabled via cpufeature's enable() callback.
		 * We also rely on the cpu_hwcap bit being set before the
		 * enable() function is called.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	__cpu_install_idmap(idmap);

	/*
	 * We really don't want to take *any* exceptions while TTBR1 is
	 * in the process of being replaced so mask everything.
	 */
	daif = local_daif_save();
	replace_phys(ttbr1);
	local_daif_restore(daif);

	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);
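
/*
 * A new mm starts with no ASID allocated (context.id == 0) and no pinned
 * ASID; a real ASID is assigned lazily by check_and_switch_context() on the
 * first switch to this mm.
 */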
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}
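
/*
 * With CONFIG_ARM64_SW_TTBR0_PAN, the kernel runs with the reserved tables
 * in TTBR0_EL1 and stashes the task's real TTBR0 value (pgd physical address
 * plus ASID) in thread_info, to be restored on the return to userspace.
 */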
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * reserved page tables.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}
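
/*
 * Only switch the hardware context when the mm actually changes; the saved
 * TTBR0 (SW PAN) is refreshed unconditionally, see the comment below.
 */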
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}
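
/*
 * On systems where 32-bit EL0 is only supported on a subset of CPUs, compat
 * tasks must be restricted to that subset; all other tasks may run anywhere.
 */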
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);
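
/*
 * User pointers may carry a tag in bits 63:56 (TBI/MTE), so an untagged
 * address is obtained by clearing the top byte.
 */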
#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL >> 8;
}

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */