/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>

extern atomic64_t last_mm_ctx_id;

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif
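
/*
 * The static keys above gate user-space RDPMC by controlling CR4.PCE;
 * cr4_update_pce() takes 'void *ignored' so that it can be run on every
 * CPU as an on_each_cpu() callback when the rdpmc policy changes.
 */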

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate, map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
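
/*
 * A rough sketch of how the two slots are used (illustrative only; the
 * real logic lives in arch/x86/kernel/ldt.c, and the steps below are
 * paraphrased, not verbatim):
 *
 *	new_ldt->slot = old_ldt ? !old_ldt->slot : 0;
 *	map new_ldt->entries at ldt_slot_va(new_ldt->slot);
 *	install new_ldt and reload the LDT register;
 *	unmap and free old_ldt;	// its slot is now free for the next swap
 */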

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	/*
	 * When switch_mm_irqs_off() is called for a kthread, it may race with
	 * LAM enablement. switch_mm_irqs_off() uses the LAM mask to do two
	 * things: populate CR3 and populate 'cpu_tlbstate.lam'. Make sure it
	 * reads a single value for both.
	 */
	return READ_ONCE(mm->context.lam_cr3_mask);
}
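
/*
 * Why a single READ_ONCE() matters, as a sketch (the real consumer is
 * switch_mm_irqs_off() in arch/x86/mm/tlb.c; details paraphrased):
 *
 *	unsigned long lam = mm_lam_cr3_mask(mm);
 *
 *	write CR3 using 'lam';
 *	record 'lam' in cpu_tlbstate;
 *
 * If the mask were read twice instead, a concurrent LAM enablement
 * could make CR3 and 'cpu_tlbstate.lam' disagree.
 */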

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
	mm->context.untag_mask = oldmm->context.untag_mask;
}

#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return mm->context.untag_mask;
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	mm->context.untag_mask = -1UL;
}
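
/*
 * For illustration (assumed semantics, matching how the untagged_addr()
 * machinery consumes this mask): LAM tag bits in the upper part of a
 * user pointer are stripped by masking, and the -1UL reset value makes
 * untagging a no-op when LAM is off:
 *
 *	untagged = tagged_addr & mm_untag_mask(mm);
 */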

#define arch_pgtable_dma_compat arch_pgtable_dma_compat
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return !mm_lam_cr3_mask(mm) ||
		test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
}
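
/*
 * Rationale for the check above (a reading, not a verbatim comment from
 * the source): devices doing shared-virtual-address DMA don't untag
 * pointers the way the CPU does under LAM, so the page tables are only
 * considered DMA-compatible when LAM is off or the task has explicitly
 * opted in to tagged SVA.
 */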

#else

static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);
	mm->context.next_trim_cpumask = jiffies + HZ;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}
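
/*
 * For context: ctx_id above gives the mm an identity that is unique for
 * the lifetime of the system (it is never reused, unlike ASIDs), and
 * tlb_gen is the mm's TLB-invalidation generation counter, bumped via
 * inc_mm_tlb_gen() in <asm/tlbflush.h> whenever its TLB entries must be
 * invalidated.
 */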

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_enter_mmap(next);		\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}
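
/*
 * arch_dup_mmap() runs on the fork() path (from dup_mmap()), so a new
 * child mm inherits pkey state, paravirt mmap state, the LAM masks and
 * a copy of the parent's LDT, in that order.
 */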

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;

	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;

	return __pkru_allows_pkey(vma_pkey(vma), write);
}
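
/*
 * Sketch of what the final check evaluates (paraphrasing the helpers in
 * <asm/pkru.h>, not a verbatim copy): each pkey owns two PKRU bits,
 * Access-Disable (AD) and Write-Disable (WD), and AD also forbids
 * writes:
 *
 *	read allowed:	!(pkru & (PKRU_AD_BIT << (pkey * 2)))
 *	write allowed:	!(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) << (pkey * 2)))
 */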

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */