/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
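
/*
 * Sketch of the preregistration flow (illustrative only; the VFIO SPAPR
 * TCE container code follows roughly this shape):
 *
 *	mm_iommu_get(mm, ua, entries, &mem);		// pin + preregister
 *	mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);	// translate for DMA
 *	mm_iommu_put(mm, mem);				// release when done
 */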
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
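
/*
 * Radix and hash need different context-switch handling: radix moves to
 * the next mm by updating the PID register, while hash reloads the SLB.
 */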
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_ea_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
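
/*
 * Illustrative calling pattern (a sketch; callers that hand out effective
 * addresses beyond the first context's range do roughly this):
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -ENOMEM;
 *	}
 */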

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

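/*
 * Coprocessor (icswx) support: use_cop() grants and drop_cop() revokes an
 * mm's access to a coprocessor type via the ACOP register; switch_cop()
 * updates ACOP on context switch.
 */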
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing the active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough that it's acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (c == 0 && radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
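
/*
 * Sketch of the expected pairing (illustrative; coprocessor drivers such
 * as cxl follow this pattern in their attach/detach paths):
 *
 *	mm_context_add_copro(mm);
 *	... hardware uses this mm through the nMMU ...
 *	mm_context_remove_copro(mm);
 */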
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
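/* Tell the generic code that we supply our own switch_mm_irqs_off() */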
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk, mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

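/*
 * If the range being unmapped covers the VDSO base, clear vdso_base so
 * this mm no longer advertises a mapped VDSO.
 */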
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */