// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

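/* Each CPU accumulates pending virtual-address TLB flushes in a
 * private struct tlb_batch so that individual page invalidations
 * can be coalesced into a single flush operation.
 */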
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

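/* Push out every address queued in this CPU's batch.  A batch of
 * one uses the cheaper single-page flush; larger batches go through
 * the pending-flush path, cross-calling other CPUs on SMP.
 */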
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

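/* Lazy MMU mode brackets a run of page table updates: while the
 * batch is marked active, flushes are queued and only issued when
 * the batch fills or the mode is left.
 */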
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

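/* Queue one address for a deferred flush.  Bit 0 of the queued
 * address encodes whether the mapping was executable.  A change of
 * address space or hugepage shift flushes the pending batch first,
 * and when batching is inactive the flush is performed immediately.
 */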
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

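/* Main batching entry point, called when an established user PTE is
 * replaced.  On pre-hypervisor chips a dirty file-backed page whose
 * kernel and user addresses differ in bit 13 (the cache color with
 * 8K pages) may alias in the virtually-indexed D-cache, so it is
 * flushed from the D-cache here as well.
 */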
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
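/* A non-huge pmd here maps a regular pte page: walk the ptes
 * covering the huge-page-sized region and queue each valid entry
 * individually.
 */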
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

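/* Keep the per-mm THP and huge-zero-page counts in step with a pmd
 * update, and queue TLB flushes for any mapping being replaced.
 * The counts let the TSB code know whether a huge-page TSB is
 * needed.
 */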
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

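/* Install the new pmd value, then update accounting and flush state
 * against the value it replaced.
 */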
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

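/* Atomically swap in the new pmd with cmpxchg64(), retrying until
 * the exchange succeeds so that concurrent updates of the entry are
 * not lost, and return the value that was replaced.
 */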
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

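/* THP deposit/withdraw pair: a preallocated pte page is parked on a
 * per-pmd list (the page itself doubles as the list node) so that a
 * later split of the huge pmd never has to allocate.
 */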
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */