// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

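/* Flush every address queued in this CPU's tlb_batch.  The TSB
 * entries for the batch are knocked out first; if the mm still owns
 * a valid hardware context, the TLB itself is then flushed, either
 * one page at a time (single entry) or via a batched flush that is
 * a cross-call on SMP.
 */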
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

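/* Lazy MMU mode brackets a run of page table updates so that the
 * TLB/TSB flushes they generate can be batched rather than issued
 * one at a time.  The generic mm code drives it roughly like this
 * (illustrative sketch only):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for each pte in range:
 *		ptep_get_and_clear() / set_pte_at() / ...;
 *	arch_leave_lazy_mmu_mode();
 *
 * While the batch is marked active, tlb_batch_add_one() queues
 * addresses; leaving the mode drains anything still pending.
 */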
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

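/* Drain any pending batched flushes and drop out of lazy MMU mode. */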
void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

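/* Queue one user virtual address for a deferred TLB flush.  Bit 0 of
 * the page-aligned address records whether the old translation was
 * executable so the I-TLB can be flushed too.  If batching is not
 * active the flush is issued immediately; if the batch would mix mms
 * or hugepage shifts, or is full, it is drained first.
 */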
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

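/* Called when an established user PTE is replaced.  On pre-hypervisor
 * (sun4u) chips the D-cache is virtually indexed, so a dirty
 * file-backed page whose kernel and user mappings fall in different
 * cache colours (bit 13) has its D-cache lines flushed here first.
 * The TLB flush itself is then queued, unless the whole address
 * space is being torn down (fullmm), where per-page flushes would be
 * pointless.
 */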
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
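/* Queue a flush for every valid PTE in the page table that used to
 * map the huge-page-sized region at @vaddr.  Used when the PMD being
 * replaced pointed at a regular page table rather than a huge page.
 */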
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

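/* Accounting and TLB maintenance common to all PMD updates: keep the
 * per-mm THP and huge-zero-page counts (consulted when sizing the
 * huge TSB) in sync, and queue flushes for whatever the old PMD
 * mapped: two REAL_HPAGE-sized flushes for a former huge page, or a
 * PTE-by-PTE scan for a former page table.
 */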
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

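/* Install a new PMD value and account for/flush the old one. */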
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

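/* Atomically replace *pmdp with @pmd, retrying the cmpxchg until it
 * sticks, account for the change, and return the previous value.
 */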
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

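/* Stash a preallocated page table beneath the huge PMD so it can be
 * reused when the huge mapping is split.  The pgtable page itself is
 * pressed into service as the list_head linking the deposits.
 */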
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

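/* Take back a previously deposited page table, clearing its first
 * two PTEs (which were overlaid by the list linkage) before handing
 * it out.
 */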
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */