// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
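
/* Flush all virtual addresses queued in this CPU's batch.  The TSB
 * entries are scrubbed first, then the TLB entries are shot down
 * (locally or via cross-call), but only if the mm still owns a valid
 * hardware context.
 */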
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
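
/* Lazy MMU mode brackets a run of page table updates.  While tb->active
 * is set, tlb_batch_add_one() queues addresses instead of flushing them
 * immediately; leaving the mode drains whatever is still pending.
 */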
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}
void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
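
/* Queue one (vaddr, exec) pair for a deferred flush.  The batch is
 * drained early if the mm or the hugepage shift changes, if batching
 * is not active, or once TLB_BATCH_NR entries have accumulated.
 */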
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
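
/* Entry point from the core VM when a user PTE is torn down.  On
 * pre-hypervisor chips, whose D-caches can alias, a dirty file page
 * whose kernel and user mappings differ in bit 13 must have its
 * D-cache lines flushed before the translation goes away.
 */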
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
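/* A non-huge PMD covering an HPAGE_SIZE region is flushed one base page
 * at a time: walk the underlying PTE page and queue every valid entry.
 */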
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	pte_t *pte;
	unsigned long end;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
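
/* Bookkeeping done whenever a PMD changes: maintain the THP and
 * huge-zero-page counters that drive TSB sizing, and queue TLB flushes
 * for whatever mapping is being replaced.  Kernel (init_mm) mappings
 * are ignored.
 */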
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
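
/* Non-atomic PMD store plus the accounting above. */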
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}
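
/* Atomically replace *pmdp via cmpxchg64, then account for the change.
 * Returns the previous PMD value.
 */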
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}
/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}
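
/* The deposited page table is kept on a list threaded through the page
 * table page itself, so the first two PTE slots double as a list_head.
 * Must be called with page_table_lock held.
 */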
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
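
/* Detach a deposited page table from the per-PMD list and clear the two
 * PTE slots that served as list linkage before handing it back.
 */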
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */