/* arch/sparc64/mm/tlb.c
 *
 * TLB flush batching for sparc64.
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
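
/* Flush every virtual address queued in this cpu's batch.  A batch of
 * one uses the cheaper single-page flush; larger batches go through the
 * cross-call (SMP) or local pending-flush path.
 */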
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;
out:
	put_cpu_var(tlb_batch);
}
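
/* Lazy MMU mode brackets a run of page table updates: entering marks
 * the per-cpu batch active so addresses can be queued, leaving flushes
 * whatever is still pending and deactivates the batch.
 */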
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
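
/* Queue one address of @mm for a deferred TLB flush.  The executable
 * bit is encoded in bit 0 of the stored address.  A batch holding
 * entries for a different mm is flushed first, and if batching is not
 * active the address is flushed immediately rather than queued.
 */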
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
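
/* Called when a user mapping is changed or torn down: possibly write
 * the page back out of the D-cache before queueing the TLB flush.  On
 * pre-hypervisor chips the D-cache is virtually indexed, so a dirty
 * page whose kernel and user addresses differ in bit 13 (the 8K page
 * bit) lands at different cache indexes and must be flushed by hand.
 */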
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
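/* The PMD being replaced mapped a normal pte page: walk the 8K ptes
 * underneath it and queue a flush for each valid entry covering the
 * huge-page-sized region.
 */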
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	pte_t *pte;
	unsigned long end;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
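
/* Install a new PMD value.  A transition into or out of a huge mapping
 * adjusts the per-mm hugepage count, and any previously valid mapping
 * under this PMD gets its TLB entries queued for flushing.  A huge
 * mapping is flushed as two REAL_HPAGE_SIZE halves because each
 * HPAGE_SIZE page is backed by two hardware TLB entries.
 */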
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
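
/* Clear _PAGE_VALID in the huge PMD so the hardware can no longer use
 * the translation, then flush the range it covered.
 */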
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
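
/* Deposit a pre-allocated pte page for a huge PMD, to be handed back if
 * the huge mapping is later split.  Deposited pages are chained through
 * a list_head overlaid on the pte page itself.
 */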
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
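
/* Withdraw a previously deposited pte page and clear its first two
 * entries, which held the list linkage while it sat on the deposit
 * list.
 */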
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */