// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

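/*
 * Allocate the page table entries needed to map a huge page at @addr,
 * walking (and allocating where necessary) the PGD/P4D/PUD/PMD levels.
 * Returns the first sub-PTE of the huge page, or NULL if an intermediate
 * level could not be allocated.
 */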
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range. So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_huge(mm, pmd, addr);
	}
	return pte;
}

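/*
 * Look up the huge page PTE for @addr without allocating anything.
 * Returns NULL if any intermediate page table level is not present.
 */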
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_huge(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries. Must be called holding
 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
	purge_tlb_entries_huge(mm, addr_start);
}

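/*
 * Install a huge page mapping. The helper writes every sub-PTE of the
 * huge page, so the @sz argument from the hugetlb core is unused here.
 */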
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

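/*
 * Read the current huge page PTE and clear the mapping by writing
 * zero PTEs across the whole huge page range.
 */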
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry = *ptep;

	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	return entry;
}

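/*
 * Write-protect a huge page mapping by rewriting all of its sub-PTEs
 * with the write bit cleared.
 */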
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = *ptep;

	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

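/*
 * Update the access/dirty flags of a huge page mapping. The sub-PTEs
 * are rewritten only if the new PTE differs; returns whether anything
 * changed.
 */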
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	changed = !pte_same(*ptep, pte);
	if (changed)
		__set_huge_pte_at(mm, addr, ptep, pte);

	return changed;
}