// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
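/*
 * Note: sparc64 has an unusable hole in the middle of the virtual
 * address space (VA_EXCLUDE_START to VA_EXCLUDE_END).  The bottom-up
 * search below first tries below the hole and, if the task's address
 * space extends beyond it, retries above it.
 */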
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
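/*
 * Worked example for the align_mask above (assuming the usual sparc64
 * 8K base page): for a 4MB huge page, huge_page_mask(h) == ~(4MB - 1),
 * so align_mask == PAGE_MASK & (4MB - 1), i.e. bits 13..21.
 * vm_unmapped_area() returns an address with all of those bits clear,
 * which is exactly 4MB alignment without constraining the bits below
 * PAGE_SHIFT.
 */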
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
							  pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
							 pgoff, flags);
}
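/*
 * The bottom-up vs. top-down choice above mirrors the policy the mm
 * already selected for regular mappings (mm->get_unmapped_area), so
 * huge mappings grow in the same direction as everything else in the
 * address space.
 */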
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}
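/*
 * Summary of the encoding above: 16GB huge pages live at the PUD
 * level (_PAGE_PUD_HUGE); 2GB, 256MB and the default HPAGE_SIZE live
 * at the PMD level (_PAGE_PMD_HUGE); 64K huge pages are ordinary
 * PTE-level entries with only the TTE size field changed.
 */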
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);

	return sun4u_hugepage_shift_to_tte(entry, shift);
}
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}
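/*
 * ADI (Application Data Integrity) is the SPARC M7+ memory tagging
 * feature; the mcd bit in the TTE tells the MMU to check version tags
 * on accesses through this mapping.
 */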
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}
static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
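/*
 * The *_leaf_size() helpers report the true mapping size of a leaf
 * entry at each page-table level, so generic walkers can step by the
 * hardware TTE size rather than by the nominal size of that level.
 */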
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}
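/*
 * Example of the level selection above (assuming 8K base pages, so
 * PMD_SIZE is 8MB and PUD_SIZE is 8GB): a 16GB huge page stops at the
 * PUD, 2GB and 256MB huge pages stop at the PMD, and a 64K huge page
 * descends all the way to a normal PTE slot.
 */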
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}
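/*
 * Worked example (assuming 8K base pages): a 256MB huge page has
 * size >= PMD_SIZE (8MB), so shift is PMD_SHIFT (23) and nptes is
 * 256MB >> 23 = 32; the loop above then writes 32 consecutive PMD
 * entries, each stepped by 8MB via (i << shift).  A 64K huge page
 * instead writes 64K >> 13 = 8 consecutive PTEs.
 */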
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}
int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}
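/*
 * The tests above deliberately accept non-present entries: anything
 * that is not none and is not a plain valid pointer to a next-level
 * table (_PAGE_VALID set without the huge bit) is treated as a huge
 * entry, which also covers swap/migration encodings of huge pages.
 */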
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
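/*
 * The floor/ceiling checks above (and in the pud/pgd variants below)
 * follow the same pattern as the generic free_pmd_range() in
 * mm/memory.c: a page-table page is only freed once the entire region
 * it covers lies inside [floor, ceiling), so tables shared with
 * neighbouring mappings are left alone.
 */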
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}