// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
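/*
 * CRST (region and segment) tables are 16KB, i.e. four pages
 * (CRST_ALLOC_ORDER == 2), holding 2048 eight-byte entries each.
 */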
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);

	if (!ptdesc)
		return NULL;
	arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
	return (unsigned long *) ptdesc_to_virt(ptdesc);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}
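/*
 * Grow the user address space from 3-level to 4- or 5-level page tables:
 * allocate a new top-level table, make the old top-level table its first
 * entry (via p4d_populate()/pgd_populate()), switch mm->pgd and the ASCE
 * over, and finally update the ASCE on all CPUs running this mm.
 */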
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock lock held and there is
	 * no reason to optimize for the case of otherwise. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
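/*
 * Atomically toggle the given bits in *v and return the *new* value of
 * *v (atomic_fetch_xor() returns the old value, so it is xor-ed with
 * @bits once more). For example, if the upper byte of *v is 0x11 and
 * @bits is 0x10 << 24, the returned upper byte is 0x01.
 */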
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	return atomic_fetch_xor(bits, v) ^ bits;
}
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc_page(ptdesc);
}

void page_table_free_pgste(struct page *page)
{
	pagetable_free(page_ptdesc(page));
}

#endif /* CONFIG_PGSTE */
/*
 * A 2KB-pgtable is either the upper or the lower half of a normal page.
 * The second half of the page may be unused or used as another
 * 2KB-pgtable.
 *
 * Whenever possible the parent page for a new 2KB-pgtable is picked
 * from the list of partially allocated pages mm_context_t::pgtable_list.
 * In case the list is empty a new parent page is allocated and added to
 * the list.
 *
 * When a parent page gets fully allocated it contains 2KB-pgtables in both
 * upper and lower halves and is removed from mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a fully allocated parent page that
 * page turns partially allocated and is added to mm_context_t::pgtable_list.
 *
 * If a 2KB-pgtable is freed from a partially allocated parent page that
 * page turns unused and gets removed from mm_context_t::pgtable_list.
 * Furthermore, the unused parent page is released.
 *
 * As follows from the above, no unallocated or fully allocated parent
 * pages are contained in mm_context_t::pgtable_list.
 *
 * The upper byte (bits 24-31) of the parent page _refcount is used
 * for tracking contained 2KB-pgtables and has the following format:
 *
 *   PP  AA
 * 01234567    upper byte (bits 24-31) of struct page::_refcount
 *   ||  ||
 *   ||  |+--- upper 2KB-pgtable is allocated
 *   ||  +---- lower 2KB-pgtable is allocated
 *   |+------- upper 2KB-pgtable is pending for removal
 *   +-------- lower 2KB-pgtable is pending for removal
 *
 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
 * using _refcount is possible).
 *
 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still unallocated;
 *   - removed from mm_context_t::pgtable_list in case both halves of the
 *     parent page are allocated;
 * These operations are protected with mm_context_t::lock.
 *
 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
 * and the corresponding PP bit is set to 1 in a single atomic operation.
 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
 * exclusive and may never both be set to 1!
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still allocated;
 *   - removed from mm_context_t::pgtable_list in case the second half of
 *     the parent page is unallocated;
 * These operations are protected with mm_context_t::lock.
 *
 * It is important to understand that mm_context_t::lock only protects
 * mm_context_t::pgtable_list and the AA bits, but not the parent page
 * itself and not the PP bits.
 *
 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
 * while both AA bits and the second PP bit are already unset. Then the
 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
 * also been removed from mm_context_t::pgtable_list. It is therefore safe
 * to release the page.
 *
 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
 * while the PP bits are never used, nor is such a page added to or removed
 * from mm_context_t::pgtable_list.
 *
 * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
 * and prevents both 2K fragments from being reused. pte_free_defer() has to
 * guarantee that its pgtable cannot be reused before the RCU grace period
 * has elapsed (which page_table_free_rcu() does not actually guarantee).
 * But for simplicity, because page->rcu_head overlays page->lru, and because
 * the RCU callback might not be called before the mm_context_t has been freed,
 * pte_free_defer() in this implementation prevents both fragments from being
 * reused, and delays making the call to RCU until both fragments are freed.
 */
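/*
 * A worked example of the tracking byte, derived from the code below
 * (values are the upper byte of _refcount, shown after ">> 24"):
 *
 *   0x00  fresh page, no fragment handed out, page not on pgtable_list
 *   0x01  first 2KB fragment allocated, page added to pgtable_list
 *   0x03  second fragment allocated too, page removed from pgtable_list
 *   0x12  first fragment freed (XOR 0x11): its AA bit cleared and its
 *         PP bit set in one atomic operation, page back on pgtable_list
 *   0x02  pending bit cleared (XOR 0x10) outside the lock; only the
 *         second fragment remains allocated
 */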
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct ptdesc *ptdesc;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			ptdesc = list_first_entry(&mm->context.pgtable_list,
						  struct ptdesc, pt_list);
			mask = atomic_read(&ptdesc->_refcount) >> 24;
			/*
			 * The pending removal bits must also be checked.
			 * Failure to do so might lead to an impossible
			 * value (e.g. 0x13 or 0x23) written to _refcount.
			 * Such values violate the assumption that pending and
			 * allocation bits are mutually exclusive, and the rest
			 * of the code goes off the rails as a result. That
			 * could lead to a whole bunch of races and corruptions.
			 */
			mask = (mask | (mask >> 4)) & 0x03U;
			if (mask != 0x03U) {
				table = (unsigned long *) ptdesc_to_virt(ptdesc);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&ptdesc->_refcount,
							0x01U << (bit + 24));
				list_del_init(&ptdesc->pt_list);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	arch_set_page_dat(ptdesc_page(ptdesc), 0);
	/* Initialize page table */
	table = (unsigned long *) ptdesc_to_virt(ptdesc);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		INIT_LIST_HEAD(&ptdesc->pt_list);
		atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
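/*
 * Sanity check before a page table page is released: with CONFIG_DEBUG_VM
 * enabled, dump the page if its tracking byte (mask) claims fragments are
 * still in use or if the page is still linked into a pgtable_list.
 */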
static void page_table_release_check(struct page *page, void *table,
				     unsigned int half, unsigned int mask)
{
	char msg[128];

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;
	if (!mask && list_empty(&page->lru))
		return;
	snprintf(msg, sizeof(msg),
		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
		 table, half, mask);
	dump_page(page, msg);
}
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned int mask, bit, half;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		/*
		 * Mark the page for delayed release. The actual release
		 * will happen outside of the critical section from this
		 * function or from __tlb_remove_table().
		 */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
			/*
			 * Other half is allocated, and neither half has had
			 * its free deferred: add page to head of list, to make
			 * this freed half available for immediate reuse.
			 */
			list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		} else {
			/* If page is on list, now remove it. */
			list_del_init(&ptdesc->pt_list);
		}
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		half = 0x01U << bit;
	} else {
		half = 0x03U;
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned int bit, mask;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	mm = tlb->mm;
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 0x03U);
		tlb_remove_ptdesc(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/*
	 * Mark the page for delayed release. The actual release will happen
	 * outside of the critical section from __tlb_remove_table() or from
	 * page_table_free().
	 */
	mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
		/*
		 * Other half is allocated, and neither half has had
		 * its free deferred: add page to end of list, to make
		 * this freed half available for reuse once its pending
		 * bit has been cleared by __tlb_remove_table().
		 */
		list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
	} else {
		/* If page is on list, now remove it. */
		list_del_init(&ptdesc->pt_list);
	}
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
	tlb_remove_table(tlb, table);
}
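/*
 * The two low bits of the table pointer handed to tlb_remove_table()
 * encode what is being freed, and __tlb_remove_table() decodes them:
 *
 *   0x00  a CRST table (pmd, pud, or p4d)
 *   0x01  the lower 2K fragment of a 4K page table
 *   0x02  the higher 2K fragment of a 4K page table
 *   0x03  a full 4K page table with PGSTEs
 *
 * This works because page tables are at least 2KB-aligned, so the two
 * low bits of their address are always zero.
 */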
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	switch (half) {
	case 0x00U:	/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	case 0x01U:	/* lower 2K of a 4K page table */
	case 0x02U:	/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		break;
	case 0x03U:	/* 4K page table with pgstes */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
		break;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = virt_to_page(pgtable);
	SetPageActive(page);
	page_table_free(mm, (unsigned long *)pgtable);
	/*
	 * page_table_free() does not do the pgste gmap_unlink() which
	 * page_table_free_rcu() does: warn us if pgste ever reaches here.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}
static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
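/*
 * For example, BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE) generates
 * base_segment_addr_end(addr, end), which returns the start of the next
 * segment after addr, clamped to end. The "- 1" in the comparison keeps
 * the helpers correct when next or end wraps to 0 at the very top of
 * the address space.
 */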
/* Translate a virtual address to its real address with LOAD REAL ADDRESS */
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}
static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}
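/*
 * The region walk functions below follow the same pattern as
 * base_segment_walk(): with alloc=1 missing lower-level tables are
 * allocated and hooked up while the leaf entries are filled with real
 * addresses, with alloc=0 the same walk tears the tables down again.
 */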
static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}
static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}
static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}
/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	/* re-check under the mutex: another caller may have won the race */
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}
/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features
 * such as large pages. This is required for some I/O functions that pass
 * an asce, such as some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
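/*
 * Usage sketch (hypothetical caller; "buf" and "nr_pages" are made up):
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	// hand asce to the I/O function / service call
 *	base_asce_free(asce);
 */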