/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
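/*
 * Illustrative sketch, not part of the original file: a minimal gmap
 * lifecycle as a KVM-like caller might drive it. The example_* name and
 * the 1 TB limit are assumptions for illustration; kept out of the build
 * with #if 0.
 */
#if 0
static int example_gmap_lifecycle(struct mm_struct *mm)
{
	struct gmap *g;

	g = gmap_create(mm, (1UL << 40) - 1);	/* 1 TB guest space */
	if (!g)
		return -ENOMEM;
	/* ... map segments, resolve faults, run the guest ... */
	gmap_remove(g);		/* unlinks and drops the initial reference */
	return 0;
}
#endif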
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap, or NULL if none
 * is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
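/*
 * Worked example, added for illustration: a segment table is 16 KB (2048
 * entries of 8 bytes), so @entry is first reduced to its index within the
 * table. An entry at index 3 thus maps the segment at
 * page->index + 3 * PMD_SIZE, where page->index was set to the table's
 * base guest address in gmap_alloc_table().
 */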
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
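/*
 * Hedged usage sketch, not part of the original file: mirror 16 MB of the
 * host mapping at guest address 0. All of from/to/len must be PMD_SIZE
 * (1 MB) aligned or -EINVAL is returned. The example_* name is invented.
 */
#if 0
static int example_map_guest_lowcore(struct gmap *g, unsigned long host_base)
{
	return gmap_map_segment(g, host_base, 0UL, 16UL << 20);
}
#endif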
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
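/*
 * Hedged usage sketch, not part of the original file: look up the host
 * address backing a guest address without faulting anything in. The
 * example_* name is invented.
 */
#if 0
static long example_gaddr_to_host(struct gmap *g, unsigned long gaddr)
{
	unsigned long vmaddr = gmap_translate(g, gaddr);

	if (IS_ERR_VALUE(vmaddr))
		return -EFAULT;	/* no segment mapping for gaddr */
	return (long) vmaddr;
}
#endif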
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate so we do not race with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
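/*
 * Hedged sketch, not part of the original file: how a guest-fault handler
 * might resolve a fault, choosing the flags by access type. The example_*
 * name is invented.
 */
#if 0
static int example_resolve_guest_fault(struct gmap *g, unsigned long gaddr,
				       bool write)
{
	return gmap_fault(g, gaddr, write ? FAULT_FLAG_WRITE : 0);
}
#endif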
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
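/*
 * Added summary of the shifts used above: which guest address bits index
 * each table level.
 *	region-1 index:	(gaddr >> 53) & 0x7ff	- bits 63..53
 *	region-2 index:	(gaddr >> 42) & 0x7ff	- bits 52..42
 *	region-3 index:	(gaddr >> 31) & 0x7ff	- bits 41..31
 *	segment index:	(gaddr >> 20) & 0x7ff	- bits 30..20
 *	page index:	(gaddr >> 12) & 0xff	- bits 19..12
 */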
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
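/*
 * Hedged sketch, not part of the original file: register an invalidation
 * callback and arm write-protect notification on one guest page. The
 * example_* names are invented; the callback signature matches
 * gmap_call_notifier() above.
 */
#if 0
static void example_notifier_call(struct gmap *gmap, unsigned long start,
				  unsigned long end)
{
	/* invalidate any state cached for guest range [start, end] */
}

static struct gmap_notifier example_nb = {
	.notifier_call = example_notifier_call,
};

static int example_arm_notification(struct gmap *g, unsigned long gaddr)
{
	gmap_register_pte_notifier(&example_nb);
	return gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}
#endif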
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
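/*
 * Hedged sketch, not part of the original file: peek one quadword of guest
 * storage, e.g. while walking a guest DAT table. The example_* name is
 * invented.
 */
#if 0
static int example_peek_guest_quadword(struct gmap *g, unsigned long gaddr,
				       unsigned long *val)
{
	return gmap_read_table(g, gaddr, val);	/* may return -EFAULT */
}
#endif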
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		raddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
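/*
 * Usage example taken from the unshadow paths below: with sto pointing at
 * the start of a segment table,
 *	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
 * invalidates the segment table entry selected by raddr and flushes the
 * TLB entries created through it (the .insn above encodes IDTE,
 * opcode 0xb98e).
 */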
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
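/*
 * Hedged sketch, not part of the original file: how a VSIE-like caller
 * might obtain (or reuse) a shadow gmap for a guest-2 ASCE. The example_*
 * name is invented.
 */
#if 0
static struct gmap *example_get_shadow(struct gmap *parent,
				       unsigned long g2_asce, int edat)
{
	struct gmap *sg = gmap_shadow(parent, g2_asce, edat);

	if (IS_ERR(sg))
		return sg;	/* -ENOMEM, -EAGAIN or -EFAULT */
	/* ... use the shadow tables, drop with gmap_put() when done ... */
	return sg;
}
#endif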
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the host page table
 * @offset: byte offset of the notified page within its segment
 * @pte: pointer to the invalidated page table entry
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;
		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
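/*
 * Added note on the offset arithmetic above: a 2 KB page table holds 256
 * ptes of 8 bytes each, so masking with 255 * sizeof(pte_t) yields the
 * pte's byte offset within its table; scaling by 4096 / sizeof(pte_t) = 512
 * turns that into the byte offset of the 4 KB page inside its 1 MB segment.
 */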
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * switch on pgstes for the userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
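/*
 * Hedged sketch, not part of the original file: a VM-creation path would
 * enable SIE for the current process before creating any gmap. The
 * example_* name is invented.
 */
#if 0
static int example_vm_init(struct mm_struct *mm)
{
	int rc = s390_enable_sie();

	if (rc)
		return rc;	/* 2K page tables, no PGSTEs available */
	return example_gmap_lifecycle(mm);
}
#endif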
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; after we establish a policy that
	 * forbids zero page mappings, subsequent faults on those pages
	 * will get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);