// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
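
/*
 * A "TLB flush" on UML means replaying page table changes against the
 * host address space. struct vm_ops abstracts over the two targets:
 * the kernel's own mappings, applied directly on the host, and a
 * userspace process's mappings, applied through the skas layer.
 */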
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
};
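
/* Map a physical range into the kernel's own host address space. */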
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: why does executable always need to be set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}
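
/* Remove a range from the kernel's own host address space. */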
static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}
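
/*
 * Printed when a host-side mapping operation fails with -ENOMEM, e.g.
 * from the -ENOMEM check at the end of um_tlb_sync() below.
 */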
void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct vm_ops *ops)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	int r, w, x, prot, ret = 0;

	do {
		if (!pte_needsync(*pte))
			continue;

		if (pte_present(*pte)) {
			__u64 offset;
			unsigned long phys = pte_val(*pte) & PAGE_MASK;
			int fd = phys_mapping(phys, &offset);

			r = pte_read(*pte);
			w = pte_write(*pte);
			x = pte_exec(*pte);
			if (!pte_young(*pte))
				r = w = 0;
			else if (!pte_dirty(*pte))
				w = 0;

			prot = (r ? UM_PROT_READ : 0) |
			       (w ? UM_PROT_WRITE : 0) |
			       (x ? UM_PROT_EXEC : 0);
			ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
					prot, fd, offset);
		} else
			ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);

		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));

	return ret;
}
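
/*
 * Walk the PMD entries covering [addr, end). A non-present entry that
 * still needs a sync was torn down, so the whole range is unmapped on
 * the host in one call rather than page by page.
 */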
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct vm_ops *ops)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;
	int ret = 0;

	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_needsync(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));

	return ret;
}
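
/* Same walk as update_pmd_range(), one level up. */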
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end, struct vm_ops *ops)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;
	int ret = 0;

	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_needsync(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));

	return ret;
}
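
/* And the same again at the P4D level. */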
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct vm_ops *ops)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;
	int ret = 0;

	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_needsync(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));

	return ret;
}
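
/*
 * Replay all accumulated page table changes for @mm on the host. The
 * range to walk is accumulated in mm->context by um_tlb_mark_sync()
 * and reset once the walk is done.
 */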
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
	} else {
		/* Userspace mms go through the skas map()/unmap() helpers. */
		ops.mmap = map;
		ops.unmap = unmap;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_needsync(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}
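
/*
 * Note that flushing is lazy on UML: the flush_tlb_*() entry points
 * below only mark ranges for syncing; the host address space is
 * actually updated when um_tlb_sync() runs.
 */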
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}
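
/* Mark every VMA in @mm as needing a sync with the host. */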
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}