// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

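/*
 * A TLB sync walks the page tables and replays any out-of-date entries
 * against the host address space. struct vm_ops selects how that replay
 * is performed: through the kernel's own host mappings for init_mm, or
 * through the userspace process's address space for everything else.
 */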
struct vm_ops {
        struct mm_id *mm_idp;

        int (*mmap)(struct mm_id *mm_idp,
                    unsigned long virt, unsigned long len, int prot,
                    int phys_fd, unsigned long long offset);
        int (*unmap)(struct mm_id *mm_idp,
                     unsigned long virt, unsigned long len);
};

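/*
 * vm_ops implementation for init_mm: the kernel's own mappings live in
 * the UML process itself, so they are established directly via the
 * os_* wrappers around host mmap()/munmap().
 */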
static int kern_map(struct mm_id *mm_idp,
                    unsigned long virt, unsigned long len, int prot,
                    int phys_fd, unsigned long long offset)
{
        /* TODO: why does the kernel always need the executable bit set? */
        return os_map_memory((void *)virt, phys_fd, offset, len,
                             prot & UM_PROT_READ, prot & UM_PROT_WRITE,
                             1);
}

static int kern_unmap(struct mm_id *mm_idp,
                      unsigned long virt, unsigned long len)
{
        return os_unmap_memory((void *)virt, len);
}

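/*
 * Called when a host-side mapping operation fails with -ENOMEM; the
 * failure comes from the host, not from guest memory management.
 */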
void report_enomem(void)
{
        printk(KERN_ERR "UML ran out of memory on the host side! "
                        "This can happen due to a memory limitation or "
                        "because vm.max_map_count has been reached.\n");
}

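/*
 * Leaf level of the walk: for each PTE in [addr, end) that needs a
 * sync, mirror the entry to the host one page at a time. Present pages
 * are (re)mapped with the protection computed below; non-present ones
 * are unmapped. Each visited PTE is then marked up to date.
 */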
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end,
                                   struct vm_ops *ops)
{
        pte_t *pte;
        int ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_needsync(*pte))
                        continue;

                if (pte_present(*pte)) {
                        __u64 offset;
                        unsigned long phys = pte_val(*pte) & PAGE_MASK;
                        int fd = phys_mapping(phys, &offset);
                        int r, w, x, prot;

                        r = pte_read(*pte);
                        w = pte_write(*pte);
                        x = pte_exec(*pte);
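                        /*
                         * Emulate accessed/dirty tracking in software:
                         * map a page that is not young with no access
                         * so the next reference faults and marks it
                         * young; map a clean page read-only so the
                         * next write faults and marks it dirty.
                         */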
                        if (!pte_young(*pte)) {
                                r = 0;
                                w = 0;
                        } else if (!pte_dirty(*pte))
                                w = 0;

                        prot = (r ? UM_PROT_READ : 0) |
                               (w ? UM_PROT_WRITE : 0) |
                               (x ? UM_PROT_EXEC : 0);

                        ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
                                        prot, fd, offset);
                } else
                        ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);

                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
        return ret;
}

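/*
 * Mid levels of the walk. If an entry is not present but still needs a
 * sync, the whole range it covers is unmapped on the host in one call;
 * otherwise the walk descends one level. update_pud_range() and
 * update_p4d_range() below follow the same pattern.
 */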
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end,
                                   struct vm_ops *ops)
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd)) {
                        if (pmd_needsync(*pmd)) {
                                ret = ops->unmap(ops->mm_idp, addr,
                                                 next - addr);
                                pmd_mkuptodate(*pmd);
                        }
                } else
                        ret = update_pte_range(pmd, addr, next, ops);
        } while (pmd++, addr = next, ((addr < end) && !ret));
        return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
                                   unsigned long end,
                                   struct vm_ops *ops)
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_present(*pud)) {
                        if (pud_needsync(*pud)) {
                                ret = ops->unmap(ops->mm_idp, addr,
                                                 next - addr);
                                pud_mkuptodate(*pud);
                        }
                } else
                        ret = update_pmd_range(pud, addr, next, ops);
        } while (pud++, addr = next, ((addr < end) && !ret));
        return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end,
                                   struct vm_ops *ops)
{
        p4d_t *p4d;
        unsigned long next;
        int ret = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (!p4d_present(*p4d)) {
                        if (p4d_needsync(*p4d)) {
                                ret = ops->unmap(ops->mm_idp, addr,
                                                 next - addr);
                                p4d_mkuptodate(*p4d);
                        }
                } else
                        ret = update_pud_range(p4d, addr, next, ops);
        } while (p4d++, addr = next, ((addr < end) && !ret));
        return ret;
}

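/*
 * Replay all pending page-table changes in the range recorded by
 * um_tlb_mark_sync() against the host, then clear the range. Returns 0
 * on success or a negative error code from the host operation.
 */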
int um_tlb_sync(struct mm_struct *mm)
{
        pgd_t *pgd;
        struct vm_ops ops;
        unsigned long addr = mm->context.sync_tlb_range_from, next;
        int ret = 0;

        if (mm->context.sync_tlb_range_to == 0)
                return 0;

        ops.mm_idp = &mm->context.id;
        if (mm == &init_mm) {
                ops.mmap = kern_map;
                ops.unmap = kern_unmap;
        } else {
                ops.mmap = map;
                ops.unmap = unmap;
        }

        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
                if (!pgd_present(*pgd)) {
                        if (pgd_needsync(*pgd)) {
                                ret = ops.unmap(ops.mm_idp, addr,
                                                next - addr);
                                pgd_mkuptodate(*pgd);
                        }
                } else
                        ret = update_p4d_range(pgd, addr, next, &ops);
        } while (pgd++, addr = next,
                 ((addr < mm->context.sync_tlb_range_to) && !ret));

        if (ret == -ENOMEM)
                report_enomem();

        mm->context.sync_tlb_range_from = 0;
        mm->context.sync_tlb_range_to = 0;

        return ret;
}

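/*
 * Flush the entirety of the current process's address space.
 */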
void flush_tlb_all(void)
{
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&current->mm->mm_users) == 0)
                return;

        flush_tlb_mm(current->mm);
}

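/*
 * Flushing is deferred: every VMA in the mm is only marked as needing a
 * sync here; the actual host updates happen on the next um_tlb_sync().
 */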
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        for_each_vma(vmi, vma)
                um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}