// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <[email protected]>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"
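
/*
 * struct hmm_vma_walk - private state carried through the page table walk
 * @range: the hmm_range whose hmm_pfns[] array is being filled in
 * @last:  first address not yet stored in the pfn array; hmm_range_fault()
 *         restarts the walk here after an -EBUSY round trip
 */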
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}
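
/*
 * Note that hmm_vma_fault() returns -EBUSY even when every handle_mm_fault()
 * call succeeds: the caller, hmm_range_fault(), then re-validates the
 * mmu_interval notifier sequence and restarts the page table walk from
 * hmm_vma_walk->last, so the newly faulted entries are read on the next pass
 * rather than the current walk continuing over stale state.
 */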

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: the HMM user either coalesces multiple page faults into
	 * one request and sets the flags per pfn for those faults, or
	 * pre-faults an entire range with specific flags. For the latter it
	 * would be a waste to have the user pre-fill the pfn array with a
	 * default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
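
/*
 * Illustration (a sketch of the two usage modes described above, not code
 * called from this file): a driver that wants to pre-fault a whole range
 * with at least read permission can set
 *
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = 0;
 *
 * and need not pre-fill range->hmm_pfns[]. A driver that additionally wants
 * write access for selected pages can instead set
 *
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
 *	range->hmm_pfns[i] = HMM_PFN_REQ_WRITE;   (for each such page i)
 *
 * so that only the marked entries request a write fault.
 */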

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}
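
/*
 * Example (arithmetic only, assuming a typical configuration with 4KiB base
 * pages and 2MiB PMD-sized huge pages): PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9,
 * so a huge pmd mapping is reported with order 9 in the pfn's order field,
 * telling the caller that the returned pfn covers 2^9 = 512 consecutive pages.
 */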

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report an error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap ptes such as DAX pages when all the requested pfn
	 * flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge page or a device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the pmd is either a valid
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did the pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or that map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If the range is no longer valid, force a retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
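
/*
 * Typical caller-side usage (a sketch only; the interval notifier "mni" and
 * the driver page table lock helpers are illustrative names, not symbols
 * defined here):
 *
 *	range.notifier = &mni;
 *	range.start = start;
 *	range.end = end;
 *	range.hmm_pfns = pfns;
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&mni);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	take_driver_page_table_lock();
 *	if (mmu_interval_read_retry(&mni, range.notifier_seq)) {
 *		release_driver_page_table_lock();
 *		goto again;
 *	}
 *	(program the device page tables from pfns[] while holding the lock)
 *	release_driver_page_table_lock();
 *
 * See Documentation/mm/hmm.rst for the full pattern.
 */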