// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct folio *folio;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		folio = pmd_folio(*pmdp);
		if (is_huge_zero_folio(folio)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
		} else {
			int ret;

			folio_get(folio);
			spin_unlock(ptl);
			if (unlikely(!folio_trylock(folio)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
		}
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct folio *folio;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = ptep_get(ptep);

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device private page
			 * special page table entries. Other special swap
			 * entries are not migratable, and regular swapped
			 * pages are ignored.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the folio we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the folio from the lru
		 * for non-device folios (device folios are not on the lru and
		 * thus can't be dropped from it).
		 */
		folio = page_folio(page);
		folio_get(folio);

		/*
		 * We rely on folio_trylock() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * folio lock. If we can't immediately lock the folio we fail
		 * this migration as it is only best effort anyway.
		 *
		 * If we can lock the folio it's safe to set up a migration entry
		 * now. In the common case where the folio is mapped once in a
		 * single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (folio_trylock(folio)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(pte));
			anon_exclusive = folio_test_anon(folio) &&
					 PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (folio_try_share_anon_rmap_pte(folio, page)) {
					set_pte_at(mm, addr, ptep, pte);
					folio_unlock(folio);
					folio_put(folio);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(folio);

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like a regular unmap: we remove the rmap and
			 * drop the folio refcount. The folio won't be freed, as
			 * we took a reference just above.
			 */
			folio_remove_rmap_pte(folio, page, vma);
			folio_put(folio);

			if (pte_present(pte))
				unmapped++;
		} else {
			folio_put(folio);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry	= migrate_vma_collect_pmd,
	.pte_hole	= migrate_vma_collect_hole,
	.walk_lock	= PGWALK_RDLOCK,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}
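
/*
 * Illustrative example (assumptions of this sketch, not part of the original
 * file): after collection each src[] slot encodes one candidate. Assuming the
 * walk found a present, writable anonymous page at pfn P, then an empty
 * anonymous pte, then a page whose folio_trylock() failed, the three entries
 * would be roughly:
 *
 *	src[0] = migrate_pfn(P) | MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;
 *	src[1] = MIGRATE_PFN_MIGRATE;	(hole: no backing page, driver may populate it)
 *	src[2] = 0;			(skipped: will not be migrated)
 */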

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	struct folio *folio = page_folio(page);

	/*
	 * One extra ref because caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1 + (page == fault_page);

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (folio_test_large(folio))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (folio_is_zone_device(folio))
		extra++;

	/* For file-backed pages */
	if (folio_mapping(folio))
		extra += 1 + folio_has_private(folio);

	if ((folio_ref_count(folio) - extra) > folio_mapcount(folio))
		return false;

	return true;
}
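
/*
 * Worked example (illustrative, assuming a plain anonymous page): once the
 * page has been unmapped and isolated, folio_mapcount() is 0 and
 * folio_ref_count() is 1 (the caller's isolation reference), so
 * refcount - extra == 0 which is not greater than the mapcount and the page
 * may migrate. Any additional reference, e.g. a GUP pin or O_DIRECT I/O in
 * flight, pushes the refcount higher, refcount - extra exceeds the mapcount,
 * and the page is reported as pinned. fault_page, ZONE_DEVICE pages and
 * file-backed pages simply raise the number of references that are expected
 * and therefore tolerated.
 */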

/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
					  unsigned long npages,
					  struct page *fault_page)
{
	unsigned long i, restore = 0;
	bool allow_drain = true;
	unsigned long unmapped = 0;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page) {
			if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
				unmapped++;
			continue;
		}

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's lru cache */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (!isolate_lru_page(page)) {
				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) ||
		    !migrate_vma_check_page(page, fault_page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			restore++;
			continue;
		}

		unmapped++;
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		src_pfns[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}

	return unmapped;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages
 * are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
					migrate->fault_page);
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src
 * array. Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are then restored by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory
 * to it for all those entries (ie with MIGRATE_PFN_VALID and
 * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
 * caller must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with MIGRATE_PFN_VALID. Destination pages
 * must be locked via lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding
 * source array entry, thus allowing the caller to allocate device memory for
 * those unbacked virtual addresses. For this the caller simply has to
 * allocate device memory and properly set the destination entry like for
 * regular migration. Note that this can still fail, and thus inside the
 * device driver you must check if the migration was successful for those
 * entries after calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set. If the corresponding entry in the dst array has the
 * MIGRATE_PFN_VALID flag set, migrate_vma_pages() migrates the struct page
 * information from the source struct page to the destination struct page. If
 * it fails to migrate the struct page information, it clears the
 * MIGRATE_PFN_MIGRATE flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages()
 * because both destination and source page are still locked, and the
 * mmap_lock is held in read mode (hence no one can unmap the range being
 * migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;
	if (args->fault_page && !is_device_private_page(args->fault_page))
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
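
/*
 * Example (illustrative sketch, not used by the kernel itself): the typical
 * setup/pages/finalize sequence for migrating a single page mapped in @vma at
 * @addr, with the mmap_lock held in read mode by the caller. A real device
 * driver would allocate device memory and copy with its DMA engine; here
 * alloc_page()/copy_highpage()/clear_highpage() stand in for those steps and
 * the function name is purely for illustration.
 */
static int __maybe_unused migrate_one_page_example(struct vm_area_struct *vma,
						   unsigned long addr,
						   void *pgmap_owner)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr & PAGE_MASK,
		.end		= (addr & PAGE_MASK) + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct page *spage, *dpage;
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	/* Destination pages must be locked before migrate_vma_pages(). */
	dpage = alloc_page(GFP_HIGHUSER | __GFP_NOWARN);
	if (!dpage) {
		migrate_vma_finalize(&args);
		return -ENOMEM;
	}
	lock_page(dpage);

	/* A NULL source page means an empty anonymous pte was collected. */
	spage = migrate_pfn_to_page(src_pfn);
	if (spage)
		copy_highpage(dpage, spage);
	else
		clear_highpage(dpage);
	dst_pfn = migrate_pfn(page_to_pfn(dpage));

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);

	return (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EBUSY;
}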

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	pte_t orig_pte;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;
	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;
	if (pte_alloc(mm, pmdp))
		goto abort;
	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the folio contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	if (folio_is_device_private(folio)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (folio_is_zone_device(folio) &&
		    !folio_is_device_coherent(folio)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry), vma);
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto abort;
	orig_pte = ptep_get(ptep);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(orig_pte)) {
		unsigned long pfn = pte_pfn(orig_pte);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(orig_pte))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
	if (!folio_is_zone_device(folio))
		folio_add_lru_vma(folio, vma);
	folio_get(folio);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(orig_pte));
		ptep_clear_flush(vma, addr, ptep);
	}
	set_pte_at(mm, addr, ptep, entry);
	update_mmu_cache(vma, addr, ptep);

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

static void __migrate_device_pages(unsigned long *src_pfns,
				unsigned long *dst_pfns, unsigned long npages,
				struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;
	unsigned long i;
	bool notified = false;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct address_space *mapping;
		struct folio *newfolio, *folio;
		int r, extra_cnt = 0;

		if (!newpage) {
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			unsigned long addr;

			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
				continue;

			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate);
			addr = migrate->start + i*PAGE_SIZE;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&src_pfns[i]);
			continue;
		}

		newfolio = page_folio(newpage);
		folio = page_folio(page);
		mapping = folio_mapping(folio);

		if (folio_is_device_private(newfolio) ||
		    folio_is_device_coherent(newfolio)) {
			if (mapping) {
				/*
				 * For now only support anonymous memory migrating to
				 * device private or coherent memory.
				 *
				 * Try to get rid of swap cache if possible.
				 */
				if (!folio_test_anon(folio) ||
				    !folio_free_swap(folio)) {
					src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			}
		} else if (folio_is_zone_device(newfolio)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		BUG_ON(folio_test_writeback(folio));

		if (migrate && migrate->fault_page == page)
			extra_cnt = 1;
		r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
		if (r != MIGRATEPAGE_SUCCESS)
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
		else
			folio_migrate_flags(newfolio, folio);
	}

	if (notified)
		mmu_notifier_invalidate_range_end(&range);
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages)
{
	__migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
}
EXPORT_SYMBOL(migrate_device_pages);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to
 * the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	__migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
}
EXPORT_SYMBOL(migrate_vma_pages);

/*
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_device_finalize);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
}
EXPORT_SYMBOL(migrate_vma_finalize);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * migrate_device_range() is similar in concept to migrate_vma_setup() except
 * that instead of looking up pages based on virtual address mappings a range
 * of device pfns that should be migrated to system memory is used instead.
 *
 * This is useful when a driver needs to free device memory but doesn't know
 * the virtual mappings of every page that may be in device memory. For
 * example this is often the case when a driver is being unloaded or unbound
 * from a device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages().
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages)
{
	unsigned long i, pfn;

	for (pfn = start, i = 0; i < npages; pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		if (!get_page_unless_zero(page)) {
			src_pfns[i] = 0;
			continue;
		}

		if (!trylock_page(page)) {
			src_pfns[i] = 0;
			put_page(page);
			continue;
		}

		src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
	}

	migrate_device_unmap(src_pfns, npages, NULL);

	return 0;
}
EXPORT_SYMBOL(migrate_device_range);
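
/*
 * Example (illustrative sketch, not part of the original file): evacuating a
 * range of device private pfns back to system memory, e.g. on driver unbind.
 * Device private memory is not CPU-addressable, so a real driver copies the
 * data with its own DMA engine; copy_highpage() below only stands in for that
 * step, and the function name is purely for illustration.
 */
static int __maybe_unused evacuate_device_pfns_example(unsigned long start,
							unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;
	int ret = 0;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns) {
		ret = -ENOMEM;
		goto out;
	}

	/* Reference, lock and unmap every busy device private page. */
	migrate_device_range(src_pfns, start, npages);

	/* Allocate system memory destinations and copy the data back. */
	for (i = 0; i < npages; i++) {
		struct page *spage = migrate_pfn_to_page(src_pfns[i]);
		struct page *dpage;

		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page(GFP_HIGHUSER | __GFP_NOWARN);
		if (!dpage)
			continue;	/* left in place; a real driver would retry */
		lock_page(dpage);
		/* Placeholder for the driver's device-to-CPU copy. */
		copy_highpage(dpage, spage);
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
out:
	kvfree(dst_pfns);
	kvfree(src_pfns);
	return ret;
}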

/*
 * Migrate a device coherent page back to normal memory. The caller should
 * have a reference on page which will be copied to the new page if migration
 * is successful or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_device_unmap() directly to unmap
	 * the page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_device_unmap(&src_pfn, 1, NULL);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(&src_pfn, &dst_pfn, 1);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_device_finalize(&src_pfn, &dst_pfn, 1);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}