linux.git: mm/migrate.c (blob at commit "mm: migrate: skip shared exec THP for NUMA balancing")
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <[email protected]>
11  * Hirokazu Takahashi <[email protected]>
12  * Dave Hansen <[email protected]>
13  * Christoph Lameter
14  */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pagewalk.h>
42 #include <linux/pfn_t.h>
43 #include <linux/memremap.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/balloon_compaction.h>
46 #include <linux/mmu_notifier.h>
47 #include <linux/page_idle.h>
48 #include <linux/page_owner.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ptrace.h>
51 #include <linux/oom.h>
52
53 #include <asm/tlbflush.h>
54
55 #define CREATE_TRACE_POINTS
56 #include <trace/events/migrate.h>
57
58 #include "internal.h"
59
60 /*
61  * migrate_prep() needs to be called before we start compiling a list of pages
62  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
63  * undesirable, use migrate_prep_local()
64  */
65 int migrate_prep(void)
66 {
67         /*
68          * Clear the LRU lists so pages can be isolated.
69          * Note that pages may be moved off the LRU after we have
70          * drained them. Those pages will fail to migrate like other
71          * pages that may be busy.
72          */
73         lru_add_drain_all();
74
75         return 0;
76 }
77
78 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
79 int migrate_prep_local(void)
80 {
81         lru_add_drain();
82
83         return 0;
84 }
85
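/*
 * Illustrative sketch (editor's addition, not part of this file): the usual
 * shape of a migrate_prep()/migrate_pages() caller, loosely modelled on the
 * memory hot-remove path.  Details such as the extra page reference, the
 * NR_ISOLATED_* accounting and the migration_target_control setup are
 * omitted; "mtc" is assumed to be filled in by the caller.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	for each candidate page:
 *		if (!isolate_lru_page(page))
 *			list_add_tail(&page->lru, &pagelist);
 *
 *	if (!list_empty(&pagelist)) {
 *		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *				    (unsigned long)&mtc, MIGRATE_SYNC,
 *				    MR_MEMORY_HOTPLUG);
 *		if (ret)
 *			putback_movable_pages(&pagelist);
 *	}
 */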
86 int isolate_movable_page(struct page *page, isolate_mode_t mode)
87 {
88         struct address_space *mapping;
89
90         /*
91          * Avoid burning cycles with pages that are still under __free_pages(),
92          * or just got freed under us.
93          *
94          * In case we 'win' a race for a movable page being freed under us and
95          * raise its refcount, preventing __free_pages() from doing its job,
96          * the put_page() at the end of this block will take care of
97          * releasing this page, thus avoiding a nasty leakage.
98          */
99         if (unlikely(!get_page_unless_zero(page)))
100                 goto out;
101
102         /*
103          * Check PageMovable before taking the PG_lock because the page's owner
104          * assumes that nobody touches the PG_lock of a newly allocated page,
105          * so unconditionally grabbing the lock would break that assumption.
106          */
107         if (unlikely(!__PageMovable(page)))
108                 goto out_putpage;
109         /*
110          * As movable pages are not isolated from LRU lists, concurrent
111          * compaction threads can race against page migration functions
112          * as well as against a page being released.
113          *
114          * In order to avoid having an already isolated movable page
115          * being (wrongly) re-isolated while it is under migration,
116          * or to avoid attempting to isolate pages being released,
117          * let's be sure we have the page lock
118          * before proceeding with the movable page isolation steps.
119          */
120         if (unlikely(!trylock_page(page)))
121                 goto out_putpage;
122
123         if (!PageMovable(page) || PageIsolated(page))
124                 goto out_no_isolated;
125
126         mapping = page_mapping(page);
127         VM_BUG_ON_PAGE(!mapping, page);
128
129         if (!mapping->a_ops->isolate_page(page, mode))
130                 goto out_no_isolated;
131
132         /* Driver shouldn't use PG_isolated bit of page->flags */
133         WARN_ON_ONCE(PageIsolated(page));
134         __SetPageIsolated(page);
135         unlock_page(page);
136
137         return 0;
138
139 out_no_isolated:
140         unlock_page(page);
141 out_putpage:
142         put_page(page);
143 out:
144         return -EBUSY;
145 }
146
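/*
 * Illustrative sketch (editor's addition): a driver that wants its pages to
 * take the non-LRU movable path above provides isolate_page, migratepage and
 * putback_page address_space operations and marks each page with
 * __SetPageMovable() while holding the page lock, roughly:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, mapping);	(mapping->a_ops == &foo_aops)
 *	unlock_page(page);
 *
 * The "foo_*" names are placeholders; zsmalloc and z3fold are in-tree users
 * of this interface.  See Documentation/vm/page_migration.rst.
 */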
147 /* It should be called on a page which is PG_movable */
148 void putback_movable_page(struct page *page)
149 {
150         struct address_space *mapping;
151
152         VM_BUG_ON_PAGE(!PageLocked(page), page);
153         VM_BUG_ON_PAGE(!PageMovable(page), page);
154         VM_BUG_ON_PAGE(!PageIsolated(page), page);
155
156         mapping = page_mapping(page);
157         mapping->a_ops->putback_page(page);
158         __ClearPageIsolated(page);
159 }
160
161 /*
162  * Put previously isolated pages back onto the appropriate lists
163  * from where they were once taken off for compaction/migration.
164  *
165  * This function shall be used whenever the isolated pageset has been
166  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
167  * and isolate_huge_page().
168  */
169 void putback_movable_pages(struct list_head *l)
170 {
171         struct page *page;
172         struct page *page2;
173
174         list_for_each_entry_safe(page, page2, l, lru) {
175                 if (unlikely(PageHuge(page))) {
176                         putback_active_hugepage(page);
177                         continue;
178                 }
179                 list_del(&page->lru);
180                 /*
181                  * We isolated a non-LRU movable page so here we can use
182                  * __PageMovable because an LRU page's mapping cannot have
183                  * PAGE_MAPPING_MOVABLE.
184                  */
185                 if (unlikely(__PageMovable(page))) {
186                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
187                         lock_page(page);
188                         if (PageMovable(page))
189                                 putback_movable_page(page);
190                         else
191                                 __ClearPageIsolated(page);
192                         unlock_page(page);
193                         put_page(page);
194                 } else {
195                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
196                                         page_is_file_lru(page), -thp_nr_pages(page));
197                         putback_lru_page(page);
198                 }
199         }
200 }
201
202 /*
203  * Restore a potential migration pte to a working pte entry
204  */
205 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
206                                  unsigned long addr, void *old)
207 {
208         struct page_vma_mapped_walk pvmw = {
209                 .page = old,
210                 .vma = vma,
211                 .address = addr,
212                 .flags = PVMW_SYNC | PVMW_MIGRATION,
213         };
214         struct page *new;
215         pte_t pte;
216         swp_entry_t entry;
217
218         VM_BUG_ON_PAGE(PageTail(page), page);
219         while (page_vma_mapped_walk(&pvmw)) {
220                 if (PageKsm(page))
221                         new = page;
222                 else
223                         new = page - pvmw.page->index +
224                                 linear_page_index(vma, pvmw.address);
225
226 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
227                 /* PMD-mapped THP migration entry */
228                 if (!pvmw.pte) {
229                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
230                         remove_migration_pmd(&pvmw, new);
231                         continue;
232                 }
233 #endif
234
235                 get_page(new);
236                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
237                 if (pte_swp_soft_dirty(*pvmw.pte))
238                         pte = pte_mksoft_dirty(pte);
239
240                 /*
241                  * Recheck VMA as permissions can change since migration started
242                  */
243                 entry = pte_to_swp_entry(*pvmw.pte);
244                 if (is_write_migration_entry(entry))
245                         pte = maybe_mkwrite(pte, vma);
246                 else if (pte_swp_uffd_wp(*pvmw.pte))
247                         pte = pte_mkuffd_wp(pte);
248
249                 if (unlikely(is_device_private_page(new))) {
250                         entry = make_device_private_entry(new, pte_write(pte));
251                         pte = swp_entry_to_pte(entry);
252                         if (pte_swp_soft_dirty(*pvmw.pte))
253                                 pte = pte_swp_mksoft_dirty(pte);
254                         if (pte_swp_uffd_wp(*pvmw.pte))
255                                 pte = pte_swp_mkuffd_wp(pte);
256                 }
257
258 #ifdef CONFIG_HUGETLB_PAGE
259                 if (PageHuge(new)) {
260                         pte = pte_mkhuge(pte);
261                         pte = arch_make_huge_pte(pte, vma, new, 0);
262                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
263                         if (PageAnon(new))
264                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
265                         else
266                                 page_dup_rmap(new, true);
267                 } else
268 #endif
269                 {
270                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
271
272                         if (PageAnon(new))
273                                 page_add_anon_rmap(new, vma, pvmw.address, false);
274                         else
275                                 page_add_file_rmap(new, false);
276                 }
277                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
278                         mlock_vma_page(new);
279
280                 if (PageTransHuge(page) && PageMlocked(page))
281                         clear_page_mlock(page);
282
283                 /* No need to invalidate - it was non-present before */
284                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
285         }
286
287         return true;
288 }
289
290 /*
291  * Get rid of all migration entries and replace them by
292  * references to the indicated page.
293  */
294 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
295 {
296         struct rmap_walk_control rwc = {
297                 .rmap_one = remove_migration_pte,
298                 .arg = old,
299         };
300
301         if (locked)
302                 rmap_walk_locked(new, &rwc);
303         else
304                 rmap_walk(new, &rwc);
305 }
306
307 /*
308  * Something used the pte of a page under migration. We need to
309  * get to the page and wait until migration is finished.
310  * When we return from this function the fault will be retried.
311  */
312 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
313                                 spinlock_t *ptl)
314 {
315         pte_t pte;
316         swp_entry_t entry;
317         struct page *page;
318
319         spin_lock(ptl);
320         pte = *ptep;
321         if (!is_swap_pte(pte))
322                 goto out;
323
324         entry = pte_to_swp_entry(pte);
325         if (!is_migration_entry(entry))
326                 goto out;
327
328         page = migration_entry_to_page(entry);
329
330         /*
331          * Once page cache replacement for page migration has started, page_count
332          * is zero; but we must not call put_and_wait_on_page_locked() without
333          * a ref. Use get_page_unless_zero(), and just fault again if it fails.
334          */
335         if (!get_page_unless_zero(page))
336                 goto out;
337         pte_unmap_unlock(ptep, ptl);
338         put_and_wait_on_page_locked(page);
339         return;
340 out:
341         pte_unmap_unlock(ptep, ptl);
342 }
343
344 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
345                                 unsigned long address)
346 {
347         spinlock_t *ptl = pte_lockptr(mm, pmd);
348         pte_t *ptep = pte_offset_map(pmd, address);
349         __migration_entry_wait(mm, ptep, ptl);
350 }
351
352 void migration_entry_wait_huge(struct vm_area_struct *vma,
353                 struct mm_struct *mm, pte_t *pte)
354 {
355         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
356         __migration_entry_wait(mm, pte, ptl);
357 }
358
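/*
 * Illustrative sketch (editor's addition): the page fault path is the main
 * user of the wait helpers above.  When a fault finds a migration entry
 * instead of a present pte it waits for migration to finish and returns, so
 * the fault is retried; roughly as in do_swap_page():
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(vma->vm_mm, vmf->pmd,
 *					     vmf->address);
 *		...
 *	}
 */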
359 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
360 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
361 {
362         spinlock_t *ptl;
363         struct page *page;
364
365         ptl = pmd_lock(mm, pmd);
366         if (!is_pmd_migration_entry(*pmd))
367                 goto unlock;
368         page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
369         if (!get_page_unless_zero(page))
370                 goto unlock;
371         spin_unlock(ptl);
372         put_and_wait_on_page_locked(page);
373         return;
374 unlock:
375         spin_unlock(ptl);
376 }
377 #endif
378
379 static int expected_page_refs(struct address_space *mapping, struct page *page)
380 {
381         int expected_count = 1;
382
383         /*
384          * Device private pages have an extra refcount as they are
385          * ZONE_DEVICE pages.
386          */
387         expected_count += is_device_private_page(page);
388         if (mapping)
389                 expected_count += thp_nr_pages(page) + page_has_private(page);
390
391         return expected_count;
392 }
393
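/*
 * Worked example (editor's addition): for an order-0 page cache page the
 * expected count is 1 (the isolating caller's reference) + 1 (the page
 * cache reference), i.e. 2, or 3 if PagePrivate data is attached; a plain
 * anonymous page that is not in the swap cache has no mapping and only the
 * single caller reference.  This matches the comment on
 * migrate_page_move_mapping() below.
 */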
394 /*
395  * Replace the page in the mapping.
396  *
397  * The number of remaining references must be:
398  * 1 for anonymous pages without a mapping
399  * 2 for pages with a mapping
400  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
401  */
402 int migrate_page_move_mapping(struct address_space *mapping,
403                 struct page *newpage, struct page *page, int extra_count)
404 {
405         XA_STATE(xas, &mapping->i_pages, page_index(page));
406         struct zone *oldzone, *newzone;
407         int dirty;
408         int expected_count = expected_page_refs(mapping, page) + extra_count;
409
410         if (!mapping) {
411                 /* Anonymous page without mapping */
412                 if (page_count(page) != expected_count)
413                         return -EAGAIN;
414
415                 /* No turning back from here */
416                 newpage->index = page->index;
417                 newpage->mapping = page->mapping;
418                 if (PageSwapBacked(page))
419                         __SetPageSwapBacked(newpage);
420
421                 return MIGRATEPAGE_SUCCESS;
422         }
423
424         oldzone = page_zone(page);
425         newzone = page_zone(newpage);
426
427         xas_lock_irq(&xas);
428         if (page_count(page) != expected_count || xas_load(&xas) != page) {
429                 xas_unlock_irq(&xas);
430                 return -EAGAIN;
431         }
432
433         if (!page_ref_freeze(page, expected_count)) {
434                 xas_unlock_irq(&xas);
435                 return -EAGAIN;
436         }
437
438         /*
439          * Now we know that no one else is looking at the page:
440          * no turning back from here.
441          */
442         newpage->index = page->index;
443         newpage->mapping = page->mapping;
444         page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
445         if (PageSwapBacked(page)) {
446                 __SetPageSwapBacked(newpage);
447                 if (PageSwapCache(page)) {
448                         SetPageSwapCache(newpage);
449                         set_page_private(newpage, page_private(page));
450                 }
451         } else {
452                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
453         }
454
455         /* Move dirty while page refs frozen and newpage not yet exposed */
456         dirty = PageDirty(page);
457         if (dirty) {
458                 ClearPageDirty(page);
459                 SetPageDirty(newpage);
460         }
461
462         xas_store(&xas, newpage);
463         if (PageTransHuge(page)) {
464                 int i;
465
466                 for (i = 1; i < HPAGE_PMD_NR; i++) {
467                         xas_next(&xas);
468                         xas_store(&xas, newpage);
469                 }
470         }
471
472         /*
473          * Drop cache reference from old page by unfreezing
474          * to one less reference.
475          * We know this isn't the last reference.
476          */
477         page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
478
479         xas_unlock(&xas);
480         /* Leave irq disabled to prevent preemption while updating stats */
481
482         /*
483          * If moved to a different zone then also account
484          * the page for that zone. Other VM counters will be
485          * taken care of when we establish references to the
486          * new page and drop references to the old page.
487          *
488          * Note that anonymous pages are accounted for
489          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
490          * are mapped to swap space.
491          */
492         if (newzone != oldzone) {
493                 struct lruvec *old_lruvec, *new_lruvec;
494                 struct mem_cgroup *memcg;
495
496                 memcg = page_memcg(page);
497                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
498                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
499
500                 __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
501                 __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
502                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
503                         __dec_lruvec_state(old_lruvec, NR_SHMEM);
504                         __inc_lruvec_state(new_lruvec, NR_SHMEM);
505                 }
506                 if (dirty && mapping_can_writeback(mapping)) {
507                         __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
508                         __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
509                         __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
510                         __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
511                 }
512         }
513         local_irq_enable();
514
515         return MIGRATEPAGE_SUCCESS;
516 }
517 EXPORT_SYMBOL(migrate_page_move_mapping);
518
519 /*
520  * The expected number of remaining references is the same as that
521  * of migrate_page_move_mapping().
522  */
523 int migrate_huge_page_move_mapping(struct address_space *mapping,
524                                    struct page *newpage, struct page *page)
525 {
526         XA_STATE(xas, &mapping->i_pages, page_index(page));
527         int expected_count;
528
529         xas_lock_irq(&xas);
530         expected_count = 2 + page_has_private(page);
531         if (page_count(page) != expected_count || xas_load(&xas) != page) {
532                 xas_unlock_irq(&xas);
533                 return -EAGAIN;
534         }
535
536         if (!page_ref_freeze(page, expected_count)) {
537                 xas_unlock_irq(&xas);
538                 return -EAGAIN;
539         }
540
541         newpage->index = page->index;
542         newpage->mapping = page->mapping;
543
544         get_page(newpage);
545
546         xas_store(&xas, newpage);
547
548         page_ref_unfreeze(page, expected_count - 1);
549
550         xas_unlock_irq(&xas);
551
552         return MIGRATEPAGE_SUCCESS;
553 }
554
555 /*
556  * Gigantic pages are so large that we do not guarantee that page++ pointer
557  * arithmetic will work across the entire page.  We need something more
558  * specialized.
559  */
560 static void __copy_gigantic_page(struct page *dst, struct page *src,
561                                 int nr_pages)
562 {
563         int i;
564         struct page *dst_base = dst;
565         struct page *src_base = src;
566
567         for (i = 0; i < nr_pages; ) {
568                 cond_resched();
569                 copy_highpage(dst, src);
570
571                 i++;
572                 dst = mem_map_next(dst, dst_base, i);
573                 src = mem_map_next(src, src_base, i);
574         }
575 }
576
577 static void copy_huge_page(struct page *dst, struct page *src)
578 {
579         int i;
580         int nr_pages;
581
582         if (PageHuge(src)) {
583                 /* hugetlbfs page */
584                 struct hstate *h = page_hstate(src);
585                 nr_pages = pages_per_huge_page(h);
586
587                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
588                         __copy_gigantic_page(dst, src, nr_pages);
589                         return;
590                 }
591         } else {
592                 /* thp page */
593                 BUG_ON(!PageTransHuge(src));
594                 nr_pages = thp_nr_pages(src);
595         }
596
597         for (i = 0; i < nr_pages; i++) {
598                 cond_resched();
599                 copy_highpage(dst + i, src + i);
600         }
601 }
602
603 /*
604  * Copy the page to its new location
605  */
606 void migrate_page_states(struct page *newpage, struct page *page)
607 {
608         int cpupid;
609
610         if (PageError(page))
611                 SetPageError(newpage);
612         if (PageReferenced(page))
613                 SetPageReferenced(newpage);
614         if (PageUptodate(page))
615                 SetPageUptodate(newpage);
616         if (TestClearPageActive(page)) {
617                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
618                 SetPageActive(newpage);
619         } else if (TestClearPageUnevictable(page))
620                 SetPageUnevictable(newpage);
621         if (PageWorkingset(page))
622                 SetPageWorkingset(newpage);
623         if (PageChecked(page))
624                 SetPageChecked(newpage);
625         if (PageMappedToDisk(page))
626                 SetPageMappedToDisk(newpage);
627
628         /* Move dirty on pages not done by migrate_page_move_mapping() */
629         if (PageDirty(page))
630                 SetPageDirty(newpage);
631
632         if (page_is_young(page))
633                 set_page_young(newpage);
634         if (page_is_idle(page))
635                 set_page_idle(newpage);
636
637         /*
638          * Copy NUMA information to the new page, to prevent over-eager
639          * future migrations of this same page.
640          */
641         cpupid = page_cpupid_xchg_last(page, -1);
642         page_cpupid_xchg_last(newpage, cpupid);
643
644         ksm_migrate_page(newpage, page);
645         /*
646          * Please do not reorder this without considering how mm/ksm.c's
647          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
648          */
649         if (PageSwapCache(page))
650                 ClearPageSwapCache(page);
651         ClearPagePrivate(page);
652         set_page_private(page, 0);
653
654         /*
655          * If any waiters have accumulated on the new page then
656          * wake them up.
657          */
658         if (PageWriteback(newpage))
659                 end_page_writeback(newpage);
660
661         /*
662          * PG_readahead shares the same bit with PG_reclaim.  The above
663          * end_page_writeback() may clear PG_readahead mistakenly, so set the
664          * bit after that.
665          */
666         if (PageReadahead(page))
667                 SetPageReadahead(newpage);
668
669         copy_page_owner(page, newpage);
670
671         if (!PageHuge(page))
672                 mem_cgroup_migrate(page, newpage);
673 }
674 EXPORT_SYMBOL(migrate_page_states);
675
676 void migrate_page_copy(struct page *newpage, struct page *page)
677 {
678         if (PageHuge(page) || PageTransHuge(page))
679                 copy_huge_page(newpage, page);
680         else
681                 copy_highpage(newpage, page);
682
683         migrate_page_states(newpage, page);
684 }
685 EXPORT_SYMBOL(migrate_page_copy);
686
687 /************************************************************
688  *                    Migration functions
689  ***********************************************************/
690
691 /*
692  * Common logic to directly migrate a single LRU page suitable for
693  * pages that do not use PagePrivate/PagePrivate2.
694  *
695  * Pages are locked upon entry and exit.
696  */
697 int migrate_page(struct address_space *mapping,
698                 struct page *newpage, struct page *page,
699                 enum migrate_mode mode)
700 {
701         int rc;
702
703         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
704
705         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
706
707         if (rc != MIGRATEPAGE_SUCCESS)
708                 return rc;
709
710         if (mode != MIGRATE_SYNC_NO_COPY)
711                 migrate_page_copy(newpage, page);
712         else
713                 migrate_page_states(newpage, page);
714         return MIGRATEPAGE_SUCCESS;
715 }
716 EXPORT_SYMBOL(migrate_page);
717
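/*
 * Illustrative note (editor's addition): address spaces whose pages carry no
 * fs-private state can point ->migratepage straight at migrate_page().  shmem
 * and the swap cache do this, roughly:
 *
 *	static const struct address_space_operations shmem_aops = {
 *		...
 *	#ifdef CONFIG_MIGRATION
 *		.migratepage	= migrate_page,
 *	#endif
 *		...
 *	};
 */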
718 #ifdef CONFIG_BLOCK
719 /* Returns true if all buffers are successfully locked */
720 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
721                                                         enum migrate_mode mode)
722 {
723         struct buffer_head *bh = head;
724
725         /* Simple case, sync compaction */
726         if (mode != MIGRATE_ASYNC) {
727                 do {
728                         lock_buffer(bh);
729                         bh = bh->b_this_page;
730
731                 } while (bh != head);
732
733                 return true;
734         }
735
736         /* async case, we cannot block on lock_buffer so use trylock_buffer */
737         do {
738                 if (!trylock_buffer(bh)) {
739                         /*
740                          * We failed to lock the buffer and cannot stall in
741                          * async migration. Release the taken locks
742                          */
743                         struct buffer_head *failed_bh = bh;
744                         bh = head;
745                         while (bh != failed_bh) {
746                                 unlock_buffer(bh);
747                                 bh = bh->b_this_page;
748                         }
749                         return false;
750                 }
751
752                 bh = bh->b_this_page;
753         } while (bh != head);
754         return true;
755 }
756
757 static int __buffer_migrate_page(struct address_space *mapping,
758                 struct page *newpage, struct page *page, enum migrate_mode mode,
759                 bool check_refs)
760 {
761         struct buffer_head *bh, *head;
762         int rc;
763         int expected_count;
764
765         if (!page_has_buffers(page))
766                 return migrate_page(mapping, newpage, page, mode);
767
768         /* Check that the page does not have extra refs before we do more work */
769         expected_count = expected_page_refs(mapping, page);
770         if (page_count(page) != expected_count)
771                 return -EAGAIN;
772
773         head = page_buffers(page);
774         if (!buffer_migrate_lock_buffers(head, mode))
775                 return -EAGAIN;
776
777         if (check_refs) {
778                 bool busy;
779                 bool invalidated = false;
780
781 recheck_buffers:
782                 busy = false;
783                 spin_lock(&mapping->private_lock);
784                 bh = head;
785                 do {
786                         if (atomic_read(&bh->b_count)) {
787                                 busy = true;
788                                 break;
789                         }
790                         bh = bh->b_this_page;
791                 } while (bh != head);
792                 if (busy) {
793                         if (invalidated) {
794                                 rc = -EAGAIN;
795                                 goto unlock_buffers;
796                         }
797                         spin_unlock(&mapping->private_lock);
798                         invalidate_bh_lrus();
799                         invalidated = true;
800                         goto recheck_buffers;
801                 }
802         }
803
804         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
805         if (rc != MIGRATEPAGE_SUCCESS)
806                 goto unlock_buffers;
807
808         attach_page_private(newpage, detach_page_private(page));
809
810         bh = head;
811         do {
812                 set_bh_page(bh, newpage, bh_offset(bh));
813                 bh = bh->b_this_page;
814
815         } while (bh != head);
816
817         if (mode != MIGRATE_SYNC_NO_COPY)
818                 migrate_page_copy(newpage, page);
819         else
820                 migrate_page_states(newpage, page);
821
822         rc = MIGRATEPAGE_SUCCESS;
823 unlock_buffers:
824         if (check_refs)
825                 spin_unlock(&mapping->private_lock);
826         bh = head;
827         do {
828                 unlock_buffer(bh);
829                 bh = bh->b_this_page;
830
831         } while (bh != head);
832
833         return rc;
834 }
835
836 /*
837  * Migration function for pages with buffers. This function can only be used
838  * if the underlying filesystem guarantees that no other references to "page"
839  * exist. For example attached buffer heads are accessed only under page lock.
840  */
841 int buffer_migrate_page(struct address_space *mapping,
842                 struct page *newpage, struct page *page, enum migrate_mode mode)
843 {
844         return __buffer_migrate_page(mapping, newpage, page, mode, false);
845 }
846 EXPORT_SYMBOL(buffer_migrate_page);
847
848 /*
849  * Same as above except that this variant is more careful and checks that there
850  * are also no buffer head references. This function is the right one for
851  * mappings where buffer heads are directly looked up and referenced (such as
852  * block device mappings).
853  */
854 int buffer_migrate_page_norefs(struct address_space *mapping,
855                 struct page *newpage, struct page *page, enum migrate_mode mode)
856 {
857         return __buffer_migrate_page(mapping, newpage, page, mode, true);
858 }
859 #endif
860
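/*
 * Illustrative note (editor's addition): the block device mapping is the main
 * user of the "norefs" variant, since bdev buffer heads are looked up and
 * referenced directly; def_blk_aops in fs/block_dev.c sets
 * ".migratepage = buffer_migrate_page_norefs", while filesystems whose buffer
 * heads are only reachable under the page lock can use buffer_migrate_page().
 */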
861 /*
862  * Writeback a page to clean the dirty state
863  */
864 static int writeout(struct address_space *mapping, struct page *page)
865 {
866         struct writeback_control wbc = {
867                 .sync_mode = WB_SYNC_NONE,
868                 .nr_to_write = 1,
869                 .range_start = 0,
870                 .range_end = LLONG_MAX,
871                 .for_reclaim = 1
872         };
873         int rc;
874
875         if (!mapping->a_ops->writepage)
876                 /* No write method for the address space */
877                 return -EINVAL;
878
879         if (!clear_page_dirty_for_io(page))
880                 /* Someone else already triggered a write */
881                 return -EAGAIN;
882
883         /*
884          * A dirty page may imply that the underlying filesystem has
885          * the page on some queue. So the page must be clean for
886          * migration. Writeout may mean we lose the lock and the
887          * page state is no longer what we checked for earlier.
888          * At this point we know that the migration attempt cannot
889          * be successful.
890          */
891         remove_migration_ptes(page, page, false);
892
893         rc = mapping->a_ops->writepage(page, &wbc);
894
895         if (rc != AOP_WRITEPAGE_ACTIVATE)
896                 /* unlocked. Relock */
897                 lock_page(page);
898
899         return (rc < 0) ? -EIO : -EAGAIN;
900 }
901
902 /*
903  * Default handling if a filesystem does not provide a migration function.
904  */
905 static int fallback_migrate_page(struct address_space *mapping,
906         struct page *newpage, struct page *page, enum migrate_mode mode)
907 {
908         if (PageDirty(page)) {
909                 /* Only writeback pages in full synchronous migration */
910                 switch (mode) {
911                 case MIGRATE_SYNC:
912                 case MIGRATE_SYNC_NO_COPY:
913                         break;
914                 default:
915                         return -EBUSY;
916                 }
917                 return writeout(mapping, page);
918         }
919
920         /*
921          * Buffers may be managed in a filesystem specific way.
922          * We must have no buffers or drop them.
923          */
924         if (page_has_private(page) &&
925             !try_to_release_page(page, GFP_KERNEL))
926                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
927
928         return migrate_page(mapping, newpage, page, mode);
929 }
930
931 /*
932  * Move a page to a newly allocated page
933  * The page is locked and all ptes have been successfully removed.
934  *
935  * The new page will have replaced the old page if this function
936  * is successful.
937  *
938  * Return value:
939  *   < 0 - error code
940  *  MIGRATEPAGE_SUCCESS - success
941  */
942 static int move_to_new_page(struct page *newpage, struct page *page,
943                                 enum migrate_mode mode)
944 {
945         struct address_space *mapping;
946         int rc = -EAGAIN;
947         bool is_lru = !__PageMovable(page);
948
949         VM_BUG_ON_PAGE(!PageLocked(page), page);
950         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
951
952         mapping = page_mapping(page);
953
954         if (likely(is_lru)) {
955                 if (!mapping)
956                         rc = migrate_page(mapping, newpage, page, mode);
957                 else if (mapping->a_ops->migratepage)
958                         /*
959                          * Most pages have a mapping and most filesystems
960                          * provide a migratepage callback. Anonymous pages
961                          * are part of swap space which also has its own
962                          * migratepage callback. This is the most common path
963                          * for page migration.
964                          */
965                         rc = mapping->a_ops->migratepage(mapping, newpage,
966                                                         page, mode);
967                 else
968                         rc = fallback_migrate_page(mapping, newpage,
969                                                         page, mode);
970         } else {
971                 /*
972                  * In the case of a non-LRU page, it could have been released
973                  * after the isolation step. In that case, we shouldn't try migration.
974                  */
975                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
976                 if (!PageMovable(page)) {
977                         rc = MIGRATEPAGE_SUCCESS;
978                         __ClearPageIsolated(page);
979                         goto out;
980                 }
981
982                 rc = mapping->a_ops->migratepage(mapping, newpage,
983                                                 page, mode);
984                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
985                         !PageIsolated(page));
986         }
987
988         /*
989          * When successful, old pagecache page->mapping must be cleared before
990          * page is freed; but stats require that PageAnon be left as PageAnon.
991          */
992         if (rc == MIGRATEPAGE_SUCCESS) {
993                 if (__PageMovable(page)) {
994                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
995
996                         /*
997                          * We clear PG_movable under page_lock so that no compactor
998                          * can try to migrate this page.
999                          */
1000                         __ClearPageIsolated(page);
1001                 }
1002
1003                 /*
1004                  * Anonymous and movable page->mapping will be cleared by
1005                  * free_pages_prepare(), so don't reset it here; that keeps
1006                  * type checks such as PageAnon() working.
1007                  */
1008                 if (!PageMappingFlags(page))
1009                         page->mapping = NULL;
1010
1011                 if (likely(!is_zone_device_page(newpage)))
1012                         flush_dcache_page(newpage);
1013
1014         }
1015 out:
1016         return rc;
1017 }
1018
1019 static int __unmap_and_move(struct page *page, struct page *newpage,
1020                                 int force, enum migrate_mode mode)
1021 {
1022         int rc = -EAGAIN;
1023         int page_was_mapped = 0;
1024         struct anon_vma *anon_vma = NULL;
1025         bool is_lru = !__PageMovable(page);
1026
1027         if (!trylock_page(page)) {
1028                 if (!force || mode == MIGRATE_ASYNC)
1029                         goto out;
1030
1031                 /*
1032                  * It's not safe for direct compaction to call lock_page.
1033                  * For example, during page readahead pages are added locked
1034                  * to the LRU. Later, when the IO completes the pages are
1035                  * marked uptodate and unlocked. However, the queueing
1036                  * could be merging multiple pages for one bio (e.g.
1037                  * mpage_readahead). If an allocation happens for the
1038                  * second or third page, the process can end up locking
1039                  * the same page twice and deadlocking. Rather than
1040                  * trying to be clever about what pages can be locked,
1041                  * avoid the use of lock_page for direct compaction
1042                  * altogether.
1043                  */
1044                 if (current->flags & PF_MEMALLOC)
1045                         goto out;
1046
1047                 lock_page(page);
1048         }
1049
1050         if (PageWriteback(page)) {
1051                 /*
1052                  * Only in the case of a full synchronous migration is it
1053                  * necessary to wait for PageWriteback. In the async case,
1054                  * the retry loop is too short and in the sync-light case,
1055                  * the overhead of stalling is too much
1056                  */
1057                 switch (mode) {
1058                 case MIGRATE_SYNC:
1059                 case MIGRATE_SYNC_NO_COPY:
1060                         break;
1061                 default:
1062                         rc = -EBUSY;
1063                         goto out_unlock;
1064                 }
1065                 if (!force)
1066                         goto out_unlock;
1067                 wait_on_page_writeback(page);
1068         }
1069
1070         /*
1071          * try_to_unmap() will drop page->mapcount to 0, and then we could not
1072          * notice if the anon_vma were freed while we migrate the page.
1073          * This get_anon_vma() delays freeing the anon_vma pointer until the end
1074          * of migration. File cache pages are no problem because of page_lock():
1075          * file caches may use write_page() or lock_page() in migration, so
1076          * only anonymous pages need this care here.
1077          *
1078          * Only page_get_anon_vma() understands the subtleties of
1079          * getting a hold on an anon_vma from outside one of its mms.
1080          * But if we cannot get anon_vma, then we won't need it anyway,
1081          * because that implies that the anon page is no longer mapped
1082          * (and cannot be remapped so long as we hold the page lock).
1083          */
1084         if (PageAnon(page) && !PageKsm(page))
1085                 anon_vma = page_get_anon_vma(page);
1086
1087         /*
1088          * Block others from accessing the new page when we get around to
1089          * establishing additional references. We are usually the only one
1090          * holding a reference to newpage at this point. We used to have a BUG
1091          * here if trylock_page(newpage) fails, but would like to allow for
1092          * cases where there might be a race with the previous use of newpage.
1093          * This is much like races on refcount of oldpage: just don't BUG().
1094          */
1095         if (unlikely(!trylock_page(newpage)))
1096                 goto out_unlock;
1097
1098         if (unlikely(!is_lru)) {
1099                 rc = move_to_new_page(newpage, page, mode);
1100                 goto out_unlock_both;
1101         }
1102
1103         /*
1104          * Corner case handling:
1105          * 1. When a new swap-cache page is read in, it is added to the LRU
1106          * and treated as swapcache but it has no rmap yet.
1107          * Calling try_to_unmap() against a page->mapping==NULL page will
1108          * trigger a BUG.  So handle it here.
1109          * 2. An orphaned page (see truncate_cleanup_page) might have
1110          * fs-private metadata. The page can be picked up due to memory
1111          * offlining.  Everywhere else except page reclaim, the page is
1112          * invisible to the vm, so the page cannot be migrated.  So try to
1113          * free the metadata, so the page can be freed.
1114          */
1115         if (!page->mapping) {
1116                 VM_BUG_ON_PAGE(PageAnon(page), page);
1117                 if (page_has_private(page)) {
1118                         try_to_free_buffers(page);
1119                         goto out_unlock_both;
1120                 }
1121         } else if (page_mapped(page)) {
1122                 /* Establish migration ptes */
1123                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1124                                 page);
1125                 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
1126                 page_was_mapped = 1;
1127         }
1128
1129         if (!page_mapped(page))
1130                 rc = move_to_new_page(newpage, page, mode);
1131
1132         if (page_was_mapped)
1133                 remove_migration_ptes(page,
1134                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1135
1136 out_unlock_both:
1137         unlock_page(newpage);
1138 out_unlock:
1139         /* Drop an anon_vma reference if we took one */
1140         if (anon_vma)
1141                 put_anon_vma(anon_vma);
1142         unlock_page(page);
1143 out:
1144         /*
1145          * If migration is successful, decrease the refcount of newpage,
1146          * which will not free the page because the new page owner increased
1147          * the refcount. Also, if it is an LRU page, add the page to the LRU
1148          * list here. Use the old state of the isolated source page to
1149          * determine if we migrated an LRU page. newpage was already unlocked
1150          * and possibly modified by its owner - don't rely on the page
1151          * state.
1152          */
1153         if (rc == MIGRATEPAGE_SUCCESS) {
1154                 if (unlikely(!is_lru))
1155                         put_page(newpage);
1156                 else
1157                         putback_lru_page(newpage);
1158         }
1159
1160         return rc;
1161 }
1162
1163 /*
1164  * Obtain the lock on page, remove all ptes and migrate the page
1165  * to the newly allocated page in newpage.
1166  */
1167 static int unmap_and_move(new_page_t get_new_page,
1168                                    free_page_t put_new_page,
1169                                    unsigned long private, struct page *page,
1170                                    int force, enum migrate_mode mode,
1171                                    enum migrate_reason reason,
1172                                    struct list_head *ret)
1173 {
1174         int rc = MIGRATEPAGE_SUCCESS;
1175         struct page *newpage = NULL;
1176
1177         if (!thp_migration_supported() && PageTransHuge(page))
1178                 return -ENOMEM;
1179
1180         if (page_count(page) == 1) {
1181                 /* page was freed from under us. So we are done. */
1182                 ClearPageActive(page);
1183                 ClearPageUnevictable(page);
1184                 if (unlikely(__PageMovable(page))) {
1185                         lock_page(page);
1186                         if (!PageMovable(page))
1187                                 __ClearPageIsolated(page);
1188                         unlock_page(page);
1189                 }
1190                 goto out;
1191         }
1192
1193         newpage = get_new_page(page, private);
1194         if (!newpage)
1195                 return -ENOMEM;
1196
1197         rc = __unmap_and_move(page, newpage, force, mode);
1198         if (rc == MIGRATEPAGE_SUCCESS)
1199                 set_page_owner_migrate_reason(newpage, reason);
1200
1201 out:
1202         if (rc != -EAGAIN) {
1203                 /*
1204                  * A page that has been migrated has all references
1205                  * removed and will be freed. A page that has not been
1206                  * migrated will have kept its references and be restored.
1207                  */
1208                 list_del(&page->lru);
1209         }
1210
1211         /*
1212          * If migration is successful, release the reference grabbed during
1213          * isolation. Otherwise, restore the page to the right list unless
1214          * we want to retry.
1215          */
1216         if (rc == MIGRATEPAGE_SUCCESS) {
1217                 /*
1218                  * Compaction can migrate also non-LRU pages which are
1219                  * not accounted to NR_ISOLATED_*. They can be recognized
1220                  * as __PageMovable
1221                  */
1222                 if (likely(!__PageMovable(page)))
1223                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1224                                         page_is_file_lru(page), -thp_nr_pages(page));
1225
1226                 if (reason != MR_MEMORY_FAILURE)
1227                         /*
1228                          * We release the page in page_handle_poison.
1229                          */
1230                         put_page(page);
1231         } else {
1232                 if (rc != -EAGAIN)
1233                         list_add_tail(&page->lru, ret);
1234
1235                 if (put_new_page)
1236                         put_new_page(newpage, private);
1237                 else
1238                         put_page(newpage);
1239         }
1240
1241         return rc;
1242 }
1243
1244 /*
1245  * Counterpart of unmap_and_move() for hugepage migration.
1246  *
1247  * This function doesn't wait for the completion of hugepage I/O
1248  * because there is no race between I/O and migration for hugepages.
1249  * Note that currently hugepage I/O occurs only in direct I/O
1250  * where no lock is held and PG_writeback is irrelevant,
1251  * and the writeback status of all subpages is counted in the reference
1252  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1253  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1254  * This means that when we try to migrate a hugepage whose subpages are
1255  * doing direct I/O, some references remain after try_to_unmap() and
1256  * hugepage migration fails without data corruption.
1257  *
1258  * There is also no race when direct I/O is issued on the page under migration,
1259  * because then pte is replaced with migration swap entry and direct I/O code
1260  * will wait in the page fault for migration to complete.
1261  */
1262 static int unmap_and_move_huge_page(new_page_t get_new_page,
1263                                 free_page_t put_new_page, unsigned long private,
1264                                 struct page *hpage, int force,
1265                                 enum migrate_mode mode, int reason,
1266                                 struct list_head *ret)
1267 {
1268         int rc = -EAGAIN;
1269         int page_was_mapped = 0;
1270         struct page *new_hpage;
1271         struct anon_vma *anon_vma = NULL;
1272         struct address_space *mapping = NULL;
1273
1274         /*
1275          * Migratability of hugepages depends on the architecture and the hugepage size.
1276          * This check is necessary because some callers of hugepage migration
1277          * like soft offline and memory hotremove don't walk through page
1278          * tables or check whether the hugepage is pmd-based or not before
1279          * kicking migration.
1280          */
1281         if (!hugepage_migration_supported(page_hstate(hpage))) {
1282                 list_move_tail(&hpage->lru, ret);
1283                 return -ENOSYS;
1284         }
1285
1286         new_hpage = get_new_page(hpage, private);
1287         if (!new_hpage)
1288                 return -ENOMEM;
1289
1290         if (!trylock_page(hpage)) {
1291                 if (!force)
1292                         goto out;
1293                 switch (mode) {
1294                 case MIGRATE_SYNC:
1295                 case MIGRATE_SYNC_NO_COPY:
1296                         break;
1297                 default:
1298                         goto out;
1299                 }
1300                 lock_page(hpage);
1301         }
1302
1303         /*
1304          * Check for pages which are in the process of being freed.  Without
1305          * page_mapping() set, the hugetlbfs specific move page routine will not
1306          * be called and we could leak usage counts for subpools.
1307          */
1308         if (page_private(hpage) && !page_mapping(hpage)) {
1309                 rc = -EBUSY;
1310                 goto out_unlock;
1311         }
1312
1313         if (PageAnon(hpage))
1314                 anon_vma = page_get_anon_vma(hpage);
1315
1316         if (unlikely(!trylock_page(new_hpage)))
1317                 goto put_anon;
1318
1319         if (page_mapped(hpage)) {
1320                 bool mapping_locked = false;
1321                 enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK;
1322
1323                 if (!PageAnon(hpage)) {
1324                         /*
1325                          * In shared mappings, try_to_unmap could potentially
1326                          * call huge_pmd_unshare.  Because of this, take
1327                          * call huge_pmd_unshare.  Because of this, take the
1328                          * to let lower levels know we have taken the lock.
1329                          */
1330                         mapping = hugetlb_page_mapping_lock_write(hpage);
1331                         if (unlikely(!mapping))
1332                                 goto unlock_put_anon;
1333
1334                         mapping_locked = true;
1335                         ttu |= TTU_RMAP_LOCKED;
1336                 }
1337
1338                 try_to_unmap(hpage, ttu);
1339                 page_was_mapped = 1;
1340
1341                 if (mapping_locked)
1342                         i_mmap_unlock_write(mapping);
1343         }
1344
1345         if (!page_mapped(hpage))
1346                 rc = move_to_new_page(new_hpage, hpage, mode);
1347
1348         if (page_was_mapped)
1349                 remove_migration_ptes(hpage,
1350                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1351
1352 unlock_put_anon:
1353         unlock_page(new_hpage);
1354
1355 put_anon:
1356         if (anon_vma)
1357                 put_anon_vma(anon_vma);
1358
1359         if (rc == MIGRATEPAGE_SUCCESS) {
1360                 move_hugetlb_state(hpage, new_hpage, reason);
1361                 put_new_page = NULL;
1362         }
1363
1364 out_unlock:
1365         unlock_page(hpage);
1366 out:
1367         if (rc == MIGRATEPAGE_SUCCESS)
1368                 putback_active_hugepage(hpage);
1369         else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
1370                 list_move_tail(&hpage->lru, ret);
1371
1372         /*
1373          * If migration was not successful and there's a freeing callback, use
1374          * it.  Otherwise, put_page() will drop the reference grabbed during
1375          * isolation.
1376          */
1377         if (put_new_page)
1378                 put_new_page(new_hpage, private);
1379         else
1380                 putback_active_hugepage(new_hpage);
1381
1382         return rc;
1383 }
1384
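/*
 * Illustrative sketch (editor's addition): hugetlb pages reach
 * unmap_and_move_huge_page() through the same migrate_pages() entry point as
 * base pages; callers such as memory offlining isolate them with
 * isolate_huge_page() rather than isolate_lru_page(), roughly:
 *
 *	if (PageHuge(page))
 *		isolate_huge_page(compound_head(page), &pagelist);
 *	else if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *
 * and then hand &pagelist to migrate_pages() as usual.
 */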
1385 /*
1386  * migrate_pages - migrate the pages specified in a list, to the free pages
1387  *                 supplied as the target for the page migration
1388  *
1389  * @from:               The list of pages to be migrated.
1390  * @get_new_page:       The function used to allocate free pages to be used
1391  *                      as the target of the page migration.
1392  * @put_new_page:       The function used to free target pages if migration
1393  *                      fails, or NULL if no special handling is necessary.
1394  * @private:            Private data to be passed on to get_new_page()
1395  * @mode:               The migration mode that specifies the constraints for
1396  *                      page migration, if any.
1397  * @reason:             The reason for page migration.
1398  *
1399  * The function returns after 10 attempts or if no pages are movable any more
1400  * because the list has become empty or no retryable pages remain.
1401  * It is the caller's responsibility to call putback_movable_pages() to return pages
1402  * to the LRU or free list only if ret != 0.
1403  *
1404  * Returns the number of pages that were not migrated, or an error code.
1405  */
1406 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1407                 free_page_t put_new_page, unsigned long private,
1408                 enum migrate_mode mode, int reason)
1409 {
1410         int retry = 1;
1411         int thp_retry = 1;
1412         int nr_failed = 0;
1413         int nr_succeeded = 0;
1414         int nr_thp_succeeded = 0;
1415         int nr_thp_failed = 0;
1416         int nr_thp_split = 0;
1417         int pass = 0;
1418         bool is_thp = false;
1419         struct page *page;
1420         struct page *page2;
1421         int swapwrite = current->flags & PF_SWAPWRITE;
1422         int rc, nr_subpages;
1423         LIST_HEAD(ret_pages);
1424
1425         if (!swapwrite)
1426                 current->flags |= PF_SWAPWRITE;
1427
1428         for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1429                 retry = 0;
1430                 thp_retry = 0;
1431
1432                 list_for_each_entry_safe(page, page2, from, lru) {
1433 retry:
1434                         /*
1435                          * THP statistics are based on the source huge page.
1436                          * Capture required information that might get lost
1437                          * during migration.
1438                          */
1439                         is_thp = PageTransHuge(page) && !PageHuge(page);
1440                         nr_subpages = thp_nr_pages(page);
1441                         cond_resched();
1442
1443                         if (PageHuge(page))
1444                                 rc = unmap_and_move_huge_page(get_new_page,
1445                                                 put_new_page, private, page,
1446                                                 pass > 2, mode, reason,
1447                                                 &ret_pages);
1448                         else
1449                                 rc = unmap_and_move(get_new_page, put_new_page,
1450                                                 private, page, pass > 2, mode,
1451                                                 reason, &ret_pages);
1452                         /*
1453                          * The rules are:
1454                          *      Success: non hugetlb page will be freed, hugetlb
1455                          *               page will be put back
1456                          *      -EAGAIN: stay on the from list
1457                          *      -ENOMEM: stay on the from list
1458                          *      Other errno: put on ret_pages list then splice to
1459                          *                   from list
1460                          */
1461                         switch(rc) {
1462                         case -ENOMEM:
1463                                 /*
1464                                  * THP migration might be unsupported or the
1465                                  * allocation could've failed so we should
1466                                  * retry on the same page with the THP split
1467                                  * to base pages.
1468                                  *
1469                                  * Head page is retried immediately and tail
1470                                  * pages are added to the tail of the list so
1471                                  * we encounter them after the rest of the list
1472                                  * is processed.
1473                                  */
1474                                 if (is_thp) {
1475                                         lock_page(page);
1476                                         rc = split_huge_page_to_list(page, from);
1477                                         unlock_page(page);
1478                                         if (!rc) {
1479                                                 list_safe_reset_next(page, page2, lru);
1480                                                 nr_thp_split++;
1481                                                 goto retry;
1482                                         }
1483
1484                                         nr_thp_failed++;
1485                                         nr_failed += nr_subpages;
1486                                         goto out;
1487                                 }
1488                                 nr_failed++;
1489                                 goto out;
1490                         case -EAGAIN:
1491                                 if (is_thp) {
1492                                         thp_retry++;
1493                                         break;
1494                                 }
1495                                 retry++;
1496                                 break;
1497                         case MIGRATEPAGE_SUCCESS:
1498                                 if (is_thp) {
1499                                         nr_thp_succeeded++;
1500                                         nr_succeeded += nr_subpages;
1501                                         break;
1502                                 }
1503                                 nr_succeeded++;
1504                                 break;
1505                         default:
1506                                 /*
1507                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1508                          * unlike the -EAGAIN case, the failed page is
1509                                  * removed from migration page list and not
1510                                  * retried in the next outer loop.
1511                                  */
1512                                 if (is_thp) {
1513                                         nr_thp_failed++;
1514                                         nr_failed += nr_subpages;
1515                                         break;
1516                                 }
1517                                 nr_failed++;
1518                                 break;
1519                         }
1520                 }
1521         }
1522         nr_failed += retry + thp_retry;
1523         nr_thp_failed += thp_retry;
1524         rc = nr_failed;
1525 out:
1526         /*
1527          * Put the pages that failed permanently back on the migration list;
1528          * they will be moved to the right list by the caller.
1529          */
1530         list_splice(&ret_pages, from);
1531
1532         count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1533         count_vm_events(PGMIGRATE_FAIL, nr_failed);
1534         count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1535         count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1536         count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1537         trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1538                                nr_thp_failed, nr_thp_split, mode, reason);
1539
1540         if (!swapwrite)
1541                 current->flags &= ~PF_SWAPWRITE;
1542
1543         return rc;
1544 }
1545
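/*
 * Allocation callback for migrate_pages(): @private is expected to point to a
 * struct migration_target_control carrying the preferred node id, an optional
 * nodemask and the gfp mask to use.  Hugetlb and transparent huge page
 * sources get matching huge destination allocations.
 */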
1546 struct page *alloc_migration_target(struct page *page, unsigned long private)
1547 {
1548         struct migration_target_control *mtc;
1549         gfp_t gfp_mask;
1550         unsigned int order = 0;
1551         struct page *new_page = NULL;
1552         int nid;
1553         int zidx;
1554
1555         mtc = (struct migration_target_control *)private;
1556         gfp_mask = mtc->gfp_mask;
1557         nid = mtc->nid;
1558         if (nid == NUMA_NO_NODE)
1559                 nid = page_to_nid(page);
1560
1561         if (PageHuge(page)) {
1562                 struct hstate *h = page_hstate(compound_head(page));
1563
1564                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1565                 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1566         }
1567
1568         if (PageTransHuge(page)) {
1569                 /*
1570                  * clear __GFP_RECLAIM to make the migration callback
1571                  * consistent with regular THP allocations.
1572                  */
1573                 gfp_mask &= ~__GFP_RECLAIM;
1574                 gfp_mask |= GFP_TRANSHUGE;
1575                 order = HPAGE_PMD_ORDER;
1576         }
1577         zidx = zone_idx(page_zone(page));
1578         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1579                 gfp_mask |= __GFP_HIGHMEM;
1580
1581         new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
1582
1583         if (new_page && PageTransHuge(new_page))
1584                 prep_transhuge_page(new_page);
1585
1586         return new_page;
1587 }
1588
1589 #ifdef CONFIG_NUMA
1590
1591 static int store_status(int __user *status, int start, int value, int nr)
1592 {
1593         while (nr-- > 0) {
1594                 if (put_user(value, status + start))
1595                         return -EFAULT;
1596                 start++;
1597         }
1598
1599         return 0;
1600 }
1601
1602 static int do_move_pages_to_node(struct mm_struct *mm,
1603                 struct list_head *pagelist, int node)
1604 {
1605         int err;
1606         struct migration_target_control mtc = {
1607                 .nid = node,
1608                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1609         };
1610
1611         err = migrate_pages(pagelist, alloc_migration_target, NULL,
1612                         (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1613         if (err)
1614                 putback_movable_pages(pagelist);
1615         return err;
1616 }
1617
1618 /*
1619  * Resolves the given address to a struct page, isolates it from the LRU and
1620  * adds it to the given pagelist.
1621  * Returns:
1622  *     errno - if the page cannot be found/isolated
1623  *     0 - when it doesn't have to be migrated because it is already on the
1624  *         target node
1625  *     1 - when it has been queued
1626  */
1627 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1628                 int node, struct list_head *pagelist, bool migrate_all)
1629 {
1630         struct vm_area_struct *vma;
1631         struct page *page;
1632         unsigned int follflags;
1633         int err;
1634
1635         mmap_read_lock(mm);
1636         err = -EFAULT;
1637         vma = find_vma(mm, addr);
1638         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1639                 goto out;
1640
1641         /* FOLL_DUMP to ignore special (like zero) pages */
1642         follflags = FOLL_GET | FOLL_DUMP;
1643         page = follow_page(vma, addr, follflags);
1644
1645         err = PTR_ERR(page);
1646         if (IS_ERR(page))
1647                 goto out;
1648
1649         err = -ENOENT;
1650         if (!page)
1651                 goto out;
1652
1653         err = 0;
1654         if (page_to_nid(page) == node)
1655                 goto out_putpage;
1656
1657         err = -EACCES;
1658         if (page_mapcount(page) > 1 && !migrate_all)
1659                 goto out_putpage;
1660
1661         if (PageHuge(page)) {
1662                 if (PageHead(page)) {
1663                         isolate_huge_page(page, pagelist);
1664                         err = 1;
1665                 }
1666         } else {
1667                 struct page *head;
1668
1669                 head = compound_head(page);
1670                 err = isolate_lru_page(head);
1671                 if (err)
1672                         goto out_putpage;
1673
1674                 err = 1;
1675                 list_add_tail(&head->lru, pagelist);
1676                 mod_node_page_state(page_pgdat(head),
1677                         NR_ISOLATED_ANON + page_is_file_lru(head),
1678                         thp_nr_pages(head));
1679         }
1680 out_putpage:
1681         /*
1682          * Either remove the duplicate refcount from
1683          * isolate_lru_page() or drop the page ref if it was
1684          * not isolated.
1685          */
1686         put_page(page);
1687 out:
1688         mmap_read_unlock(mm);
1689         return err;
1690 }
1691
1692 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1693                 struct list_head *pagelist, int __user *status,
1694                 int start, int i, unsigned long nr_pages)
1695 {
1696         int err;
1697
1698         if (list_empty(pagelist))
1699                 return 0;
1700
1701         err = do_move_pages_to_node(mm, pagelist, node);
1702         if (err) {
1703                 /*
1704                  * A positive err means the number of pages that
1705                  * failed to migrate.  Since we are going to
1706                  * abort and return the number of non-migrated
1707                  * pages, we need to include the rest of the
1708                  * nr_pages that have not been attempted as
1709                  * well.
1710                  */
1711                 if (err > 0)
1712                         err += nr_pages - i - 1;
1713                 return err;
1714         }
1715         return store_status(status, start, node, i - start);
1716 }
1717
1718 /*
1719  * Migrate an array of page addresses onto an array of nodes and fill
1720  * the corresponding array of status values.
1721  */
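/*
 * Illustrative userspace sketch (assumes the move_pages(2) wrapper from
 * libnuma's <numaif.h>; the addresses and node numbers are made up): request
 * that two of the calling process's pages be moved to node 1 and read the
 * per-page result back from the status array.
 *
 *	void *pages[2] = { addr0, addr1 };	// page-aligned user addresses
 *	int nodes[2] = { 1, 1 };		// desired node for each page
 *	int status[2];
 *
 *	// pid 0 == operate on the calling process; MPOL_MF_MOVE moves only
 *	// pages mapped by this process.  On return each status[i] holds the
 *	// node the page ended up on, or a negative errno.
 *	long ret = move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE);
 */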
1722 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1723                          unsigned long nr_pages,
1724                          const void __user * __user *pages,
1725                          const int __user *nodes,
1726                          int __user *status, int flags)
1727 {
1728         int current_node = NUMA_NO_NODE;
1729         LIST_HEAD(pagelist);
1730         int start, i;
1731         int err = 0, err1;
1732
1733         migrate_prep();
1734
1735         for (i = start = 0; i < nr_pages; i++) {
1736                 const void __user *p;
1737                 unsigned long addr;
1738                 int node;
1739
1740                 err = -EFAULT;
1741                 if (get_user(p, pages + i))
1742                         goto out_flush;
1743                 if (get_user(node, nodes + i))
1744                         goto out_flush;
1745                 addr = (unsigned long)untagged_addr(p);
1746
1747                 err = -ENODEV;
1748                 if (node < 0 || node >= MAX_NUMNODES)
1749                         goto out_flush;
1750                 if (!node_state(node, N_MEMORY))
1751                         goto out_flush;
1752
1753                 err = -EACCES;
1754                 if (!node_isset(node, task_nodes))
1755                         goto out_flush;
1756
1757                 if (current_node == NUMA_NO_NODE) {
1758                         current_node = node;
1759                         start = i;
1760                 } else if (node != current_node) {
1761                         err = move_pages_and_store_status(mm, current_node,
1762                                         &pagelist, status, start, i, nr_pages);
1763                         if (err)
1764                                 goto out;
1765                         start = i;
1766                         current_node = node;
1767                 }
1768
1769                 /*
1770                  * Errors in the page lookup or isolation are not fatal and we simply
1771                  * report them via status
1772                  */
1773                 err = add_page_for_migration(mm, addr, current_node,
1774                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
1775
1776                 if (err > 0) {
1777                         /* The page is successfully queued for migration */
1778                         continue;
1779                 }
1780
1781                 /*
1782                  * If the page is already on the target node (!err), store the
1783                  * node, otherwise, store the err.
1784                  */
1785                 err = store_status(status, i, err ? : current_node, 1);
1786                 if (err)
1787                         goto out_flush;
1788
1789                 err = move_pages_and_store_status(mm, current_node, &pagelist,
1790                                 status, start, i, nr_pages);
1791                 if (err)
1792                         goto out;
1793                 current_node = NUMA_NO_NODE;
1794         }
1795 out_flush:
1796         /* Make sure we do not overwrite the existing error */
1797         err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1798                                 status, start, i, nr_pages);
1799         if (err >= 0)
1800                 err = err1;
1801 out:
1802         return err;
1803 }
1804
1805 /*
1806  * Determine the nodes of an array of pages and store them in an array of status values.
1807  */
1808 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1809                                 const void __user **pages, int *status)
1810 {
1811         unsigned long i;
1812
1813         mmap_read_lock(mm);
1814
1815         for (i = 0; i < nr_pages; i++) {
1816                 unsigned long addr = (unsigned long)(*pages);
1817                 struct vm_area_struct *vma;
1818                 struct page *page;
1819                 int err = -EFAULT;
1820
1821                 vma = find_vma(mm, addr);
1822                 if (!vma || addr < vma->vm_start)
1823                         goto set_status;
1824
1825                 /* FOLL_DUMP to ignore special (like zero) pages */
1826                 page = follow_page(vma, addr, FOLL_DUMP);
1827
1828                 err = PTR_ERR(page);
1829                 if (IS_ERR(page))
1830                         goto set_status;
1831
1832                 err = page ? page_to_nid(page) : -ENOENT;
1833 set_status:
1834                 *status = err;
1835
1836                 pages++;
1837                 status++;
1838         }
1839
1840         mmap_read_unlock(mm);
1841 }
1842
1843 /*
1844  * Determine the nodes of a user array of pages and store them in
1845  * a user array of status values.
1846  */
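/*
 * Illustrative sketch (same assumptions as the move_pages(2) example above):
 * passing a NULL nodes array turns move_pages(2) into a pure query that ends
 * up here; each status[i] is filled with the node id of the page backing
 * pages[i], or a negative errno such as -ENOENT if nothing is mapped there.
 *
 *	long ret = move_pages(0, 2, pages, NULL, status, 0);
 */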
1847 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1848                          const void __user * __user *pages,
1849                          int __user *status)
1850 {
1851 #define DO_PAGES_STAT_CHUNK_NR 16
1852         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1853         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1854
1855         while (nr_pages) {
1856                 unsigned long chunk_nr;
1857
1858                 chunk_nr = nr_pages;
1859                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1860                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1861
1862                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1863                         break;
1864
1865                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1866
1867                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1868                         break;
1869
1870                 pages += chunk_nr;
1871                 status += chunk_nr;
1872                 nr_pages -= chunk_nr;
1873         }
1874         return nr_pages ? -EFAULT : 0;
1875 }
1876
1877 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1878 {
1879         struct task_struct *task;
1880         struct mm_struct *mm;
1881
1882         /*
1883          * There is no need to check if the current process has the right to modify
1884          * the specified process when they are the same.
1885          */
1886         if (!pid) {
1887                 mmget(current->mm);
1888                 *mem_nodes = cpuset_mems_allowed(current);
1889                 return current->mm;
1890         }
1891
1892         /* Find the mm_struct */
1893         rcu_read_lock();
1894         task = find_task_by_vpid(pid);
1895         if (!task) {
1896                 rcu_read_unlock();
1897                 return ERR_PTR(-ESRCH);
1898         }
1899         get_task_struct(task);
1900
1901         /*
1902          * Check if this process has the right to modify the specified
1903          * process. Use the regular "ptrace_may_access()" checks.
1904          */
1905         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1906                 rcu_read_unlock();
1907                 mm = ERR_PTR(-EPERM);
1908                 goto out;
1909         }
1910         rcu_read_unlock();
1911
1912         mm = ERR_PTR(security_task_movememory(task));
1913         if (IS_ERR(mm))
1914                 goto out;
1915         *mem_nodes = cpuset_mems_allowed(task);
1916         mm = get_task_mm(task);
1917 out:
1918         put_task_struct(task);
1919         if (!mm)
1920                 mm = ERR_PTR(-EINVAL);
1921         return mm;
1922 }
1923
1924 /*
1925  * Move a list of pages in the address space of the currently executing
1926  * process.
1927  */
1928 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1929                              const void __user * __user *pages,
1930                              const int __user *nodes,
1931                              int __user *status, int flags)
1932 {
1933         struct mm_struct *mm;
1934         int err;
1935         nodemask_t task_nodes;
1936
1937         /* Check flags */
1938         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1939                 return -EINVAL;
1940
1941         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1942                 return -EPERM;
1943
1944         mm = find_mm_struct(pid, &task_nodes);
1945         if (IS_ERR(mm))
1946                 return PTR_ERR(mm);
1947
1948         if (nodes)
1949                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1950                                     nodes, status, flags);
1951         else
1952                 err = do_pages_stat(mm, nr_pages, pages, status);
1953
1954         mmput(mm);
1955         return err;
1956 }
1957
1958 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1959                 const void __user * __user *, pages,
1960                 const int __user *, nodes,
1961                 int __user *, status, int, flags)
1962 {
1963         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1964 }
1965
1966 #ifdef CONFIG_COMPAT
1967 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1968                        compat_uptr_t __user *, pages32,
1969                        const int __user *, nodes,
1970                        int __user *, status,
1971                        int, flags)
1972 {
1973         const void __user * __user *pages;
1974         int i;
1975
1976         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1977         for (i = 0; i < nr_pages; i++) {
1978                 compat_uptr_t p;
1979
1980                 if (get_user(p, pages32 + i) ||
1981                         put_user(compat_ptr(p), pages + i))
1982                         return -EFAULT;
1983         }
1984         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1985 }
1986 #endif /* CONFIG_COMPAT */
1987
1988 #ifdef CONFIG_NUMA_BALANCING
1989 /*
1990  * Returns true if this is a safe migration target node for misplaced NUMA
1991  * pages. Currently it only checks the watermarks which crude
1992  * pages. Currently it only checks the watermarks, which is a crude check.
1993 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1994                                    unsigned long nr_migrate_pages)
1995 {
1996         int z;
1997
1998         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1999                 struct zone *zone = pgdat->node_zones + z;
2000
2001                 if (!populated_zone(zone))
2002                         continue;
2003
2004                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2005                 if (!zone_watermark_ok(zone, 0,
2006                                        high_wmark_pages(zone) +
2007                                        nr_migrate_pages,
2008                                        ZONE_MOVABLE, 0))
2009                         continue;
2010                 return true;
2011         }
2012         return false;
2013 }
2014
2015 static struct page *alloc_misplaced_dst_page(struct page *page,
2016                                            unsigned long data)
2017 {
2018         int nid = (int) data;
2019         struct page *newpage;
2020
2021         newpage = __alloc_pages_node(nid,
2022                                          (GFP_HIGHUSER_MOVABLE |
2023                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
2024                                           __GFP_NORETRY | __GFP_NOWARN) &
2025                                          ~__GFP_RECLAIM, 0);
2026
2027         return newpage;
2028 }
2029
2030 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2031 {
2032         int page_lru;
2033
2034         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2035
2036         /* Avoid migrating to a node that is nearly full */
2037         if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
2038                 return 0;
2039
2040         if (isolate_lru_page(page))
2041                 return 0;
2042
2043         /*
2044          * migrate_misplaced_transhuge_page() skips page migration's usual
2045          * check on page_count(), so we must do it here, now that the page
2046          * has been isolated: a GUP pin, or any other pin, prevents migration.
2047          * The expected page count is 3: 1 for the page's mapcount, 1 for the
2048          * caller's pin, and 1 for the reference taken by isolate_lru_page().
2049          */
2050         if (PageTransHuge(page) && page_count(page) != 3) {
2051                 putback_lru_page(page);
2052                 return 0;
2053         }
2054
2055         page_lru = page_is_file_lru(page);
2056         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2057                                 thp_nr_pages(page));
2058
2059         /*
2060          * Isolating the page has taken another reference, so the
2061          * caller's reference can be safely dropped without the page
2062          * disappearing underneath us during migration.
2063          */
2064         put_page(page);
2065         return 1;
2066 }
2067
2068 bool pmd_trans_migrating(pmd_t pmd)
2069 {
2070         struct page *page = pmd_page(pmd);
2071         return PageLocked(page);
2072 }
2073
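/*
 * A file-backed or shmem page mapped with execute permission into more than
 * one process is very likely shared library text; NUMA balancing skips such
 * pages (see migrate_misplaced_page() and migrate_misplaced_transhuge_page()
 * below), since bouncing them between nodes tends to hurt more than it helps.
 */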
2074 static inline bool is_shared_exec_page(struct vm_area_struct *vma,
2075                                        struct page *page)
2076 {
2077         if (page_mapcount(page) != 1 &&
2078             (page_is_file_lru(page) || vma_is_shmem(vma)) &&
2079             (vma->vm_flags & VM_EXEC))
2080                 return true;
2081
2082         return false;
2083 }
2084
2085 /*
2086  * Attempt to migrate a misplaced page to the specified destination
2087  * node. Caller is expected to have an elevated reference count on
2088  * the page that will be dropped by this function before returning.
2089  */
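/*
 * Illustrative sketch (hypothetical caller, modelled on a NUMA hinting fault
 * handler; "example_pick_target_node" is made up): the caller takes a page
 * reference, picks a destination node, and lets this function either migrate
 * the page or drop the reference on failure.
 *
 *	get_page(page);			// reference consumed by the callee
 *	target_nid = example_pick_target_node(page);
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;	// account the fault to the new node
 */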
2090 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2091                            int node)
2092 {
2093         pg_data_t *pgdat = NODE_DATA(node);
2094         int isolated;
2095         int nr_remaining;
2096         LIST_HEAD(migratepages);
2097
2098         /*
2099          * Don't migrate file pages that are mapped in multiple processes
2100          * with execute permissions as they are probably shared libraries.
2101          */
2102         if (is_shared_exec_page(vma, page))
2103                 goto out;
2104
2105         /*
2106          * Also do not migrate dirty pages: not all filesystems can move
2107          * dirty pages in MIGRATE_ASYNC mode, so trying would be a waste of cycles.
2108          */
2109         if (page_is_file_lru(page) && PageDirty(page))
2110                 goto out;
2111
2112         isolated = numamigrate_isolate_page(pgdat, page);
2113         if (!isolated)
2114                 goto out;
2115
2116         list_add(&page->lru, &migratepages);
2117         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2118                                      NULL, node, MIGRATE_ASYNC,
2119                                      MR_NUMA_MISPLACED);
2120         if (nr_remaining) {
2121                 if (!list_empty(&migratepages)) {
2122                         list_del(&page->lru);
2123                         dec_node_page_state(page, NR_ISOLATED_ANON +
2124                                         page_is_file_lru(page));
2125                         putback_lru_page(page);
2126                 }
2127                 isolated = 0;
2128         } else
2129                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
2130         BUG_ON(!list_empty(&migratepages));
2131         return isolated;
2132
2133 out:
2134         put_page(page);
2135         return 0;
2136 }
2137 #endif /* CONFIG_NUMA_BALANCING */
2138
2139 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2140 /*
2141  * Migrates a THP to a given target node. page must be locked and is unlocked
2142  * before returning.
2143  */
2144 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2145                                 struct vm_area_struct *vma,
2146                                 pmd_t *pmd, pmd_t entry,
2147                                 unsigned long address,
2148                                 struct page *page, int node)
2149 {
2150         spinlock_t *ptl;
2151         pg_data_t *pgdat = NODE_DATA(node);
2152         int isolated = 0;
2153         struct page *new_page = NULL;
2154         int page_lru = page_is_file_lru(page);
2155         unsigned long start = address & HPAGE_PMD_MASK;
2156
2157         if (is_shared_exec_page(vma, page))
2158                 goto out;
2159
2160         new_page = alloc_pages_node(node,
2161                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2162                 HPAGE_PMD_ORDER);
2163         if (!new_page)
2164                 goto out_fail;
2165         prep_transhuge_page(new_page);
2166
2167         isolated = numamigrate_isolate_page(pgdat, page);
2168         if (!isolated) {
2169                 put_page(new_page);
2170                 goto out_fail;
2171         }
2172
2173         /* Prepare a page as a migration target */
2174         __SetPageLocked(new_page);
2175         if (PageSwapBacked(page))
2176                 __SetPageSwapBacked(new_page);
2177
2178         /* anon mapping, we can simply copy page->mapping to the new page: */
2179         new_page->mapping = page->mapping;
2180         new_page->index = page->index;
2181         /* flush the cache before copying using the kernel virtual address */
2182         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2183         migrate_page_copy(new_page, page);
2184         WARN_ON(PageLRU(new_page));
2185
2186         /* Recheck the target PMD */
2187         ptl = pmd_lock(mm, pmd);
2188         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2189                 spin_unlock(ptl);
2190
2191                 /* Reverse changes made by migrate_page_copy() */
2192                 if (TestClearPageActive(new_page))
2193                         SetPageActive(page);
2194                 if (TestClearPageUnevictable(new_page))
2195                         SetPageUnevictable(page);
2196
2197                 unlock_page(new_page);
2198                 put_page(new_page);             /* Free it */
2199
2200                 /* Retake the caller's reference and put the page back on the LRU */
2201                 get_page(page);
2202                 putback_lru_page(page);
2203                 mod_node_page_state(page_pgdat(page),
2204                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2205
2206                 goto out_unlock;
2207         }
2208
2209         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2210         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2211
2212         /*
2213          * Overwrite the old entry under pagetable lock and establish
2214          * the new PTE. Any parallel GUP will either observe the old
2215          * page blocking on the page lock, block on the page table
2216          * lock or observe the new page. The SetPageUptodate on the
2217          * new page and page_add_new_anon_rmap guarantee the copy is
2218          * visible before the pagetable update.
2219          */
2220         page_add_anon_rmap(new_page, vma, start, true);
2221         /*
2222          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2223          * has already been flushed globally.  So no TLB can be currently
2224          * caching this non present pmd mapping.  There's no need to clear the
2225          * pmd before doing set_pmd_at(), nor to flush the TLB after
2226          * set_pmd_at().  Clearing the pmd here would introduce a race
2227          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2228          * mmap_lock for reading.  If the pmd is set to NULL at any given time,
2229          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2230          * pmd.
2231          */
2232         set_pmd_at(mm, start, pmd, entry);
2233         update_mmu_cache_pmd(vma, address, &entry);
2234
2235         page_ref_unfreeze(page, 2);
2236         mlock_migrate_page(new_page, page);
2237         page_remove_rmap(page, true);
2238         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2239
2240         spin_unlock(ptl);
2241
2242         /* Take an "isolate" reference and put new page on the LRU. */
2243         get_page(new_page);
2244         putback_lru_page(new_page);
2245
2246         unlock_page(new_page);
2247         unlock_page(page);
2248         put_page(page);                 /* Drop the rmap reference */
2249         put_page(page);                 /* Drop the LRU isolation reference */
2250
2251         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2252         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2253
2254         mod_node_page_state(page_pgdat(page),
2255                         NR_ISOLATED_ANON + page_lru,
2256                         -HPAGE_PMD_NR);
2257         return isolated;
2258
2259 out_fail:
2260         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2261         ptl = pmd_lock(mm, pmd);
2262         if (pmd_same(*pmd, entry)) {
2263                 entry = pmd_modify(entry, vma->vm_page_prot);
2264                 set_pmd_at(mm, start, pmd, entry);
2265                 update_mmu_cache_pmd(vma, address, &entry);
2266         }
2267         spin_unlock(ptl);
2268
2269 out_unlock:
2270         unlock_page(page);
2271 out:
2272         put_page(page);
2273         return 0;
2274 }
2275 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2276
2277 #endif /* CONFIG_NUMA */
2278
2279 #ifdef CONFIG_DEVICE_PRIVATE
2280 static int migrate_vma_collect_hole(unsigned long start,
2281                                     unsigned long end,
2282                                     __always_unused int depth,
2283                                     struct mm_walk *walk)
2284 {
2285         struct migrate_vma *migrate = walk->private;
2286         unsigned long addr;
2287
2288         /* Only allow populating anonymous memory. */
2289         if (!vma_is_anonymous(walk->vma)) {
2290                 for (addr = start; addr < end; addr += PAGE_SIZE) {
2291                         migrate->src[migrate->npages] = 0;
2292                         migrate->dst[migrate->npages] = 0;
2293                         migrate->npages++;
2294                 }
2295                 return 0;
2296         }
2297
2298         for (addr = start; addr < end; addr += PAGE_SIZE) {
2299                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2300                 migrate->dst[migrate->npages] = 0;
2301                 migrate->npages++;
2302                 migrate->cpages++;
2303         }
2304
2305         return 0;
2306 }
2307
2308 static int migrate_vma_collect_skip(unsigned long start,
2309                                     unsigned long end,
2310                                     struct mm_walk *walk)
2311 {
2312         struct migrate_vma *migrate = walk->private;
2313         unsigned long addr;
2314
2315         for (addr = start; addr < end; addr += PAGE_SIZE) {
2316                 migrate->dst[migrate->npages] = 0;
2317                 migrate->src[migrate->npages++] = 0;
2318         }
2319
2320         return 0;
2321 }
2322
2323 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2324                                    unsigned long start,
2325                                    unsigned long end,
2326                                    struct mm_walk *walk)
2327 {
2328         struct migrate_vma *migrate = walk->private;
2329         struct vm_area_struct *vma = walk->vma;
2330         struct mm_struct *mm = vma->vm_mm;
2331         unsigned long addr = start, unmapped = 0;
2332         spinlock_t *ptl;
2333         pte_t *ptep;
2334
2335 again:
2336         if (pmd_none(*pmdp))
2337                 return migrate_vma_collect_hole(start, end, -1, walk);
2338
2339         if (pmd_trans_huge(*pmdp)) {
2340                 struct page *page;
2341
2342                 ptl = pmd_lock(mm, pmdp);
2343                 if (unlikely(!pmd_trans_huge(*pmdp))) {
2344                         spin_unlock(ptl);
2345                         goto again;
2346                 }
2347
2348                 page = pmd_page(*pmdp);
2349                 if (is_huge_zero_page(page)) {
2350                         spin_unlock(ptl);
2351                         split_huge_pmd(vma, pmdp, addr);
2352                         if (pmd_trans_unstable(pmdp))
2353                                 return migrate_vma_collect_skip(start, end,
2354                                                                 walk);
2355                 } else {
2356                         int ret;
2357
2358                         get_page(page);
2359                         spin_unlock(ptl);
2360                         if (unlikely(!trylock_page(page)))
2361                                 return migrate_vma_collect_skip(start, end,
2362                                                                 walk);
2363                         ret = split_huge_page(page);
2364                         unlock_page(page);
2365                         put_page(page);
2366                         if (ret)
2367                                 return migrate_vma_collect_skip(start, end,
2368                                                                 walk);
2369                         if (pmd_none(*pmdp))
2370                                 return migrate_vma_collect_hole(start, end, -1,
2371                                                                 walk);
2372                 }
2373         }
2374
2375         if (unlikely(pmd_bad(*pmdp)))
2376                 return migrate_vma_collect_skip(start, end, walk);
2377
2378         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2379         arch_enter_lazy_mmu_mode();
2380
2381         for (; addr < end; addr += PAGE_SIZE, ptep++) {
2382                 unsigned long mpfn = 0, pfn;
2383                 struct page *page;
2384                 swp_entry_t entry;
2385                 pte_t pte;
2386
2387                 pte = *ptep;
2388
2389                 if (pte_none(pte)) {
2390                         if (vma_is_anonymous(vma)) {
2391                                 mpfn = MIGRATE_PFN_MIGRATE;
2392                                 migrate->cpages++;
2393                         }
2394                         goto next;
2395                 }
2396
2397                 if (!pte_present(pte)) {
2398                         /*
2399                          * Only care about the special page table entry of an
2400                          * unaddressable device page. Other special swap entries
2401                          * are not migratable, and we ignore regular swapped pages.
2402                          */
2403                         entry = pte_to_swp_entry(pte);
2404                         if (!is_device_private_entry(entry))
2405                                 goto next;
2406
2407                         page = device_private_entry_to_page(entry);
2408                         if (!(migrate->flags &
2409                                 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2410                             page->pgmap->owner != migrate->pgmap_owner)
2411                                 goto next;
2412
2413                         mpfn = migrate_pfn(page_to_pfn(page)) |
2414                                         MIGRATE_PFN_MIGRATE;
2415                         if (is_write_device_private_entry(entry))
2416                                 mpfn |= MIGRATE_PFN_WRITE;
2417                 } else {
2418                         if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2419                                 goto next;
2420                         pfn = pte_pfn(pte);
2421                         if (is_zero_pfn(pfn)) {
2422                                 mpfn = MIGRATE_PFN_MIGRATE;
2423                                 migrate->cpages++;
2424                                 goto next;
2425                         }
2426                         page = vm_normal_page(migrate->vma, addr, pte);
2427                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2428                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2429                 }
2430
2431                 /* FIXME support THP */
2432                 if (!page || !page->mapping || PageTransCompound(page)) {
2433                         mpfn = 0;
2434                         goto next;
2435                 }
2436
2437                 /*
2438                  * By getting a reference on the page we pin it and that blocks
2439                  * any kind of migration. A side effect is that it "freezes" the
2440                  * pte.
2441                  *
2442                  * We drop this reference after isolating the page from the lru
2443                  * for non-device pages (device pages are not on the lru and thus
2444                  * can't be dropped from it).
2445                  */
2446                 get_page(page);
2447                 migrate->cpages++;
2448
2449                 /*
2450                  * Optimize for the common case where page is only mapped once
2451                  * in one process. If we can lock the page, then we can safely
2452                  * set up a special migration page table entry now.
2453                  */
2454                 if (trylock_page(page)) {
2455                         pte_t swp_pte;
2456
2457                         mpfn |= MIGRATE_PFN_LOCKED;
2458                         ptep_get_and_clear(mm, addr, ptep);
2459
2460                         /* Setup special migration page table entry */
2461                         entry = make_migration_entry(page, mpfn &
2462                                                      MIGRATE_PFN_WRITE);
2463                         swp_pte = swp_entry_to_pte(entry);
2464                         if (pte_present(pte)) {
2465                                 if (pte_soft_dirty(pte))
2466                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
2467                                 if (pte_uffd_wp(pte))
2468                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
2469                         } else {
2470                                 if (pte_swp_soft_dirty(pte))
2471                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
2472                                 if (pte_swp_uffd_wp(pte))
2473                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
2474                         }
2475                         set_pte_at(mm, addr, ptep, swp_pte);
2476
2477                         /*
2478                          * This is like regular unmap: we remove the rmap and
2479                          * drop page refcount. Page won't be freed, as we took
2480                          * a reference just above.
2481                          */
2482                         page_remove_rmap(page, false);
2483                         put_page(page);
2484
2485                         if (pte_present(pte))
2486                                 unmapped++;
2487                 }
2488
2489 next:
2490                 migrate->dst[migrate->npages] = 0;
2491                 migrate->src[migrate->npages++] = mpfn;
2492         }
2493         arch_leave_lazy_mmu_mode();
2494         pte_unmap_unlock(ptep - 1, ptl);
2495
2496         /* Only flush the TLB if we actually modified any entries */
2497         if (unmapped)
2498                 flush_tlb_range(walk->vma, start, end);
2499
2500         return 0;
2501 }
2502
2503 static const struct mm_walk_ops migrate_vma_walk_ops = {
2504         .pmd_entry              = migrate_vma_collect_pmd,
2505         .pte_hole               = migrate_vma_collect_hole,
2506 };
2507
2508 /*
2509  * migrate_vma_collect() - collect pages over a range of virtual addresses
2510  * @migrate: migrate struct containing all migration information
2511  *
2512  * This will walk the CPU page table. For each virtual address backed by a
2513  * valid page, it updates the src array and takes a reference on the page, in
2514  * order to pin the page until we lock it and unmap it.
2515  */
2516 static void migrate_vma_collect(struct migrate_vma *migrate)
2517 {
2518         struct mmu_notifier_range range;
2519
2520         /*
2521          * Note that the pgmap_owner is passed to the mmu notifier callback so
2522          * that the registered device driver can skip invalidating device
2523          * private page mappings that won't be migrated.
2524          */
2525         mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
2526                 migrate->vma->vm_mm, migrate->start, migrate->end,
2527                 migrate->pgmap_owner);
2528         mmu_notifier_invalidate_range_start(&range);
2529
2530         walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2531                         &migrate_vma_walk_ops, migrate);
2532
2533         mmu_notifier_invalidate_range_end(&range);
2534         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2535 }
2536
2537 /*
2538  * migrate_vma_check_page() - check if page is pinned or not
2539  * @page: struct page to check
2540  *
2541  * Pinned pages cannot be migrated. This is the same test as in
2542  * migrate_page_move_mapping(), except that here we allow migration of a
2543  * ZONE_DEVICE page.
2544  */
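/*
 * Worked example (illustrative, a still-mapped anonymous page mapped by
 * exactly one process): the mapping contributes one reference and the
 * caller's isolation another, so page_count() == 2, extra == 1 and
 * page_mapcount() == 1; 2 - 1 is not greater than 1, so the page is
 * considered migratable.  Any additional reference, e.g. a GUP pin, makes
 * page_count() == 3 and the test fails, so the page is treated as pinned.
 */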
2545 static bool migrate_vma_check_page(struct page *page)
2546 {
2547         /*
2548          * One extra ref because caller holds an extra reference, either from
2549          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2550          * a device page.
2551          */
2552         int extra = 1;
2553
2554         /*
2555          * FIXME support THP (transparent huge page), it is a bit more complex to
2556          * check them than regular pages, because they can be mapped with a pmd
2557          * or with a pte (split pte mapping).
2558          */
2559         if (PageCompound(page))
2560                 return false;
2561
2562         /* Pages from ZONE_DEVICE have one extra reference */
2563         if (is_zone_device_page(page)) {
2564                 /*
2565                  * Private pages can never be pinned as they have no valid pte and
2566                  * GUP will fail for them. Yet if there is a pending migration,
2567                  * a thread might try to wait on the pte migration entry and
2568                  * will bump the page reference count. Sadly there is no way to
2569                  * differentiate a regular pin from a migration wait. Hence, to
2570                  * avoid two racing threads trying to migrate back to the CPU and
2571                  * entering an infinite loop (one stopping the migration because
2572                  * the other is waiting on the pte migration entry), we always
2573                  * return true here.
2574                  *
2575                  * FIXME the proper solution is to rework migration_entry_wait()
2575                  * so it does not need to take a reference on the page.
2576                  */
2577                 return is_device_private_page(page);
2578         }
2579
2580         /* For file-backed pages */
2581         if (page_mapping(page))
2582                 extra += 1 + page_has_private(page);
2583
2584         if ((page_count(page) - extra) > page_mapcount(page))
2585                 return false;
2586
2587         return true;
2588 }
2589
2590 /*
2591  * migrate_vma_prepare() - lock pages and isolate them from the lru
2592  * @migrate: migrate struct containing all migration information
2593  *
2594  * This locks pages that have been collected by migrate_vma_collect(). Once each
2595  * page is locked it is isolated from the lru (for non-device pages). Finally,
2596  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2597  * migrated by concurrent kernel threads.
2598  */
2599 static void migrate_vma_prepare(struct migrate_vma *migrate)
2600 {
2601         const unsigned long npages = migrate->npages;
2602         const unsigned long start = migrate->start;
2603         unsigned long addr, i, restore = 0;
2604         bool allow_drain = true;
2605
2606         lru_add_drain();
2607
2608         for (i = 0; (i < npages) && migrate->cpages; i++) {
2609                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2610                 bool remap = true;
2611
2612                 if (!page)
2613                         continue;
2614
2615                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2616                         /*
2617                          * Because we are migrating several pages there can be
2618                          * a deadlock between 2 concurrent migrations where each
2619                          * is waiting on the other's page lock.
2620                          *
2621                          * Make migrate_vma() a best-effort thing and back off
2622                          * for any page we cannot lock right away.
2623                          */
2624                         if (!trylock_page(page)) {
2625                                 migrate->src[i] = 0;
2626                                 migrate->cpages--;
2627                                 put_page(page);
2628                                 continue;
2629                         }
2630                         remap = false;
2631                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
2632                 }
2633
2634                 /* ZONE_DEVICE pages are not on LRU */
2635                 if (!is_zone_device_page(page)) {
2636                         if (!PageLRU(page) && allow_drain) {
2637                                 /* Drain CPU's pagevec */
2638                                 lru_add_drain_all();
2639                                 allow_drain = false;
2640                         }
2641
2642                         if (isolate_lru_page(page)) {
2643                                 if (remap) {
2644                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2645                                         migrate->cpages--;
2646                                         restore++;
2647                                 } else {
2648                                         migrate->src[i] = 0;
2649                                         unlock_page(page);
2650                                         migrate->cpages--;
2651                                         put_page(page);
2652                                 }
2653                                 continue;
2654                         }
2655
2656                         /* Drop the reference we took in collect */
2657                         put_page(page);
2658                 }
2659
2660                 if (!migrate_vma_check_page(page)) {
2661                         if (remap) {
2662                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2663                                 migrate->cpages--;
2664                                 restore++;
2665
2666                                 if (!is_zone_device_page(page)) {
2667                                         get_page(page);
2668                                         putback_lru_page(page);
2669                                 }
2670                         } else {
2671                                 migrate->src[i] = 0;
2672                                 unlock_page(page);
2673                                 migrate->cpages--;
2674
2675                                 if (!is_zone_device_page(page))
2676                                         putback_lru_page(page);
2677                                 else
2678                                         put_page(page);
2679                         }
2680                 }
2681         }
2682
2683         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2684                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2685
2686                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2687                         continue;
2688
2689                 remove_migration_pte(page, migrate->vma, addr, page);
2690
2691                 migrate->src[i] = 0;
2692                 unlock_page(page);
2693                 put_page(page);
2694                 restore--;
2695         }
2696 }
2697
2698 /*
2699  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2700  * @migrate: migrate struct containing all migration information
2701  *
2702  * Replace page mapping (CPU page table pte) with a special migration pte entry
2703  * and check again if it has been pinned. Pinned pages are restored because we
2704  * cannot migrate them.
2705  *
2706  * This is the last step before we call the device driver callback to allocate
2707  * destination memory and copy contents of original page over to new page.
2708  */
2709 static void migrate_vma_unmap(struct migrate_vma *migrate)
2710 {
2711         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
2712         const unsigned long npages = migrate->npages;
2713         const unsigned long start = migrate->start;
2714         unsigned long addr, i, restore = 0;
2715
2716         for (i = 0; i < npages; i++) {
2717                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2718
2719                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2720                         continue;
2721
2722                 if (page_mapped(page)) {
2723                         try_to_unmap(page, flags);
2724                         if (page_mapped(page))
2725                                 goto restore;
2726                 }
2727
2728                 if (migrate_vma_check_page(page))
2729                         continue;
2730
2731 restore:
2732                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2733                 migrate->cpages--;
2734                 restore++;
2735         }
2736
2737         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2738                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2739
2740                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2741                         continue;
2742
2743                 remove_migration_ptes(page, page, false);
2744
2745                 migrate->src[i] = 0;
2746                 unlock_page(page);
2747                 restore--;
2748
2749                 if (is_zone_device_page(page))
2750                         put_page(page);
2751                 else
2752                         putback_lru_page(page);
2753         }
2754 }
2755
2756 /**
2757  * migrate_vma_setup() - prepare to migrate a range of memory
2758  * @args: contains the vma, start, and pfns arrays for the migration
2759  *
2760  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2761  * without an error.
2762  *
2763  * Prepare to migrate a range of virtual addresses by collecting all
2764  * the pages backing each virtual address in the range, saving them inside the
2765  * src array.  Then lock those pages and unmap them. Once the pages are locked
2766  * and unmapped, check whether each page is pinned or not.  Pages that aren't
2767  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2768  * corresponding src array entry.  Any pages that are pinned are then restored
2769  * by remapping and unlocking those pages.
2770  *
2771  * The caller should then allocate destination memory and copy source memory to
2772  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2773  * flag set).  Once these are allocated and copied, the caller must update each
2774  * corresponding entry in the dst array with the pfn value of the destination
2775  * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2776  * (destination pages must have their struct pages locked, via lock_page()).
2777  *
2778  * Note that the caller does not have to migrate all the pages that are marked
2779  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2780  * device memory to system memory.  If the caller cannot migrate a device page
2781  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2782  * consequences for the userspace process, so it must be avoided if at all
2783  * possible.
2784  *
2785  * For empty entries inside the CPU page table (pte_none() or pmd_none() is true)
2786  * we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source array,
2787  * thus allowing the caller to allocate device memory for those unbacked virtual
2788  * addresses.  For this the caller simply has to allocate device memory and
2789  * properly set the destination entry like for regular migration.  Note that
2790  * this can still fail, and thus the device driver must check if the
2791  * migration was successful for those entries after calling migrate_vma_pages(),
2792  * just like for regular migration.
2793  *
2794  * After that, the caller must call migrate_vma_pages() to go over each entry
2795  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
2796  * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID flag
2797  * set, then migrate_vma_pages() migrates struct page information from the source
2798  * struct page to the destination struct page.  If it fails to migrate the
2799  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2800  * src array.
2801  *
2802  * At this point all successfully migrated pages have an entry in the src
2803  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2804  * array entry with MIGRATE_PFN_VALID flag set.
2805  *
2806  * Once migrate_vma_pages() returns the caller may inspect which pages were
2807  * successfully migrated, and which were not.  Successfully migrated pages will
2808  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2809  *
2810  * It is safe to update device page table after migrate_vma_pages() because
2811  * both destination and source page are still locked, and the mmap_lock is held
2812  * in read mode (hence no one can unmap the range being migrated).
2813  *
2814  * Once the caller is done cleaning up and updating its device page table (if
2815  * it chose to do so; this is not an obligation), it finally calls
2816  * migrate_vma_finalize() to update the CPU page table to point to the new
2817  * pages for successfully migrated pages, or otherwise to restore the CPU page
2818  * table to point to the original source pages.
2819  */
2820 int migrate_vma_setup(struct migrate_vma *args)
2821 {
2822         long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2823
2824         args->start &= PAGE_MASK;
2825         args->end &= PAGE_MASK;
2826         if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2827             (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2828                 return -EINVAL;
2829         if (nr_pages <= 0)
2830                 return -EINVAL;
2831         if (args->start < args->vma->vm_start ||
2832             args->start >= args->vma->vm_end)
2833                 return -EINVAL;
2834         if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2835                 return -EINVAL;
2836         if (!args->src || !args->dst)
2837                 return -EINVAL;
2838
2839         memset(args->src, 0, sizeof(*args->src) * nr_pages);
2840         args->cpages = 0;
2841         args->npages = 0;
2842
2843         migrate_vma_collect(args);
2844
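             /*
              * Lock (prepare) and unmap the collected pages only if the walk
              * actually found candidates.  Both steps drop pages that cannot
              * be migrated and decrement cpages accordingly, so if everything
              * is dropped the next step is skipped as well.
              */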
2845         if (args->cpages)
2846                 migrate_vma_prepare(args);
2847         if (args->cpages)
2848                 migrate_vma_unmap(args);
2849
2850         /*
2851          * At this point pages are locked and unmapped, and thus they have
2852          * stable content and can safely be copied to destination memory that
2853          * is allocated by the drivers.
2854          */
2855         return 0;
2856
2857 }
2858 EXPORT_SYMBOL(migrate_vma_setup);
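
     /*
      * A minimal usage sketch, not called anywhere in this file: it walks a
      * single page through the migrate_vma_setup() / migrate_vma_pages() /
      * migrate_vma_finalize() protocol documented above, roughly the way a
      * device driver (or lib/test_hmm.c) would.  A plain system page stands
      * in for the device destination page, and MIGRATE_VMA_SELECT_SYSTEM is
      * assumed to exist in this kernel's struct migrate_vma; a real driver
      * would instead allocate from its own pgmap, set args.pgmap_owner and
      * copy the data with its DMA engine.  The caller is assumed to hold
      * mmap_read_lock(vma->vm_mm).
      */
     static int __maybe_unused migrate_vma_usage_sketch(struct vm_area_struct *vma,
                                                         unsigned long addr)
     {
             unsigned long src_pfn = 0, dst_pfn = 0;
             struct migrate_vma args = {
                     .vma    = vma,
                     .start  = addr & PAGE_MASK,
                     .end    = (addr & PAGE_MASK) + PAGE_SIZE,
                     .src    = &src_pfn,
                     .dst    = &dst_pfn,
                     .flags  = MIGRATE_VMA_SELECT_SYSTEM,
             };
             struct page *spage, *dpage;
             int ret;

             ret = migrate_vma_setup(&args);
             if (ret)
                     return ret;

             /* The page is pinned or otherwise not migratable: restore it. */
             if (!(src_pfn & MIGRATE_PFN_MIGRATE))
                     goto out_finalize;

             /* Stand-in for device memory; it must be locked before use. */
             dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
             if (!dpage)
                     goto out_finalize;
             lock_page(dpage);

             /* Copy the old contents, if there was a backing page at all. */
             spage = migrate_pfn_to_page(src_pfn);
             if (spage)
                     copy_highpage(dpage, spage);

             /* Publish the destination page for migrate_vma_pages(). */
             dst_pfn = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

             migrate_vma_pages(&args);

             /*
              * A real driver would check src_pfn & MIGRATE_PFN_MIGRATE here
              * and, on success, update its device page tables before
              * finalizing.
              */
     out_finalize:
             migrate_vma_finalize(&args);
             return 0;
     }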
2859
2860 /*
2861  * This code closely matches the code in:
2862  *   __handle_mm_fault()
2863  *     handle_pte_fault()
2864  *       do_anonymous_page()
2865  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2866  * private page.
2867  */
2868 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2869                                     unsigned long addr,
2870                                     struct page *page,
2871                                     unsigned long *src,
2872                                     unsigned long *dst)
2873 {
2874         struct vm_area_struct *vma = migrate->vma;
2875         struct mm_struct *mm = vma->vm_mm;
2876         bool flush = false;
2877         spinlock_t *ptl;
2878         pte_t entry;
2879         pgd_t *pgdp;
2880         p4d_t *p4dp;
2881         pud_t *pudp;
2882         pmd_t *pmdp;
2883         pte_t *ptep;
2884
2885         /* Only allow populating anonymous memory */
2886         if (!vma_is_anonymous(vma))
2887                 goto abort;
2888
2889         pgdp = pgd_offset(mm, addr);
2890         p4dp = p4d_alloc(mm, pgdp, addr);
2891         if (!p4dp)
2892                 goto abort;
2893         pudp = pud_alloc(mm, p4dp, addr);
2894         if (!pudp)
2895                 goto abort;
2896         pmdp = pmd_alloc(mm, pudp, addr);
2897         if (!pmdp)
2898                 goto abort;
2899
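             /*
              * Only a base page sized PTE is inserted below; if a huge or
              * device-mapped PMD already sits here, someone else populated
              * this range behind our back, so give up on this address.
              */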
2900         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2901                 goto abort;
2902
2903         /*
2904          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2905          * pte_offset_map() on pmds where a huge pmd might be created
2906          * from a different thread.
2907          *
2908          * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2909          * parallel threads are excluded by other means.
2910          *
2911          * Here we only have mmap_read_lock(mm).
2912          */
2913         if (pte_alloc(mm, pmdp))
2914                 goto abort;
2915
2916         /* See the comment in pte_alloc_one_map() */
2917         if (unlikely(pmd_trans_unstable(pmdp)))
2918                 goto abort;
2919
2920         if (unlikely(anon_vma_prepare(vma)))
2921                 goto abort;
2922         if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2923                 goto abort;
2924
2925         /*
2926          * The memory barrier inside __SetPageUptodate makes sure that
2927          * preceding stores to the page contents become visible before
2928          * the set_pte_at() write.
2929          */
2930         __SetPageUptodate(page);
2931
2932         if (is_zone_device_page(page)) {
2933                 if (is_device_private_page(page)) {
2934                         swp_entry_t swp_entry;
2935
2936                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2937                         entry = swp_entry_to_pte(swp_entry);
2938                 } else {
                             /*
                              * Only un-addressable device private pages are
                              * supported; anything else would leave "entry"
                              * uninitialized below, so bail out.
                              */
                             goto abort;
                     }
2939         } else {
2940                 entry = mk_pte(page, vma->vm_page_prot);
2941                 if (vma->vm_flags & VM_WRITE)
2942                         entry = pte_mkwrite(pte_mkdirty(entry));
2943         }
2944
2945         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2946
2947         if (check_stable_address_space(mm))
2948                 goto unlock_abort;
2949
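             /*
              * Only an empty PTE or a mapping of the shared zero page may be
              * replaced here; any other present entry means the CPU page
              * table changed while it was not locked, so back off.  Replacing
              * the zero page is the one case that requires a TLB flush below.
              */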
2950         if (pte_present(*ptep)) {
2951                 unsigned long pfn = pte_pfn(*ptep);
2952
2953                 if (!is_zero_pfn(pfn))
2954                         goto unlock_abort;
2955                 flush = true;
2956         } else if (!pte_none(*ptep))
2957                 goto unlock_abort;
2958
2959         /*
2960          * Check for userfaultfd but do not deliver the fault. Instead,
2961          * just back off.
2962          */
2963         if (userfaultfd_missing(vma))
2964                 goto unlock_abort;
2965
2966         inc_mm_counter(mm, MM_ANONPAGES);
2967         page_add_new_anon_rmap(page, vma, addr, false);
2968         if (!is_zone_device_page(page))
2969                 lru_cache_add_inactive_or_unevictable(page, vma);
2970         get_page(page);
2971
2972         if (flush) {
2973                 flush_cache_page(vma, addr, pte_pfn(*ptep));
2974                 ptep_clear_flush_notify(vma, addr, ptep);
2975                 set_pte_at_notify(mm, addr, ptep, entry);
2976                 update_mmu_cache(vma, addr, ptep);
2977         } else {
2978                 /* No need to invalidate - it was non-present before */
2979                 set_pte_at(mm, addr, ptep, entry);
2980                 update_mmu_cache(vma, addr, ptep);
2981         }
2982
2983         pte_unmap_unlock(ptep, ptl);
2984         *src = MIGRATE_PFN_MIGRATE;
2985         return;
2986
2987 unlock_abort:
2988         pte_unmap_unlock(ptep, ptl);
2989 abort:
2990         *src &= ~MIGRATE_PFN_MIGRATE;
2991 }
2992
2993 /**
2994  * migrate_vma_pages() - migrate meta-data from src page to dst page
2995  * @migrate: migrate struct containing all migration information
2996  *
2997  * This migrates struct page meta-data from source struct page to destination
2998  * struct page. This effectively finishes the migration from source page to the
2999  * destination page.
3000  */
3001 void migrate_vma_pages(struct migrate_vma *migrate)
3002 {
3003         const unsigned long npages = migrate->npages;
3004         const unsigned long start = migrate->start;
3005         struct mmu_notifier_range range;
3006         unsigned long addr, i;
3007         bool notified = false;
3008
3009         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
3010                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3011                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3012                 struct address_space *mapping;
3013                 int r;
3014
3015                 if (!newpage) {
3016                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3017                         continue;
3018                 }
3019
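                     /*
                      * No source page means this address was a hole
                      * (pte_none() or the zero page) when it was collected.
                      * The destination page is inserted directly into the CPU
                      * page table; the first such insertion also starts the
                      * mmu_notifier range used for the rest of the loop.
                      */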
3020                 if (!page) {
3021                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
3022                                 continue;
3023                         if (!notified) {
3024                                 notified = true;
3025
3026                                 mmu_notifier_range_init_migrate(&range, 0,
3027                                         migrate->vma, migrate->vma->vm_mm,
3028                                         addr, migrate->end,
3029                                         migrate->pgmap_owner);
3030                                 mmu_notifier_invalidate_range_start(&range);
3031                         }
3032                         migrate_vma_insert_page(migrate, addr, newpage,
3033                                                 &migrate->src[i],
3034                                                 &migrate->dst[i]);
3035                         continue;
3036                 }
3037
3038                 mapping = page_mapping(page);
3039
3040                 if (is_zone_device_page(newpage)) {
3041                         if (is_device_private_page(newpage)) {
3042                                  * For now, only private anonymous memory can
3043                                  * be migrated to un-addressable device memory.
3044                                  * migrating to un-addressable device memory.
3045                                  */
3046                                 if (mapping) {
3047                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3048                                         continue;
3049                                 }
3050                         } else {
3051                                 /*
3052                                  * Other types of ZONE_DEVICE page are not
3053                                  * supported.
3054                                  */
3055                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3056                                 continue;
3057                         }
3058                 }
3059
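                     /*
                      * MIGRATE_SYNC_NO_COPY transfers the struct page state
                      * and mapping but deliberately skips copying the data
                      * itself; the caller (typically a driver using its DMA
                      * engine) is responsible for copying the contents.
                      */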
3060                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
3061                 if (r != MIGRATEPAGE_SUCCESS)
3062                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3063         }
3064
3065         /*
3066          * There is no need to call the mmu_notifier->invalidate_range()
3067          * callback again: the ptep_clear_flush_notify() inside
3068          * migrate_vma_insert_page() above already called it.
3069          */
3070         if (notified)
3071                 mmu_notifier_invalidate_range_only_end(&range);
3072 }
3073 EXPORT_SYMBOL(migrate_vma_pages);
3074
3075 /**
3076  * migrate_vma_finalize() - restore CPU page table entry
3077  * @migrate: migrate struct containing all migration information
3078  *
3079  * This replaces the special migration pte entry with either a mapping to the
3080  * new page if migration was successful for that page, or to the original page
3081  * otherwise.
3082  *
3083  * This also unlocks the pages and puts them back on the LRU or, for device
3084  * pages, drops the extra refcount.
3085  */
3086 void migrate_vma_finalize(struct migrate_vma *migrate)
3087 {
3088         const unsigned long npages = migrate->npages;
3089         unsigned long i;
3090
3091         for (i = 0; i < npages; i++) {
3092                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3093                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3094
3095                 if (!page) {
3096                         if (newpage) {
3097                                 unlock_page(newpage);
3098                                 put_page(newpage);
3099                         }
3100                         continue;
3101                 }
3102
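                     /*
                      * Migration failed for this entry (or no destination
                      * page was ever supplied): drop the destination page, if
                      * any, and fall back to remapping the original source
                      * page.
                      */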
3103                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
3104                         if (newpage) {
3105                                 unlock_page(newpage);
3106                                 put_page(newpage);
3107                         }
3108                         newpage = page;
3109                 }
3110
3111                 remove_migration_ptes(page, newpage, false);
3112                 unlock_page(page);
3113
3114                 if (is_zone_device_page(page))
3115                         put_page(page);
3116                 else
3117                         putback_lru_page(page);
3118
3119                 if (newpage != page) {
3120                         unlock_page(newpage);
3121                         if (is_zone_device_page(newpage))
3122                                 put_page(newpage);
3123                         else
3124                                 putback_lru_page(newpage);
3125                 }
3126         }
3127 }
3128 EXPORT_SYMBOL(migrate_vma_finalize);
3129 #endif /* CONFIG_DEVICE_PRIVATE */