// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check the movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock ruins the page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the isolated flag */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}
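
/*
 * Illustrative sketch (not part of this file): a driver whose pages should
 * take the isolate/migrate/putback path above implements movable_operations
 * and marks its pages movable with __SetPageMovable(). All "demo_*" names
 * below are hypothetical; see balloon compaction and zsmalloc for real users.
 *
 *	static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		// take a driver-private lock, unhook page from internal lists
 *		return true;
 *	}
 *
 *	static int demo_migrate_page(struct page *dst, struct page *src,
 *				     enum migrate_mode mode)
 *	{
 *		// copy contents and repoint driver metadata from src to dst
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void demo_putback_page(struct page *page)
 *	{
 *		// re-add the page to the driver's internal lists
 *	}
 *
 *	static const struct movable_operations demo_mops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 * and, on each freshly allocated page: __SetPageMovable(page, &demo_mops);
 */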

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated a non-lru movable folio, so here we can use
		 * __folio_test_movable because an LRU folio's mapping cannot
		 * have PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}
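
/*
 * Typical caller pattern (sketch): whoever isolates folios for migration is
 * responsible for putting failures back, e.g.:
 *
 *	if (migrate_pages(&pagelist, alloc, free, private, mode, reason, NULL))
 *		putback_movable_pages(&pagelist);
 *
 * where "alloc"/"free"/"private" stand in for the caller's callbacks;
 * do_move_pages_to_node() further down does exactly this.
 */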

/*
 * Restore a potential migration pte to a working pte entry.
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);
		else
			pte = pte_clear_soft_dirty(pte);

		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
						      rmap_flags);
			else
				hugetlb_add_file_rmap(folio);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				folio_add_anon_rmap_pte(folio, new, vma,
							pvmw.address, rmap_flags);
			else
				folio_add_file_rmap_pte(folio, new, vma);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}
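
/*
 * Worked example (illustrative): a dirty folio that was unmapped via a
 * writable migration entry recording young and dirty comes back through the
 * loop above as a writable PTE with pte_mkdirty() applied and pte_mkold()
 * skipped; a readable migration entry instead yields a read-only PTE and,
 * for anon folios, leaves RMAP_EXCLUSIVE unset.
 */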

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If a migration entry exists, it is safe to release the vma
		 * lock here because the pgtable page won't be freed without
		 * the pgtable lock being released. See the comment right
		 * above the pgtable lock release in
		 * migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}
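
/*
 * Worked example for folio_expected_refs(): an order-2 pagecache folio with
 * private data attached is expected to hold 1 (isolation) + 4 (page cache)
 * + 1 (private) = 6 references, while an anonymous folio without a mapping
 * holds just the single isolation reference; for order-0 pages this matches
 * the 1/2/3 rule quoted below.
 */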

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* Take off deferred split queue while frozen and memcg set */
		if (folio_test_large(folio) &&
		    folio_test_large_rmappable(folio)) {
			if (!folio_ref_freeze(folio, expected_count))
				return -EAGAIN;
			folio_undo_large_rmappable(folio);
			folio_ref_unfreeze(folio, expected_count);
		}

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/* Take off deferred split queue while frozen and memcg set */
	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
		entries = nr;
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* Swap cache still stores N entries instead of a high-order entry */
	for (i = 0; i < entries; i++) {
		xas_store(&xas, newfolio);
		xas_next(&xas);
	}

	/*
	 * Drop the cache reference from the old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = folio_expected_refs(mapping, src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_ref_add(dst, folio_nr_pages(dst));

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information.
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on
	 * effectively unmapped and unreferenced first sub-pages of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = folio_xchg_last_cpupid(folio, -1);
	/*
	 * In memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because it is used to record
	 * page access time in slow memory nodes.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(folio_nid(folio));
		bool t_toptier = node_is_toptier(folio_nid(newfolio));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	folio_xchg_last_cpupid(newfolio, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set
	 * the bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
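
/*
 * Usage note (sketch): filesystems whose folios carry no private data can
 * point their address_space_operations straight at this helper, e.g.
 * (hypothetical "demo_aops"; shmem and the swap address space do this):
 *
 *	const struct address_space_operations demo_aops = {
 *		...
 *		.migrate_folio = migrate_folio,
 *	};
 */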

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->i_private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->i_private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		folio_set_bh(bh, dst, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->i_private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
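
/*
 * Usage note (sketch): filesystems that keep private data attached to their
 * folios but need no buffer-head handling typically wire this helper into
 * their address_space_operations, e.g. (hypothetical "demo_aops"):
 *
 *	const struct address_space_operations demo_aops = {
 *		...
 *		.migrate_folio = filemap_migrate_folio,
 *	};
 */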

/*
 * Writeback a folio to clean the dirty state.
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__folio_test_movable(src);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping_unmovable(mapping))
			rc = -EOPNOTSUPP;
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of a non-lru page, it could be released after the
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__folio_test_movable(src)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare so don't reset it here; this keeps
		 * checks like PageAnon working.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

/*
 * To record some information during migration, we use the otherwise
 * unused private field of the struct folio of the newly allocated
 * destination folio. This is safe because nobody is using it except us.
 */
enum {
	PAGE_WAS_MAPPED = BIT(0),
	PAGE_WAS_MLOCKED = BIT(1),
	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

static void __migrate_folio_record(struct folio *dst,
				   int old_page_state,
				   struct anon_vma *anon_vma)
{
	dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
				    int *old_page_state,
				    struct anon_vma **anon_vmap)
{
	unsigned long private = (unsigned long)dst->private;

	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
	*old_page_state = private & PAGE_OLD_STATES;
	dst->private = NULL;
}
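
/*
 * The packing above works because anon_vma pointers come from the slab
 * allocator and are therefore at least word-aligned: bits 0 and 1 of the
 * pointer are always zero, leaving them free to carry PAGE_WAS_MAPPED and
 * PAGE_WAS_MLOCKED. Illustrative round trip:
 *
 *	anon_vma = 0x...f0, old_page_state = PAGE_WAS_MAPPED
 *	record:  dst->private = 0x...f1
 *	extract: anon_vma = 0x...f0, old_page_state = 0x1
 */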

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
		free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can also migrate non-LRU pages, which are not
	 * accounted to NR_ISOLATED_*. They can be recognized via
	 * __folio_test_movable().
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

/* Obtain the lock on the page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;
	if (folio_test_mlocked(src))
		old_page_state |= PAGE_WAS_MLOCKED;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock().
	 * File caches may use write_page() or lock_page() in migration, so
	 * just take care of anon pages here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on the refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		old_page_state |= PAGE_WAS_MAPPED;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	struct list_head *prev;

	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to the LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (old_page_state & PAGE_WAS_MLOCKED)
		lru_add_drain();

	if (old_page_state & PAGE_WAS_MAPPED)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease the refcount of dst,
	 * which will not free the page because the new page owner increased
	 * the refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, the hugetlbfs specific move page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take the
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_folio_mapping_lock_write(src);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
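
/*
 * The retry budget works out so that a synchronous caller still performs
 * NR_MAX_MIGRATE_PAGES_RETRY (10) passes in total: migrate_pages_sync()
 * first runs up to NR_MAX_MIGRATE_ASYNC_RETRY (3) async passes, then up to
 * NR_MAX_MIGRATE_SYNC_RETRY (10 - 3 = 7) synchronous passes.
 */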

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
	int nr_split;		/* Large folio (including THP) split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is the caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * The migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed folio is
				 * removed from the migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is the number of hugetlb folios that failed to be migrated. After
	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
	 * folios as failed too.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

/*
 * migrate_pages_batch() first unmaps as many folios in the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit when we have locked more than one folio. That could cause a
 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from,
		new_folio_t get_new_folio, free_folio_t put_new_folio,
		unsigned long private, enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats, int nr_pass)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	bool is_thp = false;
	bool is_large = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);

	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));

	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			is_large = folio_test_large(folio);
			is_thp = is_large && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * The rare folio on the deferred split list should
			 * be split now. It should not count as a failure:
			 * but increment nr_failed because, without doing so,
			 * migrate_pages() may report success with (split but
			 * unmigrated) pages still on its fromlist; whereas it
			 * always reports success when its fromlist is empty.
			 * stats->nr_thp_failed should be increased too,
			 * otherwise stats inconsistency will happen when
			 * migrate_pages_batch is called via migrate_pages()
			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
			 *
			 * Only check this without removing the folio from
			 * the list, since the folio can be on a
			 * deferred_split_scan() local list and removing it
			 * could corrupt that list. The folio split process
			 * below can handle it with the help of
			 * folio_ref_freeze().
			 *
			 * nr_pages > 2 is needed to avoid checking order-1
			 * page cache folios. They exist, in contrast to
			 * non-existent order-1 anonymous folios, and do not
			 * use _deferred_list.
			 */
			if (nr_pages > 2 &&
			   !list_empty(&folio->_deferred_list)) {
				if (try_split_folio(folio, split_folios) == 0) {
					nr_failed++;
					stats->nr_thp_failed += is_thp;
					stats->nr_thp_split += is_thp;
					stats->nr_split++;
					continue;
				}
			}

			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio with the large folio split
			 * to normal folios.
			 *
			 * Split folios are put in split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			if (!thp_migration_supported() && is_thp) {
				nr_failed++;
				stats->nr_thp_failed++;
				if (!try_split_folio(folio, split_folios)) {
					stats->nr_thp_split++;
					stats->nr_split++;
					continue;
				}
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
					private, folio, &dst, mode, reason,
					ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	Unmap: folio will be put on unmap_folios list,
			 *	       dst folio put on dst_folios list
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, move unmapped folios, then exit.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				/* Large folio NUMA faulting doesn't split to retry. */
				if (is_large && !nosplit) {
					int ret = try_split_folio(folio, split_folios);

					if (!ret) {
						stats->nr_thp_split += is_thp;
						stats->nr_split++;
						break;
					} else if (reason == MR_LONGTERM_PIN &&
						   ret == -EAGAIN) {
						/*
						 * Try again to split the large folio to
						 * mitigate the failure of longterm pinning.
						 */
						retry++;
						thp_retry += is_thp;
						nr_retry_pages += nr_pages;
						/* Undo duplicated failure counting. */
						nr_failed--;
						stats->nr_thp_failed -= is_thp;
						break;
					}
				}

				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated for not used */
				stats->nr_thp_failed += thp_retry;
				rc_saved = rc;
				if (list_empty(&unmap_folios))
					goto out;
				else
					goto move;
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			case MIGRATEPAGE_UNMAP:
				list_move_tail(&folio->lru, &unmap_folios);
				list_add_tail(&dst->lru, &dst_folios);
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed folio is
				 * removed from the migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;
move:
	/* Flush TLBs for all unmapped folios */
	try_to_unmap_flush();

	retry = 1;
	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		dst = list_first_entry(&dst_folios, struct folio, lru);
		dst2 = list_next_entry(dst, lru);
		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			rc = migrate_folio_move(put_new_folio, private,
						folio, dst, mode,
						reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	-EAGAIN: stay on the unmap_folios list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			default:
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
			dst = dst2;
			dst2 = list_next_entry(dst, lru);
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;

	rc = rc_saved ? : nr_failed;
out:
	/* Cleanup remaining folios */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		int old_page_state = 0;
		struct anon_vma *anon_vma = NULL;

		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
				       anon_vma, true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_folio, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}

	return rc;
}

static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats)
{
	int rc, nr_failed = 0;
	LIST_HEAD(folios);
	struct migrate_pages_stats astats;

	memset(&astats, 0, sizeof(astats));
	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
				 reason, &folios, split_folios, &astats,
				 NR_MAX_MIGRATE_ASYNC_RETRY);
	stats->nr_succeeded += astats.nr_succeeded;
	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
	stats->nr_thp_split += astats.nr_thp_split;
	stats->nr_split += astats.nr_split;
	if (rc < 0) {
		stats->nr_failed_pages += astats.nr_failed_pages;
		stats->nr_thp_failed += astats.nr_thp_failed;
		list_splice_tail(&folios, ret_folios);
		return rc;
	}
	stats->nr_thp_failed += astats.nr_thp_split;
	/*
	 * Do not count rc, as pages will be retried below.
	 * Count nr_split only, since it includes nr_thp_split.
	 */
	nr_failed += astats.nr_split;
	/*
	 * Fall back to migrating all failed folios one by one synchronously. All
	 * failed folios except split THPs will be retried, so their failure
	 * isn't counted.
	 */
	list_splice_tail_init(&folios, from);
	while (!list_empty(from)) {
		list_move(from->next, &folios);
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
					 private, mode, reason, ret_folios,
					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&folios, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}

	return nr_failed;
}

/*
 * migrate_pages - migrate the folios specified in a list, to the free folios
 *		   supplied as the target for the page migration
 *
 * @from:		The list of folios to be migrated.
 * @get_new_folio:	The function used to allocate free folios to be used
 *			as the target of the folio migration.
 * @put_new_folio:	The function used to free target folios if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_folio()
 * @mode:		The migration mode that specifies the constraints for
 *			folio migration, if any.
 * @reason:		The reason for folio migration.
 * @ret_succeeded:	Set to the number of folios migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
 * are movable any more because the list has become empty or no retryable folios
 * exist any more. It is the caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 *
 * Returns the number of {normal folio, large folio, hugetlb} that were not
 * migrated, or an error code. The number of large folio splits will be
 * counted as the number of non-migrated large folios, no matter how many
 * split folios of the large folio are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int rc, rc_gather;
	int nr_pages;
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;

	trace_mm_migrate_pages_start(mode, reason);

	memset(&stats, 0, sizeof(stats));

	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
				     mode, reason, &stats, &ret_folios);
	if (rc_gather < 0)
		goto out;

again:
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* Retried hugetlb folios will be kept in list  */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;
	}
	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
		list_cut_before(&folios, from, &folio2->lru);
	else
		list_splice_init(from, &folios);
	if (mode == MIGRATE_ASYNC)
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats,
				NR_MAX_MIGRATE_PAGES_RETRY);
	else
		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
	if (!list_empty(&split_folios)) {
		/*
		 * Failure isn't counted since all split folios of a large folio
		 * are counted as 1 failure already. And, we only try to migrate
		 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
		 */
		migrate_pages_batch(&split_folios, get_new_folio,
				put_new_folio, private, MIGRATE_ASYNC, reason,
				&ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
out:
	/*
	 * Put the permanent-failure folios back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_folios, from);

	/*
	 * Return 0 in case all split folios of fail-to-migrate large folios
	 * are migrated successfully.
	 */
	if (list_empty(from))
		rc_gather = 0;

	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
			       stats.nr_thp_succeeded, stats.nr_thp_failed,
			       stats.nr_thp_split, stats.nr_split, mode,
			       reason);

	if (ret_succeeded)
		*ret_succeeded = stats.nr_succeeded;

	return rc_gather;
}

struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(src);

	if (folio_test_hugetlb(src)) {
		struct hstate *h = folio_hstate(src);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_hugetlb_folio_nodemask(h, nid,
						mtc->nmask, gfp_mask,
						htlb_allow_alloc_fallback(mtc->reason));
	}

	if (folio_test_large(src)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(src);
	}
	zidx = zone_idx(folio_zone(src));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}
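
/*
 * Usage sketch: callers describe the migration target in a
 * migration_target_control and pass it through the opaque "private"
 * argument, exactly as do_move_pages_to_node() does below:
 *
 *	struct migration_target_control mtc = {
 *		.nid = node,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 */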

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
		.reason = MR_SYSCALL,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct page *page;
	struct folio *folio;
	int err;

	mmap_read_lock(mm);
	addr = (unsigned long)untagged_addr_remote(mm, p);

	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	folio = page_folio(page);
	if (folio_is_zone_device(folio))
		goto out_putfolio;

	err = 0;
	if (folio_nid(folio) == node)
		goto out_putfolio;

	err = -EACCES;
	if (folio_likely_mapped_shared(folio) && !migrate_all)
		goto out_putfolio;

	err = -EBUSY;
	if (folio_test_hugetlb(folio)) {
		if (isolate_hugetlb(folio, pagelist))
			err = 1;
	} else {
		if (!folio_isolate_lru(folio))
			goto out_putfolio;

		err = 1;
		list_add_tail(&folio->lru, pagelist);
		node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			folio_nr_pages(folio));
	}
out_putfolio:
	/*
	 * Either remove the duplicate refcount from folio_isolate_lru()
	 * or drop the folio ref if it was not isolated.
	 */
	folio_put(folio);
out:
	mmap_read_unlock(mm);
	return err;
}
static int move_pages_and_store_status(int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed to
		 * migrate.  Since we are going to abort and return the
		 * number of non-migrated pages, we need to include the rest
		 * of the nr_pages that have not been attempted as well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}
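/*
 * Worked example of the accounting above (made-up numbers): with
 * nr_pages == 8, a batch starting at start == 2 and aborting at i == 5,
 * a return of 2 from do_move_pages_to_node() becomes 2 + (8 - 5) = 5:
 * the two pages that failed plus the three that were never attempted.
 */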
/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	compat_uptr_t __user *compat_pages = (void __user *)pages;
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		int node;

		err = -EFAULT;
		if (in_compat_syscall()) {
			compat_uptr_t cp;

			if (get_user(cp, compat_pages + i))
				goto out_flush;

			p = compat_ptr(cp);
		} else {
			if (get_user(p, pages + i))
				goto out_flush;
		}
		if (get_user(node, nodes + i))
			goto out_flush;

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, p, current_node, &pagelist,
					     flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}
/*
 * Determine the nodes of an array of pages and store them in an array of
 * status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}
static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}
/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}
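/*
 * This is the query mode of move_pages(2): when the syscall's 'nodes'
 * argument is NULL, kernel_move_pages() below calls do_pages_stat() and
 * each page's current node is reported via 'status' without migrating
 * anything.
 */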
static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}
/*
 * Move a list of pages in the address space of the process identified by
 * @pid (or of the currently executing process when @pid is 0).
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
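/*
 * A minimal userspace sketch of both modes of this syscall (illustrative
 * only, error handling omitted); the move_pages() wrapper comes from
 * libnuma (link with -lnuma), see move_pages(2). 'addr0' and 'addr1' are
 * placeholders for page-aligned addresses in the caller's address space:
 *
 *	#include <numaif.h>
 *
 *	void *pages[2] = { addr0, addr1 };
 *	int nodes[2] = { 1, 1 };
 *	int status[2];
 *
 *	// Move both pages of the calling process (pid 0) to node 1.
 *	move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE);
 *
 *	// Query mode: a NULL 'nodes' reports each page's current node.
 *	move_pages(0, 2, pages, NULL, status, 0);
 */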
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}
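/*
 * Worked example with made-up numbers: if a zone's high watermark is
 * 1024 pages, migrating 512 pages to it is only treated as safe when at
 * least 1024 + 512 pages are free, so completing the migration cannot
 * drop the zone below its high watermark and wake kswapd.
 */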
static struct folio *alloc_misplaced_dst_folio(struct folio *src,
					   unsigned long data)
{
	int nid = (int) data;
	int order = folio_order(src);
	gfp_t gfp = __GFP_THISNODE;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	return __folio_alloc_node(gfp, order, nid);
}
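/*
 * The gfp choices above are deliberate: __GFP_THISNODE makes the
 * allocation fail outright rather than fall back to yet another node,
 * since a misplaced folio is better left alone than moved to a third
 * node, and clearing __GFP_RECLAIM keeps a NUMA hinting fault from
 * stalling in reclaim just to move one folio.
 */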
static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}

		/*
		 * If there are no managed zones, it should not proceed
		 * further.
		 */
		if (z < 0)
			return 0;

		wakeup_kswapd(pgdat->node_zones + z, 0,
			      folio_order(folio), ZONE_MOVABLE);
		return 0;
	}

	if (!folio_isolate_lru(folio))
		return 0;

	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
			    nr_pages);

	/*
	 * Isolating the folio has taken another reference, so the
	 * caller's reference can be safely dropped without the folio
	 * disappearing underneath us during migration.
	 */
	return 1;
}
/*
 * Attempt to migrate a misplaced folio to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the folio that will be dropped by this function before returning.
 */
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			    int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = folio_nr_pages(folio);

	/*
	 * Don't migrate file folios that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 *
	 * See folio_likely_mapped_shared() on possible imprecision when we
	 * cannot easily detect if a folio is shared.
	 */
	if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty folios as not all filesystems can move
	 * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
	 */
	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
		goto out;

	isolated = numamigrate_isolate_folio(pgdat, folio);
	if (!isolated)
		goto out;

	list_add(&folio->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&folio->lru);
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -nr_pages);
			folio_putback_lru(folio);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	folio_put(folio);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */