// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>

/*
 * We want to know the real level where an entry is located ignoring any
 * folding of levels which may be happening. For example if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

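/*
 * Worked example of the folding above (a sketch, not tied to any particular
 * architecture): with both the p4d and pud levels folded (PTRS_PER_P4D == 1
 * and PTRS_PER_PUD == 1), a hole detected while iterating puds is reported
 * at the pgd level:
 *
 *	real_depth(2);	// 2 -> 1 (pud folded) -> 0 (p4d folded), returns 0
 */
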
static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		if (ops->install_pte && pte_none(ptep_get(pte))) {
			pte_t new_pte;

			err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
					       walk);
			if (err)
				break;

			set_pte_at(walk->mm, addr, pte, new_pte);
			/* Non-present before, so update the MMU cache for arches that need it. */
			if (!WARN_ON_ONCE(walk->no_vma))
				update_mmu_cache(walk->vma, addr, pte);
		} else {
			err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
			if (err)
				break;
		}
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		/*
		 * pte_offset_map() might apply user-specific validation.
		 * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
		 * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
		 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
		 */
		if (walk->mm == &init_mm || addr >= TASK_SIZE)
			pte = pte_offset_kernel(pmd, addr);
		else
			pte = pte_offset_map(pmd, addr);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			if (walk->mm != &init_mm && addr < TASK_SIZE)
				pte_unmap(pte);
		}
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			pte_unmap_unlock(pte, ptl);
		}
	}
	if (!pte)
		walk->action = ACTION_AGAIN;
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (has_install)
				err = __pte_alloc(walk->mm, pmd);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;
		if (walk->action == ACTION_CONTINUE)
			continue;

		if (!has_handler) { /* No handlers for lower page tables. */
			if (!has_install)
				continue; /* Nothing to do. */
			/*
			 * We are ONLY installing, so avoid unnecessarily
			 * splitting a present huge page.
			 */
			if (pmd_present(*pmd) &&
			    (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
				continue;
		}

		if (walk->vma)
			split_huge_pmd(walk->vma, pmd, addr);
		else if (pmd_leaf(*pmd) || !pmd_present(*pmd))
			continue; /* Nothing to do. */

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

	} while (pmd++, addr = next, addr != end);

	return err;
}

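/*
 * Sketch of a ->pmd_entry() handler coping with huge pmds, as the comment
 * above requires (hypothetical handler, for illustration only):
 *
 *	static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pmd_trans_huge(*pmd)) {
 *			// ... handle the huge pmd as a whole ...
 *			walk->action = ACTION_CONTINUE;	// skip the pte level
 *		}
 *		return 0;
 *	}
 */
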
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pmd_entry || ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (has_install)
				err = __pmd_alloc(walk->mm, pud, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;
		if (walk->action == ACTION_CONTINUE)
			continue;

		if (!has_handler) { /* No handlers for lower page tables. */
			if (!has_install)
				continue; /* Nothing to do. */
			/*
			 * We are ONLY installing, so avoid unnecessarily
			 * splitting a present huge page.
			 */
			if (pud_present(*pud) &&
			    (pud_trans_huge(*pud) || pud_devmap(*pud)))
				continue;
		}

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		else if (pud_leaf(*pud) || !pud_present(*pud))
			continue; /* Nothing to do. */

		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pud_entry || ops->pmd_entry || ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (has_install)
				err = __pud_alloc(walk->mm, p4d, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (has_handler || has_install)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (has_install)
				err = __p4d_alloc(walk->mm, pgd, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (has_handler || has_install)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	hugetlb_vma_lock_read(vma);
	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = hugetlb_walk(vma, addr & hmask, sz);
		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);
		if (err)
			break;
	} while (addr = next, addr != end);
	hugetlb_vma_unlock_read(vma);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * such a range.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}

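/*
 * Sketch of a ->test_walk() callback implementing the convention above
 * (hypothetical callback, for illustration only):
 *
 *	static int my_test_walk(unsigned long start, unsigned long end,
 *				struct mm_walk *walk)
 *	{
 *		if (walk->vma->vm_flags & VM_LOCKED)
 *			return 1;	// skip this vma, continue the walk
 *		return 0;		// walk this vma
 *	}
 */
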
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;
	bool is_hugetlb = is_vm_hugetlb_page(vma);

	/* We do not support hugetlb PTE installation. */
	if (ops->install_pte && is_hugetlb)
		return -EINVAL;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_hugetlb) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

static inline void process_mm_walk_lock(struct mm_struct *mm,
					enum page_walk_lock walk_lock)
{
	if (walk_lock == PGWALK_RDLOCK)
		mmap_assert_locked(mm);
	else
		mmap_assert_write_locked(mm);
}

static inline void process_vma_walk_lock(struct vm_area_struct *vma,
					 enum page_walk_lock walk_lock)
{
#ifdef CONFIG_PER_VMA_LOCK
	switch (walk_lock) {
	case PGWALK_WRLOCK:
		vma_start_write(vma);
		break;
	case PGWALK_WRLOCK_VERIFY:
		vma_assert_write_locked(vma);
		break;
	case PGWALK_RDLOCK:
		/* PGWALK_RDLOCK is handled by process_mm_walk_lock */
		break;
	}
#endif
}

/*
 * See the comment for walk_page_range(); this performs the heavy lifting of the
 * operation, but sets no restrictions on how the walk proceeds.
 *
 * We usually restrict the ability to install PTEs, but this functionality is
 * available to internal memory management code and provided in mm/internal.h.
 */
int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			process_vma_walk_lock(vma, ops->walk_lock);
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = find_vma(mm, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}

/*
 * Determine if the walk operations specified are permitted to be used for a
 * page table walk.
 *
 * This check is performed on all functions which are parameterised by walk
 * operations and exposed in include/linux/pagewalk.h.
 *
 * Internal memory management code can use the walk_page_range_mm() function to
 * be able to use all page walking operations.
 */
static bool check_ops_valid(const struct mm_walk_ops *ops)
{
	/*
	 * The installation of PTEs is solely under the control of memory
	 * management logic and subject to many subtle locking, security and
	 * cache considerations so we cannot permit other users to do so, and
	 * certainly not for exported symbols.
	 */
	if (ops->install_pte)
		return false;

	return true;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : the current entry was handled; continue the walk if the end
 *         address has not been reached yet.
 *  - >0 : the current entry was handled; return to the caller with this
 *         caller-specific value.
 *  - <0 : failed to handle the current entry; return to the caller with
 *         this error code.
 *
 * Before starting to walk the page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access vma data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	if (!check_ops_valid(ops))
		return -EINVAL;

	return walk_page_range_mm(mm, start, end, ops, private);
}

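/*
 * Minimal usage sketch (hypothetical callback and caller, for illustration
 * only): count the present ptes in a range.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry	= count_pte,
 *		.walk_lock	= PGWALK_RDLOCK,
 *	};
 *
 *	unsigned long count = 0;
 *
 *	mmap_read_lock(mm);
 *	walk_page_range(mm, start, end, &count_ops, &count);
 *	mmap_read_unlock(mm);
 */
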
/**
 * walk_page_range_novma - walk a range of pagetables not backed by a vma
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @pgd:	pgd to walk if different from mm->pgd
 * @private:	private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking kernel page tables or page tables for firmware.
 *
 * Note: When walking kernel page tables, the caller may need to take other
 * effective measures (the mmap lock may be insufficient) to prevent the
 * intermediate kernel page tables belonging to the specified address range
 * from being freed (e.g. during memory hot-remove).
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (!check_ops_valid(ops))
		return -EINVAL;

	/*
	 * 1) For walking the user virtual address space:
	 *
	 * The mmap lock protects the page walker from changes to the page
	 * tables during the walk. However a read lock is insufficient to
	 * protect those areas which don't have a VMA as munmap() detaches
	 * the VMAs before downgrading to a read lock and actually tearing
	 * down PTEs/page tables. In which case, the mmap write lock should
	 * be held.
	 *
	 * 2) For walking the kernel virtual address space:
	 *
	 * The kernel intermediate page tables are usually not freed, so
	 * the mmap read lock is sufficient. But there are some exceptions,
	 * e.g. memory hot-remove, in which case the mmap lock is insufficient
	 * to prevent the intermediate kernel page tables belonging to the
	 * specified address range from being freed. The caller should take
	 * other actions to prevent this race.
	 */
	if (mm == &init_mm)
		mmap_assert_locked(walk.mm);
	else
		mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);
}

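/*
 * Sketch of a kernel page table walk (hypothetical caller, for illustration
 * only); for init_mm only the mmap read lock is asserted, per the comment
 * above:
 *
 *	mmap_read_lock(&init_mm);
 *	err = walk_page_range_novma(&init_mm, start, end, &ops, NULL, NULL);
 *	mmap_read_unlock(&init_mm);
 */
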
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;
	if (!check_ops_valid(ops))
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(start, end, &walk);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (!walk.mm)
		return -EINVAL;
	if (!check_ops_valid(ops))
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping:	Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr:		Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * handled, even if the start or end of the range crosses it.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	if (!check_ops_valid(ops))
		return -EINVAL;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}

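/*
 * Clipping example for the arithmetic above (a sketch): a vma with
 * vm_pgoff == 4 spanning 8 pages, walked with first_index == 6 and nr == 4,
 * yields cba == 6 and cea == 10, i.e. the walk covers
 * [vma->vm_start + 2 * PAGE_SIZE, vma->vm_start + 6 * PAGE_SIZE).
 */
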
/**
 * folio_walk_start - walk the page tables to a folio
 * @fw: filled with information on success.
 * @vma: the VMA to walk.
 * @addr: the virtual address to use for the page table walk.
 * @flags: flags modifying which folios to walk to.
 *
 * Walk the page tables using @addr in a given @vma to a mapped folio and
 * return the folio, making sure that the page table entry referenced by
 * @addr cannot change until folio_walk_end() is called.
 *
 * By default, this function returns only folios that are not special (e.g., not
 * the zeropage) and never returns folios that are supposed to be ignored by the
 * VM as documented by vm_normal_page(). If requested, zeropages will be
 * returned as well.
 *
 * By default, this function only considers present page table entries.
 * If requested, it will also consider migration entries.
 *
 * If this function returns NULL it might either indicate "there is nothing" or
 * "there is nothing suitable".
 *
 * On success, @fw is filled and the function returns the folio while the PTL
 * is still held and folio_walk_end() must be called to clean up,
 * releasing any held locks. The returned folio must *not* be used after the
 * call to folio_walk_end(), unless a short-term folio reference is taken before
 * that call.
 *
 * @fw->page will correspond to the page that is effectively referenced by
 * @addr. However, for migration entries and shared zeropages @fw->page is
 * set to NULL. Note that large folios might be mapped by multiple page table
 * entries, and this function will always look up only a single entry as
 * specified by @addr, which might or might not cover more than a single page of
 * the returned folio.
 *
 * This function must *not* be used as a naive replacement for
 * get_user_pages() / pin_user_pages(), especially not to perform DMA or
 * to carelessly modify page content. This function may *only* be used to grab
 * short-term folio references, never to grab long-term folio references.
 *
 * Using the page table entry pointers in @fw for reading or modifying the
 * entry should be avoided where possible: however, there might be valid
 * use cases.
 *
 * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care.
 * For example, PMD page table sharing might require prior unsharing. Also,
 * logical hugetlb entries might span multiple physical page table entries,
 * which *must* be modified in a single operation (set_huge_pte_at(),
 * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might
 * not correspond to the first physical entry of a logical hugetlb entry.
 *
 * The mmap lock must be held in read mode.
 *
 * Return: folio pointer on success, otherwise NULL.
 */
struct folio *folio_walk_start(struct folio_walk *fw,
		struct vm_area_struct *vma, unsigned long addr,
		folio_walk_flags_t flags)
{
	unsigned long entry_size;
	bool expose_page = true;
	struct page *page;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	pgd_t *pgdp;
	p4d_t *p4dp;

	mmap_assert_locked(vma->vm_mm);
	vma_pgtable_walk_begin(vma);

	if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
		goto not_found;

	pgdp = pgd_offset(vma->vm_mm, addr);
	if (pgd_none_or_clear_bad(pgdp))
		goto not_found;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none_or_clear_bad(p4dp))
		goto not_found;

	pudp = pud_offset(p4dp, addr);
	pud = pudp_get(pudp);
	if (pud_none(pud))
		goto not_found;
	if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
	    (!pud_present(pud) || pud_leaf(pud))) {
		ptl = pud_lock(vma->vm_mm, pudp);
		pud = pudp_get(pudp);

		entry_size = PUD_SIZE;
		fw->level = FW_LEVEL_PUD;
		fw->pudp = pudp;
		fw->pud = pud;

		/*
		 * TODO: FW_MIGRATION support for PUD migration entries
		 * once there are relevant users.
		 */
		if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
			spin_unlock(ptl);
			goto not_found;
		} else if (!pud_leaf(pud)) {
			spin_unlock(ptl);
			goto pmd_table;
		}
		/*
		 * TODO: vm_normal_page_pud() will be handy once we want to
		 * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
		 */
		page = pud_page(pud);
		goto found;
	}

pmd_table:
	VM_WARN_ON_ONCE(!pud_present(pud) || pud_leaf(pud));
	pmdp = pmd_offset(pudp, addr);
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		goto not_found;
	if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
	    (!pmd_present(pmd) || pmd_leaf(pmd))) {
		ptl = pmd_lock(vma->vm_mm, pmdp);
		pmd = pmdp_get(pmdp);

		entry_size = PMD_SIZE;
		fw->level = FW_LEVEL_PMD;
		fw->pmdp = pmdp;
		fw->pmd = pmd;

		if (pmd_none(pmd)) {
			spin_unlock(ptl);
			goto not_found;
		} else if (pmd_present(pmd) && !pmd_leaf(pmd)) {
			spin_unlock(ptl);
			goto pte_table;
		} else if (pmd_present(pmd)) {
			page = vm_normal_page_pmd(vma, addr, pmd);
			if (page) {
				goto found;
			} else if ((flags & FW_ZEROPAGE) &&
				    is_huge_zero_pmd(pmd)) {
				page = pfn_to_page(pmd_pfn(pmd));
				expose_page = false;
				goto found;
			}
		} else if ((flags & FW_MIGRATION) &&
			   is_pmd_migration_entry(pmd)) {
			swp_entry_t entry = pmd_to_swp_entry(pmd);

			page = pfn_swap_entry_to_page(entry);
			expose_page = false;
			goto found;
		}
		spin_unlock(ptl);
		goto not_found;
	}

pte_table:
	VM_WARN_ON_ONCE(!pmd_present(pmd) || pmd_leaf(pmd));
	ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
	if (!ptep)
		goto not_found;
	pte = ptep_get(ptep);

	entry_size = PAGE_SIZE;
	fw->level = FW_LEVEL_PTE;
	fw->ptep = ptep;
	fw->pte = pte;

	if (pte_present(pte)) {
		page = vm_normal_page(vma, addr, pte);
		if (page)
			goto found;
		if ((flags & FW_ZEROPAGE) &&
		    is_zero_pfn(pte_pfn(pte))) {
			page = pfn_to_page(pte_pfn(pte));
			expose_page = false;
			goto found;
		}
	} else if (!pte_none(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if ((flags & FW_MIGRATION) &&
		    is_migration_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			expose_page = false;
			goto found;
		}
	}
	pte_unmap_unlock(ptep, ptl);
not_found:
	vma_pgtable_walk_end(vma);
	return NULL;
found:
	if (expose_page)
		/* Note: Offset from the mapped page, not the folio start. */
		fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
	else
		fw->page = NULL;
	fw->ptl = ptl;

	return page_folio(page);
}
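
/*
 * Minimal usage sketch (hypothetical caller, for illustration only), with
 * the mmap lock held in read mode:
 *
 *	struct folio_walk fw;
 *	struct folio *folio;
 *
 *	folio = folio_walk_start(&fw, vma, addr, 0);
 *	if (folio) {
 *		// inspect folio, fw.page and fw.level while fw.ptl is held
 *		folio_walk_end(&fw, vma);
 *	}
 */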