mm: hugetlb: kill set_huge_swap_pte_at()
author     Qi Zheng <[email protected]>
           Sun, 26 Jun 2022 14:57:17 +0000 (22:57 +0800)
committer  akpm <[email protected]>
           Mon, 4 Jul 2022 01:08:50 +0000 (18:08 -0700)
Commit e5251fd43007 ("mm/hugetlb: introduce set_huge_swap_pte_at()
helper") added set_huge_swap_pte_at() to handle swap entries on
architectures that support hugepages consisting of contiguous ptes.
Currently, set_huge_swap_pte_at() is only overridden by arm64.

set_huge_swap_pte_at() provides an sz parameter to help determine the
number of entries to be updated.  But in fact, all hugetlb swap entries
contain pfn information, so we can find the corresponding folio through
the pfn recorded in the swap entry, and folio_size() then gives us the
size from which the number of entries to update is derived.
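
For illustration, a minimal sketch of this lookup, mirroring the helper
this patch adds to arch/arm64/mm/hugetlbpage.c below (names are taken
from the patch itself, not a new API):

	/* Recover the folio backing a hugetlb swap entry via its pfn. */
	static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
	{
		/* Only migration and hwpoison entries reach here with a valid pfn. */
		VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));

		return page_folio(pfn_to_page(swp_offset(entry)));
	}

folio_size() on the result then replaces the old sz argument, as in
ncontig = num_contig_ptes(folio_size(folio), &pgsize).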

Also, considering that users can easily introduce bugs by overlooking
the difference between set_huge_swap_pte_at() and set_huge_pte_at(),
let's handle swap entries in set_huge_pte_at() and remove
set_huge_swap_pte_at().  Then we can call set_huge_pte_at() anywhere,
which simplifies our coding.
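
The caller-side effect is visible in the diffs below: the extra size
argument simply drops away.  For example, in hugetlb_change_protection():

	/* before */
	set_huge_swap_pte_at(mm, address, ptep, newpte, psize);

	/* after: swap entries are now handled inside set_huge_pte_at() */
	set_huge_pte_at(mm, address, ptep, newpte);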

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Qi Zheng <[email protected]>
Acked-by: Muchun Song <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
arch/arm64/include/asm/hugetlb.h
arch/arm64/mm/hugetlbpage.c
include/linux/hugetlb.h
mm/hugetlb.c
mm/rmap.c

index 1fd2846dbefeb9bc9c8ed512a718aeca01bad99e..d20f5da2d76fa3ef876e99b14c28705da0d0ace1 100644 (file)
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -46,9 +46,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_GET
 extern pte_t huge_ptep_get(pte_t *ptep);
-extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
-                                pte_t *ptep, pte_t pte, unsigned long sz);
-#define set_huge_swap_pte_at set_huge_swap_pte_at
 
 void __init arm64_hugetlb_cma_reserve(void);
 
index e2a5ec9fdc0db0289c567090acbc11280e758fc1..3be8f25aa5bea5c7deb3b80bde6e34d8a83e6228 100644 (file)
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -238,6 +238,13 @@ static void clear_flush(struct mm_struct *mm,
        flush_tlb_range(&vma, saddr, addr);
 }
 
+static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
+{
+       VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
+
+       return page_folio(pfn_to_page(swp_offset(entry)));
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte)
 {
@@ -247,11 +254,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
 
-       /*
-        * Code needs to be expanded to handle huge swap and migration
-        * entries. Needed for HUGETLB and MEMORY_FAILURE.
-        */
-       WARN_ON(!pte_present(pte));
+       if (!pte_present(pte)) {
+               struct folio *folio;
+
+               folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
+               ncontig = num_contig_ptes(folio_size(folio), &pgsize);
+
+               for (i = 0; i < ncontig; i++, ptep++)
+                       set_pte_at(mm, addr, ptep, pte);
+               return;
+       }
 
        if (!pte_cont(pte)) {
                set_pte_at(mm, addr, ptep, pte);
@@ -269,18 +281,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
 }
 
-void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
-                         pte_t *ptep, pte_t pte, unsigned long sz)
-{
-       int i, ncontig;
-       size_t pgsize;
-
-       ncontig = num_contig_ptes(sz, &pgsize);
-
-       for (i = 0; i < ncontig; i++, ptep++)
-               set_pte(ptep, pte);
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
 {
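
For context, num_contig_ptes() (already present in
arch/arm64/mm/hugetlbpage.c and unchanged by this patch) maps a hugepage
size to the number of page-table entries backing it.  A simplified
sketch (the in-tree version also handles folded page tables and checks
pud_sect_supported()):

	static int num_contig_ptes(unsigned long size, size_t *pgsize)
	{
		int contig_ptes = 0;

		*pgsize = size;

		switch (size) {
		case PUD_SIZE:
		case PMD_SIZE:
			contig_ptes = 1;		/* single block entry */
			break;
		case CONT_PMD_SIZE:
			*pgsize = PMD_SIZE;
			contig_ptes = CONT_PMDS;	/* contiguous run of PMDs */
			break;
		case CONT_PTE_SIZE:
			*pgsize = PAGE_SIZE;
			contig_ptes = CONT_PTES;	/* contiguous run of PTEs */
			break;
		}

		return contig_ptes;
	}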
index 756b66ff025e5f8283d83df74e5bc51041d593a7..c6cccfaf8708b83c87c192d67d25ee634ebc8cd8 100644 (file)
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -903,14 +903,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
        atomic_long_sub(l, &mm->hugetlb_usage);
 }
 
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
-                                       pte_t *ptep, pte_t pte, unsigned long sz)
-{
-       set_huge_pte_at(mm, addr, ptep, pte);
-}
-#endif
-
 #ifndef huge_ptep_modify_prot_start
 #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
 static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
@@ -1094,11 +1086,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 {
 }
 
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
-                                       pte_t *ptep, pte_t pte, unsigned long sz)
-{
-}
-
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
 {
index 65454896f17479bbac5d1a533eaa76d437e68e8d..064da8ffbac6a7c32281c8c1c4c73479114fd294 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4798,12 +4798,11 @@ again:
                                entry = swp_entry_to_pte(swp_entry);
                                if (userfaultfd_wp(src_vma) && uffd_wp)
                                        entry = huge_pte_mkuffd_wp(entry);
-                               set_huge_swap_pte_at(src, addr, src_pte,
-                                                    entry, sz);
+                               set_huge_pte_at(src, addr, src_pte, entry);
                        }
                        if (!userfaultfd_wp(dst_vma) && uffd_wp)
                                entry = huge_pte_clear_uffd_wp(entry);
-                       set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
                } else if (unlikely(is_pte_marker(entry))) {
                        /*
                         * We copy the pte marker only if the dst vma has
@@ -6344,8 +6343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                                        newpte = pte_swp_mkuffd_wp(newpte);
                                else if (uffd_wp_resolve)
                                        newpte = pte_swp_clear_uffd_wp(newpte);
-                               set_huge_swap_pte_at(mm, address, ptep,
-                                                    newpte, psize);
+                               set_huge_pte_at(mm, address, ptep, newpte);
                                pages++;
                        }
                        spin_unlock(ptl);
index 56134cdc5ca364cb584965b634d3b993ae849289..83172ee0ea354b342e5e5a4b45279880a4761fc0 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1618,9 +1618,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (folio_test_hugetlb(folio)) {
                                hugetlb_count_sub(folio_nr_pages(folio), mm);
-                               set_huge_swap_pte_at(mm, address,
-                                                    pvmw.pte, pteval,
-                                                    vma_mmu_pagesize(vma));
+                               set_huge_pte_at(mm, address, pvmw.pte, pteval);
                        } else {
                                dec_mm_counter(mm, mm_counter(&folio->page));
                                set_pte_at(mm, address, pvmw.pte, pteval);
@@ -2004,9 +2002,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (folio_test_hugetlb(folio)) {
                                hugetlb_count_sub(folio_nr_pages(folio), mm);
-                               set_huge_swap_pte_at(mm, address,
-                                                    pvmw.pte, pteval,
-                                                    vma_mmu_pagesize(vma));
+                               set_huge_pte_at(mm, address, pvmw.pte, pteval);
                        } else {
                                dec_mm_counter(mm, mm_counter(&folio->page));
                                set_pte_at(mm, address, pvmw.pte, pteval);
@@ -2074,8 +2070,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        if (pte_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        if (folio_test_hugetlb(folio))
-                               set_huge_swap_pte_at(mm, address, pvmw.pte,
-                                                    swp_pte, vma_mmu_pagesize(vma));
+                               set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
                        else
                                set_pte_at(mm, address, pvmw.pte, swp_pte);
                        trace_set_migration_pte(address, pte_val(swp_pte),