Git Repo - linux.git/commitdiff
mm: add page_rmappable_folio() wrapper
authorHugh Dickins <[email protected]>
Tue, 3 Oct 2023 09:25:33 +0000 (02:25 -0700)
committerAndrew Morton <[email protected]>
Wed, 25 Oct 2023 23:47:16 +0000 (16:47 -0700)
folio_prep_large_rmappable() is being used repeatedly along with a
conversion from page to folio, a check for non-NULL, and a check for
order > 1: wrap it all up into struct folio *page_rmappable_folio(struct page *).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Hugh Dickins <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: "Huang, Ying" <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: Sidhartha Kumar <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vishal Moola (Oracle) <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Yosry Ahmed <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
mm/internal.h
mm/mempolicy.c
mm/page_alloc.c

index 3eceae1ec4c0cac5747605ecbe87ee20a76e83f9..b61034bd50f5f88a2e7092d217280bdd57862707 100644 (file)
@@ -415,6 +415,15 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 
 void folio_undo_large_rmappable(struct folio *folio);
 
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+       struct folio *folio = (struct folio *)page;
+
+       if (folio && folio_order(folio) > 1)
+               folio_prep_large_rmappable(folio);
+       return folio;
+}
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
        struct folio *folio = (struct folio *)page;
index e01d3f807f77312d6eb2fb7677a4146ca21a6674..596d580f92d1aa30907fb4140c671b7989f3fb1b 100644 (file)
@@ -2122,10 +2122,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
                mpol_cond_put(pol);
                gfp |= __GFP_COMP;
                page = alloc_page_interleave(gfp, order, nid);
-               folio = (struct folio *)page;
-               if (folio && order > 1)
-                       folio_prep_large_rmappable(folio);
-               goto out;
+               return page_rmappable_folio(page);
        }
 
        if (pol->mode == MPOL_PREFERRED_MANY) {
@@ -2135,10 +2132,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
                gfp |= __GFP_COMP;
                page = alloc_pages_preferred_many(gfp, order, node, pol);
                mpol_cond_put(pol);
-               folio = (struct folio *)page;
-               if (folio && order > 1)
-                       folio_prep_large_rmappable(folio);
-               goto out;
+               return page_rmappable_folio(page);
        }
 
        if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
@@ -2232,12 +2226,7 @@ EXPORT_SYMBOL(alloc_pages);
 
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
-       struct page *page = alloc_pages(gfp | __GFP_COMP, order);
-       struct folio *folio = (struct folio *)page;
-
-       if (folio && order > 1)
-               folio_prep_large_rmappable(folio);
-       return folio;
+       return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
 }
 EXPORT_SYMBOL(folio_alloc);
 
index 5f46c7f85cb1bcaf1012cbff109a1473978f7f55..733732e7e0ba73822f079a257fdad9eddd579652 100644 (file)
@@ -4598,12 +4598,8 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
                nodemask_t *nodemask)
 {
        struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
-                       preferred_nid, nodemask);
-       struct folio *folio = (struct folio *)page;
-
-       if (folio && order > 1)
-               folio_prep_large_rmappable(folio);
-       return folio;
+                                       preferred_nid, nodemask);
+       return page_rmappable_folio(page);
 }
 EXPORT_SYMBOL(__folio_alloc);
 
This page took 0.075569 seconds and 4 git commands to generate.