mm: hugetlb: improve the handling of hugetlb allocation failure for freed or in-use...
author Baolin Wang <[email protected]>
Tue, 6 Feb 2024 03:08:11 +0000 (11:08 +0800)
committer Andrew Morton <[email protected]>
Thu, 22 Feb 2024 18:24:55 +0000 (10:24 -0800)
alloc_and_dissolve_hugetlb_folio() preallocates a new hugetlb page before
it takes hugetlb_lock.  In 3 out of 4 cases the page is not really used
and therefore the newly allocated page is just freed right away.  This is
wasteful and it might cause premature failures in those cases.

Address that by moving the allocation down to the only case that actually
needs it (the hugetlb page really is in the free pages pool).  We need to
drop hugetlb_lock to do the allocation and therefore have to recheck the
page state after reacquiring it.
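
For illustration only, here is a minimal user-space sketch of the "drop the
lock, allocate, retake and recheck" pattern the patch adopts.  It is plain C
with pthreads rather than kernel code, and every name in it (pool_lock,
old_page_is_free, replace_free_page) is hypothetical, not part of hugetlb:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool old_page_is_free;	/* may change whenever the lock is dropped */

	static int replace_free_page(void)
	{
		void *new_page = NULL;
		int ret = 0;

	retry:
		pthread_mutex_lock(&pool_lock);
		if (!old_page_is_free) {
			/* Nothing to replace; a preallocation would have been wasted. */
			goto out;
		}
		if (!new_page) {
			/*
			 * Allocate only now that we know a replacement is needed.
			 * The lock must be dropped to allocate, so the state can
			 * change underneath us: retake the lock and recheck.
			 */
			pthread_mutex_unlock(&pool_lock);
			new_page = malloc(4096);
			if (!new_page)
				return -1;	/* the -ENOMEM case */
			goto retry;
		}
		/* Still free and a replacement is in hand: swap it in under the lock. */
		old_page_is_free = false;
		new_page = NULL;		/* consumed by the pool */
	out:
		pthread_mutex_unlock(&pool_lock);
		free(new_page);			/* drop an unused preallocation, if any */
		return ret;
	}

	int main(void)
	{
		old_page_is_free = true;
		return replace_free_page();
	}

The point mirrored from the patch is that the allocation only happens on the
path that actually consumes it; on every other exit path nothing was
allocated, so nothing has to be thrown away.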

The patch is more of a cleanup than a fix for an existing problem.  There
are no known reports of premature failures.

Link: https://lkml.kernel.org/r/62890fd60b1ecd5bf1cdc476c973f60fe37aa0cb.1707181934.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Muchun Song <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Oscar Salvador <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
mm/hugetlb.c

index 44f1e6366d049485a4a171476aaa9e4fdcaa68c6..3d651fc1dd668141aeb10ca95cb65c58070e7877 100644
@@ -3029,21 +3029,9 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 {
        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
        int nid = folio_nid(old_folio);
-       struct folio *new_folio;
+       struct folio *new_folio = NULL;
        int ret = 0;
 
-       /*
-        * Before dissolving the folio, we need to allocate a new one for the
-        * pool to remain stable.  Here, we allocate the folio and 'prep' it
-        * by doing everything but actually updating counters and adding to
-        * the pool.  This simplifies and let us do most of the processing
-        * under the lock.
-        */
-       new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
-       if (!new_folio)
-               return -ENOMEM;
-       __prep_new_hugetlb_folio(h, new_folio);
-
 retry:
        spin_lock_irq(&hugetlb_lock);
        if (!folio_test_hugetlb(old_folio)) {
@@ -3073,6 +3061,16 @@ retry:
                cond_resched();
                goto retry;
        } else {
+               if (!new_folio) {
+                       spin_unlock_irq(&hugetlb_lock);
+                       new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
+                                                             NULL, NULL);
+                       if (!new_folio)
+                               return -ENOMEM;
+                       __prep_new_hugetlb_folio(h, new_folio);
+                       goto retry;
+               }
+
                /*
                 * Ok, old_folio is still a genuine free hugepage. Remove it from
                 * the freelist and decrease the counters. These will be
@@ -3100,9 +3098,11 @@ retry:
 
 free_new:
        spin_unlock_irq(&hugetlb_lock);
-       /* Folio has a zero ref count, but needs a ref to be freed */
-       folio_ref_unfreeze(new_folio, 1);
-       update_and_free_hugetlb_folio(h, new_folio, false);
+       if (new_folio) {
+               /* Folio has a zero ref count, but needs a ref to be freed */
+               folio_ref_unfreeze(new_folio, 1);
+               update_and_free_hugetlb_folio(h, new_folio, false);
+       }
 
        return ret;
 }