Git Repo - linux.git/commitdiff
mm/page_alloc: reduce duration that IRQs are disabled for VM counters
authorMel Gorman <[email protected]>
Tue, 29 Jun 2021 02:41:54 +0000 (19:41 -0700)
committerLinus Torvalds <[email protected]>
Tue, 29 Jun 2021 17:53:54 +0000 (10:53 -0700)
IRQs are left disabled for the zone and node VM event counters.  This is
unnecessary as the affected counters are allowed to race for preemption
and IRQs.

This patch reduces the scope of IRQs being disabled via
local_[lock|unlock]_irq on !PREEMPT_RT kernels.  One
__mod_zone_freepage_state is still called with IRQs disabled.  While this
could be moved out, it's not free on all architectures as some require
IRQs to be disabled for mod_zone_page_state on !PREEMPT_RT kernels.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Chuck Lever <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jesper Dangaard Brouer <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mm/page_alloc.c

index 6bb9b87cf7d5d60cdeed15682716758da3ec447b..161bcda61520a82c800dfa67b21f15d409c3383a 100644 (file)
@@ -3530,11 +3530,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        pcp = this_cpu_ptr(zone->per_cpu_pageset);
        list = &pcp->lists[migratetype];
        page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
+       local_unlock_irqrestore(&pagesets.lock, flags);
        if (page) {
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
                zone_statistics(preferred_zone, zone, 1);
        }
-       local_unlock_irqrestore(&pagesets.lock, flags);
        return page;
 }
 
@@ -3586,15 +3586,15 @@ struct page *rmqueue(struct zone *preferred_zone,
                if (!page)
                        page = __rmqueue(zone, order, migratetype, alloc_flags);
        } while (page && check_new_pages(page, order));
-       spin_unlock(&zone->lock);
        if (!page)
                goto failed;
+
        __mod_zone_freepage_state(zone, -(1 << order),
                                  get_pcppage_migratetype(page));
+       spin_unlock_irqrestore(&zone->lock, flags);
 
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
        zone_statistics(preferred_zone, zone, 1);
-       local_irq_restore(flags);
 
 out:
        /* Separate test+clear to avoid unnecessary atomics */
@@ -3607,7 +3607,7 @@ out:
        return page;
 
 failed:
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&zone->lock, flags);
        return NULL;
 }
 
@@ -5165,11 +5165,11 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                nr_populated++;
        }
 
+       local_unlock_irqrestore(&pagesets.lock, flags);
+
        __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
        zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
-       local_unlock_irqrestore(&pagesets.lock, flags);
-
        return nr_populated;
 
 failed_irq:
This page took 0.071271 seconds and 4 git commands to generate.