mm/free_pcppages_bulk: do not hold lock when picking pages to free
author    Aaron Lu <[email protected]>
          Thu, 5 Apr 2018 23:24:10 +0000 (16:24 -0700)
committer Linus Torvalds <[email protected]>
          Fri, 6 Apr 2018 04:36:26 +0000 (21:36 -0700)
When freeing a batch of pages from the Per-CPU-Pages (PCP) lists back to
the buddy allocator, zone->lock is taken first and pages are then chosen
from the PCP's migratetype lists.  There is actually no need to do this
'choose' part under the lock: these are PCP pages, the only CPU that can
touch them is the local one, and irqs are disabled.
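
The shape of the change, as a simplified sketch (pick_from_pcp() is a
made-up helper for illustration; see the diff below for the real code):

  struct page *page, *tmp;

  /* before: selection and freeing both run under zone->lock */
  spin_lock(&zone->lock);
  while (count--) {
          page = pick_from_pcp(pcp);   /* only touches local PCP state */
          __free_one_page(page, page_to_pfn(page), zone, 0,
                          get_pcppage_migratetype(page));
  }
  spin_unlock(&zone->lock);

  /* after: pick with irqs off but without the lock, then take
   * zone->lock only around the part that modifies zone state
   */
  LIST_HEAD(head);

  while (count--) {
          page = pick_from_pcp(pcp);
          list_add_tail(&page->lru, &head);
  }

  spin_lock(&zone->lock);
  list_for_each_entry_safe(page, tmp, &head, lru)
          __free_one_page(page, page_to_pfn(page), zone, 0,
                          get_pcppage_migratetype(page));
  spin_unlock(&zone->lock);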

Moving this part outside the lock reduces the lock hold time and
improves performance.  Tested with will-it-scale/page_fault1 at full load:

  kernel      Broadwell(2S)  Skylake(2S)   Broadwell(4S)  Skylake(4S)
  v4.16-rc2+  9034215        7971818       13667135       15677465
  this patch  9536374 +5.6%  8314710 +4.3% 14070408 +3.0% 16675866 +6.4%

The test starts $nr_cpu processes, each of which repeatedly does the
following for 5 minutes:

 - mmap 128M of anonymous space

 - write to that space

 - munmap it

The score is the aggregated iteration count; a simplified sketch of one
iteration follows the link below.

https://github.com/antonblanchard/will-it-scale/blob/master/tests/page_fault1.c
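
A minimal user-space sketch of one iteration, based on the description
above (the real test lives at the URL; the constants and loop structure
here are illustrative):

  #include <string.h>
  #include <unistd.h>
  #include <sys/mman.h>

  #define MAP_SIZE (128UL << 20)    /* 128M, per the description above */

  static void iteration(void)
  {
          long pagesize = sysconf(_SC_PAGESIZE);
          char *c = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (c == MAP_FAILED)
                  return;

          /* write one byte per page: each write faults in a page,
           * and the munmap below sends them all back through the
           * PCP lists to the buddy allocator
           */
          for (unsigned long i = 0; i < MAP_SIZE; i += pagesize)
                  c[i] = 0;
          munmap(c, MAP_SIZE);
  }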

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Aaron Lu <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Huang Ying <[email protected]>
Cc: Kemi Wang <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08c195cdf161a822af3eee86d70cfe2e03e4a566..e29a6ba050c8087452f88d18a77d0652e56cfa5e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1080,12 +1080,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        int migratetype = 0;
        int batch_free = 0;
        bool isolated_pageblocks;
-
-       spin_lock(&zone->lock);
-       isolated_pageblocks = has_isolate_pageblock(zone);
+       struct page *page, *tmp;
+       LIST_HEAD(head);
 
        while (count) {
-               struct page *page;
                struct list_head *list;
 
                /*
@@ -1107,27 +1105,36 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        batch_free = count;
 
                do {
-                       int mt; /* migratetype of the to-be-freed page */
-
                        page = list_last_entry(list, struct page, lru);
-                       /* must delete as __free_one_page list manipulates */
+                       /* must delete to avoid corrupting pcp list */
                        list_del(&page->lru);
                        pcp->count--;
 
-                       mt = get_pcppage_migratetype(page);
-                       /* MIGRATE_ISOLATE page should not go to pcplists */
-                       VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-                       /* Pageblock could have been isolated meanwhile */
-                       if (unlikely(isolated_pageblocks))
-                               mt = get_pageblock_migratetype(page);
-
                        if (bulkfree_pcp_prepare(page))
                                continue;
 
-                       __free_one_page(page, page_to_pfn(page), zone, 0, mt);
-                       trace_mm_page_pcpu_drain(page, 0, mt);
+                       list_add_tail(&page->lru, &head);
                } while (--count && --batch_free && !list_empty(list));
        }
+
+       spin_lock(&zone->lock);
+       isolated_pageblocks = has_isolate_pageblock(zone);
+
+       /*
+        * Use safe version since after __free_one_page(),
+        * page->lru.next will not point to original list.
+        */
+       list_for_each_entry_safe(page, tmp, &head, lru) {
+               int mt = get_pcppage_migratetype(page);
+               /* MIGRATE_ISOLATE page should not go to pcplists */
+               VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+               /* Pageblock could have been isolated meanwhile */
+               if (unlikely(isolated_pageblocks))
+                       mt = get_pageblock_migratetype(page);
+
+               __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+               trace_mm_page_pcpu_drain(page, 0, mt);
+       }
        spin_unlock(&zone->lock);
 }
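
For reference, list_for_each_entry_safe() caches the next entry in the
second cursor before each body runs, so the body is free to unlink or
reuse the current entry's lru linkage; that is exactly what
__free_one_page() does when it splices the page onto a buddy free list.
A toy illustration (struct item and the loop body are made up):

  struct item {
          struct list_head lru;
  };

  LIST_HEAD(head);
  struct item *pos, *tmp;

  list_for_each_entry_safe(pos, tmp, &head, lru) {
          /* safe: 'tmp' was read before this body could clobber
           * pos->lru.next
           */
          list_del(&pos->lru);
  }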
 