Git Repo - linux.git/commitdiff
mm: move mem_cgroup_uncharge out of __page_cache_release()
author: Yang Shi <[email protected]>
Mon, 23 Sep 2019 22:38:09 +0000 (15:38 -0700)
committer: Linus Torvalds <[email protected]>
Tue, 24 Sep 2019 22:54:11 +0000 (15:54 -0700)
A later patch makes THP deferred split shrinker memcg aware, but it needs
page->mem_cgroup information in THP destructor, which is called after
mem_cgroup_uncharge() now.

So move mem_cgroup_uncharge() from __page_cache_release() to compound page
destructor, which is called by both THP and other compound pages except
HugeTLB.  And call it in __put_single_page() for single order page.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Yang Shi <[email protected]>
Suggested-by: "Kirill A . Shutemov" <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Reviewed-by: Kirill Tkhai <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Qian Cai <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mm/page_alloc.c
mm/swap.c
mm/vmscan.c

index a41436cca563297dd7dac8221bd3210e8f5cfd20..3334a769eb91e1c1cc374560125c8c64e32da979 100644 (file)
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+       mem_cgroup_uncharge(page);
        __free_pages_ok(page, compound_order(page));
 }
 
index 0226c53465604d2dc81706dfc5697c6f13490be5..784dc162062004e9b9e8540095c6fa1201e223f8 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        }
        __ClearPageWaiters(page);
-       mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
+       mem_cgroup_uncharge(page);
        free_unref_page(page);
 }
 
index c27dd62ed594205f6fd225be2883c4d1a4ad15da..c4ef8681637b6560db2983487695d5a4a6da7397 100644 (file)
@@ -1487,10 +1487,9 @@ free_it:
                 * Is there need to periodically free_page_list? It would
                 * appear not as the counts should be low
                 */
-               if (unlikely(PageTransHuge(page))) {
-                       mem_cgroup_uncharge(page);
+               if (unlikely(PageTransHuge(page)))
                        (*get_compound_page_dtor(page))(page);
-               } else
+               else
                        list_add(&page->lru, &free_pages);
                continue;
 
@@ -1911,7 +1910,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
-                               mem_cgroup_uncharge(page);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&pgdat->lru_lock);
                        } else
This page took 0.075289 seconds and 4 git commands to generate.