mm, sl[ou]b: improve memory accounting
author     Vlastimil Babka <[email protected]>
           Mon, 7 Oct 2019 00:58:42 +0000 (17:58 -0700)
committer  Linus Torvalds <[email protected]>
           Mon, 7 Oct 2019 22:47:20 +0000 (15:47 -0700)
Patch series "guarantee natural alignment for kmalloc()", v2.

This patch (of 2):

SLOB currently doesn't account its pages at all, so in /proc/meminfo the
Slab field shows zero.  Modifying a counter on page allocation and
freeing should be acceptable even for the small system scenarios SLOB is
intended for.  Since reclaimable caches are not separated in SLOB,
account everything as unreclaimable.
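
For illustration, the whole change reduces to pairing an increment with a
decrement of the per-node NR_SLAB_UNRECLAIMABLE counter around the page
allocator calls.  A minimal sketch of that pattern (condensed from the
mm/slob.c hunks below; the sketch_* names are placeholders and NUMA handling
is omitted):

    /* Sketch only: mirrors the slob_new_pages()/slob_free_pages() change. */
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmstat.h>

    static void *sketch_alloc_slab_pages(gfp_t gfp, int order)
    {
            struct page *page = alloc_pages(gfp, order);

            if (!page)
                    return NULL;
            /* account 2^order pages as unreclaimable slab on the page's node */
            mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                                1 << order);
            return page_address(page);
    }

    static void sketch_free_slab_pages(void *b, int order)
    {
            struct page *page = virt_to_page(b);

            /* undo the accounting before handing the pages back */
            mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                                -(1 << order));
            __free_pages(page, order);
    }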

SLUB currently doesn't account kmalloc() and kmalloc_node() allocations
larger than an order-1 page, which are passed directly to the page
allocator.  As they also don't appear in /proc/slabinfo, it might look
like a memory leak.  For consistency, account them as well.  (SLAB
doesn't actually use the page allocator directly, so no change there.)
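
The "memory leak" appearance comes from the fact that the Slab line in
/proc/meminfo is derived from exactly these per-node counters, so pages that
bypass the accounting are invisible there.  A rough sketch of that
relationship (loosely modeled on fs/proc/meminfo.c of this era; the helper
name is hypothetical):

    /* Sketch: how the counters touched by this patch surface as "Slab:" in
     * /proc/meminfo.  Large kmalloc pages that were never accounted simply
     * did not show up in this sum.
     */
    static unsigned long sketch_meminfo_slab_kb(void)
    {
            unsigned long pages;

            pages = global_node_page_state(NR_SLAB_RECLAIMABLE) +
                    global_node_page_state(NR_SLAB_UNRECLAIMABLE);
            return pages << (PAGE_SHIFT - 10);      /* pages -> kB */
    }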

Ideally SLOB and SLUB would be handled in separate patches, but due to
the shared kmalloc_order() function and different kfree()
implementations, it's easier to patch both at once to prevent
inconsistencies.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vlastimil Babka <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Ming Lei <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: "Darrick J . Wong" <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: James Bottomley <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mm/slab_common.c
mm/slob.c
mm/slub.c

index 6491c3a418053870ae600830f82fa7f72e16a58d..0a94cf858aa4f428352ddf8e1cb238f947ad10ba 100644 (file)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1287,12 +1287,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-       void *ret;
+       void *ret = NULL;
        struct page *page;
 
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
-       ret = page ? page_address(page) : NULL;
+       if (likely(page)) {
+               ret = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ret, size, 1, flags);
index cf377beab96212bc8e717eaabfac6b263b9108cf..835088d55645af9138cece374ac9f48c0e2d1e81 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-       void *page;
+       struct page *page;
 
 #ifdef CONFIG_NUMA
        if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
        if (!page)
                return NULL;
 
+       mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                           1 << order);
        return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+       struct page *sp = virt_to_page(b);
+
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       free_pages((unsigned long)b, order);
+
+       mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                           -(1 << order));
+       __free_pages(sp, order);
 }
 
 /*
@@ -521,8 +528,13 @@ void kfree(const void *block)
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
-       } else
-               __free_pages(sp, compound_order(sp));
+       } else {
+               unsigned int order = compound_order(sp);
+               mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(sp, order);
+
+       }
 }
 EXPORT_SYMBOL(kfree);
 
index 42c1b3af3c9805fd6ae0e7028aa614fac979f433..3d63ae320d31bb07a667ead9ee90768d234c155d 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3821,11 +3821,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
        struct page *page;
        void *ptr = NULL;
+       unsigned int order = get_order(size);
 
        flags |= __GFP_COMP;
-       page = alloc_pages_node(node, flags, get_order(size));
-       if (page)
+       page = alloc_pages_node(node, flags, order);
+       if (page) {
                ptr = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
 
        return kmalloc_large_node_hook(ptr, size, flags);
 }
@@ -3951,9 +3955,13 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
+               unsigned int order = compound_order(page);
+
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               __free_pages(page, compound_order(page));
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(page, order);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
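
As a quick way to see the effect of the SLUB change, one could compare the
counter around a large kmalloc(); a hypothetical test-module sketch (module
and function names are illustrative, and it assumes SLUB with 4K pages so a
16 KiB request bypasses the kmalloc caches):

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/vmstat.h>

    static int __init slab_acct_demo_init(void)
    {
            unsigned long before, after;
            void *p;

            before = global_node_page_state(NR_SLAB_UNRECLAIMABLE);
            p = kmalloc(16 * 1024, GFP_KERNEL);  /* > order-1, served by the page allocator */
            after = global_node_page_state(NR_SLAB_UNRECLAIMABLE);
            pr_info("NR_SLAB_UNRECLAIMABLE: %lu -> %lu pages\n", before, after);
            kfree(p);
            return 0;
    }
    module_init(slab_acct_demo_init);

    static void __exit slab_acct_demo_exit(void)
    {
    }
    module_exit(slab_acct_demo_exit);

    MODULE_LICENSE("GPL");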