author     Vlastimil Babka <vbabka@suse.cz>          2019-10-06 20:58:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-10-07 18:47:20 -0400
commit     6a486c0ad4dcdee3946842c64884d2978bfe2602 (patch)
tree       7bdc3b0ed7cc789f8bccc260da91523b9e82dd00 /mm
parent     1bc63fb1272be0773e925f78c0fbd06c89701d55 (diff)
mm, sl[ou]b: improve memory accounting
Patch series "guarantee natural alignment for kmalloc()", v2.

This patch (of 2):

SLOB currently doesn't account its pages at all, so in /proc/meminfo the Slab field shows zero. Modifying a counter on page allocation and freeing should be acceptable even for the small-system scenarios SLOB is intended for. Since reclaimable caches are not separated in SLOB, account everything as unreclaimable.

SLUB currently doesn't account kmalloc() and kmalloc_node() allocations larger than an order-1 page, which are passed directly to the page allocator. As they also don't appear in /proc/slabinfo, it might look like a memory leak. For consistency, account them as well. (SLAB doesn't actually use the page allocator directly, so there is no change there.)

Ideally SLOB and SLUB would be handled in separate patches, but due to the shared kmalloc_order() function and the different kfree() implementations, it's easier to patch both at once to prevent inconsistencies.

Link: http://lkml.kernel.org/r/20190826111627.7505-2-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
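The change in all three allocators follows one pattern: charge NR_SLAB_UNRECLAIMABLE on the allocating node by 1 << order pages when the page allocator hands out the memory, and uncharge the same amount when the pages are freed. A minimal sketch of that pattern follows (illustrative only; the helper names large_kmalloc_sketch() and large_kfree_sketch() are invented for this example, and the real kmalloc_order()/kfree() paths additionally run the KASAN and kmemleak hooks visible in the diff below):

/* Illustrative sketch of the accounting pattern -- not part of the patch. */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

/* Charge the pages backing a large allocation to the node's slab counter. */
static void *large_kmalloc_sketch(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);
        struct page *page;

        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        if (!page)
                return NULL;

        /* These pages now show up under Slab/SUnreclaim in /proc/meminfo. */
        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                            1 << order);
        return page_address(page);
}

/* Uncharge the pages before handing them back to the page allocator. */
static void large_kfree_sketch(const void *ptr)
{
        struct page *page = virt_to_head_page(ptr);
        unsigned int order = compound_order(page);

        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                            -(1 << order));
        __free_pages(page, order);
}

Once the counters are updated this way, the Slab and SUnreclaim fields of /proc/meminfo reflect SLOB pages and large SLUB kmalloc() allocations, which is the user-visible effect of this patch.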
Diffstat (limited to 'mm')
-rw-r--r--   mm/slab_common.c    8
-rw-r--r--   mm/slob.c          20
-rw-r--r--   mm/slub.c          14
3 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6491c3a41805..0a94cf858aa4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1287,12 +1287,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-        void *ret;
+        void *ret = NULL;
         struct page *page;
 
         flags |= __GFP_COMP;
         page = alloc_pages(flags, order);
-        ret = page ? page_address(page) : NULL;
+        if (likely(page)) {
+                ret = page_address(page);
+                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                    1 << order);
+        }
         ret = kasan_kmalloc_large(ret, size, flags);
         /* As ret might get tagged, call kmemleak hook after KASAN. */
         kmemleak_alloc(ret, size, 1, flags);
diff --git a/mm/slob.c b/mm/slob.c
index cf377beab962..835088d55645 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-        void *page;
+        struct page *page;
 
 #ifdef CONFIG_NUMA
         if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
         if (!page)
                 return NULL;
 
+        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                            1 << order);
         return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+        struct page *sp = virt_to_page(b);
+
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += 1 << order;
-        free_pages((unsigned long)b, order);
+
+        mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                            -(1 << order));
+        __free_pages(sp, order);
 }
 
 /*
@@ -521,8 +528,13 @@ void kfree(const void *block)
                 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                 unsigned int *m = (unsigned int *)(block - align);
                 slob_free(m, *m + align);
-        } else
-                __free_pages(sp, compound_order(sp));
+        } else {
+                unsigned int order = compound_order(sp);
+                mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                                    -(1 << order));
+                __free_pages(sp, order);
+
+        }
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slub.c b/mm/slub.c
index 42c1b3af3c98..3d63ae320d31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3821,11 +3821,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
         struct page *page;
         void *ptr = NULL;
+        unsigned int order = get_order(size);
 
         flags |= __GFP_COMP;
-        page = alloc_pages_node(node, flags, get_order(size));
-        if (page)
+        page = alloc_pages_node(node, flags, order);
+        if (page) {
                 ptr = page_address(page);
+                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                    1 << order);
+        }
 
         return kmalloc_large_node_hook(ptr, size, flags);
 }
@@ -3951,9 +3955,13 @@ void kfree(const void *x)
 
         page = virt_to_head_page(x);
         if (unlikely(!PageSlab(page))) {
+                unsigned int order = compound_order(page);
+
                 BUG_ON(!PageCompound(page));
                 kfree_hook(object);
-                __free_pages(page, compound_order(page));
+                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                    -(1 << order));
+                __free_pages(page, order);
                 return;
         }
         slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);