author		Roman Gushchin <guro@fb.com>	2019-07-11 23:56:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 14:05:44 -0400
commit		6cea1d569d24af6f9e95f70cb301807440ae2981
tree		627558acba0010e257fe70d28a99ad3b35ed69ba
parent		49a18eae2e98a794477b5af5d85938e430c0be72
mm: memcg/slab: unify SLAB and SLUB page accounting
Currently the page accounting code is duplicated in SLAB and SLUB
internals. Let's move it into new (un)charge_slab_page helpers in the
slab_common.c file. These helpers will be responsible for statistics
(global and memcg-aware) and memcg charging. So they are replacing
direct memcg_(un)charge_slab() calls.

Link: http://lkml.kernel.org/r/20190611231813.3148843-6-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/slab.c	19
-rw-r--r--	mm/slab.h	25
-rw-r--r--	mm/slub.c	14
3 files changed, 30 insertions(+), 28 deletions(-)
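For orientation before reading the diff: the sketch below is a minimal,
standalone C model of the pattern this patch introduces, in which both
allocators funnel slab page accounting through a single charge/uncharge
pair instead of open-coding the statistics updates. The struct
kmem_cache, the memcg stubs, and the vmstat array here are simplified
stand-ins invented for illustration only; the real helpers are the ones
added to mm/slab.h in the hunks below.

	/*
	 * Userspace sketch of the charge/uncharge pattern.  All types
	 * and stubs are simplified stand-ins, not kernel definitions.
	 */
	#include <stdio.h>

	enum node_stat_item { NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE };

	#define SLAB_RECLAIM_ACCOUNT 0x1

	struct kmem_cache { unsigned int flags; };

	static long vmstat[2];	/* stand-in for per-lruvec counters */

	/* Mirrors cache_vmstat_idx() from the mm/slab.h hunk below. */
	static inline int cache_vmstat_idx(struct kmem_cache *s)
	{
		return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
	}

	/* Stubs: the real memcg_charge_slab() may fail with -ENOMEM. */
	static int memcg_charge_slab(void) { return 0; }
	static void memcg_uncharge_slab(void) { }

	/* Charge first; update statistics only if charging succeeded. */
	static int charge_slab_page(struct kmem_cache *s, int order)
	{
		int ret = memcg_charge_slab();

		if (!ret)
			vmstat[cache_vmstat_idx(s)] += 1L << order;
		return ret;
	}

	/* Mirror image: roll back statistics, then uncharge. */
	static void uncharge_slab_page(struct kmem_cache *s, int order)
	{
		vmstat[cache_vmstat_idx(s)] -= 1L << order;
		memcg_uncharge_slab();
	}

	int main(void)
	{
		struct kmem_cache cache = { .flags = SLAB_RECLAIM_ACCOUNT };

		if (!charge_slab_page(&cache, 1))	/* a 2-page slab */
			printf("reclaimable: %ld\n", vmstat[NR_SLAB_RECLAIMABLE]);
		uncharge_slab_page(&cache, 1);
		printf("reclaimable: %ld\n", vmstat[NR_SLAB_RECLAIMABLE]);
		return 0;
	}

Running it shows the counter rise by two pages on charge and return to
zero on uncharge, mirroring the 1 << order bookkeeping in the real
helpers.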
diff --git a/mm/slab.c b/mm/slab.c
index 30347bd3f19c..e9d90b0da47b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1360,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
 	struct page *page;
-	int nr_pages;
 
 	flags |= cachep->allocflags;
 
@@ -1370,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
 		__free_pages(page, cachep->gfporder);
 		return NULL;
 	}
 
-	nr_pages = (1 << cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1395,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	int order = cachep->gfporder;
-	unsigned long nr_freed = (1 << order);
-
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
 
 	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
@@ -1409,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page->mapping = NULL;
 
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += nr_freed;
-	memcg_uncharge_slab(page, order, cachep);
+		current->reclaim_state->reclaimed_slab += 1 << order;
+	uncharge_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index dc83583ee9dd..46623a576a3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 
 /* List of all root caches. */
@@ -361,6 +367,25 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 	return page->slab_cache;
 }
 
+static __always_inline int charge_slab_page(struct page *page,
+					    gfp_t gfp, int order,
+					    struct kmem_cache *s)
+{
+	int ret = memcg_charge_slab(page, gfp, order, s);
+
+	if (!ret)
+		mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+	return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+					       struct kmem_cache *s)
+{
+	mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+	memcg_uncharge_slab(page, order, s);
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
diff --git a/mm/slub.c b/mm/slub.c
index 845aeaa6c2d4..c9541a480627 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (page && memcg_charge_slab(page, flags, order, s)) {
+	if (page && charge_slab_page(page, flags, order, s)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -1681,11 +1681,6 @@ out:
 	if (!page)
 		return NULL;
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 
 	return page;
@@ -1719,18 +1714,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		-pages);
-
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	memcg_uncharge_slab(page, order, s);
+	uncharge_slab_page(page, order, s);
 	__free_pages(page, order);
 }
 