-rw-r--r--  include/linux/memcontrol.h |  4
-rw-r--r--  mm/memcontrol.c            | 22
-rw-r--r--  mm/slab.c                  |  2
-rw-r--r--  mm/slab.h                  | 25
-rw-r--r--  mm/slub.c                  |  2
5 files changed, 24 insertions(+), 31 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 087a45314181..d38d190f4cec 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -506,8 +506,8 @@ void memcg_update_array_size(int num_groups);
 struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
 
 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
 
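
This hunk replaces the exported per-byte kmem charging entry points with per-slab-page helpers: instead of passing a mem_cgroup and a raw byte count, callers hand over the kmem_cache and the page allocation order, and the helper looks up the memcg and derives the size itself. A call-site conversion (mirroring the mm/slab.h changes further down) looks roughly like this; the fragment is illustrative, not a quote of any one call site:

	/* before: the caller resolved the memcg and computed the byte count */
	ret = memcg_charge_kmem(s->memcg_params->memcg, gfp, PAGE_SIZE << order);
	...
	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);

	/* after: pass the cache and the order; the helper does the rest */
	ret = __memcg_charge_slab(s, gfp, order);
	...
	__memcg_uncharge_slab(s, order);
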
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6b1c45ced733..86a2078805e5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2954,7 +2954,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
 }
 #endif
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
+static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
 	int ret = 0;
@@ -2992,7 +2992,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	return ret;
 }
 
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
+static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 {
 	res_counter_uncharge(&memcg->res, size);
 	if (do_swap_account)
@@ -3390,6 +3390,24 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 	__memcg_create_cache_enqueue(memcg, cachep);
 	memcg_resume_kmem_account();
 }
+
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
+{
+	int res;
+
+	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
+				PAGE_SIZE << order);
+	if (!res)
+		atomic_add(1 << order, &cachep->memcg_params->nr_pages);
+	return res;
+}
+
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
+{
+	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
+	atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
+}
+
 /*
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
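
Taken together, the two new helpers tie the memcg res_counter charge and the per-cache page counter to one another: PAGE_SIZE << order bytes are charged first, and only if that succeeds is 1 << order added to memcg_params->nr_pages; the uncharge path reverses both. The behaviour can be modelled in ordinary user space; the sketch below is purely illustrative (toy_cache, charge_slab(), and the byte limit are stand-ins, not kernel structures or API):

	#include <stdatomic.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* toy stand-ins for the memcg res_counter and memcg_params->nr_pages */
	struct toy_cache {
		unsigned long charged_bytes;	/* bytes accounted so far */
		unsigned long limit_bytes;	/* models the kmem limit */
		atomic_long nr_pages;		/* slab pages owned by the cache */
	};

	/* models __memcg_charge_slab(): charge bytes first, count pages on success */
	static int charge_slab(struct toy_cache *c, int order)
	{
		unsigned long size = PAGE_SIZE << order;

		if (c->charged_bytes + size > c->limit_bytes)
			return -1;	/* models res_counter_charge() failing */
		c->charged_bytes += size;
		atomic_fetch_add(&c->nr_pages, 1L << order);
		return 0;
	}

	/* models __memcg_uncharge_slab(): drop both the bytes and the pages */
	static void uncharge_slab(struct toy_cache *c, int order)
	{
		c->charged_bytes -= PAGE_SIZE << order;
		atomic_fetch_sub(&c->nr_pages, 1L << order);
	}

	int main(void)
	{
		struct toy_cache c = { .limit_bytes = 8 * PAGE_SIZE };

		if (!charge_slab(&c, 1))	/* one order-1 slab = 2 pages */
			printf("nr_pages = %ld\n", atomic_load(&c.nr_pages));	/* 2 */
		uncharge_slab(&c, 1);
		printf("nr_pages = %ld\n", atomic_load(&c.nr_pages));		/* 0 */
		return 0;
	}
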
diff --git a/mm/slab.c b/mm/slab.c
index 7067ea7f3927..9ca3b87edabc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1712,7 +1712,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
-	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1748,7 +1747,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page_mapcount_reset(page);
 	page->mapping = NULL;
 
-	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	__free_pages(page, cachep->gfporder);
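
On the SLAB side the patch is pure removal: kmem_getpages() and kmem_freepages() no longer call memcg_bind_pages()/memcg_release_pages(), because the nr_pages bookkeeping now happens inside __memcg_charge_slab()/__memcg_uncharge_slab(), reached through the memcg_charge_slab()/memcg_uncharge_slab() wrappers in mm/slab.h (modified below). A rough, illustrative shape of the resulting allocation path, assuming the charge is made through that wrapper early in kmem_getpages() -- not the literal kernel code, which also deals with kmemcheck and pfmemalloc as the surrounding context shows:

	/* illustrative only: order of operations after this patch */
	static struct page *kmem_getpages_sketch(struct kmem_cache *cachep,
						 gfp_t flags, int nodeid)
	{
		struct page *page;

		/* charge first; on success this also bumps memcg_params->nr_pages */
		if (memcg_charge_slab(cachep, flags, cachep->gfporder))
			return NULL;

		page = alloc_pages_node(nodeid, flags, cachep->gfporder);
		if (!page) {
			/* roll back the byte charge and the page count together */
			memcg_uncharge_slab(cachep, cachep->gfporder);
			return NULL;
		}

		__SetPageSlab(page);
		return page;
	}
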
diff --git a/mm/slab.h b/mm/slab.h
index b59447ac4533..961a3fb1f5a2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -121,18 +121,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return !s->memcg_params || s->memcg_params->is_root_cache;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_sub(1 << order, &s->memcg_params->nr_pages);
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
@@ -198,8 +186,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -208,7 +195,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, order);
 }
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
@@ -216,14 +203,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return true;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
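
The net effect in mm/slab.h: the open-coded memcg_bind_pages()/memcg_release_pages() helpers disappear, and the memcg_charge_slab()/memcg_uncharge_slab() wrappers delegate to the new out-of-line helpers, so the root-cache check stays inline while the res_counter charge and the nr_pages update are performed together in mm/memcontrol.c. Before, the counter was maintained from a separate call site in the page allocation and free paths; after, it is updated only alongside a successful charge. The key fragments, quoted from the hunks above:

	/* before (inline in mm/slab.h, called separately from the allocators) */
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);

	/* after (in mm/memcontrol.c, only when the kmem charge succeeded) */
	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
				PAGE_SIZE << order);
	if (!res)
		atomic_add(1 << order, &cachep->memcg_params->nr_pages);
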
diff --git a/mm/slub.c b/mm/slub.c
index 5d1b653183ab..9e288d7c5e6a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1422,7 +1422,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	order = compound_order(page);
 	inc_slabs_node(s, page_to_nid(page), page->objects);
-	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1473,7 +1472,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
-	memcg_release_pages(s, order);
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
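
SLUB mirrors the SLAB change: new_slab() and __free_slab() drop their memcg_bind_pages()/memcg_release_pages() calls for the same reason. As a quick sanity check of the two quantities the helpers now derive from a single order argument (the 4 KiB page size is an assumption of this snippet, not something stated by the patch):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed page size for the example */

	int main(void)
	{
		int order = 3;		/* an order-3 slab */

		/* pages tracked in nr_pages vs. bytes charged to the res_counter */
		printf("pages = %ld, bytes = %lu\n", 1L << order, PAGE_SIZE << order);
		/* prints: pages = 8, bytes = 32768 */
		return 0;
	}
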