author     Vladimir Davydov <vdavydov@virtuozzo.com>  2016-07-26 18:24:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 19:19:19 -0400
commit     452647784b2fccfdeeb976f6f842c6719fb2daac
tree       65b85947f5d5405f11fa2e438ce9b0438049eb9b
parent     632c0a1affd861f81abdd136c886418571e19a51
mm: memcontrol: cleanup kmem charge functions
- Hand the memcg_kmem_enabled() check out to the caller. This reduces the
  number of function definitions, making the code easier to follow. At the
  same time it doesn't result in code bloat, because all of these functions
  are used only in one or two places.

- Move the __GFP_ACCOUNT check to the caller as well, so that one doesn't
  have to dive deep into the memcg implementation to see which allocations
  are charged and which are not.

- Refresh comments.

Link: http://lkml.kernel.org/r/52882a28b542c1979fd9a033b4dc8637fc347399.1464079537.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/memcontrol.h | 103
-rw-r--r--  mm/memcontrol.c            |  75
-rw-r--r--  mm/page_alloc.c            |   9
-rw-r--r--  mm/slab.h                  |  16
4 files changed, 80 insertions, 123 deletions
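The net effect of the cleanup is that the static-key and __GFP_ACCOUNT checks now
sit at the call sites instead of inside the memcg helpers. The minimal sketch
below shows that calling convention; example_alloc_accounted() is a hypothetical
helper invented for illustration and is not part of this patch, but the gating
pattern mirrors the alloc_kmem_pages() change in mm/page_alloc.c further down.

#include <linux/gfp.h>
#include <linux/memcontrol.h>

/* Hypothetical caller: charge only when kmem accounting is enabled and opted in. */
static struct page *example_alloc_accounted(gfp_t gfp_mask, unsigned int order)
{
	struct page *page = alloc_pages(gfp_mask, order);

	/* Caller-side gating, as alloc_kmem_pages() does after this patch. */
	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}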
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 56e6069d2452..71aff733a497 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -749,6 +749,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 }
 #endif
 
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+void memcg_kmem_put_cache(struct kmem_cache *cachep);
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			    struct mem_cgroup *memcg);
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void memcg_kmem_uncharge(struct page *page, int order);
+
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 extern struct static_key_false memcg_kmem_enabled_key;
 
@@ -770,22 +777,6 @@ static inline bool memcg_kmem_enabled(void)
 }
 
 /*
- * In general, we'll do everything in our power to not incur in any overhead
- * for non-memcg users for the kmem functions. Not even a function call, if we
- * can avoid it.
- *
- * Therefore, we'll inline all those functions so that in the best case, we'll
- * see that kmemcg is off for everybody and proceed quickly. If it is on,
- * we'll still do most of the flag checking inline. We check a lot of
- * conditions, but because they are pretty simple, they are expected to be
- * fast.
- */
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-			      struct mem_cgroup *memcg);
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-
-/*
  * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
  * will return -1 when this is not a kmem-limited memcg.
@@ -795,67 +786,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-
-static inline bool __memcg_kmem_bypass(void)
-{
-	if (!memcg_kmem_enabled())
-		return true;
-	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-		return true;
-	return false;
-}
-
-/**
- * memcg_kmem_charge: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
- *
- * Returns 0 on success, an error code on failure.
- */
-static __always_inline int memcg_kmem_charge(struct page *page,
-					     gfp_t gfp, int order)
-{
-	if (__memcg_kmem_bypass())
-		return 0;
-	if (!(gfp & __GFP_ACCOUNT))
-		return 0;
-	return __memcg_kmem_charge(page, gfp, order);
-}
-
-/**
- * memcg_kmem_uncharge: uncharge a kmem page
- * @page: page to uncharge
- * @order: allocation order
- */
-static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-	if (memcg_kmem_enabled())
-		__memcg_kmem_uncharge(page, order);
-}
-
-/**
- * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
- * @cachep: the original global kmem cache
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
- */
-static __always_inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-	if (__memcg_kmem_bypass())
-		return cachep;
-	return __memcg_kmem_get_cache(cachep, gfp);
-}
-
-static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-	if (memcg_kmem_enabled())
-		__memcg_kmem_put_cache(cachep);
-}
-
 /**
  * memcg_kmem_update_page_stat - update kmem page state statistics
  * @page: the page
@@ -878,15 +808,6 @@ static inline bool memcg_kmem_enabled(void)
 	return false;
 }
 
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
-{
-	return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-}
-
 static inline int memcg_cache_id(struct mem_cgroup *memcg)
 {
 	return -1;
@@ -900,16 +821,6 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
-static inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-	return cachep;
-}
-
-static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-}
-
 static inline void memcg_kmem_update_page_stat(struct page *page,
 					       enum mem_cgroup_stat_index idx, int val)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index caea25a21c70..089ef3614155 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2273,20 +2273,30 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 	current->memcg_kmem_skip_account = 0;
 }
 
-/*
+static inline bool memcg_kmem_bypass(void)
+{
+	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
+		return true;
+	return false;
+}
+
+/**
+ * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ *
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
  *
- * If the cache does not exist yet, if we are the first user of it,
- * we either create it immediately, if possible, or create it asynchronously
- * in a workqueue.
- * In the latter case, we will let the current allocation go through with
- * the original cache.
+ * If the cache does not exist yet, if we are the first user of it, we
+ * create it asynchronously in a workqueue and let the current allocation
+ * go through with the original cache.
  *
- * Can't be called in interrupt context or from kernel threads.
- * This function needs to be called with rcu_read_lock() held.
+ * This function takes a reference to the cache it returns to assure it
+ * won't get destroyed while we are working with it. Once the caller is
+ * done with it, memcg_kmem_put_cache() must be called to release the
+ * reference.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
@@ -2294,10 +2304,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 	VM_BUG_ON(!is_root_cache(cachep));
 
-	if (cachep->flags & SLAB_ACCOUNT)
-		gfp |= __GFP_ACCOUNT;
-
-	if (!(gfp & __GFP_ACCOUNT))
+	if (memcg_kmem_bypass())
 		return cachep;
 
 	if (current->memcg_kmem_skip_account)
@@ -2330,14 +2337,27 @@ out:
 	return cachep;
 }
 
-void __memcg_kmem_put_cache(struct kmem_cache *cachep)
+/**
+ * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
+ * @cachep: the cache returned by memcg_kmem_get_cache
+ */
+void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 	if (!is_root_cache(cachep))
 		css_put(&cachep->memcg_params.memcg->css);
 }
 
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-			      struct mem_cgroup *memcg)
+/**
+ * memcg_kmem_charge_memcg: charge a kmem page
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ * @memcg: memory cgroup to charge
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			    struct mem_cgroup *memcg)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page_counter *counter;
@@ -2358,19 +2378,34 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 	return 0;
 }
 
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+/**
+ * memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 {
 	struct mem_cgroup *memcg;
 	int ret = 0;
 
+	if (memcg_kmem_bypass())
+		return 0;
+
 	memcg = get_mem_cgroup_from_mm(current->mm);
 	if (!mem_cgroup_is_root(memcg))
-		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
 	css_put(&memcg->css);
 	return ret;
 }
-
-void __memcg_kmem_uncharge(struct page *page, int order)
+/**
+ * memcg_kmem_uncharge: uncharge a kmem page
+ * @page: page to uncharge
+ * @order: allocation order
+ */
+void memcg_kmem_uncharge(struct page *page, int order)
 {
 	struct mem_cgroup *memcg = page->mem_cgroup;
 	unsigned int nr_pages = 1 << order;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 13cf4c665321..de2491c42d4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4009,7 +4009,8 @@ struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
 	struct page *page;
 
 	page = alloc_pages(gfp_mask, order);
-	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -4021,7 +4022,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	struct page *page;
 
 	page = alloc_pages_node(nid, gfp_mask, order);
-	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -4034,7 +4036,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  */
 void __free_kmem_pages(struct page *page, unsigned int order)
 {
-	memcg_kmem_uncharge(page, order);
+	if (memcg_kmem_enabled())
+		memcg_kmem_uncharge(page, order);
 	__free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index 5fa8b8f20eb1..f33980ab0406 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -254,8 +254,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	if (is_root_cache(s))
 		return 0;
 
-	ret = __memcg_kmem_charge_memcg(page, gfp, order,
-					s->memcg_params.memcg);
+	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
 	if (ret)
 		return ret;
 
@@ -269,6 +268,9 @@ static __always_inline int memcg_charge_slab(struct page *page,
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 						struct kmem_cache *s)
 {
+	if (!memcg_kmem_enabled())
+		return;
+
 	memcg_kmem_update_page_stat(page,
 				    (s->flags & SLAB_RECLAIM_ACCOUNT) ?
 				    MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
@@ -391,7 +393,11 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	if (should_failslab(s, flags))
 		return NULL;
 
-	return memcg_kmem_get_cache(s, flags);
+	if (memcg_kmem_enabled() &&
+	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
+		return memcg_kmem_get_cache(s);
+
+	return s;
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
@@ -408,7 +414,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 					 s->flags, flags);
 		kasan_slab_alloc(s, object, flags);
 	}
-	memcg_kmem_put_cache(s);
+
+	if (memcg_kmem_enabled())
+		memcg_kmem_put_cache(s);
 }
 
 #ifndef CONFIG_SLOB
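With the __GFP_ACCOUNT and SLAB_ACCOUNT checks living in slab_pre_alloc_hook() and
the page allocator, opting an allocation into kmem accounting remains a caller-side
decision. The sketch below is illustrative only; example_obj, example_cachep and the
two helpers are hypothetical names invented for this example, not part of this patch.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

struct example_obj {
	int val;
};

static struct kmem_cache *example_cachep;

static int example_cache_init(void)
{
	/* Every object from a SLAB_ACCOUNT cache is charged to the allocating memcg. */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_ACCOUNT, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static void *example_buf_alloc(size_t size)
{
	/* Ad-hoc kmalloc allocations opt in per call site via __GFP_ACCOUNT. */
	return kmalloc(size, GFP_KERNEL_ACCOUNT);
}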