path: root/mm/page_alloc.c
author	Vladimir Davydov <vdavydov@virtuozzo.com>	2016-07-26 18:24:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 19:19:19 -0400
commit	452647784b2fccfdeeb976f6f842c6719fb2daac (patch)
tree	65b85947f5d5405f11fa2e438ce9b0438049eb9b /mm/page_alloc.c
parent	632c0a1affd861f81abdd136c886418571e19a51 (diff)
mm: memcontrol: cleanup kmem charge functions
- Hand the memcg_kmem_enabled() check out to the caller. This reduces the
  number of function definitions, making the code easier to follow. At the
  same time it doesn't result in code bloat, because all of these functions
  are used in only one or two places.

- Move the __GFP_ACCOUNT check to the caller as well, so that one doesn't
  have to dive deep into the memcg implementation to see which allocations
  are charged and which are not.

- Refresh comments.

Link: http://lkml.kernel.org/r/52882a28b542c1979fd9a033b4dc8637fc347399.1464079537.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
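A minimal, hypothetical caller sketch (not part of the patch) showing the effect of moving the checks into the call sites: only allocations that pass __GFP_ACCOUNT get charged to the current memcg, and plain GFP_KERNEL allocations skip memcg_kmem_charge() entirely. It assumes the alloc_kmem_pages()/__free_kmem_pages() interfaces shown in the diff below; the demo_* names are invented for illustration.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page *demo_page;

static int demo_alloc(void)
{
	/*
	 * Charged to the current memcg only because __GFP_ACCOUNT is set
	 * and memcg kmem accounting is enabled; a plain GFP_KERNEL
	 * allocation would bypass memcg_kmem_charge() after this patch.
	 */
	demo_page = alloc_kmem_pages(GFP_KERNEL | __GFP_ACCOUNT, 0);
	if (!demo_page)
		return -ENOMEM;
	return 0;
}

static void demo_free(void)
{
	/* Uncharges (when memcg_kmem_enabled()) and frees the page. */
	__free_kmem_pages(demo_page, 0);
}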
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 13cf4c665321..de2491c42d4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4009,7 +4009,8 @@ struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
 	struct page *page;
 
 	page = alloc_pages(gfp_mask, order);
-	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -4021,7 +4022,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	struct page *page;
 
 	page = alloc_pages_node(nid, gfp_mask, order);
-	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -4034,7 +4036,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  */
 void __free_kmem_pages(struct page *page, unsigned int order)
 {
-	memcg_kmem_uncharge(page, order);
+	if (memcg_kmem_enabled())
+		memcg_kmem_uncharge(page, order);
 	__free_pages(page, order);
 }
 