author	Vladimir Davydov <vdavydov@parallels.com>	2014-06-04 19:06:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:53:56 -0400
commit	52383431b37cdbec63944e953ffc2698a7ad9722 (patch)
tree	5c7002b9f8723899099a6a8fb2d0039641b9ca09	/mm/slab_common.c
parent	5dfb417509921eb90ee123a4d1525e8916b4ace4 (diff)
mm: get rid of __GFP_KMEMCG
Currently, to allocate a page that should be charged to kmemcg (e.g.
threadinfo), we pass the __GFP_KMEMCG flag to the page allocator.  The
allocated page is then to be freed by free_memcg_kmem_pages.  Apart
from looking asymmetrical, this also requires intrusion into the
general allocation path.  So let's introduce separate functions that
will alloc/free pages charged to kmemcg.

The new functions are called alloc_kmem_pages and free_kmem_pages.
They should be used when the caller actually would like to use kmalloc,
but has to fall back to the page allocator because the allocation is
large.  They only differ from alloc_pages and free_pages in that,
besides allocating or freeing pages, they also charge them to the kmem
resource counter of the current memory cgroup.

[sfr@canb.auug.org.au: export kmalloc_order() to modules]
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
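For context, a sketch of what this change means at a call site, using the
thread_info example the message itself gives.  This is illustrative only,
reconstructed from the commit message rather than quoted from the patch;
the exact GFP flags and call sites are assumptions.

	/* Illustrative sketch: migrating a kmemcg-charged page
	 * allocation to the new helpers.  Names drawn from the
	 * commit message, not from this hunk. */

	/* Before: charging requested via a GFP flag on the generic path. */
	struct page *page = alloc_pages_node(node,
					     THREADINFO_GFP | __GFP_KMEMCG,
					     THREAD_SIZE_ORDER);
	/* ... use the page ... */
	free_memcg_kmem_pages((unsigned long)page_address(page),
			      THREAD_SIZE_ORDER);

	/* After: a dedicated, symmetric alloc/free pair does the
	 * charging, keeping __GFP_KMEMCG out of the allocator core. */
	page = alloc_kmem_pages_node(node, THREADINFO_GFP,
				     THREAD_SIZE_ORDER);
	/* ... use the page ... */
	free_kmem_pages((unsigned long)page_address(page),
			THREAD_SIZE_ORDER);

The design point is the symmetry: the allocation and the uncharge/free now
go through a matched pair instead of a flag on one side and a special free
on the other.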
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 06f0c6125632..1950c8f4d1a6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -582,6 +582,19 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+	struct page *page;
+
+	flags |= __GFP_COMP;
+	page = alloc_kmem_pages(flags, order);
+	ret = page ? page_address(page) : NULL;
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order);
+
 #ifdef CONFIG_TRACING
 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
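As a usage note, kmalloc_order() is the fallback the kmalloc() path takes
when a request is too large for any kmalloc cache.  A minimal sketch of
that wrapper, assuming the SLUB-style kmalloc_large() helper of this era
(when CONFIG_TRACING is off, kmalloc_order_trace reduces to kmalloc_order):

	static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		/* Round the request up to a whole power-of-two page order
		 * and let the page allocator, now kmemcg-aware through
		 * alloc_kmem_pages() inside kmalloc_order(), satisfy it. */
		unsigned int order = get_order(size);
		return kmalloc_order_trace(size, flags, order);
	}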