author     Vladimir Davydov <vdavydov@virtuozzo.com>       2016-07-26 18:24:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 19:19:19 -0400
commit     4949148ad433f6f11cf837978b2907092ec99f3a (patch)
tree       9ae57d8b9d040aaa66c51ce3e62debced020094a /mm/page_alloc.c
parent     452647784b2fccfdeeb976f6f842c6719fb2daac (diff)
mm: charge/uncharge kmemcg from generic page allocator paths
Currently, to charge a non-slab allocation to kmemcg one has to use the alloc_kmem_pages helper with the __GFP_ACCOUNT flag. A page allocated with this helper must eventually be freed with free_kmem_pages, otherwise it won't be uncharged.

This API suits its current users fine, but it turns out to be impossible to use along with page reference counting, i.e. when an allocation is supposed to be freed with put_page, as is the case with pipe or unix socket buffers.

To overcome this limitation, this patch moves charging/uncharging to the generic page allocator paths, i.e. to __alloc_pages_nodemask and free_pages_prepare, and zaps the alloc/free_kmem_pages helpers. This way, one can use any of the available page allocation functions to get the allocated page charged to kmemcg - it's enough to pass __GFP_ACCOUNT, just like in the case of kmalloc and friends. A charged page is automatically uncharged on free.

To make this possible, we need to mark pages charged to kmemcg somehow. To avoid introducing a new page flag, we reuse page->_mapcount for marking such pages. Since pages charged to kmemcg are not supposed to be mapped to userspace, this works just fine. There are other (ab)users of page->_mapcount - buddy and balloon pages - but we don't conflict with them.

In case kmemcg is compiled out or not used at runtime, this patch introduces no overhead to the generic page allocator paths. If kmemcg is used, the cost is one extra gfp flags check on alloc and one extra page->_mapcount check on free, which shouldn't hurt performance, because the data accessed are hot.

Link: http://lkml.kernel.org/r/a9736d856f895bcb465d9f257b54efe32eda6f99.1464079538.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
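As a caller-side illustration of the model described above, here is a minimal, hypothetical sketch (not part of this patch) of a reference-counted buffer that gets charged to kmemcg simply by passing __GFP_ACCOUNT. The helper names example_buf_alloc/example_buf_release are made up for the example; alloc_page, __GFP_ACCOUNT and put_page are the real interfaces the commit message refers to.

/*
 * Hypothetical caller, assuming this patch is applied.  The page is charged
 * in __alloc_pages_nodemask() because __GFP_ACCOUNT is set, and uncharged
 * in free_pages_prepare() when the last reference is dropped - no dedicated
 * alloc_kmem_pages()/free_kmem_pages() pair is needed.
 */
static struct page *example_buf_alloc(void)
{
	return alloc_page(GFP_KERNEL | __GFP_ACCOUNT);
}

static void example_buf_release(struct page *page)
{
	/* Plain reference counting works, e.g. for pipe or unix socket buffers. */
	put_page(page);
}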
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  66
1 file changed, 13 insertions(+), 53 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index de2491c42d4f..7023a31edc5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
+#include <linux/memcontrol.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -1018,6 +1019,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
+	if (memcg_kmem_enabled() && PageKmemcg(page)) {
+		memcg_kmem_uncharge(page, order);
+		__ClearPageKmemcg(page);
+	}
 	if (check_free)
 		bad += free_pages_check(page);
 	if (bad)
@@ -3841,6 +3846,14 @@ no_zone:
 	}
 
 out:
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
+		if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
+			__free_pages(page, order);
+			page = NULL;
+		} else
+			__SetPageKmemcg(page);
+	}
+
 	if (kmemcheck_enabled && page)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 
@@ -3996,59 +4009,6 @@ void __free_page_frag(void *addr)
 }
 EXPORT_SYMBOL(__free_page_frag);
 
-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages(gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
-	if (memcg_kmem_enabled())
-		memcg_kmem_uncharge(page, order);
-	__free_pages(page, order);
-}
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
-	if (addr != 0) {
-		VM_BUG_ON(!virt_addr_valid((void *)addr));
-		__free_kmem_pages(virt_to_page((void *)addr), order);
-	}
-}
-
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 			      size_t size)
 {