aboutsummaryrefslogtreecommitdiffstats
path: root/init
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-12-10 18:44:52 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-10 20:41:09 -0500
commit1306a85aed3ec3db98945aafb7dfbe5648a1203c (patch)
tree63643e556c64118d963020758faf915325ba613c /init
parent22811c6bc3c764d8935383ad0ddd7a96b45d75dc (diff)
mm: embed the memcg pointer directly into struct page
Memory cgroups used to have 5 per-page pointers. To allow users to disable that amount of overhead during runtime, those pointers were allocated in a separate array, with a translation layer between them and struct page. There is now only one page pointer remaining: the memcg pointer, that indicates which cgroup the page is associated with when charged. The complexity of runtime allocation and the runtime translation overhead is no longer justified to save that *potential* 0.19% of memory. With CONFIG_SLUB, page->mem_cgroup actually sits in the doubleword padding after the page->private member and doesn't even increase struct page, and then this patch actually saves space. Remaining users that care can still compile their kernels without CONFIG_MEMCG. text data bss dec hex filename 8828345 1725264 983040 11536649 b00909 vmlinux.old 8827425 1725264 966656 11519345 afc571 vmlinux.new [mhocko@suse.cz: update Documentation/cgroups/memory.txt] Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Vladimir Davydov <vdavydov@parallels.com> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Cc: Michal Hocko <mhocko@suse.cz> Cc: Vladimir Davydov <vdavydov@parallels.com> Cc: Tejun Heo <tj@kernel.org> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Konstantin Khlebnikov <koct9i@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'init')
-rw-r--r--init/main.c7
1 file changed, 0 insertions, 7 deletions
diff --git a/init/main.c b/init/main.c
index 321d0ceb26d3..d2e4ead4891f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -51,7 +51,6 @@
51#include <linux/mempolicy.h> 51#include <linux/mempolicy.h>
52#include <linux/key.h> 52#include <linux/key.h>
53#include <linux/buffer_head.h> 53#include <linux/buffer_head.h>
54#include <linux/page_cgroup.h>
55#include <linux/debug_locks.h> 54#include <linux/debug_locks.h>
56#include <linux/debugobjects.h> 55#include <linux/debugobjects.h>
57#include <linux/lockdep.h> 56#include <linux/lockdep.h>
@@ -485,11 +484,6 @@ void __init __weak thread_info_cache_init(void)
485 */ 484 */
486static void __init mm_init(void) 485static void __init mm_init(void)
487{ 486{
488 /*
489 * page_cgroup requires contiguous pages,
490 * bigger than MAX_ORDER unless SPARSEMEM.
491 */
492 page_cgroup_init_flatmem();
493 mem_init(); 487 mem_init();
494 kmem_cache_init(); 488 kmem_cache_init();
495 percpu_init_late(); 489 percpu_init_late();
@@ -627,7 +621,6 @@ asmlinkage __visible void __init start_kernel(void)
627 initrd_start = 0; 621 initrd_start = 0;
628 } 622 }
629#endif 623#endif
630 page_cgroup_init();
631 debug_objects_mem_init(); 624 debug_objects_mem_init();
632 kmemleak_init(); 625 kmemleak_init();
633 setup_per_cpu_pageset(); 626 setup_per_cpu_pageset();