aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2015-02-11 18:26:33 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-11 20:06:03 -0500
commit95a045f63d9868ee189fe24ee61689df5a133d5b (patch)
tree65e1d614946bfed6bcb23e40d196db50db8b3944 /mm/memcontrol.c
parent9c608dbe6a0d137f78498a5181eb0cd309f8f067 (diff)
mm: memcontrol: consolidate memory controller initialization
The initialization code for the per-cpu charge stock and the soft limit tree is compact enough to inline it into mem_cgroup_init().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 60
1 file changed, 25 insertions(+), 35 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2efec685793b..ebf1139f323e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2138,17 +2138,6 @@ static void drain_local_stock(struct work_struct *dummy)
2138 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2138 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2139} 2139}
2140 2140
2141static void __init memcg_stock_init(void)
2142{
2143 int cpu;
2144
2145 for_each_possible_cpu(cpu) {
2146 struct memcg_stock_pcp *stock =
2147 &per_cpu(memcg_stock, cpu);
2148 INIT_WORK(&stock->work, drain_local_stock);
2149 }
2150}
2151
2152/* 2141/*
2153 * Cache charges(val) to local per_cpu area. 2142 * Cache charges(val) to local per_cpu area.
2154 * This will be consumed by consume_stock() function, later. 2143 * This will be consumed by consume_stock() function, later.
@@ -4507,28 +4496,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4507} 4496}
4508EXPORT_SYMBOL(parent_mem_cgroup); 4497EXPORT_SYMBOL(parent_mem_cgroup);
4509 4498
4510static void __init mem_cgroup_soft_limit_tree_init(void)
4511{
4512 int node;
4513
4514 for_each_node(node) {
4515 struct mem_cgroup_tree_per_node *rtpn;
4516 int zone;
4517
4518 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
4519 node_online(node) ? node : NUMA_NO_NODE);
4520
4521 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4522 struct mem_cgroup_tree_per_zone *rtpz;
4523
4524 rtpz = &rtpn->rb_tree_per_zone[zone];
4525 rtpz->rb_root = RB_ROOT;
4526 spin_lock_init(&rtpz->lock);
4527 }
4528 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4529 }
4530}
4531
4532static struct cgroup_subsys_state * __ref 4499static struct cgroup_subsys_state * __ref
4533mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4500mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4534{ 4501{
@@ -5905,10 +5872,33 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
5905 */ 5872 */
5906static int __init mem_cgroup_init(void) 5873static int __init mem_cgroup_init(void)
5907{ 5874{
5875 int cpu, node;
5876
5908 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5877 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5878
5879 for_each_possible_cpu(cpu)
5880 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5881 drain_local_stock);
5882
5883 for_each_node(node) {
5884 struct mem_cgroup_tree_per_node *rtpn;
5885 int zone;
5886
5887 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5888 node_online(node) ? node : NUMA_NO_NODE);
5889
5890 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5891 struct mem_cgroup_tree_per_zone *rtpz;
5892
5893 rtpz = &rtpn->rb_tree_per_zone[zone];
5894 rtpz->rb_root = RB_ROOT;
5895 spin_lock_init(&rtpz->lock);
5896 }
5897 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5898 }
5899
5909 enable_swap_cgroup(); 5900 enable_swap_cgroup();
5910 mem_cgroup_soft_limit_tree_init(); 5901
5911 memcg_stock_init();
5912 return 0; 5902 return 0;
5913} 5903}
5914subsys_initcall(mem_cgroup_init); 5904subsys_initcall(mem_cgroup_init);