path: root/mm
author	Michal Hocko <mhocko@suse.cz>	2013-02-22 19:35:39 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:21 -0500
commit	8787a1df30c7b75521fe8cbed42895d47e6b8d52
tree	12129663393911726999884a370199d7b09b66bd /mm
parent	0e50ce3b50fb4ffc38c98fe7622361da4d0808c1
memcg: move mem_cgroup_soft_limit_tree_init to mem_cgroup_init
The per-node-zone soft limit tree is currently initialized when the root cgroup is created. That works, but it pointlessly pollutes the memcg allocation code with something that can instead be done when the memcg subsystem is initialized by mem_cgroup_init(), along with the other controller-specific parts.

While we are at it, make mem_cgroup_soft_limit_tree_init() void: reporting an allocation failure that early during boot makes little sense, because if we cannot allocate memory at that point we are screwed anyway. Dropping the error path also saves some code.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <htejun@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
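The shape the patch moves to is worth spelling out: allocate controller-wide state from an __init function, treat allocation failure as fatal with BUG_ON() since nothing can recover that early in boot, and hook the whole thing into a subsys_initcall(). Below is a minimal, self-contained sketch of that pattern; the example_state type and the example_* names are illustrative only and are not part of the patch, which operates on struct mem_cgroup_tree_per_node in mm/memcontrol.c.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/nodemask.h>
#include <linux/bug.h>

/* Illustrative per-node state; the real patch allocates
 * struct mem_cgroup_tree_per_node in mm/memcontrol.c. */
struct example_state {
	int dummy;
};

static struct example_state *example_state_per_node[MAX_NUMNODES];

static void __init example_tree_init(void)
{
	int node;

	for_each_node(node) {
		/* Fall back to any node when this one has no normal
		 * memory, mirroring the tmp = -1 handling in the real
		 * function. */
		int tmp = node_state(node, N_NORMAL_MEMORY) ? node : -1;
		struct example_state *s;

		s = kzalloc_node(sizeof(*s), GFP_KERNEL, tmp);
		/* Failing this early in boot is unrecoverable, so no
		 * error path: the patch uses BUG_ON() the same way. */
		BUG_ON(!s);
		example_state_per_node[node] = s;
	}
}

static int __init example_init(void)
{
	example_tree_init();
	return 0;
}
subsys_initcall(example_init);

With this wiring the allocation happens exactly once at subsys_initcall time, so the css_alloc path for the root cgroup no longer has to carry an error label for it.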
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	19
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index af4b04f4d744..bb894b071f59 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6052,7 +6052,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static int mem_cgroup_soft_limit_tree_init(void)
+static void __init mem_cgroup_soft_limit_tree_init(void)
 {
 	struct mem_cgroup_tree_per_node *rtpn;
 	struct mem_cgroup_tree_per_zone *rtpz;
@@ -6063,8 +6063,7 @@ static int mem_cgroup_soft_limit_tree_init(void)
 		if (!node_state(node, N_NORMAL_MEMORY))
 			tmp = -1;
 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
-		if (!rtpn)
-			goto err_cleanup;
+		BUG_ON(!rtpn);
 
 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
 
@@ -6074,17 +6073,6 @@ static int mem_cgroup_soft_limit_tree_init(void)
 			spin_lock_init(&rtpz->lock);
 		}
 	}
-	return 0;
-
-err_cleanup:
-	for_each_node(node) {
-		if (!soft_limit_tree.rb_tree_per_node[node])
-			break;
-		kfree(soft_limit_tree.rb_tree_per_node[node]);
-		soft_limit_tree.rb_tree_per_node[node] = NULL;
-	}
-	return 1;
-
 }
 
 static struct cgroup_subsys_state * __ref
@@ -6106,8 +6094,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 	if (cont->parent == NULL) {
 		int cpu;
 
-		if (mem_cgroup_soft_limit_tree_init())
-			goto free_out;
 		root_mem_cgroup = memcg;
 		for_each_possible_cpu(cpu) {
 			struct memcg_stock_pcp *stock =
@@ -6850,6 +6836,7 @@ static int __init mem_cgroup_init(void)
 {
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 	enable_swap_cgroup();
+	mem_cgroup_soft_limit_tree_init();
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);