aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLi Zefan <lizf@cn.fujitsu.com>2011-03-23 19:42:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-03-23 22:46:35 -0400
commitee24d3797780eee6ffe581a7b78d27896f9b494a (patch)
tree8034dac124ab8f0a29bc6f238c030915b2e08398 /kernel
parentc8163ca8afcac0fc54593fc60d1e1110edbd0eb2 (diff)
cpuset: fix unchecked calls to NODEMASK_ALLOC()
Those functions that use NODEMASK_ALLOC() can't propagate errno to users, but will fail silently. Fix it by using a static nodemask_t variable for each function; those variables are protected by cgroup_mutex. [akpm@linux-foundation.org: fix comment spelling, strengthen cgroup_lock comment] Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> Cc: Paul Menage <menage@google.com> Acked-by: David Rientjes <rientjes@google.com> Cc: Miao Xie <miaox@cn.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cpuset.c51
1 file changed, 16 insertions, 35 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7f384f4013b2..e472fe139192 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1015,17 +1015,12 @@ static void cpuset_change_nodemask(struct task_struct *p,
1015 struct cpuset *cs; 1015 struct cpuset *cs;
1016 int migrate; 1016 int migrate;
1017 const nodemask_t *oldmem = scan->data; 1017 const nodemask_t *oldmem = scan->data;
1018 NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL); 1018 static nodemask_t newmems; /* protected by cgroup_mutex */
1019
1020 if (!newmems)
1021 return;
1022 1019
1023 cs = cgroup_cs(scan->cg); 1020 cs = cgroup_cs(scan->cg);
1024 guarantee_online_mems(cs, newmems); 1021 guarantee_online_mems(cs, &newmems);
1025 1022
1026 cpuset_change_task_nodemask(p, newmems); 1023 cpuset_change_task_nodemask(p, &newmems);
1027
1028 NODEMASK_FREE(newmems);
1029 1024
1030 mm = get_task_mm(p); 1025 mm = get_task_mm(p);
1031 if (!mm) 1026 if (!mm)
@@ -1438,41 +1433,35 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1438 struct mm_struct *mm; 1433 struct mm_struct *mm;
1439 struct cpuset *cs = cgroup_cs(cont); 1434 struct cpuset *cs = cgroup_cs(cont);
1440 struct cpuset *oldcs = cgroup_cs(oldcont); 1435 struct cpuset *oldcs = cgroup_cs(oldcont);
1441 NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL); 1436 static nodemask_t to; /* protected by cgroup_mutex */
1442
1443 if (to == NULL)
1444 goto alloc_fail;
1445 1437
1446 if (cs == &top_cpuset) { 1438 if (cs == &top_cpuset) {
1447 cpumask_copy(cpus_attach, cpu_possible_mask); 1439 cpumask_copy(cpus_attach, cpu_possible_mask);
1448 } else { 1440 } else {
1449 guarantee_online_cpus(cs, cpus_attach); 1441 guarantee_online_cpus(cs, cpus_attach);
1450 } 1442 }
1451 guarantee_online_mems(cs, to); 1443 guarantee_online_mems(cs, &to);
1452 1444
1453 /* do per-task migration stuff possibly for each in the threadgroup */ 1445 /* do per-task migration stuff possibly for each in the threadgroup */
1454 cpuset_attach_task(tsk, to, cs); 1446 cpuset_attach_task(tsk, &to, cs);
1455 if (threadgroup) { 1447 if (threadgroup) {
1456 struct task_struct *c; 1448 struct task_struct *c;
1457 rcu_read_lock(); 1449 rcu_read_lock();
1458 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { 1450 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1459 cpuset_attach_task(c, to, cs); 1451 cpuset_attach_task(c, &to, cs);
1460 } 1452 }
1461 rcu_read_unlock(); 1453 rcu_read_unlock();
1462 } 1454 }
1463 1455
1464 /* change mm; only needs to be done once even if threadgroup */ 1456 /* change mm; only needs to be done once even if threadgroup */
1465 *to = cs->mems_allowed; 1457 to = cs->mems_allowed;
1466 mm = get_task_mm(tsk); 1458 mm = get_task_mm(tsk);
1467 if (mm) { 1459 if (mm) {
1468 mpol_rebind_mm(mm, to); 1460 mpol_rebind_mm(mm, &to);
1469 if (is_memory_migrate(cs)) 1461 if (is_memory_migrate(cs))
1470 cpuset_migrate_mm(mm, &oldcs->mems_allowed, to); 1462 cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to);
1471 mmput(mm); 1463 mmput(mm);
1472 } 1464 }
1473
1474alloc_fail:
1475 NODEMASK_FREE(to);
1476} 1465}
1477 1466
1478/* The various types of files and directories in a cpuset file system */ 1467/* The various types of files and directories in a cpuset file system */
@@ -2055,10 +2044,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2055 struct cpuset *cp; /* scans cpusets being updated */ 2044 struct cpuset *cp; /* scans cpusets being updated */
2056 struct cpuset *child; /* scans child cpusets of cp */ 2045 struct cpuset *child; /* scans child cpusets of cp */
2057 struct cgroup *cont; 2046 struct cgroup *cont;
2058 NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL); 2047 static nodemask_t oldmems; /* protected by cgroup_mutex */
2059
2060 if (oldmems == NULL)
2061 return;
2062 2048
2063 list_add_tail((struct list_head *)&root->stack_list, &queue); 2049 list_add_tail((struct list_head *)&root->stack_list, &queue);
2064 2050
@@ -2075,7 +2061,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2075 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) 2061 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2076 continue; 2062 continue;
2077 2063
2078 *oldmems = cp->mems_allowed; 2064 oldmems = cp->mems_allowed;
2079 2065
2080 /* Remove offline cpus and mems from this cpuset. */ 2066 /* Remove offline cpus and mems from this cpuset. */
2081 mutex_lock(&callback_mutex); 2067 mutex_lock(&callback_mutex);
@@ -2091,10 +2077,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2091 remove_tasks_in_empty_cpuset(cp); 2077 remove_tasks_in_empty_cpuset(cp);
2092 else { 2078 else {
2093 update_tasks_cpumask(cp, NULL); 2079 update_tasks_cpumask(cp, NULL);
2094 update_tasks_nodemask(cp, oldmems, NULL); 2080 update_tasks_nodemask(cp, &oldmems, NULL);
2095 } 2081 }
2096 } 2082 }
2097 NODEMASK_FREE(oldmems);
2098} 2083}
2099 2084
2100/* 2085/*
@@ -2136,19 +2121,16 @@ void cpuset_update_active_cpus(void)
2136static int cpuset_track_online_nodes(struct notifier_block *self, 2121static int cpuset_track_online_nodes(struct notifier_block *self,
2137 unsigned long action, void *arg) 2122 unsigned long action, void *arg)
2138{ 2123{
2139 NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL); 2124 static nodemask_t oldmems; /* protected by cgroup_mutex */
2140
2141 if (oldmems == NULL)
2142 return NOTIFY_DONE;
2143 2125
2144 cgroup_lock(); 2126 cgroup_lock();
2145 switch (action) { 2127 switch (action) {
2146 case MEM_ONLINE: 2128 case MEM_ONLINE:
2147 *oldmems = top_cpuset.mems_allowed; 2129 oldmems = top_cpuset.mems_allowed;
2148 mutex_lock(&callback_mutex); 2130 mutex_lock(&callback_mutex);
2149 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2131 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2150 mutex_unlock(&callback_mutex); 2132 mutex_unlock(&callback_mutex);
2151 update_tasks_nodemask(&top_cpuset, oldmems, NULL); 2133 update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2152 break; 2134 break;
2153 case MEM_OFFLINE: 2135 case MEM_OFFLINE:
2154 /* 2136 /*
@@ -2162,7 +2144,6 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
2162 } 2144 }
2163 cgroup_unlock(); 2145 cgroup_unlock();
2164 2146
2165 NODEMASK_FREE(oldmems);
2166 return NOTIFY_OK; 2147 return NOTIFY_OK;
2167} 2148}
2168#endif 2149#endif