aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
author: Mike Travis <travis@sgi.com> 2008-11-24 11:05:02 -0500
committer: Ingo Molnar <mingo@elte.hu> 2008-11-24 11:49:27 -0500
commit: ea6f18ed5a1531caf678374f30a0990c9e6742f3 (patch)
tree: 721c45d123ffd4f1f3bfbb93f9f7675b1588c610 /kernel/sched.c
parent: 943f3d030003e1fa5f77647328e805441213bf49 (diff)
sched: reduce stack size requirements in kernel/sched.c
Impact: cleanup

* Use node_to_cpumask_ptr in place of node_to_cpumask to reduce stack requirements in kernel/sched.c.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c13
1 file changed, 7 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index bb827651558e..dd22cec499b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6110,8 +6110,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
6110 6110
6111 do { 6111 do {
6112 /* On same node? */ 6112 /* On same node? */
6113 mask = node_to_cpumask(cpu_to_node(dead_cpu)); 6113 node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
6114 cpus_and(mask, mask, p->cpus_allowed); 6114
6115 cpus_and(mask, *pnodemask, p->cpus_allowed);
6115 dest_cpu = any_online_cpu(mask); 6116 dest_cpu = any_online_cpu(mask);
6116 6117
6117 /* On any allowed CPU? */ 6118 /* On any allowed CPU? */
@@ -7098,9 +7099,9 @@ static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
7098 struct sched_group **sg, cpumask_t *nodemask) 7099 struct sched_group **sg, cpumask_t *nodemask)
7099{ 7100{
7100 int group; 7101 int group;
7102 node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
7101 7103
7102 *nodemask = node_to_cpumask(cpu_to_node(cpu)); 7104 cpus_and(*nodemask, *pnodemask, *cpu_map);
7103 cpus_and(*nodemask, *nodemask, *cpu_map);
7104 group = first_cpu(*nodemask); 7105 group = first_cpu(*nodemask);
7105 7106
7106 if (sg) 7107 if (sg)
@@ -7150,9 +7151,9 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7150 7151
7151 for (i = 0; i < nr_node_ids; i++) { 7152 for (i = 0; i < nr_node_ids; i++) {
7152 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7153 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7154 node_to_cpumask_ptr(pnodemask, i);
7153 7155
7154 *nodemask = node_to_cpumask(i); 7156 cpus_and(*nodemask, *pnodemask, *cpu_map);
7155 cpus_and(*nodemask, *nodemask, *cpu_map);
7156 if (cpus_empty(*nodemask)) 7157 if (cpus_empty(*nodemask))
7157 continue; 7158 continue;
7158 7159