author     Rusty Russell <rusty@rustcorp.com.au>  2009-01-10 19:04:16 -0500
committer  Ingo Molnar <mingo@elte.hu>            2009-01-10 19:04:16 -0500
commit     62ea9ceb17a74bc7544211bfeecf4170c554ac4f (patch)
tree       ef8f6b185463a4b68908ca23357efd3d1ddb6145 /kernel
parent     3d14bdad40315b54470cb7812293d14c8af2bf7d (diff)
cpumask: fix CONFIG_NUMA=y sched.c
Impact: fix panic on ia64 with NR_CPUS=1024

struct sched_domain is now a dangling structure; where we really want static ones, we need to use static_sched_domain.

(As the FIXME in this file says, cpumask_var_t would be better, but this code is hairy enough without trying to add initialization code to the right places).

Reported-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
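To unpack what the message means by a "dangling structure": at this point in the tree, struct sched_domain keeps its spanned-CPUs mask in storage that hangs off the end of the object, so a bare static or per-cpu instance reserves no room for it. The sketch below is illustrative only; sched_domain_span() and to_cpumask() are the real helpers used in the diff, but the field layout shown is an assumption about the shape of the problem, not a copy of the actual structure.

/*
 * Sketch only: the real struct sched_domain has many more fields.
 * The point is the trailing flexible array that sched_domain_span()
 * hands back as a cpumask.
 */
struct sched_domain {
	/* ... scheduling fields ... */
	unsigned long span[];	/* no storage unless the allocator provides it */
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

/*
 * With NR_CPUS=1024, a bare static DEFINE_PER_CPU(struct sched_domain, ...)
 * instance has zero bytes behind span[], so copying a cpumask into
 * sched_domain_span(sd) writes past the object -- the ia64 panic this
 * patch fixes.
 */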
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f..f0c0a81d763 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7282,10 +7282,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7560,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 	if (cpumask_weight(cpu_map) >
 			SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-		sd = &per_cpu(allnodes_domains, i);
+		sd = &per_cpu(allnodes_domains, i).sd;
 		SD_INIT(sd, ALLNODES);
 		set_domain_attribute(sd, attr);
 		cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7570,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	} else
 		p = NULL;
 
-	sd = &per_cpu(node_domains, i);
+	sd = &per_cpu(node_domains, i).sd;
 	SD_INIT(sd, NODE);
 	set_domain_attribute(sd, attr);
 	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7688,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
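The remedy, as the hunks above show, is to declare the static per-cpu objects as wrapper types that carry their own bitmap and to reach the embedded domain through .sd. A hedged sketch of that pattern follows; the wrapper names match those used in the diff, but the exact member layout is assumed, not taken from the patched file.

/*
 * Assumed shape of the wrappers the diff relies on: the embedded
 * scheduler object comes first, followed by enough bitmap storage
 * for CONFIG_NR_CPUS bits to back the mask that would otherwise
 * dangle off the end of the plain struct.
 */
struct static_sched_group {
	struct sched_group sg;
	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
};

struct static_sched_domain {
	struct sched_domain sd;
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};

/*
 * Declaration and use then pair up exactly as in the hunks above:
 * the per-cpu object is the wrapper, and callers take the address of
 * its .sd member to get the struct sched_domain * the rest of the
 * code expects, e.g.  sd = &per_cpu(node_domains, i).sd;
 */
static DEFINE_PER_CPU(struct static_sched_domain, node_domains);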