 kernel/sched.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f3..f0c0a81d7638 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7282,10 +7282,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7560,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7570,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7688,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
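
For context: every access in the later hunks grows a ".sd" suffix because the
first hunk converts the per-cpu node_domains and allnodes_domains variables
from bare struct sched_domain to the static_sched_domain wrapper, matching the
static_sched_group wrapper already used for sched_group_allnodes. A minimal
sketch of the wrapper types this patch assumes, following the kernel/sched.c
of this era (treat the exact field layout as illustrative, not authoritative):

struct static_sched_group {
	struct sched_group sg;			/* embedded group */
	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);	/* statically sized cpu mask */
};

struct static_sched_domain {
	struct sched_domain sd;			/* embedded domain */
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);	/* statically sized span */
};

With the per-cpu variables declared as wrappers, callers take the embedded
member, e.g. sd = &per_cpu(node_domains, i).sd; as the hunks above show. This
keeps the NUMA domains consistent with the other statically allocated
domain/group arrays in this file, at the cost of reserving CONFIG_NR_CPUS bits
per mask regardless of nr_cpu_ids.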