Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  13
1 file changed, 8 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f3..8be2c13b50d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
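The new forward declaration suggests that double_rq_lock() is now referenced somewhere in kernel/sched.c ahead of its definition; a static function has to be visible at its first call site, or the build trips an implicit-declaration warning. A minimal sketch of the pattern, with hypothetical names:

/* Forward declaration: makes the helper visible to early_user() below. */
static void helper(int a, int b);

static void early_user(void)
{
        helper(1, 2);   /* fine: the prototype above is already in scope */
}

static void helper(int a, int b)
{
        /* real work would go here */
}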
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
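Judging by the .sd dereferences in the hunks below, struct static_sched_domain is a wrapper that embeds the real struct sched_domain so the NUMA domains can live in plain per-cpu variables; roughly along these lines (only the embedded sd member is confirmed by this diff, the statically sized span bitmap is an assumption about the companion definition):

struct static_sched_domain {
        struct sched_domain sd;                 /* confirmed: accessed as .sd below */
        DECLARE_BITMAP(span, CONFIG_NR_CPUS);   /* assumed: static cpumask storage */
};

This mirrors struct static_sched_group, which is already used for sched_group_allnodes in this same hunk.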
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
                 if (cpumask_weight(cpu_map) >
                         SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-                        sd = &per_cpu(allnodes_domains, i);
+                        sd = &per_cpu(allnodes_domains, i).sd;
                         SD_INIT(sd, ALLNODES);
                         set_domain_attribute(sd, attr);
                         cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                 } else
                         p = NULL;
 
-                sd = &per_cpu(node_domains, i);
+                sd = &per_cpu(node_domains, i).sd;
                 SD_INIT(sd, NODE);
                 set_domain_attribute(sd, attr);
                 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                 for_each_cpu(j, nodemask) {
                         struct sched_domain *sd;
 
-                        sd = &per_cpu(node_domains, j);
+                        sd = &per_cpu(node_domains, j).sd;
                         sd->groups = sg;
                 }
                 sg->__cpu_power = 0;
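The last three hunks are the same mechanical change: per_cpu() now evaluates to the wrapper object rather than to a struct sched_domain, so the code picks out the embedded domain through .sd before taking its address:

struct sched_domain *sd;

/* before: the per-cpu variable itself was a struct sched_domain */
sd = &per_cpu(node_domains, i);

/* after: the per-cpu variable is the wrapper; use the embedded member */
sd = &per_cpu(node_domains, i).sd;

Either way sd still points at a struct sched_domain, so the surrounding users shown in the diff (SD_INIT(), set_domain_attribute(), sched_domain_span(), sd->groups) are untouched.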