path: root/kernel/sched.c
author		Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
commit		af37501c792107c2bde1524bdae38d9a247b841a (patch)
tree		b50ee90d29e72956b8b7d8d19677fe5996755d49 /kernel/sched.c
parent		d859e29fe34cb833071b20aef860ee94fbad9bb2 (diff)
parent		99937d6455cea95405ac681c86a857d0fcd530bd (diff)
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
	arch/x86/include/asm/pda.h

We merge tip/core/percpu into tip/perfcounters/core because of a semantic
and contextual conflict: the former eliminates the PDA, while the latter
extends it with the apic_perf_irqs field.

Resolve the conflict by moving the new field to the irq_cpustat structure
on 64-bit too.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
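As context for the resolution described above, here is a rough sketch (not the verbatim upstream header) of what moving the counter into irq_cpustat amounts to on 64-bit: the PDA field goes away and the per-CPU perf counter interrupt count sits next to the other per-CPU interrupt statistics. Apart from apic_perf_irqs, the field names below are illustrative and abbreviated.

/*
 * Sketch only: abbreviated irq_cpustat_t with the relocated counter.
 * The real structure carries the full set of per-CPU IRQ statistics.
 */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int apic_perf_irqs;	/* perf counter interrupts, moved here from the PDA */
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
} ____cacheline_aligned irq_cpustat_t;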
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 43fd21233b9..ce9fecab5f0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7352,10 +7355,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7630,7 +7633,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7640,7 +7643,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7758,7 +7761,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
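The per_cpu(...).sd dereferences in the hunks above follow from node_domains and allnodes_domains now being instances of struct static_sched_domain rather than bare struct sched_domain. The exact definition is not part of this diff; as an assumption, it wraps the domain together with a statically sized cpumask, roughly:

/* Assumed shape of the wrapper (defined elsewhere in kernel/sched.c). */
struct static_sched_domain {
	struct sched_domain sd;
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};

With that layout, &per_cpu(node_domains, i) no longer yields a struct sched_domain pointer, hence the added .sd member accesses.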