diff options
author | Siddha, Suresh B <suresh.b.siddha@intel.com> | 2006-10-03 04:14:08 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-03 11:04:06 -0400 |
commit | 1a84887080dc15f048db7c3a643e98f1435790d6 (patch) | |
tree | 7cd335fee247c0b60f8562c82806b49435b5fb9d /kernel | |
parent | 74732646431a1bb7e23e6b564127a8881cfef900 (diff) |
[PATCH] sched: introduce child field in sched_domain
Introduce the child field in sched_domain struct and use it in
sched_balance_self().
We will also use this field in cleaning up the sched group cpu_power
setup (done in a different patch) code.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 40 |
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c index 6d7bf55ec33d..0feeacb91497 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1286,21 +1286,29 @@ static int sched_balance_self(int cpu, int flag) | |||
1286 | while (sd) { | 1286 | while (sd) { |
1287 | cpumask_t span; | 1287 | cpumask_t span; |
1288 | struct sched_group *group; | 1288 | struct sched_group *group; |
1289 | int new_cpu; | 1289 | int new_cpu, weight; |
1290 | int weight; | 1290 | |
1291 | if (!(sd->flags & flag)) { | ||
1292 | sd = sd->child; | ||
1293 | continue; | ||
1294 | } | ||
1291 | 1295 | ||
1292 | span = sd->span; | 1296 | span = sd->span; |
1293 | group = find_idlest_group(sd, t, cpu); | 1297 | group = find_idlest_group(sd, t, cpu); |
1294 | if (!group) | 1298 | if (!group) { |
1295 | goto nextlevel; | 1299 | sd = sd->child; |
1300 | continue; | ||
1301 | } | ||
1296 | 1302 | ||
1297 | new_cpu = find_idlest_cpu(group, t, cpu); | 1303 | new_cpu = find_idlest_cpu(group, t, cpu); |
1298 | if (new_cpu == -1 || new_cpu == cpu) | 1304 | if (new_cpu == -1 || new_cpu == cpu) { |
1299 | goto nextlevel; | 1305 | /* Now try balancing at a lower domain level of cpu */ |
1306 | sd = sd->child; | ||
1307 | continue; | ||
1308 | } | ||
1300 | 1309 | ||
1301 | /* Now try balancing at a lower domain level */ | 1310 | /* Now try balancing at a lower domain level of new_cpu */ |
1302 | cpu = new_cpu; | 1311 | cpu = new_cpu; |
1303 | nextlevel: | ||
1304 | sd = NULL; | 1312 | sd = NULL; |
1305 | weight = cpus_weight(span); | 1313 | weight = cpus_weight(span); |
1306 | for_each_domain(cpu, tmp) { | 1314 | for_each_domain(cpu, tmp) { |
@@ -5448,12 +5456,18 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu) | |||
5448 | struct sched_domain *parent = tmp->parent; | 5456 | struct sched_domain *parent = tmp->parent; |
5449 | if (!parent) | 5457 | if (!parent) |
5450 | break; | 5458 | break; |
5451 | if (sd_parent_degenerate(tmp, parent)) | 5459 | if (sd_parent_degenerate(tmp, parent)) { |
5452 | tmp->parent = parent->parent; | 5460 | tmp->parent = parent->parent; |
5461 | if (parent->parent) | ||
5462 | parent->parent->child = tmp; | ||
5463 | } | ||
5453 | } | 5464 | } |
5454 | 5465 | ||
5455 | if (sd && sd_degenerate(sd)) | 5466 | if (sd && sd_degenerate(sd)) { |
5456 | sd = sd->parent; | 5467 | sd = sd->parent; |
5468 | if (sd) | ||
5469 | sd->child = NULL; | ||
5470 | } | ||
5457 | 5471 | ||
5458 | sched_domain_debug(sd, cpu); | 5472 | sched_domain_debug(sd, cpu); |
5459 | 5473 | ||
@@ -6288,6 +6302,8 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6288 | *sd = SD_NODE_INIT; | 6302 | *sd = SD_NODE_INIT; |
6289 | sd->span = sched_domain_node_span(cpu_to_node(i)); | 6303 | sd->span = sched_domain_node_span(cpu_to_node(i)); |
6290 | sd->parent = p; | 6304 | sd->parent = p; |
6305 | if (p) | ||
6306 | p->child = sd; | ||
6291 | cpus_and(sd->span, sd->span, *cpu_map); | 6307 | cpus_and(sd->span, sd->span, *cpu_map); |
6292 | #endif | 6308 | #endif |
6293 | 6309 | ||
@@ -6297,6 +6313,8 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6297 | *sd = SD_CPU_INIT; | 6313 | *sd = SD_CPU_INIT; |
6298 | sd->span = nodemask; | 6314 | sd->span = nodemask; |
6299 | sd->parent = p; | 6315 | sd->parent = p; |
6316 | if (p) | ||
6317 | p->child = sd; | ||
6300 | sd->groups = &sched_group_phys[group]; | 6318 | sd->groups = &sched_group_phys[group]; |
6301 | 6319 | ||
6302 | #ifdef CONFIG_SCHED_MC | 6320 | #ifdef CONFIG_SCHED_MC |
@@ -6307,6 +6325,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6307 | sd->span = cpu_coregroup_map(i); | 6325 | sd->span = cpu_coregroup_map(i); |
6308 | cpus_and(sd->span, sd->span, *cpu_map); | 6326 | cpus_and(sd->span, sd->span, *cpu_map); |
6309 | sd->parent = p; | 6327 | sd->parent = p; |
6328 | p->child = sd; | ||
6310 | sd->groups = &sched_group_core[group]; | 6329 | sd->groups = &sched_group_core[group]; |
6311 | #endif | 6330 | #endif |
6312 | 6331 | ||
@@ -6318,6 +6337,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6318 | sd->span = cpu_sibling_map[i]; | 6337 | sd->span = cpu_sibling_map[i]; |
6319 | cpus_and(sd->span, sd->span, *cpu_map); | 6338 | cpus_and(sd->span, sd->span, *cpu_map); |
6320 | sd->parent = p; | 6339 | sd->parent = p; |
6340 | p->child = sd; | ||
6321 | sd->groups = &sched_group_cpus[group]; | 6341 | sd->groups = &sched_group_cpus[group]; |
6322 | #endif | 6342 | #endif |
6323 | } | 6343 | } |