author	Siddha, Suresh B <suresh.b.siddha@intel.com>	2006-03-27 04:15:23 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-27 11:44:44 -0500
commit	0806903316d516a3b3851c51cea5c71724d9051d (patch)
tree	05b453747a68a32bfd1d668a53963a4bb0bc36d1	/kernel/sched.c
parent	1e9f28fa1eb9773bf65bae08288c6a0a38eef4a7 (diff)
[PATCH] sched: fix group power for allnodes_domains
The current sched groups power calculation for allnodes_domains is wrong. We should really be using the cumulative power of the physical packages in that group (similar to the calculation in node_domains).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
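For context, a minimal userspace sketch (not kernel code) of the arithmetic involved, assuming SCHED_LOAD_SCALE is 128 (its value in kernels of this era); the topology, two physical packages with four SMT siblings each, is made up for illustration:

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

int main(void)
{
	int cpus_in_group = 8;		/* 2 packages x 4 SMT siblings (made up) */
	int packages_in_group = 2;
	int siblings_per_package = 4;
	unsigned long pkg_power, old_power, new_power;

	/* Per-package power, as computed for phys_domains. */
	pkg_power = SCHED_LOAD_SCALE +
		SCHED_LOAD_SCALE * (siblings_per_package - 1) / 10;

	/* Old: one flat estimate from the raw cpu count of the group. */
	old_power = SCHED_LOAD_SCALE +
		SCHED_LOAD_SCALE * (cpus_in_group - 1) / 10;

	/* New: add each physical package's power exactly once. */
	new_power = packages_in_group * pkg_power;

	printf("per-package power: %lu\n", pkg_power);	/* 128 + 38 = 166 */
	printf("old estimate:      %lu\n", old_power);	/* 128 + 89 = 217 */
	printf("cumulative power:  %lu\n", new_power);	/* 2 * 166  = 332 */
	return 0;
}

In this example the gap between 217 and 332 is the point of the patch: the flat estimate undercounts a group as soon as it spans more than one physical package.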
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	62
1 file changed, 29 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8a8b71b5751b..7854ee516b92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5621,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+	struct sched_group *sg = group_head;
+	int j;
+
+	if (!sg)
+		return;
+next_sg:
+	for_each_cpu_mask(j, sg->cpumask) {
+		struct sched_domain *sd;
+
+		sd = &per_cpu(phys_domains, j);
+		if (j != first_cpu(sd->groups->cpumask)) {
+			/*
+			 * Only add "power" once for each
+			 * physical package.
+			 */
+			continue;
+		}
+
+		sg->cpu_power += sd->groups->cpu_power;
+	}
+	sg = sg->next;
+	if (sg != group_head)
+		goto next_sg;
+}
 #endif
 
 /*
@@ -5866,43 +5892,13 @@ void build_sched_domains(const cpumask_t *cpu_map)
 				(cpus_weight(sd->groups->cpumask)-1) / 10;
 		sd->groups->cpu_power = power;
 #endif
-
-#ifdef CONFIG_NUMA
-		sd = &per_cpu(allnodes_domains, i);
-		if (sd->groups) {
-			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-			sd->groups->cpu_power = power;
-		}
-#endif
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		struct sched_group *sg = sched_group_nodes[i];
-		int j;
-
-		if (sg == NULL)
-			continue;
-next_sg:
-		for_each_cpu_mask(j, sg->cpumask) {
-			struct sched_domain *sd;
+	for (i = 0; i < MAX_NUMNODES; i++)
+		init_numa_sched_groups_power(sched_group_nodes[i]);
 
-			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
-				/*
-				 * Only add "power" once for each
-				 * physical package.
-				 */
-				continue;
-			}
-
-			sg->cpu_power += sd->groups->cpu_power;
-		}
-		sg = sg->next;
-		if (sg != sched_group_nodes[i])
-			goto next_sg;
-	}
+	init_numa_sched_groups_power(sched_group_allnodes);
 #endif
 
 	/* Attach the domains */
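A side note on the shape of the new helper: the next_sg label and goto walk a circular, singly linked list of sched_groups. A do/while sketch of the same traversal, using a hypothetical walk_groups() and a struct pared down to the fields the walk needs (not the kernel's full struct sched_group):

/*
 * Sketch of the equivalent do/while form of the goto loop in
 * init_numa_sched_groups_power(). walk_groups() is a hypothetical
 * name; the struct below is a simplification for illustration.
 */
struct sched_group {
	struct sched_group *next;	/* circular, singly linked list */
	unsigned long cpu_power;
};

void walk_groups(struct sched_group *group_head)
{
	struct sched_group *sg = group_head;

	if (!sg)
		return;
	do {
		/* ... fold each physical package's power into sg->cpu_power ... */
		sg = sg->next;
	} while (sg != group_head);
}

Either form terminates when the walk comes back around to group_head; the patch simply keeps the goto style of the loop it factored out of build_sched_domains().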