Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  62
1 file changed, 29 insertions, 33 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8a8b71b5751b..7854ee516b92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5621,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+	struct sched_group *sg = group_head;
+	int j;
+
+	if (!sg)
+		return;
+next_sg:
+	for_each_cpu_mask(j, sg->cpumask) {
+		struct sched_domain *sd;
+
+		sd = &per_cpu(phys_domains, j);
+		if (j != first_cpu(sd->groups->cpumask)) {
+			/*
+			 * Only add "power" once for each
+			 * physical package.
+			 */
+			continue;
+		}
+
+		sg->cpu_power += sd->groups->cpu_power;
+	}
+	sg = sg->next;
+	if (sg != group_head)
+		goto next_sg;
+}
 #endif
 
 /*
@@ -5866,43 +5892,13 @@ void build_sched_domains(const cpumask_t *cpu_map)
 			(cpus_weight(sd->groups->cpumask)-1) / 10;
 		sd->groups->cpu_power = power;
 #endif
-
-#ifdef CONFIG_NUMA
-		sd = &per_cpu(allnodes_domains, i);
-		if (sd->groups) {
-			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-			sd->groups->cpu_power = power;
-		}
-#endif
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		struct sched_group *sg = sched_group_nodes[i];
-		int j;
-
-		if (sg == NULL)
-			continue;
-next_sg:
-		for_each_cpu_mask(j, sg->cpumask) {
-			struct sched_domain *sd;
-
-			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
-				/*
-				 * Only add "power" once for each
-				 * physical package.
-				 */
-				continue;
-			}
-
-			sg->cpu_power += sd->groups->cpu_power;
-		}
-		sg = sg->next;
-		if (sg != sched_group_nodes[i])
-			goto next_sg;
-	}
+	for (i = 0; i < MAX_NUMNODES; i++)
+		init_numa_sched_groups_power(sched_group_nodes[i]);
+
+	init_numa_sched_groups_power(sched_group_allnodes);
 #endif
 
 	/* Attach the domains */
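
The new helper in the first hunk walks a circular, singly linked list of sched_groups and adds each physical package's cpu_power exactly once: the first_cpu() check skips CPUs that are not the first CPU of their phys_domains group. The sketch below is a minimal user-space model of that walk, assuming a toy four-CPU, two-package topology; struct group, phys_group, first_cpu(), init_groups_power() and NR_CPUS here are simplified stand-ins for illustration, not the kernel's types or API.

/*
 * Simplified model of the init_numa_sched_groups_power() walk above:
 * traverse a circular list of groups and let each group accumulate the
 * power of every distinct physical package it spans, counted once.
 */
#include <stdio.h>

#define NR_CPUS 4

struct group {
	unsigned long cpumask;		/* bit i set => CPU i is in the group */
	unsigned long cpu_power;
	struct group *next;		/* circular list */
};

/* stand-in for per_cpu(phys_domains, j)->groups: one entry per CPU */
static struct group phys_group[NR_CPUS];

static int first_cpu(unsigned long mask)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (mask & (1UL << i))
			return i;
	return -1;
}

/* mirrors the structure of the new helper: walk the circular list once */
static void init_groups_power(struct group *group_head)
{
	struct group *sg = group_head;
	int j;

	if (!sg)
		return;
next_sg:
	for (j = 0; j < NR_CPUS; j++) {
		struct group *phys;

		if (!(sg->cpumask & (1UL << j)))
			continue;
		phys = &phys_group[j];
		/* only add "power" once for each physical package */
		if (j != first_cpu(phys->cpumask))
			continue;
		sg->cpu_power += phys->cpu_power;
	}
	sg = sg->next;
	if (sg != group_head)
		goto next_sg;
}

int main(void)
{
	/* CPUs 0,1 share package A; CPUs 2,3 share package B */
	struct group pkg_a = { .cpumask = 0x3, .cpu_power = 128 };
	struct group pkg_b = { .cpumask = 0xc, .cpu_power = 128 };
	struct group node  = { .cpumask = 0xf, .cpu_power = 0 };
	int i;

	for (i = 0; i < 2; i++)
		phys_group[i] = pkg_a;
	for (i = 2; i < NR_CPUS; i++)
		phys_group[i] = pkg_b;

	node.next = &node;		/* single-entry circular list */
	init_groups_power(&node);

	/* expect 256: each package contributes its power exactly once */
	printf("node cpu_power = %lu\n", node.cpu_power);
	return 0;
}

Built and run, the sketch prints node cpu_power = 256: each package is counted once even though two CPUs map to it, which is the effect the kernel loop achieves with for_each_cpu_mask() and per_cpu(phys_domains, j). The second hunk then reuses the helper for both sched_group_nodes[] and sched_group_allnodes instead of open-coding the loop.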