author    Peter Zijlstra <a.p.zijlstra@chello.nl>       2009-09-01 04:34:34 -0400
committer Ingo Molnar <mingo@elte.hu>                   2009-09-04 04:09:53 -0400
commit    cc9fba7d7672fa3ed58d9d9ecb6c45b1351c29a6 (patch)
tree      a2ae08f8edad60c17134fb5ff60b0deb21dc96ab /kernel/sched.c
parent    b5d978e0c7e79a7ff842e895c85a86b38c71f1cd (diff)
sched: Update the cpu_power sum during load-balance
In order to prepare for a more dynamic cpu_power, update the
group sum while walking the sched domains during load-balance.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083825.985050292@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
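[Editor's note] The core of the change is the child-group walk introduced by update_sched_power() in the diff below: a parent domain's cpu_power becomes the sum of its child domain's groups. As a rough illustration only, here is a minimal standalone C model of that aggregation. The struct layouts, field names, example power values, and the update_sched_power_model() helper are simplified stand-ins for the kernel's real types, not the patched code itself.

/*
 * Simplified, standalone model of the aggregation done by
 * update_sched_power() in the patch below: recompute a parent
 * domain's cpu_power as the sum of its child domain's groups,
 * which are linked in a circular list.
 */
#include <stdio.h>

struct sched_group {
	unsigned long __cpu_power;
	struct sched_group *next;	/* circular list of groups */
};

struct sched_domain {
	struct sched_domain *child;	/* NULL for a leaf domain */
	struct sched_group *groups;
};

static void update_sched_power_model(struct sched_domain *sd)
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;

	if (!child)
		return;			/* leaf domain: nothing to sum here */

	sdg->__cpu_power = 0;
	group = child->groups;
	do {				/* walk the circular group list once */
		sdg->__cpu_power += group->__cpu_power;
		group = group->next;
	} while (group != child->groups);
}

int main(void)
{
	struct sched_group g1 = { 1024, NULL }, g2 = { 512, NULL };
	struct sched_group parent_group = { 0, NULL };
	struct sched_domain child = { NULL, &g1 };
	struct sched_domain parent = { &child, &parent_group };

	g1.next = &g2;
	g2.next = &g1;			/* close the circular list */
	parent_group.next = &parent_group;

	update_sched_power_model(&parent);
	printf("parent cpu_power = %lu\n", parent_group.__cpu_power); /* 1536 */
	return 0;
}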
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d64cec9ae1d..ecb4a47d4214 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3699,6 +3699,28 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static void update_sched_power(struct sched_domain *sd)
+{
+	struct sched_domain *child = sd->child;
+	struct sched_group *group, *sdg = sd->groups;
+	unsigned long power = sdg->__cpu_power;
+
+	if (!child) {
+		/* compute cpu power for this cpu */
+		return;
+	}
+
+	sdg->__cpu_power = 0;
+
+	group = child->groups;
+	do {
+		sdg->__cpu_power += group->__cpu_power;
+		group = group->next;
+	} while (group != child->groups);
+
+	if (power != sdg->__cpu_power)
+		sdg->reciprocal_cpu_power = reciprocal_value(sdg->__cpu_power);
+}
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
@@ -3712,7 +3734,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+			struct sched_group *group, int this_cpu,
 			enum cpu_idle_type idle, int load_idx, int *sd_idle,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
@@ -3723,8 +3746,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
 	unsigned long sum_avg_load_per_task;
 	unsigned long avg_load_per_task;
 
-	if (local_group)
+	if (local_group) {
 		balance_cpu = group_first_cpu(group);
+		if (balance_cpu == this_cpu)
+			update_sched_power(sd);
+	}
 
 	/* Tally up the load of all CPUs in the group */
 	sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3828,7 +3854,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 				   local_group, cpus, balance, &sgs);
 
 		if (local_group && balance && !(*balance))
@@ -3863,7 +3889,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sd_power_savings_stats(group, sds, local_group, &sgs);
 		group = group->next;
 	} while (group != sd->groups);
-
 }
 
 /**