author		Ingo Molnar <mingo@elte.hu>	2011-07-21 11:59:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-07-21 12:00:01 -0400
commit		994bf1c92270e3d7731ea08f1d1bd7a668314e60 (patch)
tree		4409a21eab486e53fbe350a66e8a4f28b7a720c0 /kernel/sched_fair.c
parent		bd96efe17d945f0bad56d592f8686dc6309905e7 (diff)
parent		cf6ace16a3cd8b728fb0afa68368fd40bbeae19f (diff)
Merge branch 'linus' into sched/core
Merge reason: pick up the latest scheduler fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	46
1 files changed, 23 insertions, 23 deletions
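
Every hunk below is mechanical fallout from a structural change that arrives through this merge: the per-group cpu_power/cpu_power_orig fields moved out of struct sched_group into a separately allocated, refcounted struct sched_group_power, reached through the new sgp pointer, so the power data can be shared by groups that appear in more than one place. A minimal sketch of the two structures after the split, assuming the upstream layout of this era (abridged; the exact field set and comments in the tree may differ):

/* Abridged sketch, not a verbatim copy of kernel/sched.c. */
struct sched_group_power {
	atomic_t ref;			/* shared by all groups referencing it */
	unsigned long power;		/* group capacity, SCHED_POWER_SCALE per full CPU */
	unsigned long power_orig;	/* capacity before later scaling steps */
};

struct sched_group {
	struct sched_group *next;	/* circular list of the domain's groups */
	struct sched_group_power *sgp;	/* was: unsigned long cpu_power, cpu_power_orig */
	/* ... remaining members unchanged ... */
};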
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eb98f77b38ef..e7d67a9e259a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1583,7 +1583,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 
 		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
 		if (local_group) {
 			this_load = avg_load;
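
The division here is load normalization, unchanged in substance by this patch: multiplying by SCHED_POWER_SCALE (1024 in this tree) and dividing by the group's power expresses load per unit of compute capacity. For example, a raw group load of 2048 on a group with sgp->power == 2048 (two full CPUs' worth) yields avg_load 1024, the same as raw load 1024 on a single-CPU group, so groups of unequal size compare fairly.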
@@ -2629,7 +2629,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power >>= SCHED_POWER_SHIFT;
 	}
 
-	sdg->cpu_power_orig = power;
+	sdg->sgp->power_orig = power;
 
 	if (sched_feat(ARCH_POWER))
 		power *= arch_scale_freq_power(sd, cpu);
@@ -2645,7 +2645,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power = 1;
 
 	cpu_rq(cpu)->cpu_power = power;
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
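
Taken together, these two hunks preserve the original ordering in update_cpu_power(): power_orig snapshots the capacity before the ARCH_POWER frequency scaling visible at the bottom of the first hunk (and, presumably, the real-time-usage scaling applied further down, off-screen), while the final fully scaled value lands in both cpu_rq(cpu)->cpu_power and the shared sdg->sgp->power. The ratio power/power_orig is what fix_small_capacity() inspects below.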
@@ -2663,11 +2663,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 	group = child->groups;
 	do {
-		power += group->cpu_power;
+		power += group->sgp->power;
 		group = group->next;
 	} while (group != child->groups);
 
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 /*
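
A domain group's power is simply the sum of the child domain's group powers, gathered by walking the circular next list until it wraps back around to child->groups; the total is then published through the same shared sgp object.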
@@ -2689,7 +2689,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * If ~90% of the cpu_power is still there, we're good.
 	 */
-	if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
 		return 1;
 
 	return 0;
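
The 32/29 pair is an integer-only rendering of the ~90% threshold named in the comment: power * 32 > power_orig * 29 is equivalent to power / power_orig > 29/32 ≈ 0.906, and cross-multiplying avoids a division on this path.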
@@ -2769,7 +2769,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -2786,7 +2786,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
 						SCHED_POWER_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
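
group_capacity rounds the group's power to a whole number of CPUs' worth of capacity. DIV_ROUND_CLOSEST rounds to nearest, so with SCHED_POWER_SCALE == 1024 a group at power 1536 counts as capacity 2, while a group squeezed below 512 (for instance by heavy RT activity) rounds to 0 and falls back to fix_small_capacity(). A simplified shape of the include/linux/kernel.h helper, for positive operands:

/* Simplified sketch; the real macro also evaluates its divisor only once. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))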
@@ -2875,7 +2875,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 			return;
 
 		sds->total_load += sgs.group_load;
-		sds->total_pwr += sg->cpu_power;
+		sds->total_pwr += sg->sgp->power;
 
 		/*
 		 * In case the child domain prefers tasks go to siblings
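
total_pwr accumulates the power of every group in the domain; later, in find_busiest_group(), the domain-wide average load is formed the same way as the per-group avg_load above, as total_load * SCHED_POWER_SCALE / total_pwr, which is why both sums are collected in this single pass.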
@@ -2960,7 +2960,7 @@ static int check_asym_packing(struct sched_domain *sd,
 	if (this_cpu > busiest_cpu)
 		return 0;
 
-	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
 				       SCHED_POWER_SCALE);
 	return 1;
}
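
Multiplying by the busiest group's power and dividing back out by SCHED_POWER_SCALE undoes the normalization described earlier: max_load is capacity-relative, and the result is the raw amount of load to move so the packing preference toward lower-numbered CPUs can be honoured.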
@@ -2991,7 +2991,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	scaled_busy_load_per_task = sds->busiest_load_per_task
 					 * SCHED_POWER_SCALE;
-	scaled_busy_load_per_task /= sds->busiest->cpu_power;
+	scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
@@ -3005,28 +3005,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	 * moving them.
 	 */
 
-	pwr_now += sds->busiest->cpu_power *
+	pwr_now += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load);
-	pwr_now += sds->this->cpu_power *
+	pwr_now += sds->this->sgp->power *
 			min(sds->this_load_per_task, sds->this_load);
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->cpu_power;
+		sds->busiest->sgp->power;
 	if (sds->max_load > tmp)
-		pwr_move += sds->busiest->cpu_power *
+		pwr_move += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load - tmp);
 
 	/* Amount of load we'd add */
-	if (sds->max_load * sds->busiest->cpu_power <
+	if (sds->max_load * sds->busiest->sgp->power <
 	    sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->cpu_power) /
-			sds->this->cpu_power;
+		tmp = (sds->max_load * sds->busiest->sgp->power) /
+			sds->this->sgp->power;
 	else
 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-			sds->this->cpu_power;
-	pwr_move += sds->this->cpu_power *
+			sds->this->sgp->power;
+	pwr_move += sds->this->sgp->power *
 		min(sds->this_load_per_task, sds->this_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
 
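
pwr_now and pwr_move estimate served load, in the same power-scaled units, before and after hypothetically moving one task of weight busiest_load_per_task from the busiest group to this one; each min() caps a group's contribution at the load it actually has to run. In the code following this hunk (not shown here), the function compares the two estimates and only reports an imbalance when the move would increase the total served load.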
@@ -3072,7 +3072,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-		load_above_capacity /= sds->busiest->cpu_power;
+		load_above_capacity /= sds->busiest->sgp->power;
 	}
 
 	/*
@@ -3088,8 +3088,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds->busiest->cpu_power,
-		(sds->avg_load - sds->this_load) * sds->this->cpu_power)
+	*imbalance = min(max_pull * sds->busiest->sgp->power,
+		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
 		/ SCHED_POWER_SCALE;
 
 	/*
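
The final min() takes the smaller of what the busiest group can shed (max_pull, in normalized units, weighted by its power) and what this group can absorb before reaching the domain average (its load deficit, weighted by its own power); dividing by SCHED_POWER_SCALE converts the result back to raw load units, mirroring the normalization applied in update_sg_lb_stats().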