author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-12-23 09:10:31 -0500
committer  Ingo Molnar <mingo@elte.hu>               2010-01-21 07:40:13 -0500
commit     1af3ed3ddf27499c3f57662c4c29871e2b95e5f9 (patch)
tree       12e478c9ae078aa7efd8675564cffeb34708d074 /kernel/sched_fair.c
parent     baa8c1102f0cd86e69c1497d61d2ee177e663663 (diff)
sched: Unify load_balance{,_newidle}()
load_balance() and load_balance_newidle() look remarkably similar; one
key point where they differ is the condition for when to do active balancing.
So split that logic out into a separate function.
One side effect is that load_balance_newidle() previously failed and
returned -1 under these conditions, whereas now it doesn't. I've not
yet fully figured out the -1 return case for either
load_balance{,_newidle}().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
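
To make the unification concrete, the following is a minimal, standalone C sketch of the pattern this patch applies: hoist the shared "do we need an active balance?" condition into one predicate that both balance paths call, with the newly-idle path contributing its extra early-out checks inside that predicate. The names and thresholds below (struct domain, idle_kind, power_savings_mode, the failure threshold) are simplified stand-ins for illustration only, not the kernel's actual types or exact logic.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * sched_domain / cpu_idle_type machinery, showing the shape of the
 * refactoring (one shared predicate, two callers).
 */
#include <stdbool.h>
#include <stdio.h>

enum idle_kind { IDLE_REGULAR, IDLE_NEWLY };	/* stand-in for cpu_idle_type */

struct domain {					/* stand-in for sched_domain */
	unsigned int nr_balance_failed;
	unsigned int cache_nice_tries;
	bool power_savings_mode;		/* stand-in for the sched_mc checks */
};

/* Plays the role of need_active_balance(): one condition, shared by both paths. */
static bool need_active_balance_sketch(const struct domain *sd, enum idle_kind idle)
{
	if (idle == IDLE_NEWLY) {
		/* Simplified stand-in for the newly-idle-only early outs. */
		if (!sd->power_savings_mode)
			return false;
	}
	return sd->nr_balance_failed > sd->cache_nice_tries + 2;
}

static void balance(const struct domain *sd, enum idle_kind idle, const char *who)
{
	/* Both the regular and newly-idle balance paths now share the same predicate. */
	if (need_active_balance_sketch(sd, idle))
		printf("%s: kick active balance\n", who);
	else
		printf("%s: no active balance\n", who);
}

int main(void)
{
	struct domain sd = { .nr_balance_failed = 5, .cache_nice_tries = 1,
			     .power_savings_mode = false };

	balance(&sd, IDLE_REGULAR, "load_balance");
	balance(&sd, IDLE_NEWLY, "load_balance_newidle");
	return 0;
}

As in the patch, the only per-caller difference is carried by the idle argument, so the two call sites collapse to the same need_active_balance()-style check instead of duplicating it.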
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  115
1 files changed, 59 insertions, 56 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 65d08207e925..10408323794e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2816,6 +2816,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 /* Working cpumask for load_balance and load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
+static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
+{
+	if (idle == CPU_NEWLY_IDLE) {
+		/*
+		 * The only task running in a non-idle cpu can be moved to this
+		 * cpu in an attempt to completely freeup the other CPU
+		 * package.
+		 *
+		 * The package power saving logic comes from
+		 * find_busiest_group(). If there are no imbalance, then
+		 * f_b_g() will return NULL. However when sched_mc={1,2} then
+		 * f_b_g() will select a group from which a running task may be
+		 * pulled to this cpu in order to make the other package idle.
+		 * If there is no opportunity to make a package idle and if
+		 * there are no imbalance, then f_b_g() will return NULL and no
+		 * action will be taken in load_balance_newidle().
+		 *
+		 * Under normal task pull operation due to imbalance, there
+		 * will be more than one task in the source run queue and
+		 * move_tasks() will succeed.  ld_moved will be true and this
+		 * active balance code will not be triggered.
+		 */
+		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+			return 0;
+
+		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
+			return 0;
+	}
+
+	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+}
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2902,8 +2935,7 @@ redo:
 		schedstat_inc(sd, lb_failed[idle]);
 		sd->nr_balance_failed++;
 
-		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
-
+		if (need_active_balance(sd, sd_idle, idle)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);
 
 			/* don't kick the migration_thread, if the curr
@@ -3049,66 +3081,37 @@ redo:
 		int active_balance = 0;
 
 		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
-		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-			return -1;
-
-		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
-			return -1;
+		sd->nr_balance_failed++;
 
-		if (sd->nr_balance_failed++ < 2)
-			return -1;
+		if (need_active_balance(sd, sd_idle, CPU_NEWLY_IDLE)) {
+			double_lock_balance(this_rq, busiest);
 
-		/*
-		 * The only task running in a non-idle cpu can be moved to this
-		 * cpu in an attempt to completely freeup the other CPU
-		 * package. The same method used to move task in load_balance()
-		 * have been extended for load_balance_newidle() to speedup
-		 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
-		 *
-		 * The package power saving logic comes from
-		 * find_busiest_group(). If there are no imbalance, then
-		 * f_b_g() will return NULL. However when sched_mc={1,2} then
-		 * f_b_g() will select a group from which a running task may be
-		 * pulled to this cpu in order to make the other package idle.
-		 * If there is no opportunity to make a package idle and if
-		 * there are no imbalance, then f_b_g() will return NULL and no
-		 * action will be taken in load_balance_newidle().
-		 *
-		 * Under normal task pull operation due to imbalance, there
-		 * will be more than one task in the source run queue and
-		 * move_tasks() will succeed.  ld_moved will be true and this
-		 * active balance code will not be triggered.
-		 */
+			/*
+			 * don't kick the migration_thread, if the curr
+			 * task on busiest cpu can't be moved to this_cpu
+			 */
+			if (!cpumask_test_cpu(this_cpu,
+					      &busiest->curr->cpus_allowed)) {
+				double_unlock_balance(this_rq, busiest);
+				all_pinned = 1;
+				return ld_moved;
+			}
 
-		/* Lock busiest in correct order while this_rq is held */
-		double_lock_balance(this_rq, busiest);
+			if (!busiest->active_balance) {
+				busiest->active_balance = 1;
+				busiest->push_cpu = this_cpu;
+				active_balance = 1;
+			}
 
-		/*
-		 * don't kick the migration_thread, if the curr
-		 * task on busiest cpu can't be moved to this_cpu
-		 */
-		if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
 			double_unlock_balance(this_rq, busiest);
-			all_pinned = 1;
-			return ld_moved;
-		}
-
-		if (!busiest->active_balance) {
-			busiest->active_balance = 1;
-			busiest->push_cpu = this_cpu;
-			active_balance = 1;
+			/*
+			 * Should not call ttwu while holding a rq->lock
+			 */
+			raw_spin_unlock(&this_rq->lock);
+			if (active_balance)
+				wake_up_process(busiest->migration_thread);
+			raw_spin_lock(&this_rq->lock);
 		}
-
-		double_unlock_balance(this_rq, busiest);
-		/*
-		 * Should not call ttwu while holding a rq->lock
-		 */
-		raw_spin_unlock(&this_rq->lock);
-		if (active_balance)
-			wake_up_process(busiest->migration_thread);
-		raw_spin_lock(&this_rq->lock);
-
 	} else
 		sd->nr_balance_failed = 0;
 