author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-02-21 12:52:53 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-23 05:33:55 -0500
commit		c186fafe9aba87c1a93df8c7120a6ae01fe435ad
tree		e6cbd31f1dd720d0f614df87f3e5b2876cf32a84 /kernel/sched_fair.c
parent		d927dc937910ad8c7350266cac70e42a5f0b48cf
sched: Clean up remnants of sd_idle
With the wholesale removal of the sd_idle SMT logic we can clean up
some more.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nikhil Rao <ncrao@google.com>
Cc: Venkatesh Pallipadi <venki@google.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d384e739ea95..cd18600a8a63 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3150,25 +3150,23 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
-	/*
-	 * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
-	 * And to check for busy balance use !idle_cpu instead of
-	 * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
-	 * even when they are idle.
-	 */
-	if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
-		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-			goto out_balanced;
-	} else {
+	if (idle == CPU_IDLE) {
 		/*
 		 * This cpu is idle. If the busiest group load doesn't
 		 * have more tasks than the number of available cpu's and
 		 * there is no imbalance between this and busiest group
 		 * wrt to idle cpu's, it is balanced.
 		 */
 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
 		    sds.busiest_nr_running <= sds.busiest_group_weight)
 			goto out_balanced;
+	} else {
+		/*
+		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
+		 * imbalance_pct to be conservative.
+		 */
+		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+			goto out_balanced;
 	}
 
 force_balance:
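The imbalance_pct test in the relocated else branch is an integer percentage comparison: balancing is skipped unless the busiest group's load exceeds this group's load by the domain's imbalance_pct margin (e.g. 125 means "at least 25% busier"). A minimal standalone sketch of that arithmetic, with made-up load values and hypothetical names, not kernel code:

	#include <stdio.h>

	/*
	 * Illustration of the threshold in the hunk above. Mirrors:
	 *   100 * sds.max_load <= sd->imbalance_pct * sds.this_load
	 * Names and values here are hypothetical.
	 */
	static int within_imbalance_pct(unsigned long max_load,
					unsigned long this_load,
					unsigned int imbalance_pct)
	{
		/* Nonzero means "considered balanced": skip this balance pass. */
		return 100 * max_load <= imbalance_pct * this_load;
	}

	int main(void)
	{
		/* imbalance_pct of 125 tolerates a busiest group up to 25% busier. */
		printf("%d\n", within_imbalance_pct(1200, 1000, 125)); /* 1: balanced */
		printf("%d\n", within_imbalance_pct(1300, 1000, 125)); /* 0: imbalanced */
		return 0;
	}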
@@ -3862,8 +3860,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		if (load_balance(cpu, rq, sd, idle, &balance)) {
 			/*
 			 * We've pulled tasks over so either we're no
-			 * longer idle, or one of our SMT siblings is
-			 * not idle.
+			 * longer idle.
 			 */
 			idle = CPU_NOT_IDLE;
 		}
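The second hunk only trims the comment: once load_balance() pulls tasks, this CPU balances the remaining (wider) domains in the loop as CPU_NOT_IDLE, and the SMT-sibling caveat went away with sd_idle. A compressed sketch of that loop shape, with hypothetical stubs in place of the kernel's sched-domain walk:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's types; illustration only. */
	enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

	/* Pretend only the smallest domain finds work to pull. */
	static bool load_balance_stub(int domain_level)
	{
		return domain_level == 0;
	}

	int main(void)
	{
		enum cpu_idle_type idle = CPU_IDLE;

		/* Walk the domains bottom-up, as rebalance_domains() does. */
		for (int level = 0; level < 3; level++) {
			printf("level %d balances as %s\n", level,
			       idle == CPU_IDLE ? "CPU_IDLE" : "CPU_NOT_IDLE");
			if (load_balance_stub(level)) {
				/* We pulled tasks over, so we're no longer idle. */
				idle = CPU_NOT_IDLE;
			}
		}
		return 0;
	}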