Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 645256b228c3..e36d99d1ddb1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2235,7 +2235,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		rq = cpu_rq(i);
 
-		if (*sd_idle && !idle_cpu(i))
+		if (*sd_idle && rq->nr_running)
 			*sd_idle = 0;
 
 		/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2257,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
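
Below is a minimal, self-contained sketch of the gating condition after this change. The helper name should_skip_balance, the simplified enum, and the omission of the NULL check on the balance pointer are illustrative assumptions for the example, not code from the kernel.

/*
 * Standalone illustration of the balancing skip logic this patch changes.
 * Simplified stand-ins for the kernel's internals; not the kernel code itself.
 */
#include <stdio.h>
#include <stdbool.h>

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

/* After the patch: only a cpu that is NOT newly idle, is in the local
 * group, and is not the designated balance_cpu bails out of balancing. */
static bool should_skip_balance(enum cpu_idle_type idle, bool local_group,
				int balance_cpu, int this_cpu)
{
	return idle != CPU_NEWLY_IDLE && local_group &&
	       balance_cpu != this_cpu;
}

int main(void)
{
	/* A newly idle cpu no longer skips, even when it is not balance_cpu. */
	printf("%d\n", should_skip_balance(CPU_NEWLY_IDLE, true, 0, 1)); /* prints 0 */
	/* A non-newly-idle cpu still defers to balance_cpu as before. */
	printf("%d\n", should_skip_balance(CPU_NOT_IDLE, true, 0, 1));   /* prints 1 */
	return 0;
}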