Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c61ee3451a04..930189540f3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-						imbalance, sd, idle,
-						&all_pinned);
+						imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced:
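
For context, a minimal sketch of how the rebalancing section of load_balance_newidle() reads with this patch applied, reconstructed from the hunk above (the rest of the function is elided):

	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);

	nr_moved = 0;
	if (busiest->nr_running > 1) {
		/* Attempt to move tasks */
		double_lock_balance(this_rq, busiest);
		nr_moved = move_tasks(this_rq, this_cpu, busiest,
					imbalance, sd, NEWLY_IDLE, NULL);
		spin_unlock(&busiest->lock);
	}

	/* nr_moved stays 0 when busiest had nothing to pull */
	if (!nr_moved)
		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
	else
		sd->nr_balance_failed = 0;

	return nr_moved;

The newly-idle balance path now takes the busiest runqueue's lock via double_lock_balance() only when that runqueue actually has more than one runnable task to pull, rather than unconditionally, and the matching spin_unlock() moves inside the same conditional.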