Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b5b350135002..d327511d268e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long imbalance;
 	struct rq *busiest;
 	cpumask_t cpus = CPU_MASK_ALL;
+	unsigned long flags;
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ redo:
 	 * still unbalanced. nr_moved simply stays zero, so it is
 	 * correctly treated as an imbalance.
 	 */
+	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
 	nr_moved = move_tasks(this_rq, this_cpu, busiest,
 				minus_1_or_zero(busiest->nr_running),
 				imbalance, sd, idle, &all_pinned);
 	double_rq_unlock(this_rq, busiest);
+	local_irq_restore(flags);
 
 	/* All tasks on this runqueue were pinned by CPU affinity */
 	if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ redo:
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock(&busiest->lock);
+		spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-			spin_unlock(&busiest->lock);
+			spin_unlock_irqrestore(&busiest->lock, flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -2633,7 +2634,7 @@ redo:
 		busiest->push_cpu = this_cpu;
 		active_balance = 1;
 	}
-	spin_unlock(&busiest->lock);
+	spin_unlock_irqrestore(&busiest->lock, flags);
 	if (active_balance)
 		wake_up_process(busiest->migration_thread);
 
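
For readers following the locking change above: after this patch, the runqueue locks in load_balance() are taken with interrupts disabled, either by bracketing double_rq_lock()/double_rq_unlock() with local_irq_save()/local_irq_restore(), or by using the _irqsave/_irqrestore spinlock variants when only busiest->lock is needed. The sketch below is a minimal, simplified illustration of that caller-side pattern; struct rq is reduced to a placeholder and the sketch_* helpers are hypothetical stand-ins, not the actual code from sched.c.

#include <linux/spinlock.h>

/* Sketch only: a stripped-down runqueue with just the lock we care about. */
struct rq {
	spinlock_t lock;
};

/* Take two runqueue locks in address order so concurrent callers cannot deadlock. */
static void sketch_double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		spin_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		spin_lock(&rq1->lock);
		spin_lock(&rq2->lock);
	} else {
		spin_lock(&rq2->lock);
		spin_lock(&rq1->lock);
	}
}

static void sketch_double_rq_unlock(struct rq *rq1, struct rq *rq2)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
}

/* Caller-side pattern matching the patched load_balance(): irqs off around both locks. */
static void sketch_balance(struct rq *this_rq, struct rq *busiest)
{
	unsigned long flags;

	local_irq_save(flags);			/* interrupts off before taking rq locks */
	sketch_double_rq_lock(this_rq, busiest);
	/* ... move tasks between the two runqueues ... */
	sketch_double_rq_unlock(this_rq, busiest);
	local_irq_restore(flags);		/* re-enable only after both locks are dropped */
}

Where only a single lock is held, as in the active-balance branch of the hunks above, spin_lock_irqsave(&busiest->lock, flags) and spin_unlock_irqrestore(&busiest->lock, flags) combine the interrupt-disable and locking steps in one call.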