Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	54
1 file changed, 54 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index c1b8b3031eb2..8fc0d5aa43b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3670,10 +3670,64 @@ redo:
 	}
 
 	if (!ld_moved) {
+		int active_balance;
+
 		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
 		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 			return -1;
+
+		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
+			return -1;
+
+		if (sd->nr_balance_failed++ < 2)
+			return -1;
+
+		/*
+		 * The only task running in a non-idle cpu can be moved to this
+		 * cpu in an attempt to completely freeup the other CPU
+		 * package. The same method used to move task in load_balance()
+		 * have been extended for load_balance_newidle() to speedup
+		 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
+		 *
+		 * The package power saving logic comes from
+		 * find_busiest_group(). If there are no imbalance, then
+		 * f_b_g() will return NULL. However when sched_mc={1,2} then
+		 * f_b_g() will select a group from which a running task may be
+		 * pulled to this cpu in order to make the other package idle.
+		 * If there is no opportunity to make a package idle and if
+		 * there are no imbalance, then f_b_g() will return NULL and no
+		 * action will be taken in load_balance_newidle().
+		 *
+		 * Under normal task pull operation due to imbalance, there
+		 * will be more than one task in the source run queue and
+		 * move_tasks() will succeed. ld_moved will be true and this
+		 * active balance code will not be triggered.
+		 */
+
+		/* Lock busiest in correct order while this_rq is held */
+		double_lock_balance(this_rq, busiest);
+
+		/*
+		 * don't kick the migration_thread, if the curr
+		 * task on busiest cpu can't be moved to this_cpu
+		 */
+		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+			double_unlock_balance(this_rq, busiest);
+			all_pinned = 1;
+			return ld_moved;
+		}
+
+		if (!busiest->active_balance) {
+			busiest->active_balance = 1;
+			busiest->push_cpu = this_cpu;
+			active_balance = 1;
+		}
+
+		double_unlock_balance(this_rq, busiest);
+		if (active_balance)
+			wake_up_process(busiest->migration_thread);
+
 	} else
 		sd->nr_balance_failed = 0;
 
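In short, the hunk lets a newly-idle CPU fall back to active balancing when sched_mc is at POWERSAVINGS_BALANCE_WAKEUP and ordinary task pulling has failed a couple of times: it marks the busiest runqueue for a push toward this CPU and wakes that CPU's migration thread, provided the running task's affinity allows it. The sketch below is a minimal user-space model of that decision sequence under stated assumptions, not kernel code; the struct rq layout, decide_active_balance() helper, and the plain unsigned-long affinity mask are simplifications introduced here for illustration.

/*
 * Hypothetical stand-alone sketch of the gating order the patch adds to
 * load_balance_newidle(): power-savings level, repeated balance failures,
 * task affinity, and whether an active balance is already pending.
 * All names and types here are simplified for illustration.
 */
#include <stdio.h>

#define POWERSAVINGS_BALANCE_WAKEUP 2

struct rq {
	int active_balance;          /* push already requested on this rq? */
	int push_cpu;                /* target CPU for the pending push */
	unsigned long curr_allowed;  /* affinity mask of the running task */
};

/* Returns 1 if the caller should wake the busiest CPU's migration thread. */
static int decide_active_balance(int this_cpu, struct rq *busiest,
				 int sched_mc_power_savings,
				 int *nr_balance_failed)
{
	/* Only at sched_mc=2 is a lone running task worth pushing. */
	if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
		return 0;

	/* Give ordinary newidle pulls a couple of chances first. */
	if ((*nr_balance_failed)++ < 2)
		return 0;

	/* The running task must be allowed to run on this CPU. */
	if (!(busiest->curr_allowed & (1UL << this_cpu)))
		return 0;

	/* Request the push unless one is already pending. */
	if (!busiest->active_balance) {
		busiest->active_balance = 1;
		busiest->push_cpu = this_cpu;
		return 1;	/* caller wakes the migration thread */
	}
	return 0;
}

int main(void)
{
	struct rq busiest = { .active_balance = 0, .push_cpu = -1,
			      .curr_allowed = ~0UL };
	int failed = 2;	/* two earlier newidle attempts already failed */

	if (decide_active_balance(0, &busiest, 2, &failed))
		printf("wake migration thread, push toward CPU %d\n",
		       busiest.push_cpu);
	return 0;
}

The ordering mirrors the patch's intent: the cheap early returns keep the common case (no power-savings policy, or too few failures) out of the locked section, and the push request itself is only made once per busiest runqueue so the migration thread is not woken redundantly.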