author     Gautham R Shenoy <ego@in.ibm.com>    2009-03-25 05:14:12 -0400
committer  Ingo Molnar <mingo@elte.hu>          2009-03-25 05:30:47 -0400
commit     dbc523a3b86f9e1765b5e70e6886913b99cc5cec (patch)
tree       74e735ebf9ad70f4b204336eee333e4024fc9a9f /kernel/sched.c
parent     2e6f44aeda426054fc58464df1ad571aecca0c92 (diff)
sched: Create a helper function to calculate imbalance
Move all the imbalance calculation out of find_busiest_group()
through this helper function.

With this change, the structure of find_busiest_group() will be
as follows:

- update_sched_domain_statistics.
- check if imbalance exists.
- update imbalance and return busiest.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091411.13992.43293.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
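For orientation, the skeleton below shows roughly what find_busiest_group() looks like once this helper is in place. It is a simplified sketch rather than the exact kernel code: the out-of-balance checks are abbreviated, and update_sd_lb_stats() stands in for the "update_sched_domain_statistics" step, being the statistics helper introduced earlier in this patch series.

    static struct sched_group *
    find_busiest_group(struct sched_domain *sd, int this_cpu,
                       unsigned long *imbalance, enum cpu_idle_type idle,
                       int *sd_idle, const struct cpumask *cpus, int *balance)
    {
            struct sd_lb_stats sds;

            memset(&sds, 0, sizeof(sds));

            /* 1. Gather per-group and domain-wide load statistics. */
            update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus, balance, &sds);

            /* 2. Bail out if no usable imbalance exists (checks abbreviated). */
            if (!sds.busiest || sds.this_load >= sds.max_load ||
                sds.max_load <= sds.busiest_load_per_task)
                    goto out_balanced;

            /* 3. Looks like there is an imbalance. Compute it, return busiest. */
            calculate_imbalance(&sds, this_cpu, imbalance);
            return sds.busiest;

    out_balanced:
            *imbalance = 0;
            return NULL;
    }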
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  78
1 file changed, 45 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 540147e5e82b..934f615ccceb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3487,8 +3487,8 @@ group_next:
 
 /**
  * fix_small_imbalance - Calculate the minor imbalance that exists
- *			amongst the groups of a sched_domain, during
- *			load balancing.
+ *			amongst the groups of a sched_domain, during
+ *			load balancing.
  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
  * @imbalance: Variable to store the imbalance.
@@ -3549,6 +3549,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	if (pwr_move > pwr_now)
 		*imbalance = sds->busiest_load_per_task;
 }
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ *			 groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+		unsigned long *imbalance)
+{
+	unsigned long max_pull;
+	/*
+	 * In the presence of smp nice balancing, certain scenarios can have
+	 * max load less than avg load (as we skip the groups at or below
+	 * its cpu_power, while calculating max_load..)
+	 */
+	if (sds->max_load < sds->avg_load) {
+		*imbalance = 0;
+		return fix_small_imbalance(sds, this_cpu, imbalance);
+	}
+
+	/* Don't want to pull so many tasks that a group would go idle */
+	max_pull = min(sds->max_load - sds->avg_load,
+			sds->max_load - sds->busiest_load_per_task);
+
+	/* How much load to actually move to equalise the imbalance */
+	*imbalance = min(max_pull * sds->busiest->__cpu_power,
+		(sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+			/ SCHED_LOAD_SCALE;
+
+	/*
+	 * if *imbalance is less than the average load per runnable task
+	 * there is no guarantee that any tasks will be moved so we'll have
+	 * a think about bumping its value to force at least one task to be
+	 * moved
+	 */
+	if (*imbalance < sds->busiest_load_per_task)
+		return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
 /******* find_busiest_group() helpers end here *********************/
 
 /*
@@ -3562,7 +3603,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		   int *sd_idle, const struct cpumask *cpus, int *balance)
 {
 	struct sd_lb_stats sds;
-	unsigned long max_pull;
 
 	memset(&sds, 0, sizeof(sds));
 
@@ -3605,36 +3645,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.max_load <= sds.busiest_load_per_task)
 		goto out_balanced;
 
-	/*
-	 * In the presence of smp nice balancing, certain scenarios can have
-	 * max load less than avg load(as we skip the groups at or below
-	 * its cpu_power, while calculating max_load..)
-	 */
-	if (sds.max_load < sds.avg_load) {
-		*imbalance = 0;
-		fix_small_imbalance(&sds, this_cpu, imbalance);
-		goto ret_busiest;
-	}
-
-	/* Don't want to pull so many tasks that a group would go idle */
-	max_pull = min(sds.max_load - sds.avg_load,
-			sds.max_load - sds.busiest_load_per_task);
-
-	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds.busiest->__cpu_power,
-		(sds.avg_load - sds.this_load) * sds.this->__cpu_power)
-			/ SCHED_LOAD_SCALE;
-
-	/*
-	 * if *imbalance is less than the average load per runnable task
-	 * there is no gaurantee that any tasks will be moved so we'll have
-	 * a think about bumping its value to force at least one task to be
-	 * moved
-	 */
-	if (*imbalance < sds.busiest_load_per_task)
-		fix_small_imbalance(&sds, this_cpu, imbalance);
-
-ret_busiest:
+	/* Looks like there is an imbalance. Compute it */
+	calculate_imbalance(&sds, this_cpu, imbalance);
 	return sds.busiest;
 
 out_balanced:
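As a rough aid to reading calculate_imbalance(), the standalone snippet below runs the same max_pull/min() arithmetic on made-up numbers. SCHED_LOAD_SCALE is 1024 on this kernel; the load and cpu_power figures here are purely illustrative and not taken from any real trace.

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 1024UL

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Illustrative per-group figures, not from a real workload. */
            unsigned long max_load = 3072, avg_load = 2048, this_load = 1024;
            unsigned long busiest_load_per_task = 512;
            unsigned long busiest_power = 1024, this_power = 1024;

            /* Don't pull so much that the busiest group would go idle. */
            unsigned long max_pull = min_ul(max_load - avg_load,
                                            max_load - busiest_load_per_task);

            /* Load to move so both sides approach the domain average. */
            unsigned long imbalance = min_ul(max_pull * busiest_power,
                                             (avg_load - this_load) * this_power)
                                      / SCHED_LOAD_SCALE;

            printf("max_pull=%lu imbalance=%lu\n", max_pull, imbalance);
            /* Prints: max_pull=1024 imbalance=1024 */
            return 0;
    }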