path: root/kernel/sched.c
author		Gautham R Shenoy <ego@in.ibm.com>	2009-03-25 05:14:06 -0400
committer	Ingo Molnar <mingo@elte.hu>		2009-03-25 05:30:47 -0400
commit		2e6f44aeda426054fc58464df1ad571aecca0c92 (patch)
tree		eb6f6788ac6f0ac6c71e82f7741ed8f830c3b2b4 /kernel/sched.c
parent		37abe198b1246ddd206319c43502a687db62d347 (diff)
sched: Create helper to calculate small_imbalance in fbg()
Impact: cleanup

We have two places in find_busiest_group() where we need to calculate
the minor imbalance before returning the busiest group. Encapsulate this
functionality into a separate helper function.

Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091406.13992.54316.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
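For orientation, a condensed sketch of the call structure this patch creates in
find_busiest_group(). This is illustration only, not the kernel code itself:
the statistics gathering and the main imbalance computation are elided, and the
wrapper name find_busiest_group_sketch() is made up for the example. The point
is that both the "busiest group below average load" path and the "imbalance
smaller than one task" path now call the same fix_small_imbalance() helper
instead of sharing a small_imbalance: label.

    /*
     * Condensed sketch of the new call structure -- for illustration
     * only.  find_busiest_group_sketch() is a hypothetical name; the
     * elided parts are marked with "..." comments.
     */
    static struct sched_group *
    find_busiest_group_sketch(struct sched_domain *sd, int this_cpu,
                              unsigned long *imbalance)
    {
            struct sd_lb_stats sds;

            /* ... fill sds with per-group load statistics for sd ... */

            if (sds.max_load < sds.avg_load) {
                    /* Busiest group is below average: only a minor imbalance. */
                    *imbalance = 0;
                    fix_small_imbalance(&sds, this_cpu, imbalance);
                    return sds.busiest;
            }

            /* ... compute the regular *imbalance from the statistics ... */

            /* Imbalance smaller than one task's load: same helper again. */
            if (*imbalance < sds.busiest_load_per_task)
                    fix_small_imbalance(&sds, this_cpu, imbalance);

            return sds.busiest;
    }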
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	131
1 file changed, 70 insertions(+), 61 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ec715f97202e..540147e5e82b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3484,6 +3484,71 @@ group_next:
         } while (group != sd->groups);
 
 }
+
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ *                      amongst the groups of a sched_domain, during
+ *                      load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+                               int this_cpu, unsigned long *imbalance)
+{
+        unsigned long tmp, pwr_now = 0, pwr_move = 0;
+        unsigned int imbn = 2;
+
+        if (sds->this_nr_running) {
+                sds->this_load_per_task /= sds->this_nr_running;
+                if (sds->busiest_load_per_task >
+                                sds->this_load_per_task)
+                        imbn = 1;
+        } else
+                sds->this_load_per_task =
+                        cpu_avg_load_per_task(this_cpu);
+
+        if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+                        sds->busiest_load_per_task * imbn) {
+                *imbalance = sds->busiest_load_per_task;
+                return;
+        }
+
+        /*
+         * OK, we don't have enough imbalance to justify moving tasks,
+         * however we may be able to increase total CPU power used by
+         * moving them.
+         */
+
+        pwr_now += sds->busiest->__cpu_power *
+                        min(sds->busiest_load_per_task, sds->max_load);
+        pwr_now += sds->this->__cpu_power *
+                        min(sds->this_load_per_task, sds->this_load);
+        pwr_now /= SCHED_LOAD_SCALE;
+
+        /* Amount of load we'd subtract */
+        tmp = sg_div_cpu_power(sds->busiest,
+                        sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+        if (sds->max_load > tmp)
+                pwr_move += sds->busiest->__cpu_power *
+                        min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+        /* Amount of load we'd add */
+        if (sds->max_load * sds->busiest->__cpu_power <
+                sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+                tmp = sg_div_cpu_power(sds->this,
+                        sds->max_load * sds->busiest->__cpu_power);
+        else
+                tmp = sg_div_cpu_power(sds->this,
+                        sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+        pwr_move += sds->this->__cpu_power *
+                        min(sds->this_load_per_task, sds->this_load + tmp);
+        pwr_move /= SCHED_LOAD_SCALE;
+
+        /* Move if we gain throughput */
+        if (pwr_move > pwr_now)
+                *imbalance = sds->busiest_load_per_task;
+}
 /******* find_busiest_group() helpers end here *********************/
 
 /*
@@ -3547,7 +3612,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
          */
         if (sds.max_load < sds.avg_load) {
                 *imbalance = 0;
-                goto small_imbalance;
+                fix_small_imbalance(&sds, this_cpu, imbalance);
+                goto ret_busiest;
         }
 
         /* Don't want to pull so many tasks that a group would go idle */
@@ -3565,67 +3631,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
          * a think about bumping its value to force at least one task to be
          * moved
          */
-        if (*imbalance < sds.busiest_load_per_task) {
-                unsigned long tmp, pwr_now, pwr_move;
-                unsigned int imbn;
-
-small_imbalance:
-                pwr_move = pwr_now = 0;
-                imbn = 2;
-                if (sds.this_nr_running) {
-                        sds.this_load_per_task /= sds.this_nr_running;
-                        if (sds.busiest_load_per_task >
-                                        sds.this_load_per_task)
-                                imbn = 1;
-                } else
-                        sds.this_load_per_task =
-                                cpu_avg_load_per_task(this_cpu);
-
-                if (sds.max_load - sds.this_load +
-                        sds.busiest_load_per_task >=
-                                sds.busiest_load_per_task * imbn) {
-                        *imbalance = sds.busiest_load_per_task;
-                        return sds.busiest;
-                }
-
-                /*
-                 * OK, we don't have enough imbalance to justify moving tasks,
-                 * however we may be able to increase total CPU power used by
-                 * moving them.
-                 */
-
-                pwr_now += sds.busiest->__cpu_power *
-                                min(sds.busiest_load_per_task, sds.max_load);
-                pwr_now += sds.this->__cpu_power *
-                                min(sds.this_load_per_task, sds.this_load);
-                pwr_now /= SCHED_LOAD_SCALE;
-
-                /* Amount of load we'd subtract */
-                tmp = sg_div_cpu_power(sds.busiest,
-                                sds.busiest_load_per_task * SCHED_LOAD_SCALE);
-                if (sds.max_load > tmp)
-                        pwr_move += sds.busiest->__cpu_power *
-                                min(sds.busiest_load_per_task,
-                                                sds.max_load - tmp);
-
-                /* Amount of load we'd add */
-                if (sds.max_load * sds.busiest->__cpu_power <
-                        sds.busiest_load_per_task * SCHED_LOAD_SCALE)
-                        tmp = sg_div_cpu_power(sds.this,
-                                        sds.max_load * sds.busiest->__cpu_power);
-                else
-                        tmp = sg_div_cpu_power(sds.this,
-                                sds.busiest_load_per_task * SCHED_LOAD_SCALE);
-                pwr_move += sds.this->__cpu_power *
-                                min(sds.this_load_per_task,
-                                        sds.this_load + tmp);
-                pwr_move /= SCHED_LOAD_SCALE;
-
-                /* Move if we gain throughput */
-                if (pwr_move > pwr_now)
-                        *imbalance = sds.busiest_load_per_task;
-        }
+        if (*imbalance < sds.busiest_load_per_task)
+                fix_small_imbalance(&sds, this_cpu, imbalance);
 
+ret_busiest:
         return sds.busiest;
 
 out_balanced: