author    Gautham R Shenoy <ego@in.ibm.com>    2009-03-25 05:13:40 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-03-25 05:30:44 -0400
commit    6dfdb0629019f307ab18864b1fd3e5dbb02f383c (patch)
tree      bb5f0e9b59998961d71d226ff46c162694691a7d /kernel
parent    67bb6c036d1fc3d332c8527a36a546e3e72e822c (diff)
sched: Fix indentations in find_busiest_group() using gotos
Impact: cleanup

Some indentations in find_busiest_group() can be minimized by using early exits with the help of gotos. This improves readability in a couple of cases.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091340.13992.45062.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
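The transformation applied in the diff below is the usual kernel guard-clause style: invert the condition, jump out early, and keep the interesting work at one indentation level. The following is a minimal standalone sketch of that pattern only; struct group_stats, update_leader_nested() and update_leader_flat() are hypothetical names for illustration, not code from kernel/sched.c.

    /*
     * Hypothetical illustration of the early-exit cleanup; names are
     * made up and do not appear in the actual patch.
     */
    struct group_stats {
            unsigned long sum_nr_running;
            unsigned long group_capacity;
    };

    /* Before: the leader update is nested inside the capacity check. */
    static void update_leader_nested(struct group_stats *g, struct group_stats **leader)
    {
            if (g->sum_nr_running <= g->group_capacity - 1) {
                    if (!*leader || g->sum_nr_running > (*leader)->sum_nr_running)
                            *leader = g;
            }
    }

    /*
     * After: invert the check and bail out early, so the main path stays
     * flat.  Inside the loop body of find_busiest_group() the patch uses
     * "goto group_next" instead of a return, since more work follows.
     */
    static void update_leader_flat(struct group_stats *g, struct group_stats **leader)
    {
            if (g->sum_nr_running > g->group_capacity - 1)
                    return;

            if (!*leader || g->sum_nr_running > (*leader)->sum_nr_running)
                    *leader = g;
    }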
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched.c    32
1 file changed, 17 insertions, 15 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 6aec1e7a72a3..f87adbe999e0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3403,14 +3403,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * capacity but still has some space to pick up some load
 		 * from other group and save more power
 		 */
-		if (sum_nr_running <= group_capacity - 1) {
-			if (sum_nr_running > leader_nr_running ||
-			    (sum_nr_running == leader_nr_running &&
-			     group_first_cpu(group) <
-			      group_first_cpu(group_leader))) {
-				group_leader = group;
-				leader_nr_running = sum_nr_running;
-			}
+		if (sum_nr_running > group_capacity - 1)
+			goto group_next;
+
+		if (sum_nr_running > leader_nr_running ||
+		    (sum_nr_running == leader_nr_running &&
+		     group_first_cpu(group) < group_first_cpu(group_leader))) {
+			group_leader = group;
+			leader_nr_running = sum_nr_running;
 		}
 group_next:
 #endif
@@ -3531,14 +3531,16 @@ out_balanced:
 	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 		goto ret;
 
-	if (this == group_leader && group_leader != group_min) {
-		*imbalance = min_load_per_task;
-		if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-			cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-				group_first_cpu(group_leader);
-		}
-		return group_min;
-	}
+	if (this != group_leader || group_leader == group_min)
+		goto ret;
+
+	*imbalance = min_load_per_task;
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+			group_first_cpu(group_leader);
+	}
+	return group_min;
+
 #endif
 ret:
 	*imbalance = 0;
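Both hunks follow the same recipe: negate the guarding condition (by De Morgan's law, !(this == group_leader && group_leader != group_min) becomes this != group_leader || group_leader == group_min), jump straight to the exit label, and unindent the body that used to live inside the if block, which is why the hunks add and remove roughly the same lines with only whitespace and control flow changed.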