author     Gautham R Shenoy <ego@in.ibm.com>          2009-03-25 05:14:01 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-03-25 05:30:46 -0400
commit     37abe198b1246ddd206319c43502a687db62d347 (patch)
tree       8289ef7751b393fe90ba39c4576d93fe57059381
parent     222d656dea57e4e084fbd1e9383e6fed2ca9fa61 (diff)
sched: Create a helper function to calculate sched_domain stats for fbg()
Impact: cleanup
Create a helper function named update_sd_lb_stats() to update the
various sched_domain related statistics in find_busiest_group().
With this, all of the statistics computation is moved out of
find_busiest_group().
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091401.13992.88737.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   kernel/sched.c   117
1 file changed, 73 insertions, 44 deletions
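Before the diff itself, a minimal, self-contained C sketch of the pattern this patch applies may help: the caller zeroes a statistics structure and hands it to a helper that fills it in through an out-parameter, so the caller only consumes the results. The names below (lb_stats, update_lb_stats) are simplified stand-ins invented for illustration; they are not the kernel's sd_lb_stats/update_sd_lb_stats code.

/*
 * Illustrative sketch only -- NOT kernel code.  It mirrors the shape of the
 * refactoring: statistics are accumulated into a caller-owned struct by a
 * helper, keeping the computation out of the calling function.
 */
#include <stdio.h>
#include <string.h>

struct lb_stats {			/* simplified stand-in for sd_lb_stats */
	unsigned long total_load;
	unsigned long max_load;
	int busiest;			/* index of the most loaded group */
};

/*
 * Helper: walk the per-group loads and fill *stats, analogous in spirit to
 * update_sd_lb_stats() filling a struct sd_lb_stats for find_busiest_group().
 */
static void update_lb_stats(const unsigned long *group_load, int nr_groups,
			    struct lb_stats *stats)
{
	int i;

	for (i = 0; i < nr_groups; i++) {
		stats->total_load += group_load[i];
		if (group_load[i] > stats->max_load) {
			stats->max_load = group_load[i];
			stats->busiest = i;
		}
	}
}

int main(void)
{
	unsigned long load[] = { 120, 340, 90 };
	struct lb_stats stats;

	memset(&stats, 0, sizeof(stats));	/* caller owns and zeroes the stats */
	update_lb_stats(load, 3, &stats);	/* helper does the computation */

	printf("total=%lu max=%lu busiest=%d\n",
	       stats.total_load, stats.max_load, stats.busiest);
	return 0;
}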
diff --git a/kernel/sched.c b/kernel/sched.c
index 8198dbe8e4aa..ec715f97202e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3365,32 +3365,33 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
 	sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
 
 }
-/******* find_busiest_group() helpers end here *********************/
 
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: sched_domain whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sds: variable to hold the statistics for this sched_domain.
  */
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   int *sd_idle, const struct cpumask *cpus, int *balance)
+static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+			enum cpu_idle_type idle, int *sd_idle,
+			const struct cpumask *cpus, int *balance,
+			struct sd_lb_stats *sds)
 {
-	struct sd_lb_stats sds;
 	struct sched_group *group = sd->groups;
-	unsigned long max_pull;
+	struct sg_lb_stats sgs;
 	int load_idx;
 
-	memset(&sds, 0, sizeof(sds));
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	sds.power_savings_balance = 1;
-	sds.min_nr_running = ULONG_MAX;
+	sds->power_savings_balance = 1;
+	sds->min_nr_running = ULONG_MAX;
 #endif
 	load_idx = get_sd_load_idx(sd, idle);
 
 	do {
-		struct sg_lb_stats sgs;
 		int local_group;
 
 		local_group = cpumask_test_cpu(this_cpu,
@@ -3399,25 +3400,25 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
 					local_group, cpus, balance, &sgs);
 
-		if (balance && !(*balance))
-			goto ret;
+		if (local_group && balance && !(*balance))
+			return;
 
-		sds.total_load += sgs.group_load;
-		sds.total_pwr += group->__cpu_power;
+		sds->total_load += sgs.group_load;
+		sds->total_pwr += group->__cpu_power;
 
 		if (local_group) {
-			sds.this_load = sgs.avg_load;
-			sds.this = group;
-			sds.this_nr_running = sgs.sum_nr_running;
-			sds.this_load_per_task = sgs.sum_weighted_load;
-		} else if (sgs.avg_load > sds.max_load &&
+			sds->this_load = sgs.avg_load;
+			sds->this = group;
+			sds->this_nr_running = sgs.sum_nr_running;
+			sds->this_load_per_task = sgs.sum_weighted_load;
+		} else if (sgs.avg_load > sds->max_load &&
 			   (sgs.sum_nr_running > sgs.group_capacity ||
 				sgs.group_imb)) {
-			sds.max_load = sgs.avg_load;
-			sds.busiest = group;
-			sds.busiest_nr_running = sgs.sum_nr_running;
-			sds.busiest_load_per_task = sgs.sum_weighted_load;
-			sds.group_imb = sgs.group_imb;
+			sds->max_load = sgs.avg_load;
+			sds->busiest = group;
+			sds->busiest_nr_running = sgs.sum_nr_running;
+			sds->busiest_load_per_task = sgs.sum_weighted_load;
+			sds->group_imb = sgs.group_imb;
 		}
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3434,15 +3435,15 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * no need to do power savings balance at this domain
 		 */
 		if (local_group &&
-			(sds.this_nr_running >= sgs.group_capacity ||
-			!sds.this_nr_running))
-			sds.power_savings_balance = 0;
+			(sds->this_nr_running >= sgs.group_capacity ||
+			!sds->this_nr_running))
+			sds->power_savings_balance = 0;
 
 		/*
 		 * If a group is already running at full capacity or idle,
 		 * don't include that group in power savings calculations
 		 */
-		if (!sds.power_savings_balance ||
+		if (!sds->power_savings_balance ||
 			sgs.sum_nr_running >= sgs.group_capacity ||
 			!sgs.sum_nr_running)
 			goto group_next;
@@ -3452,13 +3453,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * This is the group from where we need to pick up the load
 		 * for saving power
 		 */
-		if ((sgs.sum_nr_running < sds.min_nr_running) ||
-		    (sgs.sum_nr_running == sds.min_nr_running &&
+		if ((sgs.sum_nr_running < sds->min_nr_running) ||
+		    (sgs.sum_nr_running == sds->min_nr_running &&
 		     group_first_cpu(group) >
-			group_first_cpu(sds.group_min))) {
-			sds.group_min = group;
-			sds.min_nr_running = sgs.sum_nr_running;
-			sds.min_load_per_task = sgs.sum_weighted_load /
+			group_first_cpu(sds->group_min))) {
+			sds->group_min = group;
+			sds->min_nr_running = sgs.sum_nr_running;
+			sds->min_load_per_task = sgs.sum_weighted_load /
 						sgs.sum_nr_running;
 		}
 
@@ -3470,18 +3471,46 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		if (sgs.sum_nr_running > sgs.group_capacity - 1)
 			goto group_next;
 
-		if (sgs.sum_nr_running > sds.leader_nr_running ||
-		    (sgs.sum_nr_running == sds.leader_nr_running &&
+		if (sgs.sum_nr_running > sds->leader_nr_running ||
+		    (sgs.sum_nr_running == sds->leader_nr_running &&
 		     group_first_cpu(group) <
-		     group_first_cpu(sds.group_leader))) {
-			sds.group_leader = group;
-			sds.leader_nr_running = sgs.sum_nr_running;
+		     group_first_cpu(sds->group_leader))) {
+			sds->group_leader = group;
+			sds->leader_nr_running = sgs.sum_nr_running;
 		}
 group_next:
 #endif
 		group = group->next;
 	} while (group != sd->groups);
 
+}
+/******* find_busiest_group() helpers end here *********************/
+
+/*
+ * find_busiest_group finds and returns the busiest CPU group within the
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+		   unsigned long *imbalance, enum cpu_idle_type idle,
+		   int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+	struct sd_lb_stats sds;
+	unsigned long max_pull;
+
+	memset(&sds, 0, sizeof(sds));
+
+	/*
+	 * Compute the various statistics relavent for load balancing at
+	 * this level.
+	 */
+	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+					balance, &sds);
+
+	if (balance && !(*balance))
+		goto ret;
+
 	if (!sds.busiest || sds.this_load >= sds.max_load
 		|| sds.busiest_nr_running == 0)
 		goto out_balanced;