author     Gautham R Shenoy <ego@in.ibm.com>    2009-03-25 05:13:46 -0400
committer  Ingo Molnar <mingo@elte.hu>          2009-03-25 05:30:45 -0400
commit     381be78fdc829a22f6327a0ed09f54b6270a976d (patch)
tree       d69536e051f14b67138267dd6010a895cdb5745e /kernel/sched.c
parent     6dfdb0629019f307ab18864b1fd3e5dbb02f383c (diff)
sched: Define structure to store the sched_group statistics for fbg()
Impact: cleanup

Currently a whole bunch of variables are used to store the various
statistics pertaining to the groups we iterate over in
find_busiest_group(). Group them together in a single data structure
and add appropriate comments. This will be useful later on when we
create helper functions to calculate the sched_group statistics.

Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091345.13992.20099.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
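The pattern the patch applies is worth spelling out: a cluster of
independent loop-local accumulators is replaced by one struct that is
zeroed with a single memset() and filled in as the loop walks the
group's CPUs. Below is a minimal standalone sketch of that idea, not
the kernel's code: the cpu_load[] and cpu_nr_running[] arrays and the
NR_GROUP_CPUS constant are illustrative stand-ins for data the kernel
actually reads from each CPU's struct rq.

#include <stdio.h>
#include <string.h>

#define NR_GROUP_CPUS 4

/* Illustrative per-CPU samples (hypothetical, not kernel API). */
static unsigned long cpu_load[NR_GROUP_CPUS]       = { 100, 250, 75, 300 };
static unsigned long cpu_nr_running[NR_GROUP_CPUS] = { 1, 3, 1, 2 };

/* Grouped accumulators, mirroring the patch's struct sg_lb_stats. */
struct sg_stats {
	unsigned long group_load;     /* total load over the group's CPUs */
	unsigned long sum_nr_running; /* tasks running in the group */
	unsigned long avg_load;       /* load normalized per CPU */
};

int main(void)
{
	struct sg_stats sgs;

	/* One memset() replaces a chain of "a = b = c = 0;" resets. */
	memset(&sgs, 0, sizeof(sgs));

	for (int i = 0; i < NR_GROUP_CPUS; i++) {
		sgs.group_load += cpu_load[i];
		sgs.sum_nr_running += cpu_nr_running[i];
	}
	sgs.avg_load = sgs.group_load / NR_GROUP_CPUS;

	printf("load=%lu tasks=%lu avg=%lu\n",
	       sgs.group_load, sgs.sum_nr_running, sgs.avg_load);
	return 0;
}

Keeping the statistics in one struct also means the helper functions
this commit message anticipates can take a single struct sg_lb_stats *
instead of half a dozen out-parameters.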
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  79
1 file changed, 46 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f87adbe999e0..109db122de50 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3192,6 +3192,18 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 /********** Helpers for find_busiest_group ************************/
 
 /**
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+	unsigned long avg_load; /* Avg load across the CPUs of the group */
+	unsigned long group_load; /* Total load over the CPUs of the group */
+	unsigned long sum_nr_running; /* Nr tasks running in the group */
+	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+	unsigned long group_capacity;
+	int group_imb; /* Is there an imbalance in the group ? */
+};
+
+/**
  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
  * @group: The group whose first cpu is to be returned.
  */
@@ -3257,23 +3269,22 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	load_idx = get_sd_load_idx(sd, idle);
 
 	do {
-		unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
+		struct sg_lb_stats sgs;
+		unsigned long load, max_cpu_load, min_cpu_load;
 		int local_group;
 		int i;
-		int __group_imb = 0;
 		unsigned int balance_cpu = -1, first_idle_cpu = 0;
-		unsigned long sum_nr_running, sum_weighted_load;
 		unsigned long sum_avg_load_per_task;
 		unsigned long avg_load_per_task;
 
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
+		memset(&sgs, 0, sizeof(sgs));
 
 		if (local_group)
 			balance_cpu = group_first_cpu(group);
 
 		/* Tally up the load of all CPUs in the group */
-		sum_weighted_load = sum_nr_running = avg_load = 0;
 		sum_avg_load_per_task = avg_load_per_task = 0;
 
 		max_cpu_load = 0;
@@ -3301,9 +3312,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 					min_cpu_load = load;
 			}
 
-			avg_load += load;
-			sum_nr_running += rq->nr_running;
-			sum_weighted_load += weighted_cpuload(i);
+			sgs.group_load += load;
+			sgs.sum_nr_running += rq->nr_running;
+			sgs.sum_weighted_load += weighted_cpuload(i);
 
 			sum_avg_load_per_task += cpu_avg_load_per_task(i);
 		}
@@ -3320,12 +3331,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			goto ret;
 		}
 
-		total_load += avg_load;
+		total_load += sgs.group_load;
 		total_pwr += group->__cpu_power;
 
 		/* Adjust by relative CPU power of the group */
-		avg_load = sg_div_cpu_power(group,
-				avg_load * SCHED_LOAD_SCALE);
+		sgs.avg_load = sg_div_cpu_power(group,
+				sgs.group_load * SCHED_LOAD_SCALE);
 
 
 		/*
@@ -3341,22 +3352,23 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 				sum_avg_load_per_task * SCHED_LOAD_SCALE);
 
 		if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
-			__group_imb = 1;
+			sgs.group_imb = 1;
 
-		group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+		sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
 
 		if (local_group) {
-			this_load = avg_load;
+			this_load = sgs.avg_load;
 			this = group;
-			this_nr_running = sum_nr_running;
-			this_load_per_task = sum_weighted_load;
-		} else if (avg_load > max_load &&
-			   (sum_nr_running > group_capacity || __group_imb)) {
-			max_load = avg_load;
+			this_nr_running = sgs.sum_nr_running;
+			this_load_per_task = sgs.sum_weighted_load;
+		} else if (sgs.avg_load > max_load &&
+				(sgs.sum_nr_running > sgs.group_capacity ||
+				sgs.group_imb)) {
+			max_load = sgs.avg_load;
 			busiest = group;
-			busiest_nr_running = sum_nr_running;
-			busiest_load_per_task = sum_weighted_load;
-			group_imb = __group_imb;
+			busiest_nr_running = sgs.sum_nr_running;
+			busiest_load_per_task = sgs.sum_weighted_load;
+			group_imb = sgs.group_imb;
 		}
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3372,7 +3384,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * If the local group is idle or completely loaded
 		 * no need to do power savings balance at this domain
 		 */
-		if (local_group && (this_nr_running >= group_capacity ||
+		if (local_group && (this_nr_running >= sgs.group_capacity ||
 				    !this_nr_running))
 			power_savings_balance = 0;
 
@@ -3380,8 +3392,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * If a group is already running at full capacity or idle,
 		 * don't include that group in power savings calculations
 		 */
-		if (!power_savings_balance || sum_nr_running >= group_capacity
-		    || !sum_nr_running)
+		if (!power_savings_balance ||
+			sgs.sum_nr_running >= sgs.group_capacity ||
+			!sgs.sum_nr_running)
 			goto group_next;
 
 		/*
@@ -3389,13 +3402,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * This is the group from where we need to pick up the load
 		 * for saving power
 		 */
-		if ((sum_nr_running < min_nr_running) ||
-		    (sum_nr_running == min_nr_running &&
+		if ((sgs.sum_nr_running < min_nr_running) ||
+		    (sgs.sum_nr_running == min_nr_running &&
 		     group_first_cpu(group) > group_first_cpu(group_min))) {
 			group_min = group;
-			min_nr_running = sum_nr_running;
-			min_load_per_task = sum_weighted_load /
-					      sum_nr_running;
+			min_nr_running = sgs.sum_nr_running;
+			min_load_per_task = sgs.sum_weighted_load /
+						sgs.sum_nr_running;
 		}
 
 		/*
@@ -3403,14 +3416,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * capacity but still has some space to pick up some load
 		 * from other group and save more power
 		 */
-		if (sum_nr_running > group_capacity - 1)
+		if (sgs.sum_nr_running > sgs.group_capacity - 1)
 			goto group_next;
 
-		if (sum_nr_running > leader_nr_running ||
-		    (sum_nr_running == leader_nr_running &&
+		if (sgs.sum_nr_running > leader_nr_running ||
+		    (sgs.sum_nr_running == leader_nr_running &&
 		     group_first_cpu(group) < group_first_cpu(group_leader))) {
 			group_leader = group;
-			leader_nr_running = sum_nr_running;
+			leader_nr_running = sgs.sum_nr_running;
 		}
 group_next:
 #endif