diff options
author | Gautham R Shenoy <ego@in.ibm.com> | 2009-03-25 05:13:35 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-03-25 05:30:44 -0400 |
commit | 67bb6c036d1fc3d332c8527a36a546e3e72e822c (patch) | |
tree | 677795ae954a6fec174c8218e59d8301dc7f1a8a /kernel/sched.c | |
parent | b6d9842258d1ba27fb978cded74eb4b6aa15edc8 (diff) |
sched: Simple helper functions for find_busiest_group()
Impact: cleanup
Currently the load idx calculation code is in find_busiest_group().
Move that to a static inline helper function.
Similarly, to find the first cpu of a sched_group we use
cpumask_first(sched_group_cpus(group))
Use a helper for that. It improves readability in some cases.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091335.13992.55424.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 55 |
1 files changed, 43 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 7b389c74f8ff..6aec1e7a72a3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3189,6 +3189,43 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3189 | 3189 | ||
3190 | return 0; | 3190 | return 0; |
3191 | } | 3191 | } |
3192 | /********** Helpers for find_busiest_group ************************/ | ||
3193 | |||
3194 | /** | ||
3195 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | ||
3196 | * @group: The group whose first cpu is to be returned. | ||
3197 | */ | ||
3198 | static inline unsigned int group_first_cpu(struct sched_group *group) | ||
3199 | { | ||
3200 | return cpumask_first(sched_group_cpus(group)); | ||
3201 | } | ||
3202 | |||
3203 | /** | ||
3204 | * get_sd_load_idx - Obtain the load index for a given sched domain. | ||
3205 | * @sd: The sched_domain whose load_idx is to be obtained. | ||
3206 | * @idle: The Idle status of the CPU for whose sd load_idx is obtained. | ||
3207 | */ | ||
3208 | static inline int get_sd_load_idx(struct sched_domain *sd, | ||
3209 | enum cpu_idle_type idle) | ||
3210 | { | ||
3211 | int load_idx; | ||
3212 | |||
3213 | switch (idle) { | ||
3214 | case CPU_NOT_IDLE: | ||
3215 | load_idx = sd->busy_idx; | ||
3216 | break; | ||
3217 | |||
3218 | case CPU_NEWLY_IDLE: | ||
3219 | load_idx = sd->newidle_idx; | ||
3220 | break; | ||
3221 | default: | ||
3222 | load_idx = sd->idle_idx; | ||
3223 | break; | ||
3224 | } | ||
3225 | |||
3226 | return load_idx; | ||
3227 | } | ||
3228 | /******* find_busiest_group() helpers end here *********************/ | ||
3192 | 3229 | ||
3193 | /* | 3230 | /* |
3194 | * find_busiest_group finds and returns the busiest CPU group within the | 3231 | * find_busiest_group finds and returns the busiest CPU group within the |
@@ -3217,12 +3254,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3217 | busiest_load_per_task = busiest_nr_running = 0; | 3254 | busiest_load_per_task = busiest_nr_running = 0; |
3218 | this_load_per_task = this_nr_running = 0; | 3255 | this_load_per_task = this_nr_running = 0; |
3219 | 3256 | ||
3220 | if (idle == CPU_NOT_IDLE) | 3257 | load_idx = get_sd_load_idx(sd, idle); |
3221 | load_idx = sd->busy_idx; | ||
3222 | else if (idle == CPU_NEWLY_IDLE) | ||
3223 | load_idx = sd->newidle_idx; | ||
3224 | else | ||
3225 | load_idx = sd->idle_idx; | ||
3226 | 3258 | ||
3227 | do { | 3259 | do { |
3228 | unsigned long load, group_capacity, max_cpu_load, min_cpu_load; | 3260 | unsigned long load, group_capacity, max_cpu_load, min_cpu_load; |
@@ -3238,7 +3270,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3238 | sched_group_cpus(group)); | 3270 | sched_group_cpus(group)); |
3239 | 3271 | ||
3240 | if (local_group) | 3272 | if (local_group) |
3241 | balance_cpu = cpumask_first(sched_group_cpus(group)); | 3273 | balance_cpu = group_first_cpu(group); |
3242 | 3274 | ||
3243 | /* Tally up the load of all CPUs in the group */ | 3275 | /* Tally up the load of all CPUs in the group */ |
3244 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3276 | sum_weighted_load = sum_nr_running = avg_load = 0; |
@@ -3359,8 +3391,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3359 | */ | 3391 | */ |
3360 | if ((sum_nr_running < min_nr_running) || | 3392 | if ((sum_nr_running < min_nr_running) || |
3361 | (sum_nr_running == min_nr_running && | 3393 | (sum_nr_running == min_nr_running && |
3362 | cpumask_first(sched_group_cpus(group)) > | 3394 | group_first_cpu(group) > group_first_cpu(group_min))) { |
3363 | cpumask_first(sched_group_cpus(group_min)))) { | ||
3364 | group_min = group; | 3395 | group_min = group; |
3365 | min_nr_running = sum_nr_running; | 3396 | min_nr_running = sum_nr_running; |
3366 | min_load_per_task = sum_weighted_load / | 3397 | min_load_per_task = sum_weighted_load / |
@@ -3375,8 +3406,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3375 | if (sum_nr_running <= group_capacity - 1) { | 3406 | if (sum_nr_running <= group_capacity - 1) { |
3376 | if (sum_nr_running > leader_nr_running || | 3407 | if (sum_nr_running > leader_nr_running || |
3377 | (sum_nr_running == leader_nr_running && | 3408 | (sum_nr_running == leader_nr_running && |
3378 | cpumask_first(sched_group_cpus(group)) < | 3409 | group_first_cpu(group) < |
3379 | cpumask_first(sched_group_cpus(group_leader)))) { | 3410 | group_first_cpu(group_leader))) { |
3380 | group_leader = group; | 3411 | group_leader = group; |
3381 | leader_nr_running = sum_nr_running; | 3412 | leader_nr_running = sum_nr_running; |
3382 | } | 3413 | } |
@@ -3504,7 +3535,7 @@ out_balanced: | |||
3504 | *imbalance = min_load_per_task; | 3535 | *imbalance = min_load_per_task; |
3505 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { | 3536 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { |
3506 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = | 3537 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = |
3507 | cpumask_first(sched_group_cpus(group_leader)); | 3538 | group_first_cpu(group_leader); |
3508 | } | 3539 | } |
3509 | return group_min; | 3540 | return group_min; |
3510 | } | 3541 | } |