author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-09-16 07:46:59 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-09-16 10:44:32 -0400
commit    5158f4e4428c6b8d52796b3b460e95796123a114 (patch)
tree      25f62ffd3e427fc8f5b6b6be434bc3918dc59960 /kernel
parent    3b6408942206f940dd538e980e9904e48f4b64f8 (diff)
sched: Clean up the load_idx selection in select_task_rq_fair
Clean up the code a little.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
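In essence, the patch hoists the load_idx selection out of find_idlest_group() and into the sched-domain loop of its only caller, select_task_rq_fair(): the index now defaults to the fork/exec value and is overridden for wakeups. A minimal self-contained sketch of the before/after logic, condensed from the hunks below (the wrapper names load_idx_before()/load_idx_after() and the flag values are illustrative, not kernel code):

	/*
	 * Sketch only: these wrappers are hypothetical; the real code is
	 * inline in kernel/sched_fair.c and the flag values are illustrative.
	 */
	#define SD_BALANCE_EXEC		0x01
	#define SD_BALANCE_FORK		0x02
	#define SD_BALANCE_WAKE		0x04

	struct sched_domain {
		int forkexec_idx;
		int wake_idx;
	};

	/* Before: find_idlest_group() re-derived load_idx from the flag. */
	static int load_idx_before(struct sched_domain *sd, int flag)
	{
		int load_idx = 0;

		switch (flag) {
		case SD_BALANCE_FORK:
		case SD_BALANCE_EXEC:
			load_idx = sd->forkexec_idx;
			break;
		case SD_BALANCE_WAKE:
			load_idx = sd->wake_idx;
			break;
		default:
			break;
		}
		return load_idx;
	}

	/* After: the caller picks load_idx up front and passes it down. */
	static int load_idx_after(struct sched_domain *sd, int sd_flag)
	{
		int load_idx = sd->forkexec_idx;	/* fork/exec default */

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		return load_idx;
	}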
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c  27
1 file changed, 8 insertions(+), 19 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 722d392b0dac..aeff40e7ec1b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1248,26 +1248,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
  */
 static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
-		  int this_cpu, int flag)
+		  int this_cpu, int load_idx)
 {
 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
-	int load_idx = 0;
-
-	switch (flag) {
-	case SD_BALANCE_FORK:
-	case SD_BALANCE_EXEC:
-		load_idx = sd->forkexec_idx;
-		break;
-
-	case SD_BALANCE_WAKE:
-		load_idx = sd->wake_idx;
-		break;
-
-	default:
-		break;
-	}
 
 	do {
 		unsigned long load, avg_load;
@@ -1346,14 +1331,14 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *shares = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
 	int want_affine = 0;
-	int sync = flags & WF_SYNC;
+	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
@@ -1413,6 +1398,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 		update_shares(sd);
 
 	while (sd) {
+		int load_idx = sd->forkexec_idx;
 		struct sched_group *group;
 		int weight;
 
@@ -1421,7 +1407,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (sd_flag & SD_BALANCE_WAKE)
+			load_idx = sd->wake_idx;
+
+		group = find_idlest_group(sd, p, cpu, load_idx);
 		if (!group) {
 			sd = sd->child;
 			continue;
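Design note: seeding load_idx with sd->forkexec_idx and overriding it only for SD_BALANCE_WAKE preserves the behaviour of the removed switch for the fork/exec paths while dropping its dead default: arm; the two are equivalent as long as this function is only reached with SD_BALANCE_WAKE, SD_BALANCE_FORK or SD_BALANCE_EXEC (any other flag previously fell through to load_idx = 0).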