author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-12 09:55:28 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-13 04:09:58 -0500
commit		a50bde5130f65733142b32975616427d0ea50856 (patch)
tree		c2bf0c04fbebfa0f3d6ae8b709f754d5fce4b831 /kernel
parent		761b1d26df542fd5eb348837351e4d2f3bc7bffe (diff)
sched: Cleanup select_task_rq_fair()
Clean up the new affine to idle sibling bits while trying to grok them.
Should not have any function differences.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091112145610.832503781@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c | 73
1 file changed, 51 insertions(+), 22 deletions(-)
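For readers grokking the refactor outside the kernel tree, below is a minimal standalone C sketch of the selection logic the patch consolidates into select_idle_sibling(). The runqueue state, cpu mask, and domain span are stand-ins invented for this sketch (nr_running[], cpus_allowed[], domain_span[], NR_CPUS are not kernel APIs); only the decision logic mirrors the patch.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* Stand-ins for kernel state, invented for this sketch. */
static int  nr_running[NR_CPUS];    /* cfs.nr_running per cpu       */
static bool cpus_allowed[NR_CPUS];  /* p->cpus_allowed for the task */
static bool domain_span[NR_CPUS];   /* sched_domain_span(sd)        */

/*
 * Mirror of the patched select_idle_sibling(): 'target' arrives as
 * -1 (no affine candidate yet) or 'cpu' (the waking cpu is a valid
 * SD_WAKE_AFFINE target).  Prefer an idle prev_cpu, else scan the
 * domain for any allowed idle cpu.
 */
static int select_idle_sibling(int cpu, int prev_cpu, int target)
{
	int i;

	/* An idle prev_cpu always beats the currently running cpu. */
	if (target == cpu && !nr_running[prev_cpu])
		target = prev_cpu;

	/* Otherwise look for any eligible idle cpu in the domain. */
	if (target == -1 || target == cpu) {
		for (i = 0; i < NR_CPUS; i++) {
			if (!domain_span[i] || !cpus_allowed[i])
				continue;
			if (!nr_running[i]) {
				target = i;
				break;
			}
		}
	}

	return target;
}

int main(void)
{
	int i;

	/* Domain spans all cpus; the task may run anywhere. */
	for (i = 0; i < NR_CPUS; i++) {
		domain_span[i] = cpus_allowed[i] = true;
		nr_running[i] = 1;      /* everyone busy ... */
	}
	nr_running[2] = 0;              /* ... except cpu 2  */

	/* Waking on cpu 0; the task previously ran on cpu 1 (busy). */
	printf("target = %d\n", select_idle_sibling(0, 1, 0)); /* -> 2 */
	return 0;
}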
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e4d4483fd617..a32df1524746 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1319,6 +1319,41 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 }
 
 /*
+ * Try and locate an idle CPU in the sched_domain.
+ */
+static int
+select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+{
+	int cpu = smp_processor_id();
+	int prev_cpu = task_cpu(p);
+	int i;
+
+	/*
+	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
+	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
+	 * always a better target than the current cpu.
+	 */
+	if (target == cpu) {
+		if (!cpu_rq(prev_cpu)->cfs.nr_running)
+			target = prev_cpu;
+	}
+
+	/*
+	 * Otherwise, iterate the domain and find an eligible idle cpu.
+	 */
+	if (target == -1 || target == cpu) {
+		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+			if (!cpu_rq(i)->cfs.nr_running) {
+				target = i;
+				break;
+			}
+		}
+	}
+
+	return target;
+}
+
+/*
  * sched_balance_self: balance the current task (running on cpu) in domains
  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
  * SD_BALANCE_EXEC.
@@ -1373,36 +1408,30 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	}
 
 	if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
-		int candidate = -1, i;
+		int target = -1;
 
+		/*
+		 * If both cpu and prev_cpu are part of this domain,
+		 * cpu is a valid SD_WAKE_AFFINE target.
+		 */
 		if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
-			candidate = cpu;
+			target = cpu;
 
 		/*
-		 * Check for an idle shared cache.
+		 * If there's an idle sibling in this domain, make that
+		 * the wake_affine target instead of the current cpu.
+		 *
+		 * XXX: should we possibly do this outside of
+		 * WAKE_AFFINE, in case the shared cache domain is
+		 * smaller than the WAKE_AFFINE domain?
 		 */
-		if (tmp->flags & SD_PREFER_SIBLING) {
-			if (candidate == cpu) {
-				if (!cpu_rq(prev_cpu)->cfs.nr_running)
-					candidate = prev_cpu;
-			}
-
-			if (candidate == -1 || candidate == cpu) {
-				for_each_cpu(i, sched_domain_span(tmp)) {
-					if (!cpumask_test_cpu(i, &p->cpus_allowed))
-						continue;
-					if (!cpu_rq(i)->cfs.nr_running) {
-						candidate = i;
-						break;
-					}
-				}
-			}
-		}
+		if (tmp->flags & SD_PREFER_SIBLING)
+			target = select_idle_sibling(p, tmp, target);
 
-		if (candidate >= 0) {
+		if (target >= 0) {
 			affine_sd = tmp;
 			want_affine = 0;
-			cpu = candidate;
+			cpu = target;
 		}
 	}
 
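The caller side of the cleanup, under the same stand-ins as the sketch above, reduces to the shape below. The design point is visible here: select_task_rq_fair() keeps a single three-state target (-1 for no affine candidate, the waking cpu, or an idle sibling), and the nested candidate scan moves behind one helper call. pick_affine_cpu() and its flag parameters are hypothetical names for this sketch, not functions in the patch.

/* Caller shape after the patch (stand-ins as in the sketch above). */
static int pick_affine_cpu(int cpu, int prev_cpu, bool prev_in_domain,
			   bool sd_prefer_sibling)
{
	int target = -1;

	/*
	 * cpu is a valid SD_WAKE_AFFINE target only if this domain
	 * also spans prev_cpu.
	 */
	if (prev_in_domain)
		target = cpu;

	/* Let the helper upgrade the target to an idle sibling. */
	if (sd_prefer_sibling)
		target = select_idle_sibling(cpu, prev_cpu, target);

	return target;	/* -1 means: no affine target, fall through */
}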