path: root/kernel/sched_fair.c
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 65
1 file changed, 60 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37087a7fac22..f61837ad336d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1345,6 +1345,37 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 }
 
 /*
+ * Try and locate an idle CPU in the sched_domain.
+ */
+static int
+select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+{
+	int cpu = smp_processor_id();
+	int prev_cpu = task_cpu(p);
+	int i;
+
+	/*
+	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
+	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
+	 * always a better target than the current cpu.
+	 */
+	if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
+		return prev_cpu;
+
+	/*
+	 * Otherwise, iterate the domain and find an eligible idle cpu.
+	 */
+	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+		if (!cpu_rq(i)->cfs.nr_running) {
+			target = i;
+			break;
+		}
+	}
+
+	return target;
+}
+
+/*
  * sched_balance_self: balance the current task (running on cpu) in domains
  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
  * SD_BALANCE_EXEC.
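
The helper's decision order (an idle prev_cpu first, then the first idle cpu in the domain) is easy to exercise outside the kernel. Below is a minimal user-space sketch of that logic; the arrays and the mock_* names are hypothetical stand-ins for the per-cpu runqueues, the sched_domain span, and p->cpus_allowed, not kernel API:

#include <stdio.h>

#define NR_CPUS 8

/* Hypothetical stand-ins for kernel state (not kernel API). */
static int nr_running[NR_CPUS]   = { 2, 0, 1, 0, 3, 1, 0, 2 };
static int domain_span[NR_CPUS]  = { 1, 1, 1, 1, 0, 0, 0, 0 }; /* cpus 0-3 share cache */
static int cpus_allowed[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 1, 1 }; /* cpu 2 masked out */

/* Mirrors select_idle_sibling(): an idle prev_cpu wins, else scan the span. */
static int mock_select_idle_sibling(int this_cpu, int prev_cpu, int target)
{
	int i;

	/* The domain spans both cpus and prev_cpu is idle: beats this_cpu. */
	if (target == this_cpu && !nr_running[prev_cpu])
		return prev_cpu;

	/* Otherwise take the first idle, allowed cpu in the domain. */
	for (i = 0; i < NR_CPUS; i++) {
		if (domain_span[i] && cpus_allowed[i] && !nr_running[i]) {
			target = i;
			break;
		}
	}

	return target;
}

int main(void)
{
	/* Waking on cpu 0; prev_cpu 2 is busy, so the scan picks idle cpu 1. */
	printf("target = %d\n", mock_select_idle_sibling(0, 2, 0));
	return 0;
}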
@@ -1398,11 +1429,35 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 			want_sd = 0;
 		}
 
-		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+		/*
+		 * While iterating the domains looking for a spanning
+		 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
+		 * in cache sharing domains along the way.
+		 */
+		if (want_affine) {
+			int target = -1;
 
-			affine_sd = tmp;
-			want_affine = 0;
+			/*
+			 * If both cpu and prev_cpu are part of this domain,
+			 * cpu is a valid SD_WAKE_AFFINE target.
+			 */
+			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
+				target = cpu;
+
+			/*
+			 * If there's an idle sibling in this domain, make that
+			 * the wake_affine target instead of the current cpu.
+			 */
+			if (tmp->flags & SD_PREFER_SIBLING)
+				target = select_idle_sibling(p, tmp, target);
+
+			if (target >= 0) {
+				if (tmp->flags & SD_WAKE_AFFINE) {
+					affine_sd = tmp;
+					want_affine = 0;
+				}
+				cpu = target;
+			}
 		}
 
 		if (!want_sd && !want_affine)
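
Restructured this way, "is cpu a valid affine target" (prev_cpu inside the span), "can we do better than cpu" (the SD_PREFER_SIBLING refinement), and "does this domain settle the question" (SD_WAKE_AFFINE) become three independent steps. A condensed user-space sketch of that ladder follows; the mock_domain type, the flag values, and the affine_step name are hypothetical, not the kernel's:

#include <stdio.h>

/* Hypothetical flag values; the kernel's SD_* flags live in linux/sched.h. */
enum { SD_WAKE_AFFINE = 0x1, SD_PREFER_SIBLING = 0x2 };

struct mock_domain {
	int flags;
	int spans_prev_cpu;	/* cpumask_test_cpu(prev_cpu, span) stand-in */
};

/* Stand-in for select_idle_sibling(): pretend the scan found cpu 1 idle. */
static int pick_idle_in_domain(const struct mock_domain *d, int target)
{
	(void)d;
	(void)target;
	return 1;
}

/* One iteration of the rewritten want_affine block; returns the chosen cpu. */
static int affine_step(const struct mock_domain *d, int cpu, int *want_affine)
{
	int target = -1;

	if (d->spans_prev_cpu)			/* cpu is a valid affine target */
		target = cpu;

	if (d->flags & SD_PREFER_SIBLING)	/* prefer an idle sibling */
		target = pick_idle_in_domain(d, target);

	if (target >= 0) {
		if (d->flags & SD_WAKE_AFFINE)
			*want_affine = 0;	/* this domain settles it */
		cpu = target;
	}

	return cpu;
}

int main(void)
{
	struct mock_domain llc = { SD_WAKE_AFFINE | SD_PREFER_SIBLING, 1 };
	int want_affine = 1;
	int cpu = affine_step(&llc, 0, &want_affine);

	printf("cpu = %d, want_affine = %d\n", cpu, want_affine);
	return 0;
}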
@@ -1679,7 +1734,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
 
-	if (unlikely(!cfs_rq->nr_running))
+	if (!cfs_rq->nr_running)
 		return NULL;
 
 	do {
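
The final hunk drops the unlikely() hint, presumably because a patch that actively steers wakeups onto idle CPUs makes an empty cfs_rq in pick_next_task_fair() a common case, so biasing code layout against that branch no longer helps. For reference, unlikely() is only a __builtin_expect() wrapper; a minimal sketch using the same shape as the definitions in include/linux/compiler.h:

#include <stdio.h>

/* Same shape as the kernel's definitions in include/linux/compiler.h. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	int nr_running = 0;

	/* unlikely() tells the compiler to lay this branch out as the cold path. */
	if (unlikely(!nr_running))
		puts("empty runqueue");
	return 0;
}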