author	Mike Galbraith <efault@gmx.de>	2010-03-11 11:17:16 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-11 12:32:51 -0500
commit	8b911acdf08477c059d1c36c21113ab1696c612b (patch)
tree	ff0127c87cf657b706c3dc68dd8f92248a448c76 /kernel/sched_fair.c
parent	21406928afe43f1db6acab4931bb8c886f4d04ce (diff)
sched: Fix select_idle_sibling()
Don't bother with selection when the current cpu is idle.  Recent load
balancing changes also make it no longer necessary to check wake_affine()
success before returning the selected sibling, so we now always use it.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301369.6785.36.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
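As a reading aid, here is a minimal standalone sketch of the wake-target decision the patched select_task_rq_fair() ends up making. It is illustration only, not kernel code: use_affine_target() and wake_affine_ok are invented names, and cpu_idle stands for the kernel's !current->pid test (the caller is the pid-0 idle task).

/*
 * Illustrative sketch only (not kernel code). In the patched kernel the
 * equivalent logic is:
 *   if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
 *           return cpu;
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_affine_target(bool cpu_idle, int cpu, int prev_cpu,
			      bool wake_affine_ok)
{
	if (cpu_idle)		/* the waking cpu is idle: take it directly */
		return true;
	if (cpu == prev_cpu)	/* task stays where it last ran: also fine */
		return true;
	return wake_affine_ok;	/* otherwise defer to the wake_affine() check */
}

int main(void)
{
	/* an idle waking cpu is accepted even if wake_affine() would refuse */
	printf("%d\n", use_affine_target(true, 2, 5, false));	/* prints 1 */
	/* busy cpu, different prev_cpu: the wake_affine() result decides */
	printf("%d\n", use_affine_target(false, 2, 5, false));	/* prints 0 */
	return 0;
}

In short, an idle waking cpu, or a wakeup back onto the task's previous cpu, now short-circuits the wake_affine() imbalance check entirely.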
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d19df5bccfec..0008cc4a1199 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1439,7 +1439,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
-	int want_affine = 0;
+	int want_affine = 0, cpu_idle = !current->pid;
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
@@ -1497,13 +1497,15 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 		 * If there's an idle sibling in this domain, make that
 		 * the wake_affine target instead of the current cpu.
 		 */
-		if (tmp->flags & SD_SHARE_PKG_RESOURCES)
+		if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
 			target = select_idle_sibling(p, tmp, target);
 
 		if (target >= 0) {
 			if (tmp->flags & SD_WAKE_AFFINE) {
 				affine_sd = tmp;
 				want_affine = 0;
+				if (target != cpu)
+					cpu_idle = 1;
 			}
 			cpu = target;
 		}
@@ -1519,6 +1521,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 			sd = tmp;
 	}
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	if (sched_feat(LB_SHARES_UPDATE)) {
 		/*
 		 * Pick the largest domain to update shares over
@@ -1532,9 +1535,12 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 		if (tmp)
 			update_shares(tmp);
 	}
+#endif
 
-	if (affine_sd && wake_affine(affine_sd, p, sync))
-		return cpu;
+	if (affine_sd) {
+		if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+			return cpu;
+	}
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;