diff options
author | akpm@osdl.org <akpm@osdl.org> | 2006-01-12 04:05:32 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-12 12:08:50 -0500 |
commit | d7102e95b7b9c00277562c29aad421d2d521c5f6 (patch) | |
tree | 3ad3d94c329095962c6cd6dcea41e1ccf2db5a7e /kernel/sched.c | |
parent | 198e2f181163233b379dc7ce8a6d7516b84042e7 (diff) |
[PATCH] sched: filter affine wakeups
From: Nick Piggin <nickpiggin@yahoo.com.au>
Track the last waker CPU, and only consider wakeup-balancing if there's a
match between current waker CPU and the previous waker CPU. This ensures
that there is some correlation between two subsequent wakeup events before
we move the task. Should help random-wakeup workloads on large SMP
systems, by reducing the migration attempts by a factor of nr_cpus.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 98461de1ab65..c9dec2aa1976 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1290,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync) | |||
1290 | } | 1290 | } |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | if (p->last_waker_cpu != this_cpu) | ||
1294 | goto out_set_cpu; | ||
1295 | |||
1293 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 1296 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) |
1294 | goto out_set_cpu; | 1297 | goto out_set_cpu; |
1295 | 1298 | ||
@@ -1360,6 +1363,8 @@ out_set_cpu: | |||
1360 | cpu = task_cpu(p); | 1363 | cpu = task_cpu(p); |
1361 | } | 1364 | } |
1362 | 1365 | ||
1366 | p->last_waker_cpu = this_cpu; | ||
1367 | |||
1363 | out_activate: | 1368 | out_activate: |
1364 | #endif /* CONFIG_SMP */ | 1369 | #endif /* CONFIG_SMP */ |
1365 | if (old_state == TASK_UNINTERRUPTIBLE) { | 1370 | if (old_state == TASK_UNINTERRUPTIBLE) { |
@@ -1441,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
1441 | #ifdef CONFIG_SCHEDSTATS | 1446 | #ifdef CONFIG_SCHEDSTATS |
1442 | memset(&p->sched_info, 0, sizeof(p->sched_info)); | 1447 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
1443 | #endif | 1448 | #endif |
1444 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 1449 | #if defined(CONFIG_SMP) |
1450 | p->last_waker_cpu = cpu; | ||
1451 | #if defined(__ARCH_WANT_UNLOCKED_CTXSW) | ||
1445 | p->oncpu = 0; | 1452 | p->oncpu = 0; |
1446 | #endif | 1453 | #endif |
1454 | #endif | ||
1447 | #ifdef CONFIG_PREEMPT | 1455 | #ifdef CONFIG_PREEMPT |
1448 | /* Want to start with kernel preemption disabled. */ | 1456 | /* Want to start with kernel preemption disabled. */ |
1449 | task_thread_info(p)->preempt_count = 1; | 1457 | task_thread_info(p)->preempt_count = 1; |