aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorakpm@osdl.org <akpm@osdl.org>2006-01-12 04:05:32 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-12 12:08:50 -0500
commitd7102e95b7b9c00277562c29aad421d2d521c5f6 (patch)
tree3ad3d94c329095962c6cd6dcea41e1ccf2db5a7e /include/linux
parent198e2f181163233b379dc7ce8a6d7516b84042e7 (diff)
[PATCH] sched: filter affine wakeups
From: Nick Piggin <nickpiggin@yahoo.com.au> Track the last waker CPU, and only consider wakeup-balancing if there's a match between current waker CPU and the previous waker CPU. This ensures that there is some correlation between two subsequent wakeup events before we move the task. Should help random-wakeup workloads on large SMP systems, by reducing the migration attempts by a factor of nr_cpus. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/sched.h5
1 files changed, 4 insertions, 1 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d6b9228bba9..b5ef92a043a6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -696,9 +696,12 @@ struct task_struct {
696 696
697 int lock_depth; /* BKL lock depth */ 697 int lock_depth; /* BKL lock depth */
698 698
699#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 699#if defined(CONFIG_SMP)
700 int last_waker_cpu; /* CPU that last woke this task up */
701#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
700 int oncpu; 702 int oncpu;
701#endif 703#endif
704#endif
702 int prio, static_prio; 705 int prio, static_prio;
703 struct list_head run_list; 706 struct list_head run_list;
704 prio_array_t *array; 707 prio_array_t *array;