author     Chen, Kenneth W <kenneth.w.chen@intel.com>    2006-02-14 16:53:10 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>         2006-02-14 19:09:34 -0500
commit     d6077cb80cde4506720f9165eba99ee07438513f (patch)
tree       f4462e51cf0a14a113c0c524711636c8429424bb /include/linux
parent     f822566165dd46ff5de9bf895cfa6c51f53bb0c4 (diff)
[PATCH] sched: revert "filter affine wakeups"
Revert commit d7102e95b7b9c00277562c29aad421d2d521c5f6:
[PATCH] sched: filter affine wakeups
It apparently caused a more-than-10% performance regression on the aim7 benchmark.
The setup in use is a 16-CPU HP rx8620 with 64 GB of memory and 12 MSA1000s holding 144
disks. Each disk is 72 GB with a single ext3 filesystem (courtesy of HP, who
supplied the benchmark results).
The problem is that the wake-up pattern for aim7 is random, yet it still needs
load-balancing action in the wake-up path to achieve the best performance. With
the above commit, the lack of load balancing hurts that workload.
However, for workloads like database transaction processing, the requirement
is exactly the opposite. In the wake-up path, the best performance is achieved
with absolutely zero load balancing: we simply wake up the process on the CPU
it previously ran on (see the sketch below). The worst performance is obtained
when we do load balancing at wake-up time.
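To make the contrast concrete, here is a minimal userspace sketch of the two
placement policies described above. It is an illustration under stated
assumptions, not the scheduler code: the struct fields and helper names
(prev_cpu, last_waker_cpu, wake_cpu_balanced, wake_cpu_filtered) are
hypothetical, and the real logic of the reverted patch lives in the kernel's
wake-up path.

#include <stdio.h>

/* Illustrative task state; field names are assumptions for this sketch. */
struct task {
	int prev_cpu;        /* CPU the task last ran on */
	int last_waker_cpu;  /* CPU that last woke this task up */
};

/*
 * Behaviour the revert restores: the wake-up path is always allowed to
 * consider pulling the task toward the waker's CPU (wake-time balancing).
 */
static int wake_cpu_balanced(const struct task *t, int waker_cpu)
{
	(void)t;
	return waker_cpu;
}

/*
 * Behaviour of the reverted "filter affine wakeups" patch, as described in
 * the message above: only balance when the same CPU wakes the task twice in
 * a row; otherwise keep the task on the CPU it previously ran on.
 */
static int wake_cpu_filtered(struct task *t, int waker_cpu)
{
	int cpu = (waker_cpu == t->last_waker_cpu) ? waker_cpu : t->prev_cpu;

	t->last_waker_cpu = waker_cpu;
	return cpu;
}

int main(void)
{
	struct task t = { .prev_cpu = 3, .last_waker_cpu = 7 };

	/* A task woken from a CPU other than its last waker. */
	printf("balanced: runs on CPU %d\n", wake_cpu_balanced(&t, 1));
	printf("filtered: runs on CPU %d\n", wake_cpu_filtered(&t, 1));
	return 0;
}

With a random wakeup pattern the filtered policy almost always leaves the task
on its previous CPU, which (per the message above) is what helps database
transaction processing but hurts aim7, hence the revert.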
There isn't an easy way to auto-detect the workload characteristics. Ingo's
earlier patch, which detects an idle CPU and decides whether to load balance
or not, doesn't perform well with aim7 either, since all CPUs are busy (it
causes an even bigger performance regression).
Revert commit d7102e95b7b9c00277562c29aad421d2d521c5f6, which causes a
more-than-10% performance regression with aim7.
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/sched.h  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c1da0269a18..b6f51e3a38ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -697,12 +697,9 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP)
-	int last_waker_cpu;	/* CPU that last woke this task up */
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
-#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;