author	Gerald Schaefer <gerald.schaefer@de.ibm.com>	2010-11-22 09:47:36 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-26 09:05:34 -0500
commit	335d7afbfb71faac833734a94240c1e07cf0ead8
tree	483a24c6005af6eb25a256d92a697411d94a7458
parent	22a867d81707b0a2720bb5f65255265b95d30526
mutexes, sched: Introduce arch_mutex_cpu_relax()
The spinning mutex implementation uses cpu_relax() in busy loops as a
compiler barrier. Depending on the architecture, cpu_relax() may do more
than needed in these specific mutex spin loops. On System z we also give
up the time slice of the virtual cpu in cpu_relax(), which prevents
effective spinning on the mutex.
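To see why a heavyweight cpu_relax() hurts here, consider the shape of a
spinning acquisition loop. The sketch below is an illustrative user-space
analogue, not the kernel's __mutex_lock_common(); lock_word, relax() and
spin_acquire() are hypothetical names. Every failed attempt issues a relax
hint, so if that hint also surrenders the virtual cpu's time slice, the
spinner barely spins at all:

#include <stdatomic.h>

static atomic_int lock_word;	/* 0 = unlocked, 1 = locked */

static inline void relax(void)
{
	/* stand-in for cpu_relax(): at minimum a compiler barrier */
	__asm__ __volatile__("" ::: "memory");
}

void spin_acquire(void)
{
	int expected = 0;

	/* busy-wait until the lock word can be flipped 0 -> 1 */
	while (!atomic_compare_exchange_weak(&lock_word, &expected, 1)) {
		expected = 0;	/* CAS stored the observed value here */
		relax();	/* costly if this yields the time slice */
	}
}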
This patch replaces cpu_relax() in the spinning mutex code with
arch_mutex_cpu_relax(), which can be defined by each architecture that
selects HAVE_ARCH_MUTEX_CPU_RELAX. The default is still cpu_relax(), so
this patch should not affect architectures other than System z for now.
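Consolidated from the per-file hunks below, the override pattern is:

/* include/linux/mutex.h -- fallback for every architecture that does
 * not select HAVE_ARCH_MUTEX_CPU_RELAX: keep the old behavior */
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax() cpu_relax()
#endif

/* arch/s390/include/asm/mutex.h -- System z selects
 * HAVE_ARCH_MUTEX_CPU_RELAX in its Kconfig and defines the hook as a
 * plain compiler barrier, so spinners keep their time slice */
#define arch_mutex_cpu_relax() barrier()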
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1290437256.7455.4.camel@thinkpad>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/Kconfig                  | 3 +++
 arch/s390/Kconfig             | 1 +
 arch/s390/include/asm/mutex.h | 2 ++
 include/linux/mutex.h         | 4 ++++
 kernel/mutex.c                | 2 +-
 kernel/sched.c                | 3 ++-
 6 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 8bf0fa652eb6..f78c2be4242b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
+config HAVE_ARCH_MUTEX_CPU_RELAX
+	bool
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e0b98e71ff47..6c6d7b339aae 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_GET_USER_PAGES_FAST
+	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 458c1f7fbc18..688271f5f2e4 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,3 +7,5 @@
  */
 
 #include <asm-generic/mutex-dec.h>
+
+#define arch_mutex_cpu_relax() barrier()
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f363bc8fdc74..94b48bd40dd7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -160,4 +160,8 @@ extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+#define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 #endif
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 200407c1502f..a5889fb28ecf 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
diff --git a/kernel/sched.c b/kernel/sched.c
index 3e8a7db951a6..abe7aec55763 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -3888,7 +3889,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		if (task_thread_info(rq->curr) != owner || need_resched())
 			return 0;
 
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 
 	return 1;