diff options
author | Anton Blanchard <anton@samba.org> | 2010-02-09 19:57:28 -0500 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2010-02-16 22:03:14 -0500 |
commit | 4e14a4d17a8cd66ccab180d32c977091922cfbed (patch) | |
tree | 5d6c9c97853ada47c4748d6965fca54692dbf665 /arch/powerpc/include/asm/spinlock.h | |
parent | 17081102a6e0fe32cf47cdbdf8f2e9ab55273b08 (diff) |
powerpc: Use lwarx hint in spinlocks
Recent versions of the PowerPC architecture added a hint bit to the larx
instructions to differentiate between an atomic operation and a lock operation:
> 0 Other programs might attempt to modify the word in storage addressed by EA
> even if the subsequent Store Conditional succeeds.
>
> 1 Other programs will not attempt to modify the word in storage addressed by
> EA until the program that has acquired the lock performs a subsequent store
> releasing the lock.
To avoid a binutils dependency this patch creates a macro for the extended lwarx
format and uses it in the spinlock code. To test this change I used a simple
test case that acquires and releases a global pthread mutex:
pthread_mutex_lock(&mutex);
pthread_mutex_unlock(&mutex);
On a 32 core POWER6, running 32 test threads we spend almost all our time in
the futex spinlock code:
94.37% perf [kernel] [k] ._raw_spin_lock
|
|--99.95%-- ._raw_spin_lock
| |
| |--63.29%-- .futex_wake
| |
| |--36.64%-- .futex_wait_setup
Which is a good test for this patch. The results (in lock/unlock operations per
second) are:
before: 1538203 ops/sec
after: 2189219 ops/sec
An improvement of 42%
A 32 core POWER7 improves even more:
before: 1279529 ops/sec
after: 2282076 ops/sec
An improvement of 78%
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r-- | arch/powerpc/include/asm/spinlock.h | 7 |
1 files changed, 4 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 764094cff681..1c35b59f6f30 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #endif | 27 | #endif |
28 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
29 | #include <asm/synch.h> | 29 | #include <asm/synch.h> |
30 | #include <asm/ppc-opcode.h> | ||
30 | 31 | ||
31 | #define arch_spin_is_locked(x) ((x)->slock != 0) | 32 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | 33 | ||
@@ -60,7 +61,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) | |||
60 | 61 | ||
61 | token = LOCK_TOKEN; | 62 | token = LOCK_TOKEN; |
62 | __asm__ __volatile__( | 63 | __asm__ __volatile__( |
63 | "1: lwarx %0,0,%2\n\ | 64 | "1: " PPC_LWARX(%0,0,%2,1) "\n\ |
64 | cmpwi 0,%0,0\n\ | 65 | cmpwi 0,%0,0\n\ |
65 | bne- 2f\n\ | 66 | bne- 2f\n\ |
66 | stwcx. %1,0,%2\n\ | 67 | stwcx. %1,0,%2\n\ |
@@ -186,7 +187,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) | |||
186 | long tmp; | 187 | long tmp; |
187 | 188 | ||
188 | __asm__ __volatile__( | 189 | __asm__ __volatile__( |
189 | "1: lwarx %0,0,%1\n" | 190 | "1: " PPC_LWARX(%0,0,%1,1) "\n" |
190 | __DO_SIGN_EXTEND | 191 | __DO_SIGN_EXTEND |
191 | " addic. %0,%0,1\n\ | 192 | " addic. %0,%0,1\n\ |
192 | ble- 2f\n" | 193 | ble- 2f\n" |
@@ -211,7 +212,7 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw) | |||
211 | 212 | ||
212 | token = WRLOCK_TOKEN; | 213 | token = WRLOCK_TOKEN; |
213 | __asm__ __volatile__( | 214 | __asm__ __volatile__( |
214 | "1: lwarx %0,0,%2\n\ | 215 | "1: " PPC_LWARX(%0,0,%2,1) "\n\ |
215 | cmpwi 0,%0,0\n\ | 216 | cmpwi 0,%0,0\n\ |
216 | bne- 2f\n" | 217 | bne- 2f\n" |
217 | PPC405_ERR77(0,%1) | 218 | PPC405_ERR77(0,%1) |