author	Anton Blanchard <anton@samba.org>	2010-02-09 19:57:28 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-02-16 22:03:14 -0500
commit	4e14a4d17a8cd66ccab180d32c977091922cfbed
tree	5d6c9c97853ada47c4748d6965fca54692dbf665 /arch/powerpc/include
parent	17081102a6e0fe32cf47cdbdf8f2e9ab55273b08
powerpc: Use lwarx hint in spinlocks
Recent versions of the PowerPC architecture added a hint bit to the larx
instructions to differentiate between an atomic operation and a lock
operation:

> 0 Other programs might attempt to modify the word in storage addressed by EA
> even if the subsequent Store Conditional succeeds.
>
> 1 Other programs will not attempt to modify the word in storage addressed by
> EA until the program that has acquired the lock performs a subsequent store
> releasing the lock.

To avoid a binutils dependency this patch creates macros for the extended
lwarx format and uses them in the spinlock code.

To test this change I used a simple test case that acquires and releases a
global pthread mutex:

	pthread_mutex_lock(&mutex);
	pthread_mutex_unlock(&mutex);

On a 32 core POWER6, running 32 test threads we spend almost all our time in
the futex spinlock code:

    94.37%  perf  [kernel]  [k] ._raw_spin_lock
            |
            |--99.95%-- ._raw_spin_lock
            |           |
            |           |--63.29%-- .futex_wake
            |           |
            |           |--36.64%-- .futex_wait_setup

This makes it a good test for this patch. The results (in lock/unlock
operations per second) are:

before: 1538203 ops/sec
after:  2189219 ops/sec

An improvement of 42%.

A 32 core POWER7 improves even more:

before: 1279529 ops/sec
after:  2282076 ops/sec

An improvement of 78%.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
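[Editor's sketch] The message shows only the lock/unlock pair; a minimal
userspace reconstruction of such a benchmark might look like the following.
The thread count matches the 32 test threads above, but the iteration count,
timing scheme, and all names are illustrative assumptions, not taken from the
original test:

	/* Hypothetical reconstruction of the benchmark described above:
	 * 32 threads repeatedly take and drop one global pthread mutex,
	 * then the aggregate lock/unlock rate is printed. Build with:
	 * gcc -O2 -pthread bench.c */
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	#define NTHREADS 32
	#define ITERS    1000000L

	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

	static void *worker(void *arg)
	{
		long i;

		(void)arg;
		for (i = 0; i < ITERS; i++) {
			pthread_mutex_lock(&mutex);
			pthread_mutex_unlock(&mutex);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t threads[NTHREADS];
		struct timespec start, end;
		double secs;
		int i;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (i = 0; i < NTHREADS; i++)
			pthread_create(&threads[i], NULL, worker, NULL);
		for (i = 0; i < NTHREADS; i++)
			pthread_join(threads[i], NULL);
		clock_gettime(CLOCK_MONOTONIC, &end);

		secs = (end.tv_sec - start.tv_sec) +
		       (end.tv_nsec - start.tv_nsec) / 1e9;
		printf("%.0f ops/sec\n", NTHREADS * ITERS / secs);
		return 0;
	}

Under contention the mutex falls back to futex() system calls, which is what
drives the kernel futex spinlock seen in the profile above.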
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/ppc-opcode.h	14
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	7
2 files changed, 18 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ef9aa84cac5..ecec7605118 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -24,6 +24,7 @@
 #define PPC_INST_ISEL_MASK		0xfc00003e
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
+#define PPC_INST_LWARX			0x7c000029
 #define PPC_INST_LWSYNC			0x7c2004ac
 #define PPC_INST_LXVD2X			0x7c000698
 #define PPC_INST_MCRXR			0x7c000400
@@ -55,15 +56,28 @@
 #define __PPC_RA(a)	(((a) & 0x1f) << 16)
 #define __PPC_RB(b)	(((b) & 0x1f) << 11)
 #define __PPC_RS(s)	(((s) & 0x1f) << 21)
+#define __PPC_RT(s)	__PPC_RS(s)
 #define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
+/*
+ * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have
+ * any side effects on all 32bit processors, we can do this all the time.
+ */
+#ifdef CONFIG_PPC64
+#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
+#else
+#define __PPC_EH(eh)	0
+#endif
 
 /* Deal with instructions that older assemblers aren't aware of */
 #define PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
 					__PPC_RA(a) | __PPC_RB(b))
 #define PPC_DCBZL(a, b)		stringify_in_c(.long PPC_INST_DCBZL | \
 					__PPC_RA(a) | __PPC_RB(b))
+#define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
+					__PPC_RT(t) | __PPC_RA(a) | \
+					__PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					__PPC_RB(b))
 #define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
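[Editor's sketch] The patch encodes the instruction by OR-ing bit fields into
a raw word handed to ".long", so the field layout can be checked standalone.
The following snippet is not part of the patch; it mirrors the macros above
with arbitrary register numbers and prints the word PPC_LWARX would emit:

	/* Standalone illustration (not from the patch) of the word that
	 * PPC_LWARX(t, a, b, eh) assembles: __PPC_RT() places the target
	 * register at bit 21, __PPC_RA() at bit 16, __PPC_RB() at bit 11,
	 * and __PPC_EH() sets the low-order hint bit. */
	#include <stdint.h>
	#include <stdio.h>

	#define PPC_INST_LWARX	0x7c000029
	#define __PPC_RT(t)	(((t) & 0x1f) << 21)
	#define __PPC_RA(a)	(((a) & 0x1f) << 16)
	#define __PPC_RB(b)	(((b) & 0x1f) << 11)
	#define __PPC_EH(eh)	(((eh) & 0x1) << 0)

	int main(void)
	{
		/* Encode the hinted form "lwarx r9,0,r10,1", as the
		 * spinlock code below does with its own registers. */
		uint32_t insn = PPC_INST_LWARX | __PPC_RT(9) | __PPC_RA(0) |
				__PPC_RB(10) | __PPC_EH(1);
		printf("0x%08x\n", insn);	/* prints 0x7d205029 */
		return 0;
	}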
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 764094cff68..1c35b59f6f3 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -27,6 +27,7 @@
 #endif
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
+#include <asm/ppc-opcode.h>
 
 #define arch_spin_is_locked(x)		((x)->slock != 0)
 
@@ -60,7 +61,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 
 	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2\n\
+"1:	" PPC_LWARX(%0,0,%2,1) "\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
@@ -186,7 +187,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
 	long tmp;
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1\n"
+"1:	" PPC_LWARX(%0,0,%1,1) "\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ -211,7 +212,7 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2\n\
+"1:	" PPC_LWARX(%0,0,%2,1) "\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 	PPC405_ERR77(0,%1)
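[Editor's note] To see why this sidesteps the assembler, consider how the
first trylock hunk expands. This is an illustration, not output from the
patch: the C preprocessor stringifies the macro before GCC substitutes the
%-operands with bare register numbers, so the assembler only ever sees an
integer expression it can fold.

	/*
	 * Illustration only: if GCC picks r9 for %0 and r10 for %2, the
	 * asm template line
	 *
	 *	"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	 *
	 * reaches the assembler as (one line, wrapped here for readability)
	 *
	 *	1:	.long 0x7c000029 | (((9) & 0x1f) << 21) |
	 *		(((0) & 0x1f) << 16) | (((10) & 0x1f) << 11) |
	 *		(((1) & 0x1) << 0)
	 *
	 * which evaluates to 0x7d205029, the same bits as "lwarx r9,0,r10,1",
	 * so a binutils that has never heard of the EH operand still
	 * assembles it.
	 */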