path: root/arch/sparc/include/asm/spinlock_64.h
author    David S. Miller <davem@davemloft.net>  2008-11-15 16:33:25 -0500
committer David S. Miller <davem@davemloft.net>  2008-12-04 12:16:47 -0500
commit    293666b7a17cb7a389fc274980439212386a19c4 (patch)
tree      075cc7661d2113cf04da7130b3383979d8024206 /arch/sparc/include/asm/spinlock_64.h
parent    64f2dde3f743c8a1ad8c0a1aa74166c1034afd92 (diff)
sparc64: Stop using memory barriers for atomics and locks.
The kernel always executes in the TSO memory model now, so none of this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller <davem@davemloft.net>
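For context, a minimal sketch of what the spin-lock acquire path reads like once the barriers are gone. The asm body is taken from the new (right-hand) side of the first hunk below; the surrounding C (the tmp temporary, the operand constraints, and the closing .previous directive) is filled in from the same file of this era and should be read as an approximation, not part of the patch itself. Under TSO the ldstub is an atomic load-store that the processor does not reorder with the accesses that follow it, so the trailing membar can be dropped while the "memory" clobber still acts as a compiler barrier.

/* Sketch: __raw_spin_lock() as it reads after this patch (wrapper approximate). */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"	/* atomically fetch lock byte, store 0xff */
"	brnz,pn		%0, 2f\n"	/* non-zero: lock already held, go spin */
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"	/* plain load while waiting */
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"	/* looks free: retry the ldstub */
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");			/* compiler barrier remains */
}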
Diffstat (limited to 'arch/sparc/include/asm/spinlock_64.h')
-rw-r--r--   arch/sparc/include/asm/spinlock_64.h   14
1 file changed, 0 insertions, 14 deletions
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index fbac9d00744a..c4d274d330e9 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -33,12 +33,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 	__asm__ __volatile__(
 "1:	ldstub		[%1], %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	brnz,pn		%0, 2f\n"
 "	 nop\n"
 "	.subsection	2\n"
 "2:	ldub		[%1], %0\n"
-"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 2b\n"
 "	 nop\n"
 "	ba,a,pt		%%xcc, 1b\n"
@@ -54,7 +52,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 	__asm__ __volatile__(
 "	ldstub		[%1], %0\n"
-"	membar		#StoreLoad | #StoreStore"
 	: "=r" (result)
 	: "r" (lock)
 	: "memory");
@@ -65,7 +62,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__(
-"	membar		#StoreStore | #LoadStore\n"
 "	stb		%%g0, [%0]"
 	: /* No outputs */
 	: "r" (lock)
@@ -78,14 +74,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 	__asm__ __volatile__(
 "1:	ldstub		[%2], %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	brnz,pn		%0, 2f\n"
 "	 nop\n"
 "	.subsection	2\n"
 "2:	rdpr		%%pil, %1\n"
 "	wrpr		%3, %%pil\n"
 "3:	ldub		[%2], %0\n"
-"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 3b\n"
 "	 nop\n"
 "	ba,pt		%%xcc, 1b\n"
@@ -108,12 +102,10 @@ static void inline __read_lock(raw_rwlock_t *lock)
 "4:	 add		%0, 1, %1\n"
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
 "	 nop\n"
 "	.subsection	2\n"
 "2:	ldsw		[%2], %0\n"
-"	membar		#LoadLoad\n"
 "	brlz,pt		%0, 2b\n"
 "	 nop\n"
 "	ba,a,pt		%%xcc, 4b\n"
@@ -134,7 +126,6 @@ static int inline __read_trylock(raw_rwlock_t *lock)
 "	add		%0, 1, %1\n"
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
 "	 mov		1, %0\n"
 "2:"
@@ -150,7 +141,6 @@ static void inline __read_unlock(raw_rwlock_t *lock)
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-"	membar		#StoreLoad | #LoadLoad\n"
 "1:	lduw		[%2], %0\n"
 "	sub		%0, 1, %1\n"
 "	cas		[%2], %0, %1\n"
@@ -174,12 +164,10 @@ static void inline __write_lock(raw_rwlock_t *lock)
 "4:	 or		%0, %3, %1\n"
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
 "	 nop\n"
 "	.subsection	2\n"
 "2:	lduw		[%2], %0\n"
-"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 2b\n"
 "	 nop\n"
 "	ba,a,pt		%%xcc, 4b\n"
@@ -192,7 +180,6 @@ static void inline __write_lock(raw_rwlock_t *lock)
 static void inline __write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
-"	membar		#LoadStore | #StoreStore\n"
 "	stw		%%g0, [%0]"
 	: /* no outputs */
 	: "r" (lock)
@@ -212,7 +199,6 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 "	or		%0, %4, %1\n"
 "	cas		[%3], %0, %1\n"
 "	cmp		%0, %1\n"
-"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
 "	 nop\n"
 "	mov		1, %2\n"