author    David S. Miller <davem@davemloft.net>  2008-11-15 16:33:25 -0500
committer David S. Miller <davem@davemloft.net>  2008-12-04 12:16:47 -0500
commit    293666b7a17cb7a389fc274980439212386a19c4 (patch)
tree      075cc7661d2113cf04da7130b3383979d8024206 /arch/sparc/include/asm
parent    64f2dde3f743c8a1ad8c0a1aa74166c1034afd92 (diff)
sparc64: Stop using memory barriers for atomics and locks.

The kernel always executes in the TSO memory model now, so none of
this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller <davem@davemloft.net>
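For reference, a minimal sketch of what the generic barrier macros reduce to after this patch, mirroring the new system_64.h definitions in the diff below; the comments are explanatory notes, not part of the patch:

    /* TSO only allows a store to be reordered past a later load, so the
     * full barrier still needs a #StoreLoad membar, while the read and
     * write barriers collapse to pure compiler barriers.
     */
    #define mb()    membar_safe("#StoreLoad")
    #define rmb()   __asm__ __volatile__("":::"memory")
    #define wmb()   __asm__ __volatile__("":::"memory")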
Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--  arch/sparc/include/asm/atomic_64.h    |  7
-rw-r--r--  arch/sparc/include/asm/bitops_64.h    |  5
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h  | 14
-rw-r--r--  arch/sparc/include/asm/system_64.h    | 35
-rw-r--r--  arch/sparc/include/asm/tsb.h          |  6
5 files changed, 8 insertions(+), 59 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 2c71ec4a3b18..5982c5ae7f07 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -112,17 +112,10 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
-#ifdef CONFIG_SMP
-#define smp_mb__before_atomic_dec() membar_storeload_loadload();
-#define smp_mb__after_atomic_dec() membar_storeload_storestore();
-#define smp_mb__before_atomic_inc() membar_storeload_loadload();
-#define smp_mb__after_atomic_inc() membar_storeload_storestore();
-#else
 #define smp_mb__before_atomic_dec() barrier()
 #define smp_mb__after_atomic_dec() barrier()
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
-#endif
 
 #include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index bb87b8080220..e72ac9cdfb98 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -23,13 +23,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#ifdef CONFIG_SMP
-#define smp_mb__before_clear_bit() membar_storeload_loadload()
-#define smp_mb__after_clear_bit() membar_storeload_storestore()
-#else
 #define smp_mb__before_clear_bit() barrier()
 #define smp_mb__after_clear_bit() barrier()
-#endif
 
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__ffs.h>
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index fbac9d00744a..c4d274d330e9 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -33,12 +33,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 	__asm__ __volatile__(
 "1: ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore\n"
 " brnz,pn %0, 2f\n"
 " nop\n"
 " .subsection 2\n"
 "2: ldub [%1], %0\n"
-" membar #LoadLoad\n"
 " brnz,pt %0, 2b\n"
 " nop\n"
 " ba,a,pt %%xcc, 1b\n"
@@ -54,7 +52,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 	__asm__ __volatile__(
 " ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore"
 	: "=r" (result)
 	: "r" (lock)
 	: "memory");
@@ -65,7 +62,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__(
-" membar #StoreStore | #LoadStore\n"
 " stb %%g0, [%0]"
 	: /* No outputs */
 	: "r" (lock)
@@ -78,14 +74,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 	__asm__ __volatile__(
 "1: ldstub [%2], %0\n"
-" membar #StoreLoad | #StoreStore\n"
 " brnz,pn %0, 2f\n"
 " nop\n"
 " .subsection 2\n"
 "2: rdpr %%pil, %1\n"
 " wrpr %3, %%pil\n"
 "3: ldub [%2], %0\n"
-" membar #LoadLoad\n"
 " brnz,pt %0, 3b\n"
 " nop\n"
 " ba,pt %%xcc, 1b\n"
@@ -108,12 +102,10 @@ static void inline __read_lock(raw_rwlock_t *lock)
108"4: add %0, 1, %1\n" 102"4: add %0, 1, %1\n"
109" cas [%2], %0, %1\n" 103" cas [%2], %0, %1\n"
110" cmp %0, %1\n" 104" cmp %0, %1\n"
111" membar #StoreLoad | #StoreStore\n"
112" bne,pn %%icc, 1b\n" 105" bne,pn %%icc, 1b\n"
113" nop\n" 106" nop\n"
114" .subsection 2\n" 107" .subsection 2\n"
115"2: ldsw [%2], %0\n" 108"2: ldsw [%2], %0\n"
116" membar #LoadLoad\n"
117" brlz,pt %0, 2b\n" 109" brlz,pt %0, 2b\n"
118" nop\n" 110" nop\n"
119" ba,a,pt %%xcc, 4b\n" 111" ba,a,pt %%xcc, 4b\n"
@@ -134,7 +126,6 @@ static int inline __read_trylock(raw_rwlock_t *lock)
134" add %0, 1, %1\n" 126" add %0, 1, %1\n"
135" cas [%2], %0, %1\n" 127" cas [%2], %0, %1\n"
136" cmp %0, %1\n" 128" cmp %0, %1\n"
137" membar #StoreLoad | #StoreStore\n"
138" bne,pn %%icc, 1b\n" 129" bne,pn %%icc, 1b\n"
139" mov 1, %0\n" 130" mov 1, %0\n"
140"2:" 131"2:"
@@ -150,7 +141,6 @@ static void inline __read_unlock(raw_rwlock_t *lock)
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
 "1: lduw [%2], %0\n"
 " sub %0, 1, %1\n"
 " cas [%2], %0, %1\n"
@@ -174,12 +164,10 @@ static void inline __write_lock(raw_rwlock_t *lock)
174"4: or %0, %3, %1\n" 164"4: or %0, %3, %1\n"
175" cas [%2], %0, %1\n" 165" cas [%2], %0, %1\n"
176" cmp %0, %1\n" 166" cmp %0, %1\n"
177" membar #StoreLoad | #StoreStore\n"
178" bne,pn %%icc, 1b\n" 167" bne,pn %%icc, 1b\n"
179" nop\n" 168" nop\n"
180" .subsection 2\n" 169" .subsection 2\n"
181"2: lduw [%2], %0\n" 170"2: lduw [%2], %0\n"
182" membar #LoadLoad\n"
183" brnz,pt %0, 2b\n" 171" brnz,pt %0, 2b\n"
184" nop\n" 172" nop\n"
185" ba,a,pt %%xcc, 4b\n" 173" ba,a,pt %%xcc, 4b\n"
@@ -192,7 +180,6 @@ static void inline __write_lock(raw_rwlock_t *lock)
 static void inline __write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
-" membar #LoadStore | #StoreStore\n"
 " stw %%g0, [%0]"
 	: /* no outputs */
 	: "r" (lock)
@@ -212,7 +199,6 @@ static int inline __write_trylock(raw_rwlock_t *lock)
212" or %0, %4, %1\n" 199" or %0, %4, %1\n"
213" cas [%3], %0, %1\n" 200" cas [%3], %0, %1\n"
214" cmp %0, %1\n" 201" cmp %0, %1\n"
215" membar #StoreLoad | #StoreStore\n"
216" bne,pn %%icc, 1b\n" 202" bne,pn %%icc, 1b\n"
217" nop\n" 203" nop\n"
218" mov 1, %2\n" 204" mov 1, %2\n"
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 8759f2a1b837..7554ad39b5af 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 	     : : : "memory"); \
 } while (0)
 
-#define mb() \
-	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb() \
-	membar_safe("#LoadLoad")
-#define wmb() \
-	membar_safe("#StoreStore")
-#define membar_storeload() \
-	membar_safe("#StoreLoad")
-#define membar_storeload_storestore() \
-	membar_safe("#StoreLoad | #StoreStore")
-#define membar_storeload_loadload() \
-	membar_safe("#StoreLoad | #LoadLoad")
-#define membar_storestore_loadstore() \
-	membar_safe("#StoreStore | #LoadStore")
+#define mb()	membar_safe("#StoreLoad")
+#define rmb()	__asm__ __volatile__("":::"memory")
+#define wmb()	__asm__ __volatile__("":::"memory")
 
 #endif
 
@@ -80,20 +69,20 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 
 #define read_barrier_depends()	do { } while(0)
 #define set_mb(__var, __value) \
-	do { __var = __value; membar_storeload_storestore(); } while(0)
+	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("":::"memory")
 #define smp_rmb()	__asm__ __volatile__("":::"memory")
 #define smp_wmb()	__asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()	do { } while(0)
 #endif
 
+#define smp_read_barrier_depends()	do { } while(0)
+
 #define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
 
 #define flushw_all()	__asm__ __volatile__("flushw")
@@ -209,14 +198,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
 " mov %0, %1\n"
 "1: lduw [%4], %2\n"
 " cas [%4], %2, %0\n"
 " cmp %2, %0\n"
 " bne,a,pn %%icc, 1b\n"
 " mov %1, %0\n"
-" membar #StoreLoad | #StoreStore\n"
 	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
 	: "0" (val), "r" (m)
 	: "cc", "memory");
@@ -228,14 +215,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
 " mov %0, %1\n"
 "1: ldx [%4], %2\n"
 " casx [%4], %2, %0\n"
 " cmp %2, %0\n"
 " bne,a,pn %%xcc, 1b\n"
 " mov %1, %0\n"
-" membar #StoreLoad | #StoreStore\n"
 	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
 	: "0" (val), "r" (m)
 	: "cc", "memory");
@@ -272,9 +257,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
-	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-			     "cas [%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
+	__asm__ __volatile__("cas [%2], %3, %0"
 			     : "=&r" (new)
 			     : "0" (new), "r" (m), "r" (old)
 			     : "memory");
@@ -285,9 +268,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-			     "casx [%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
+	__asm__ __volatile__("casx [%2], %3, %0"
 			     : "=&r" (new)
 			     : "0" (new), "r" (m), "r" (old)
 			     : "memory");
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 76e4299dd9bc..83c571d8c8a7 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -50,8 +50,6 @@
 #define TSB_TAG_INVALID_BIT	46
 #define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))
 
-#define TSB_MEMBAR	membar	#StoreStore
-
 /* Some cpus support physical address quad loads. We want to use
  * those if possible so we don't need to hard-lock the TSB mapping
  * into the TLB. We encode some instruction patching in order to
@@ -128,13 +126,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	cmp	REG1, REG2; \
 	bne,pn	%icc, 99b; \
 	 nop; \
-	TSB_MEMBAR
 
 #define TSB_WRITE(TSB, TTE, TAG) \
 	add	TSB, 0x8, TSB; \
 	TSB_STORE(TSB, TTE); \
 	sub	TSB, 0x8, TSB; \
-	TSB_MEMBAR; \
 	TSB_STORE(TSB, TAG);
 
 #define KTSB_LOAD_QUAD(TSB, REG) \
@@ -153,13 +149,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	cmp	REG1, REG2; \
 	bne,pn	%icc, 99b; \
 	 nop; \
-	TSB_MEMBAR
 
 #define KTSB_WRITE(TSB, TTE, TAG) \
 	add	TSB, 0x8, TSB; \
 	stxa	TTE, [TSB] ASI_N; \
 	sub	TSB, 0x8, TSB; \
-	TSB_MEMBAR; \
 	stxa	TAG, [TSB] ASI_N;
 
 	/* Do a kernel page table walk. Leaves physical PTE pointer in