author     David S. Miller <davem@davemloft.net>    2008-11-15 16:33:25 -0500
committer  David S. Miller <davem@davemloft.net>    2008-12-04 12:16:47 -0500
commit     293666b7a17cb7a389fc274980439212386a19c4 (patch)
tree       075cc7661d2113cf04da7130b3383979d8024206 /arch/sparc/include/asm/system_64.h
parent     64f2dde3f743c8a1ad8c0a1aa74166c1034afd92 (diff)
sparc64: Stop using memory barriers for atomics and locks.
The kernel always executes in the TSO memory model now, so none of
this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller <davem@davemloft.net>
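For context (not part of the commit): under TSO the only hardware reordering the CPU may still perform is letting a later load pass an earlier store, which is why the full mb() in the diff below keeps only #StoreLoad while rmb() and wmb() collapse to compiler barriers. A minimal userspace sketch of the classic store-buffering test shows the one case that still needs the membar; the pthread harness and variable names are illustrative only, not kernel code.

/*
 * Store-buffering litmus test (illustrative sketch, sparc64 userland).
 * Without the membar #StoreLoad, TSO allows each thread's load to be
 * satisfied before its own store becomes globally visible, so both
 * r0 == 0 and r1 == 0 could be observed.  With the barriers, that
 * outcome is impossible.  The other orderings (#LoadLoad, #LoadStore,
 * #StoreStore) are already guaranteed by TSO.
 */
#include <pthread.h>
#include <stdio.h>

static volatile int x, y;
static int r0, r1;

static void *thread0(void *unused)
{
        x = 1;
        __asm__ __volatile__("membar #StoreLoad" : : : "memory");
        r0 = y;
        return NULL;
}

static void *thread1(void *unused)
{
        y = 1;
        __asm__ __volatile__("membar #StoreLoad" : : : "memory");
        r1 = x;
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, thread0, NULL);
        pthread_create(&b, NULL, thread1, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("r0=%d r1=%d\n", r0, r1);
        return 0;
}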
Diffstat (limited to 'arch/sparc/include/asm/system_64.h')
-rw-r--r--  arch/sparc/include/asm/system_64.h | 35 ++++++++---------------------------
1 file changed, 8 insertions(+), 27 deletions(-)
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 8759f2a1b837..7554ad39b5af 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
                              : : : "memory"); \
 } while (0)
 
-#define mb()    \
-        membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb()   \
-        membar_safe("#LoadLoad")
-#define wmb()   \
-        membar_safe("#StoreStore")
-#define membar_storeload() \
-        membar_safe("#StoreLoad")
-#define membar_storeload_storestore() \
-        membar_safe("#StoreLoad | #StoreStore")
-#define membar_storeload_loadload() \
-        membar_safe("#StoreLoad | #LoadLoad")
-#define membar_storestore_loadstore() \
-        membar_safe("#StoreStore | #LoadStore")
+#define mb()    membar_safe("#StoreLoad")
+#define rmb()   __asm__ __volatile__("":::"memory")
+#define wmb()   __asm__ __volatile__("":::"memory")
 
 #endif
 
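The hunk context quoted above (the "ba,pt %%xcc, 1f" fragment in the @@ header and the ': : : "memory"); \' / '} while (0)' lines) belongs to membar_safe() itself. Reconstructed from that context, the helper looks roughly like the sketch below; the two middle lines are inferred rather than shown in this diff, and the delay-slot placement of the membar is believed to work around a memory-barrier erratum on early UltraSPARC (Spitfire) chips.

/*
 * Rough reconstruction of membar_safe(): the first and last lines
 * appear as hunk context in this diff, the middle two are inferred.
 * The membar executes from the delay slot of an always-taken branch.
 */
#define membar_safe(type) \
do {    __asm__ __volatile__("ba,pt   %%xcc, 1f\n\t" \
                             " membar " type "\n" \
                             "1:\n" \
                             : : : "memory"); \
} while (0)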
@@ -80,20 +69,20 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 
 #define read_barrier_depends()          do { } while(0)
 #define set_mb(__var, __value) \
-        do { __var = __value; membar_storeload_storestore(); } while(0)
+        do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()        mb()
 #define smp_rmb()       rmb()
 #define smp_wmb()       wmb()
-#define smp_read_barrier_depends()      read_barrier_depends()
 #else
 #define smp_mb()        __asm__ __volatile__("":::"memory")
 #define smp_rmb()       __asm__ __volatile__("":::"memory")
 #define smp_wmb()       __asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()      do { } while(0)
 #endif
 
+#define smp_read_barrier_depends()      do { } while(0)
+
 #define flushi(addr)    __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
 
 #define flushw_all()    __asm__ __volatile__("flushw")
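set_mb() keeps a real #StoreLoad barrier rather than becoming a plain compiler barrier because its usual pattern is "store a flag, then load a condition", and a later load passing an earlier store is exactly the reordering TSO still allows. A hedged userspace analogue of that pattern follows; SET_MB_SKETCH and the variables are illustrative, not the kernel macro.

/* Illustrative analogue of set_mb(): store the value, then fence. */
#define SET_MB_SKETCH(var, value)                                         \
        do {                                                              \
                (var) = (value);                                          \
                __asm__ __volatile__("membar #StoreLoad" : : : "memory"); \
        } while (0)

static volatile int waiter_ready;
static volatile int work_pending;

static int should_sleep(void)
{
        SET_MB_SKETCH(waiter_ready, 1); /* publish the flag first...      */
        return !work_pending;           /* ...then test for pending work. */
}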
@@ -209,14 +198,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
         unsigned long tmp1, tmp2;
 
         __asm__ __volatile__(
-"        membar          #StoreLoad | #LoadLoad\n"
 "        mov             %0, %1\n"
 "1:      lduw            [%4], %2\n"
 "        cas             [%4], %2, %0\n"
 "        cmp             %2, %0\n"
 "        bne,a,pn        %%icc, 1b\n"
 "         mov            %1, %0\n"
-"        membar          #StoreLoad | #StoreStore\n"
         : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
         : "0" (val), "r" (m)
         : "cc", "memory");
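Dropping the membars around the cas loop is the point of the commit: under TSO the load half of the cas already orders every later load and store behind it, and the "memory" clobber still keeps the compiler from moving accesses across the asm. As a hedged illustration, a test-and-set lock built on the xchg32() above needs nothing more; the function names are illustrative, not kernel APIs.

/*
 * Illustrative test-and-set lock on top of the xchg32() above.
 * Acquire: spin until the previous lock word was 0.  Release: a plain
 * store is enough under TSO, because earlier loads and stores cannot
 * be reordered past a later store.
 */
static void spin_lock_sketch(volatile unsigned int *lock_word)
{
        while (xchg32(lock_word, 1) != 0)
                /* spin until the lock is observed free */;
}

static void spin_unlock_sketch(volatile unsigned int *lock_word)
{
        *lock_word = 0;
}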
@@ -228,14 +215,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
         unsigned long tmp1, tmp2;
 
         __asm__ __volatile__(
-"        membar          #StoreLoad | #LoadLoad\n"
 "        mov             %0, %1\n"
 "1:      ldx             [%4], %2\n"
 "        casx            [%4], %2, %0\n"
 "        cmp             %2, %0\n"
 "        bne,a,pn        %%xcc, 1b\n"
 "         mov            %1, %0\n"
-"        membar          #StoreLoad | #StoreStore\n"
         : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
         : "0" (val), "r" (m)
         : "cc", "memory");
@@ -272,9 +257,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
-        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-                             "cas [%2], %3, %0\n\t"
-                             "membar #StoreLoad | #StoreStore"
+        __asm__ __volatile__("cas [%2], %3, %0"
                              : "=&r" (new)
                              : "0" (new), "r" (m), "r" (old)
                              : "memory");
@@ -285,9 +268,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-                             "casx [%2], %3, %0\n\t"
-                             "membar #StoreLoad | #StoreStore"
+        __asm__ __volatile__("casx [%2], %3, %0"
                              : "=&r" (new)
                              : "0" (new), "r" (m), "r" (old)
                              : "memory");
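Per the commit message, the explicit membars around cas and casx are no longer needed once the kernel always runs under TSO; the "memory" clobber still acts as a compiler barrier. As a hedged illustration, a lock-free increment built on the __cmpxchg_u32() above looks like the following; atomic_inc_sketch is an illustrative name, not a kernel API.

/*
 * Illustrative lock-free increment using the __cmpxchg_u32() above.
 * __cmpxchg_u32() returns the value that was in *counter: if it
 * matches the value we read, our increment was installed; otherwise
 * another CPU raced with us and we retry.
 */
static int atomic_inc_sketch(volatile int *counter)
{
        int old, seen;

        do {
                old = *counter;
                seen = (int) __cmpxchg_u32(counter, old, old + 1);
        } while (seen != old);

        return old + 1;
}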