author		Ralf Baechle <ralf@linux-mips.org>	2007-07-14 08:24:05 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-07-20 13:57:39 -0400
commit		17099b1142f6c0359fca60a3464dea8fb30badea (patch)
tree		26b9f3955dca84ccab594a76680c2a71e166768a
parent		ed203dadcd1373e80e95b04075e1eefc554a914b (diff)
[MIPS] Make support for weakly ordered LL/SC a config option.
None of the weakly ordered processors supported in-tree need this, but it seems like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--	arch/mips/Kconfig		11
-rw-r--r--	include/asm-mips/atomic.h	33
-rw-r--r--	include/asm-mips/barrier.h	9
-rw-r--r--	include/asm-mips/bitops.h	10
-rw-r--r--	include/asm-mips/futex.h	8
-rw-r--r--	include/asm-mips/spinlock.h	18
-rw-r--r--	include/asm-mips/system.h	8
7 files changed, 59 insertions, 38 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5c863bcd5614..1e3aeccd7322 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1190,8 +1190,19 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
 config WEAK_ORDERING
 	bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
+	bool
 endmenu
 
 #
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1b60624dab7e..7d8003769a44 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -138,7 +138,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -181,7 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -233,7 +233,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -250,7 +250,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -302,7 +302,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -519,7 +519,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -562,7 +562,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -614,7 +614,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -631,7 +631,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -683,7 +683,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -791,10 +791,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__after_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
+
 #endif /* _ASM_ATOMIC_H */
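
The atomic.h change only swaps the barrier flavour; callers keep using the generic smp_mb__before/after_atomic_*() hooks. A generic usage sketch of the idiom these hooks exist for (obj and its fields are made up, not code from this patch):

	/* Make the final writes to the object visible to other CPUs before
	 * the reference count can be seen dropping; on MIPS this hook now
	 * expands to smp_llsc_mb(), i.e. a sync only when the kernel is
	 * built with CONFIG_WEAK_REORDERING_BEYOND_LLSC on SMP.
	 */
	obj->state = OBJ_DEAD;
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->refcount);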
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
index ed82631b0017..9d8cfbb5e796 100644
--- a/include/asm-mips/barrier.h
+++ b/include/asm-mips/barrier.h
@@ -121,6 +121,11 @@
 #else
 #define __WEAK_ORDERING_MB	"		\n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB		"	sync	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
 
 #define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
@@ -129,4 +134,8 @@
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
+#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
 #endif /* __ASM_BARRIER_H */
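
Taken together, the two barrier.h hunks give MIPS two barrier families. The comment block below restates the resulting behaviour per configuration; it is a summary, not additional kernel code:

/*
 * CONFIG_WEAK_ORDERING && CONFIG_SMP:
 *	smp_mb() / smp_rmb() / smp_wmb()                 emit "sync"
 * CONFIG_WEAK_REORDERING_BEYOND_LLSC && CONFIG_SMP:
 *	smp_llsc_mb() / smp_llsc_rmb() / smp_llsc_wmb()  emit "sync"
 * otherwise:
 *	the corresponding macro is an empty asm with a "memory" clobber,
 *	i.e. a compiler-only barrier and no extra instruction.
 */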
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index d9e81af53f78..148bc79557f1 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -38,8 +38,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -289,7 +289,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -377,7 +377,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -445,7 +445,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
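
As with the atomic hooks, smp_mb__before/after_clear_bit() keep their generic meaning and only their MIPS expansion changes. A generic caller sketch (the flag word and bit name are hypothetical, not from this patch):

	/* Unlock-style use: everything written while the bit was held must
	 * be visible to other CPUs before they can observe the bit clear.
	 */
	smp_mb__before_clear_bit();
	clear_bit(MY_BIT_LOCKED, &flags);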
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 47e5679c2353..b623882bce19 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -29,7 +29,7 @@
 	"	.set	mips3			\n"	\
 	"2:	sc	$1, %2			\n"	\
 	"	beqzl	$1, 1b			\n"	\
-	__WEAK_ORDERING_MB				\
+	__WEAK_LLSC_MB					\
 	"3:					\n"	\
 	"	.set	pop			\n"	\
 	"	.set	mips0			\n"	\
@@ -55,7 +55,7 @@
 	"	.set	mips3			\n"	\
 	"2:	sc	$1, %2			\n"	\
 	"	beqz	$1, 1b			\n"	\
-	__WEAK_ORDERING_MB				\
+	__WEAK_LLSC_MB					\
 	"3:					\n"	\
 	"	.set	pop			\n"	\
 	"	.set	mips0			\n"	\
@@ -152,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.set	mips3			\n"
 	"2:	sc	$1, %1			\n"
 	"	beqzl	$1, 1b			\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"3:					\n"
 	"	.set	pop			\n"
 	"	.section .fixup,\"ax\"		\n"
@@ -179,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.set	mips3			\n"
 	"2:	sc	$1, %1			\n"
 	"	beqz	$1, 1b			\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"3:					\n"
 	"	.set	pop			\n"
 	"	.section .fixup,\"ax\"		\n"
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 35e431cd796b..bb897016c491 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -67,7 +67,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res == 0;
 }
@@ -183,7 +183,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -193,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
@@ -262,7 +262,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
@@ -293,7 +293,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	.set	reorder	\n"
 		"	beqzl	%1, 1b	\n"
 		"	 nop		\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"2:			\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -310,7 +310,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	beqz	%1, 1b	\n"
 		"	 nop		\n"
 		"	.set	reorder	\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"2:			\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -336,7 +336,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	sc	%1, %0	\n"
 		"	beqzl	%1, 1b	\n"
 		"	 nop		\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"	.set	reorder	\n"
 		"2:			\n"
@@ -354,7 +354,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	beqz	%1, 3f	\n"
 		"	 li	%2, 1	\n"
 		"2:			\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	.subsection 2	\n"
 		"3:	b	1b	\n"
 		"	 li	%2, 0	\n"
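
The spinlock/rwlock hunks all place the relaxed barrier on the same two spots: after the SC that takes the lock (acquire) and before the LL/SC sequence that releases it (release). A condensed sketch of that shape (function names are illustrative and the LL/SC loops are elided; this is not the verbatim kernel code):

static inline void lock_sketch(raw_spinlock_t *lock)
{
	/* ... LL/SC retry loop that atomically sets lock->lock ... */
	smp_llsc_mb();		/* acquire: critical section stays after the lock   */
}

static inline void read_unlock_sketch(raw_rwlock_t *rw)
{
	smp_llsc_mb();		/* release: critical section stays before the drop  */
	/* ... LL/SC retry loop that decrements the reader count ... */
}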
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 76339165bc20..eba2e3da9abe 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -117,7 +117,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -165,7 +165,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -246,7 +246,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -352,7 +352,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
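
For completeness, a generic caller sketch of the cmpxchg() path touched above (set_max() is made up, not from this patch); it relies on cmpxchg() remaining a full barrier on success, which the smp_llsc_mb() pair preserves on CPUs that reorder around LL/SC:

static inline void set_max(volatile int *max_seen, int val)
{
	int old;

	do {
		old = *max_seen;
		if (old >= val)
			return;		/* current maximum already covers val */
	} while (cmpxchg(max_seen, old, val) != old);
}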