author    Maciej W. Rozycki <macro@codesourcery.com>  2014-11-15 17:08:48 -0500
committer Ralf Baechle <ralf@linux-mips.org>          2014-11-24 01:45:36 -0500
commit    b0984c43702f0fe2dbb0c344843e36c8b2cd13f1 (patch)
tree      5c8543266b5a2549ef2e90e0c22e05e0a08d32f8 /arch
parent    aec711d563c135186639443af1c2f0d8452b9709 (diff)
MIPS: Fix microMIPS LL/SC immediate offsets
In the microMIPS encoding some memory access instructions have their
immediate offset reduced to 12 bits only. That does not match the GCC
`R' constraint we use in some places to satisfy the requirement,
resulting in build failures like this:

  {standard input}: Assembler messages:
  {standard input}:720: Error: macro used $at after ".set noat"
  {standard input}:720: Warning: macro instruction expanded into multiple instructions

Fix the problem by defining a macro, `GCC_OFF12_ASM', that expands to
the right constraint depending on whether microMIPS or standard MIPS
code is produced. Also apply the fix where the `m' constraint is used:
in the worst case this change does nothing, e.g. where the pointer was
already in a register such as a function argument and no further offset
was requested, and in the best case it avoids an extraneous sequence of
up to two instructions to load the high 20 bits of the address in the
LL/SC loop. This reduces the risk of lock contention, which grows with
the number of instructions in the critical section between LL and SC.

Strictly speaking we could just bulk-replace `R' with `ZC', as the
latter constraint adjusts automatically depending on the ISA selected.
However `ZC' was only introduced with GCC 4.9 and we keep supporting
older compilers for the standard MIPS configuration, hence the slightly
more complicated approach I chose.

The choice of a zero-argument function-like rather than an object-like
macro was made so that it does not look like a function call taking the
C expression used for the constraint as an argument. This is so as not
to confuse the reader or formatting checkers like `checkpatch.pl', and
follows previous practice.

Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8482/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
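[Editor's note: for illustration, a minimal sketch of the pattern the
patch establishes. The constraint-selection block mirrors the
<asm/compiler.h> hunk below; the `example_atomic_inc' wrapper and its
operand names are hypothetical, and the real kernel loops additionally
handle R10000_LLSC_WAR and CPUs without LL/SC.]

    /* Constraint selection as added to <asm/compiler.h>: `R' permits a
     * 16-bit offset, while `ZC' (GCC 4.9+) tracks the selected ISA and
     * shrinks to the 12-bit offset microMIPS LL/SC instructions accept.
     */
    #ifndef CONFIG_CPU_MICROMIPS
    #define GCC_OFF12_ASM() "R"
    #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
    #define GCC_OFF12_ASM() "ZC"
    #else
    #error "microMIPS compilation unsupported with GCC older than 4.9"
    #endif

    /* Hypothetical LL/SC increment showing a use site: string pasting
     * turns "+" GCC_OFF12_ASM() into the single constraint "+R" or
     * "+ZC", so GCC presents the operand as a memory reference whose
     * offset fits the LL/SC encoding -- no $at-clobbering address
     * expansion by the assembler.
     */
    static inline void example_atomic_inc(int *counter)
    {
            int temp;

            __asm__ __volatile__(
            "1:     ll      %0, %1          # load-linked           \n"
            "       addiu   %0, %0, 1                               \n"
            "       sc      %0, %1          # store-conditional     \n"
            "       beqz    %0, 1b                                  \n"
            : "=&r" (temp), "+" GCC_OFF12_ASM() (*counter));
    }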
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/include/asm/atomic.h                        | 39
-rw-r--r--  arch/mips/include/asm/bitops.h                        | 35
-rw-r--r--  arch/mips/include/asm/cmpxchg.h                       | 27
-rw-r--r--  arch/mips/include/asm/compiler.h                      |  8
-rw-r--r--  arch/mips/include/asm/edac.h                          |  6
-rw-r--r--  arch/mips/include/asm/futex.h                         | 23
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h  | 25
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-cmd-queue.h         |  4
-rw-r--r--  arch/mips/include/asm/spinlock.h                      | 50
9 files changed, 126 insertions(+), 91 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..ec4b4d658bc4 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
@@ -53,7 +54,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
         " sc %0, %1 \n" \
         " beqzl %0, 1b \n" \
         " .set mips0 \n" \
-        : "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 int temp; \
@@ -65,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
         " " #asm_op " %0, %2 \n" \
         " sc %0, %1 \n" \
         " .set mips0 \n" \
-        : "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } while (unlikely(!temp)); \
         } else { \
@@ -95,7 +96,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
         " beqzl %0, 1b \n" \
         " " #asm_op " %0, %1, %3 \n" \
         " .set mips0 \n" \
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 int temp; \
@@ -107,7 +109,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
         " " #asm_op " %0, %1, %3 \n" \
         " sc %0, %2 \n" \
         " .set mips0 \n" \
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } while (unlikely(!result)); \
 \
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
         " .set reorder \n"
         "1: \n"
         " .set mips0 \n"
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
+        : "=&r" (result), "=&r" (temp),
+          "+" GCC_OFF12_ASM() (v->counter)
+        : "Ir" (i), GCC_OFF12_ASM() (v->counter)
         : "memory");
         } else if (kernel_uses_llsc) {
                 int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
         " .set reorder \n"
         "1: \n"
         " .set mips0 \n"
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+        : "=&r" (result), "=&r" (temp),
+          "+" GCC_OFF12_ASM() (v->counter)
         : "Ir" (i));
         } else {
                 unsigned long flags;
@@ -328,7 +333,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
         " scd %0, %1 \n" \
         " beqzl %0, 1b \n" \
         " .set mips0 \n" \
-        : "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 long temp; \
@@ -340,7 +345,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
         " " #asm_op " %0, %2 \n" \
         " scd %0, %1 \n" \
         " .set mips0 \n" \
-        : "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } while (unlikely(!temp)); \
         } else { \
@@ -370,7 +375,8 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
         " beqzl %0, 1b \n" \
         " " #asm_op " %0, %1, %3 \n" \
         " .set mips0 \n" \
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF12_ASM() (v->counter) \
         : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 long temp; \
@@ -382,8 +388,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
         " " #asm_op " %0, %1, %3 \n" \
         " scd %0, %2 \n" \
         " .set mips0 \n" \
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
-        : "Ir" (i), "m" (v->counter) \
+        : "=&r" (result), "=&r" (temp), \
+          "=" GCC_OFF12_ASM() (v->counter) \
+        : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
         : "memory"); \
         } while (unlikely(!result)); \
 \
@@ -443,8 +450,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
         " .set reorder \n"
         "1: \n"
         " .set mips0 \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
+        : "=&r" (result), "=&r" (temp),
+          "=" GCC_OFF12_ASM() (v->counter)
+        : "Ir" (i), GCC_OFF12_ASM() (v->counter)
         : "memory");
         } else if (kernel_uses_llsc) {
                 long temp;
@@ -461,7 +469,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
         " .set reorder \n"
         "1: \n"
         " .set mips0 \n"
-        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+        : "=&r" (result), "=&r" (temp),
+          "+" GCC_OFF12_ASM() (v->counter)
         : "Ir" (i));
         } else {
                 unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index bae6b0fa8ab5..6663bcca9d0c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h> /* sigh ... */
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/sgidefs.h>
 #include <asm/war.h>
@@ -78,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
         " " __SC "%0, %1 \n"
         " beqzl %0, 1b \n"
         " .set mips0 \n"
-        : "=&r" (temp), "=m" (*m)
-        : "ir" (1UL << bit), "m" (*m));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
+        : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
 #ifdef CONFIG_CPU_MIPSR2
         } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                 do {
@@ -87,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
         " " __LL "%0, %1 # set_bit \n"
         " " __INS "%0, %3, %2, 1 \n"
         " " __SC "%0, %1 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (bit), "r" (~0));
                 } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
@@ -99,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
         " or %0, %2 \n"
         " " __SC "%0, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (1UL << bit));
                 } while (unlikely(!temp));
         } else
@@ -130,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
         " " __SC "%0, %1 \n"
         " beqzl %0, 1b \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (~(1UL << bit)));
 #ifdef CONFIG_CPU_MIPSR2
         } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
@@ -139,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
         " " __LL "%0, %1 # clear_bit \n"
         " " __INS "%0, $0, %2, 1 \n"
         " " __SC "%0, %1 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (bit));
                 } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
@@ -151,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
         " and %0, %2 \n"
         " " __SC "%0, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (~(1UL << bit)));
                 } while (unlikely(!temp));
         } else
@@ -196,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
         " " __SC "%0, %1 \n"
         " beqzl %0, 1b \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (1UL << bit));
         } else if (kernel_uses_llsc) {
                 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -209,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
         " xor %0, %2 \n"
         " " __SC "%0, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
         : "ir" (1UL << bit));
                 } while (unlikely(!temp));
         } else
@@ -244,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
         " beqzl %2, 1b \n"
         " and %2, %0, %3 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
         } else if (kernel_uses_llsc) {
@@ -258,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr,
         " or %2, %0, %3 \n"
         " " __SC "%2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
                 } while (unlikely(!res));
@@ -312,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
         " or %2, %0, %3 \n"
         " " __SC "%2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
                 } while (unlikely(!res));
@@ -354,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr,
         " beqzl %2, 1b \n"
         " and %2, %0, %3 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
 #ifdef CONFIG_CPU_MIPSR2
@@ -368,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
         " " __EXT "%2, %0, %3, 1 \n"
         " " __INS "%0, $0, %3, 1 \n"
         " " __SC "%0, %1 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "ir" (bit)
         : "memory");
                 } while (unlikely(!temp));
@@ -385,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
         " xor %2, %3 \n"
         " " __SC "%2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
                 } while (unlikely(!res));
@@ -427,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
         " beqzl %2, 1b \n"
         " and %2, %0, %3 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
         } else if (kernel_uses_llsc) {
@@ -441,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr,
         " xor %2, %0, %3 \n"
         " " __SC "\t%2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (temp), "+m" (*m), "=&r" (res)
+        : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
         : "r" (1UL << bit)
         : "memory");
                 } while (unlikely(!res));
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eefcaa363a87..28b1edf19501 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -10,6 +10,7 @@
 
 #include <linux/bug.h>
 #include <linux/irqflags.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
@@ -30,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
         " sc %2, %1 \n"
         " beqzl %2, 1b \n"
         " .set mips0 \n"
-        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
-        : "R" (*m), "Jr" (val)
+        : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+        : GCC_OFF12_ASM() (*m), "Jr" (val)
         : "memory");
         } else if (kernel_uses_llsc) {
                 unsigned long dummy;
@@ -45,8 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
         " .set arch=r4000 \n"
         " sc %2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
-        : "R" (*m), "Jr" (val)
+        : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+          "=&r" (dummy)
+        : GCC_OFF12_ASM() (*m), "Jr" (val)
         : "memory");
                 } while (unlikely(!dummy));
         } else {
@@ -80,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
         " scd %2, %1 \n"
         " beqzl %2, 1b \n"
         " .set mips0 \n"
-        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
-        : "R" (*m), "Jr" (val)
+        : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+        : GCC_OFF12_ASM() (*m), "Jr" (val)
         : "memory");
         } else if (kernel_uses_llsc) {
                 unsigned long dummy;
@@ -93,8 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
         " move %2, %z4 \n"
         " scd %2, %1 \n"
         " .set mips0 \n"
-        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
-        : "R" (*m), "Jr" (val)
+        : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+          "=&r" (dummy)
+        : GCC_OFF12_ASM() (*m), "Jr" (val)
         : "memory");
                 } while (unlikely(!dummy));
         } else {
@@ -155,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
         " beqzl $1, 1b \n" \
         "2: \n" \
         " .set pop \n" \
-        : "=&r" (__ret), "=R" (*m) \
-        : "R" (*m), "Jr" (old), "Jr" (new) \
+        : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+        : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
         : "memory"); \
         } else if (kernel_uses_llsc) { \
                 __asm__ __volatile__( \
@@ -172,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
         " beqz $1, 1b \n" \
         " .set pop \n" \
         "2: \n" \
-        : "=&r" (__ret), "=R" (*m) \
-        : "R" (*m), "Jr" (old), "Jr" (new) \
+        : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+        : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
         : "memory"); \
         } else { \
                 unsigned long __flags; \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index 71f5c5cfc58a..c73815e0123a 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,4 +16,12 @@
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifndef CONFIG_CPU_MICROMIPS
+#define GCC_OFF12_ASM() "R"
+#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+#define GCC_OFF12_ASM() "ZC"
+#else
+#error "microMIPS compilation unsupported with GCC older than 4.9"
+#endif
+
 #endif /* _ASM_COMPILER_H */
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index 4da0c1fe30d9..ae6fedcb0060 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -1,6 +1,8 @@
 #ifndef ASM_EDAC_H
 #define ASM_EDAC_H
 
+#include <asm/compiler.h>
+
 /* ECC atomic, DMA, SMP and interrupt safe scrub function */
 
 static inline void atomic_scrub(void *va, u32 size)
@@ -24,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
         " sc %0, %1 \n"
         " beqz %0, 1b \n"
         " .set mips0 \n"
-        : "=&r" (temp), "=m" (*virt_addr)
-        : "m" (*virt_addr));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
+        : GCC_OFF12_ASM() (*virt_addr));
 
                 virt_addr++;
         }
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 194cda0396a3..d0177bf915bb 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <asm/asm-eva.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/errno.h>
 #include <asm/war.h>
 
@@ -42,8 +43,10 @@
         " "__UA_ADDR "\t1b, 4b \n" \
         " "__UA_ADDR "\t2b, 4b \n" \
         " .previous \n" \
-        : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
-        : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+        : "=r" (ret), "=&r" (oldval), \
+          "=" GCC_OFF12_ASM() (*uaddr) \
+        : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+          "i" (-EFAULT) \
         : "memory"); \
         } else if (cpu_has_llsc) { \
                 __asm__ __volatile__( \
@@ -68,8 +71,10 @@
         " "__UA_ADDR "\t1b, 4b \n" \
         " "__UA_ADDR "\t2b, 4b \n" \
         " .previous \n" \
-        : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
-        : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+        : "=r" (ret), "=&r" (oldval), \
+          "=" GCC_OFF12_ASM() (*uaddr) \
+        : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+          "i" (-EFAULT) \
         : "memory"); \
         } else \
                 ret = -ENOSYS; \
@@ -166,8 +171,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         " "__UA_ADDR "\t1b, 4b \n"
         " "__UA_ADDR "\t2b, 4b \n"
         " .previous \n"
-        : "+r" (ret), "=&r" (val), "=R" (*uaddr)
-        : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+        : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+        : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+          "i" (-EFAULT)
         : "memory");
         } else if (cpu_has_llsc) {
                 __asm__ __volatile__(
@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         " "__UA_ADDR "\t1b, 4b \n"
         " "__UA_ADDR "\t2b, 4b \n"
         " .previous \n"
-        : "+r" (ret), "=&r" (val), "=R" (*uaddr)
-        : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+        : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+        : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+          "i" (-EFAULT)
         : "memory");
         } else
                 return -ENOSYS;
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index fc946c835995..2e54b4bff5cf 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -49,6 +49,7 @@
 
 #include <linux/types.h>
 
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 #ifndef R10000_LLSC_WAR
@@ -84,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
         " "__beqz"%0, 1b \n"
         " nop \n"
         " .set pop \n"
-        : "=&r" (temp), "=m" (*addr)
-        : "ir" (~mask), "ir" (value), "m" (*addr));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+        : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -105,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
         " "__beqz"%0, 1b \n"
         " nop \n"
         " .set pop \n"
-        : "=&r" (temp), "=m" (*addr)
-        : "ir" (mask), "m" (*addr));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+        : "ir" (mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -126,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
         " "__beqz"%0, 1b \n"
         " nop \n"
         " .set pop \n"
-        : "=&r" (temp), "=m" (*addr)
-        : "ir" (~mask), "m" (*addr));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+        : "ir" (~mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -147,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
         " "__beqz"%0, 1b \n"
         " nop \n"
         " .set pop \n"
-        : "=&r" (temp), "=m" (*addr)
-        : "ir" (mask), "m" (*addr));
+        : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+        : "ir" (mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -219,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
         " .set arch=r4000 \n" \
         "1: ll %0, %1 #custom_read_reg32 \n" \
         " .set pop \n" \
-        : "=r" (tmp), "=m" (*address) \
-        : "m" (*address))
+        : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+        : GCC_OFF12_ASM() (*address))
 
 #define custom_write_reg32(address, tmp) \
         __asm__ __volatile__( \
@@ -230,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
         " "__beqz"%0, 1b \n" \
         " nop \n" \
         " .set pop \n" \
-        : "=&r" (tmp), "=m" (*address) \
-        : "0" (tmp), "m" (*address))
+        : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+        : "0" (tmp), GCC_OFF12_ASM() (*address))
 
 #endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 024a71b2bff9..75739c83f07e 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -76,6 +76,8 @@
 
 #include <linux/prefetch.h>
 
+#include <asm/compiler.h>
+
 #include <asm/octeon/cvmx-fpa.h>
 /**
  * By default we disable the max depth support. Most programs
@@ -273,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
         " lbu %[ticket], %[now_serving]\n"
         "4:\n"
         ".set pop\n" :
-        [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+        [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
         [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
         [my_ticket] "=r"(my_ticket)
         );
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 78d201fb6c87..c6d06d383ef9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 /*
@@ -88,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
         " subu %[ticket], %[ticket], 1 \n"
         " .previous \n"
         " .set pop \n"
-        : [ticket_ptr] "+m" (lock->lock),
+        : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
           [serving_now_ptr] "+m" (lock->h.serving_now),
           [ticket] "=&r" (tmp),
           [my_ticket] "=&r" (my_ticket)
@@ -121,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
         " subu %[ticket], %[ticket], 1 \n"
         " .previous \n"
         " .set pop \n"
-        : [ticket_ptr] "+m" (lock->lock),
+        : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
          [serving_now_ptr] "+m" (lock->h.serving_now),
           [ticket] "=&r" (tmp),
           [my_ticket] "=&r" (my_ticket)
@@ -163,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
         " li %[ticket], 0 \n"
         " .previous \n"
         " .set pop \n"
-        : [ticket_ptr] "+m" (lock->lock),
+        : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
           [ticket] "=&r" (tmp),
           [my_ticket] "=&r" (tmp2),
           [now_serving] "=&r" (tmp3)
@@ -187,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
         " li %[ticket], 0 \n"
         " .previous \n"
         " .set pop \n"
-        : [ticket_ptr] "+m" (lock->lock),
+        : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
           [ticket] "=&r" (tmp),
           [my_ticket] "=&r" (tmp2),
           [now_serving] "=&r" (tmp3)
@@ -234,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
         " beqzl %1, 1b \n"
         " nop \n"
         " .set reorder \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         } else {
                 do {
@@ -244,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
         " bltz %1, 1b \n"
         " addu %1, 1 \n"
         "2: sc %1, %0 \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
                 } while (unlikely(!tmp));
         }
@@ -268,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
         " sub %1, 1 \n"
         " sc %1, %0 \n"
         " beqzl %1, 1b \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         } else {
                 do {
@@ -277,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
         "1: ll %1, %2 # arch_read_unlock \n"
         " sub %1, 1 \n"
         " sc %1, %0 \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
                 } while (unlikely(!tmp));
         }
@@ -298,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
         " beqzl %1, 1b \n"
         " nop \n"
         " .set reorder \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         } else {
                 do {
@@ -308,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
         " bnez %1, 1b \n"
         " lui %1, 0x8000 \n"
         "2: sc %1, %0 \n"
-        : "=m" (rw->lock), "=&r" (tmp)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
                 } while (unlikely(!tmp));
         }
@@ -348,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
         __WEAK_LLSC_MB
         " li %2, 1 \n"
         "2: \n"
-        : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         } else {
                 __asm__ __volatile__(
@@ -365,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
         __WEAK_LLSC_MB
         " li %2, 1 \n"
         "2: \n"
-        : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         }
 
@@ -392,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
         " li %2, 1 \n"
         " .set reorder \n"
         "2: \n"
-        : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
         } else {
                 do {
@@ -405,8 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
         " sc %1, %0 \n"
         " li %2, 1 \n"
         "2: \n"
-        : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-        : "m" (rw->lock)
+        : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+          "=&r" (ret)
+        : GCC_OFF12_ASM() (rw->lock)
         : "memory");
                 } while (unlikely(!tmp));
 