path: root/arch/mips/include/asm/atomic.h
author	Ralf Baechle <ralf@linux-mips.org>	2014-03-30 07:20:10 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-03-31 12:17:12 -0400
commit	a809d46066d5171ed446d59a51cd1e57d99fcfc3 (patch)
tree	ede5e0f23d1577da4685034894f66f1de2529937	/arch/mips/include/asm/atomic.h
parent	30ee615bb86ba640c9ec7f85fb95c1b0e31c41be (diff)
MIPS: Fix gigaton of warning building with microMIPS.
With binutils 2.24, the attempt to switch from microMIPS mode to MIPS III mode through .set mips3 results in *lots* of warnings like

    {standard input}: Assembler messages:
    {standard input}:397: Warning: the 64-bit MIPS architecture does not support the `smartmips' extension

during a kernel build. Fixed by using .set arch=r4000 instead.

This breaks support for building the kernel with binutils 2.13, which was supported for 32-bit kernels only anyway, and 2.14, which was a bad vintage for MIPS anyway.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
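For readers unfamiliar with the code being patched: it is the classic MIPS LL/SC retry loop expressed as GCC inline assembly. Below is a minimal, self-contained sketch, not taken from the kernel; the function name sketch_atomic_add and the .set push/.set pop bracketing are illustrative assumptions. It only shows where the .set arch=r4000 directive takes the place of .set mips3.

/*
 * Sketch only (requires a MIPS toolchain): an LL/SC add loop where the
 * ISA override is now ".set arch=r4000" rather than ".set mips3", so
 * binutils 2.24 does not warn when the surrounding code is microMIPS.
 */
static inline void sketch_atomic_add(int i, volatile int *v)
{
	int temp;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	arch=r4000				\n" /* was: .set mips3 */
	"1:	ll	%0, %1		# load-linked		\n"
	"	addu	%0, %2		# add the increment	\n"
	"	sc	%0, %1		# store-conditional	\n"
	"	beqz	%0, 1b		# retry if SC failed	\n"
	"	.set	pop					\n"
	: "=&r" (temp), "+m" (*v)
	: "Ir" (i));
}

The kernel's real variants differ in detail, for instance the non-R10000 case wraps the asm in a C do { ... } while retry loop, as the hunks below show.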
Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--	arch/mips/include/asm/atomic.h	40
1 file changed, 20 insertions, 20 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 7eed2f261710..e8eb3d53a241 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%0, %1		# atomic_add		\n"
 		"	addu	%0, %2					\n"
 		"	sc	%0, %1					\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_add	\n"
 			"	addu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%0, %1		# atomic_sub		\n"
 		"	subu	%0, %2					\n"
 		"	sc	%0, %1					\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_sub	\n"
 			"	subu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_add_return	\n"
 			"	addu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_sub_return	\n"
 			"	subu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_add		\n"
 		"	daddu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1		# atomic64_add	\n"
 			"	daddu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_sub		\n"
 		"	dsubu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1		# atomic64_sub	\n"
 			"	dsubu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	daddu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_add_return	\n"
 			"	daddu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_sub_return	\n"
 			"	dsubu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"