author		Ralf Baechle <ralf@linux-mips.org>	2014-03-30 07:20:10 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-03-31 12:17:12 -0400
commit		a809d46066d5171ed446d59a51cd1e57d99fcfc3 (patch)
tree		ede5e0f23d1577da4685034894f66f1de2529937 /arch/mips
parent		30ee615bb86ba640c9ec7f85fb95c1b0e31c41be (diff)
MIPS: Fix gigaton of warning building with microMIPS.
With binutils 2.24 the attempt to switch from microMIPS mode to MIPS III
mode through .set mips3 results in *lots* of warnings like

{standard input}: Assembler messages:
{standard input}:397: Warning: the 64-bit MIPS architecture does not
support the `smartmips' extension

during a kernel build. Fixed by using .set arch=r4000 instead.

This breaks support for building the kernel with binutils 2.13, which
was supported for 32 bit kernels only anyway, and 2.14, which was a bad
vintage for MIPS anyway.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
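The substitution is mechanical: every sequence that escalated the ISA with
.set mips3 (or .set mips4 for pref/prefx) now names a concrete CPU whose
architecture includes those instructions. A minimal sketch of the resulting
LL/SC idiom, in the shape the headers below use — the operand names
(temp, counter, i) are illustrative, not copied from any one hunk:

	int temp;

	__asm__ __volatile__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"	/* was: .set mips3 */
	"1:	ll	%0, %1			\n"	/* load-linked */
	"	addu	%0, %2			\n"
	"	sc	%0, %1			\n"	/* store-conditional */
	"	beqz	%0, 1b			\n"	/* retry if sc failed */
	"	.set	pop			\n"
	: "=&r" (temp), "+m" (counter)
	: "Ir" (i));

Naming the CPU keeps the assembler's view of the configured ASEs intact,
whereas switching the base architecture to MIPS III under a
microMIPS/smartMIPS configuration is what provoked the warning above.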
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/alchemy/common/sleeper.S	6
-rw-r--r--	arch/mips/include/asm/asm.h	4
-rw-r--r--	arch/mips/include/asm/atomic.h	40
-rw-r--r--	arch/mips/include/asm/bitops.h	28
-rw-r--r--	arch/mips/include/asm/cmpxchg.h	20
-rw-r--r--	arch/mips/include/asm/futex.h	16
-rw-r--r--	arch/mips/include/asm/io.h	4
-rw-r--r--	arch/mips/include/asm/local.h	8
-rw-r--r--	arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h	12
-rw-r--r--	arch/mips/include/asm/r4kcache.h	4
-rw-r--r--	arch/mips/include/asm/stackframe.h	2
-rw-r--r--	arch/mips/kernel/bmips_vec.S	2
-rw-r--r--	arch/mips/kernel/genex.S	6
-rw-r--r--	arch/mips/kernel/idle.c	6
-rw-r--r--	arch/mips/kernel/r4k_fpu.S	2
-rw-r--r--	arch/mips/kernel/r4k_switch.S	2
-rw-r--r--	arch/mips/kernel/syscall.c	4
-rw-r--r--	arch/mips/pmcs-msp71xx/msp_setup.c	2
18 files changed, 84 insertions, 84 deletions
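In the plain assembler files the same one-line substitution applies
directly to the .set directive; schematically (the register and cache-op
operands here are placeholders in the spirit of sleeper.S):

	.set	push
	.set	arch=r4000		# was: .set mips3
	cache	0x14, 0(t0)		# MIPS III-level cache op
	.set	pop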
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index 706d933e0085..c73d81270b42 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -95,7 +95,7 @@ LEAF(alchemy_sleep_au1000)
 
 	/* cache following instructions, as memory gets put to sleep */
 	la	t0, 1f
-	.set	mips3
+	.set	arch=r4000
 	cache	0x14, 0(t0)
 	cache	0x14, 32(t0)
 	cache	0x14, 64(t0)
@@ -121,7 +121,7 @@ LEAF(alchemy_sleep_au1550)
 
 	/* cache following instructions, as memory gets put to sleep */
 	la	t0, 1f
-	.set	mips3
+	.set	arch=r4000
 	cache	0x14, 0(t0)
 	cache	0x14, 32(t0)
 	cache	0x14, 64(t0)
@@ -163,7 +163,7 @@ LEAF(alchemy_sleep_au1300)
 	la	t1, 4f
 	subu	t2, t1, t0
 
-	.set	mips3
+	.set	arch=r4000
 
 1:	cache	0x14, 0(t0)
 	subu	t2, t2, 32
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index b153e7991a9d..7c26b28bf252 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -146,7 +146,7 @@ symbol = value
 
 #define PREF(hint,addr)					\
 		.set	push;				\
-		.set	mips4;				\
+		.set	arch=r5000;			\
 		pref	hint, addr;			\
 		.set	pop
 
@@ -159,7 +159,7 @@ symbol = value
 
 #define PREFX(hint,addr)				\
 		.set	push;				\
-		.set	mips4;				\
+		.set	arch=r5000;			\
 		prefx	hint, addr;			\
 		.set	pop
 
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 7eed2f261710..e8eb3d53a241 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%0, %1		# atomic_add	\n"
 		"	addu	%0, %2				\n"
 		"	sc	%0, %1				\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%0, %1	# atomic_add	\n"
 			"	addu	%0, %2			\n"
 			"	sc	%0, %1			\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%0, %1		# atomic_sub	\n"
 		"	subu	%0, %2				\n"
 		"	sc	%0, %1				\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%0, %1	# atomic_sub	\n"
 			"	subu	%0, %2			\n"
 			"	sc	%0, %1			\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%1, %2	# atomic_add_return	\n"
 			"	addu	%0, %1, %3		\n"
 			"	sc	%0, %2			\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%1, %2	# atomic_sub_return	\n"
 			"	subu	%0, %1, %3		\n"
 			"	sc	%0, %2			\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%0, %1		# atomic64_add	\n"
 		"	daddu	%0, %2				\n"
 		"	scd	%0, %1				\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%0, %1	# atomic64_add	\n"
 			"	daddu	%0, %2			\n"
 			"	scd	%0, %1			\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%0, %1		# atomic64_sub	\n"
 		"	dsubu	%0, %2				\n"
 		"	scd	%0, %1				\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%0, %1	# atomic64_sub	\n"
 			"	dsubu	%0, %2			\n"
 			"	scd	%0, %1			\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	daddu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%1, %2	# atomic64_add_return	\n"
 			"	daddu	%0, %1, %3		\n"
 			"	scd	%0, %2			\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%1, %2	# atomic64_sub_return	\n"
 			"	dsubu	%0, %1, %3		\n"
 			"	scd	%0, %2			\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 71305a8b3d78..6a65d49e2c0d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# set_bit	\n"
 		"	or	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# set_bit	\n"
 			"	or	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# clear_bit	\n"
 		"	and	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# clear_bit	\n"
 			"	and	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# change_bit	\n"
 		"	xor	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# change_bit	\n"
 			"	xor	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	" __SC	"%2, %1			\n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	" __SC	"%2, %1			\n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	xor	%2, %3				\n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_clear_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	xor	%2, %3			\n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
 		"	xor	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_change_bit	\n"
 			"	xor	%2, %0, %3		\n"
 			"	" __SC	"\t%2, %1		\n"
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069bd8465..eefcaa363a87 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%0, %3		# xchg_u32	\n"
 		"	.set	mips0				\n"
 		"	move	%2, %z4				\n"
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	sc	%2, %1				\n"
 		"	beqzl	%2, 1b				\n"
 		"	.set	mips0				\n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%0, %3	# xchg_u32	\n"
 			"	.set	mips0			\n"
 			"	move	%2, %z4			\n"
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	sc	%2, %1			\n"
 			"	.set	mips0			\n"
 			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%0, %3		# xchg_u64	\n"
 		"	move	%2, %z4				\n"
 		"	scd	%2, %1				\n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%0, %3	# xchg_u64	\n"
 			"	move	%2, %z4			\n"
 			"	scd	%2, %1			\n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	noat			\n"	\
-		"	.set	mips3			\n"	\
+		"	.set	arch=r4000		\n"	\
 		"1:	" ld "	%0, %2	# __cmpxchg_asm	\n"	\
 		"	bne	%0, %z3, 2f		\n"	\
 		"	.set	mips0			\n"	\
 		"	move	$1, %z4			\n"	\
-		"	.set	mips3			\n"	\
+		"	.set	arch=r4000		\n"	\
 		"	" st "	$1, %1			\n"	\
 		"	beqzl	$1, 1b			\n"	\
 		"2:					\n"	\
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	noat			\n"	\
-		"	.set	mips3			\n"	\
+		"	.set	arch=r4000		\n"	\
 		"1:	" ld "	%0, %2	# __cmpxchg_asm	\n"	\
 		"	bne	%0, %z3, 2f		\n"	\
 		"	.set	mips0			\n"	\
 		"	move	$1, %z4			\n"	\
-		"	.set	mips3			\n"	\
+		"	.set	arch=r4000		\n"	\
 		"	" st "	$1, %1			\n"	\
 		"	beqz	$1, 1b			\n"	\
 		"	.set	pop			\n"	\
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1fe2cd6cdca0..194cda0396a3 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -23,11 +23,11 @@
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
 	"	.set	noat				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"1:	ll	%1, %4	# __futex_atomic_op	\n"	\
 	"	.set	mips0				\n"	\
 	"	" insn "				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"2:	sc	$1, %2				\n"	\
 	"	beqzl	$1, 1b				\n"	\
 	__WEAK_LLSC_MB						\
@@ -49,11 +49,11 @@
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
 	"	.set	noat				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"1:	"user_ll("%1", "%4")"	# __futex_atomic_op\n"	\
 	"	.set	mips0				\n"	\
 	"	" insn "				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"2:	"user_sc("$1", "%2")"			\n"	\
 	"	beqz	$1, 1b				\n"	\
 	__WEAK_LLSC_MB						\
@@ -147,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		"# futex_atomic_cmpxchg_inatomic	\n"
 		"	.set	push			\n"
 		"	.set	noat			\n"
-		"	.set	mips3			\n"
+		"	.set	arch=r4000		\n"
 		"1:	ll	%1, %3			\n"
 		"	bne	%1, %z4, 3f		\n"
 		"	.set	mips0			\n"
 		"	move	$1, %z5			\n"
-		"	.set	mips3			\n"
+		"	.set	arch=r4000		\n"
 		"2:	sc	$1, %2			\n"
 		"	beqzl	$1, 1b			\n"
 		__WEAK_LLSC_MB
@@ -174,12 +174,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		"# futex_atomic_cmpxchg_inatomic	\n"
 		"	.set	push			\n"
 		"	.set	noat			\n"
-		"	.set	mips3			\n"
+		"	.set	arch=r4000		\n"
 		"1:	"user_ll("%1", "%3")"		\n"
 		"	bne	%1, %z4, 3f		\n"
 		"	.set	mips0			\n"
 		"	move	$1, %z5			\n"
-		"	.set	mips3			\n"
+		"	.set	arch=r4000		\n"
 		"2:	"user_sc("$1", "%2")"		\n"
 		"	beqz	$1, 1b			\n"
 		__WEAK_LLSC_MB
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index e221d1de32f3..933b50e125a0 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val, \
 		if (irq)					\
 			local_irq_save(__flags);		\
 		__asm__ __volatile__(				\
-			".set	mips3"		"\t\t# __writeq""\n\t"	\
+			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
 			"dsll32 %L0, %L0, 0"	"\n\t"		\
 			"dsrl32 %L0, %L0, 0"	"\n\t"		\
 			"dsll32 %M0, %M0, 0"	"\n\t"		\
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
 		if (irq)					\
 			local_irq_save(__flags);		\
 		__asm__ __volatile__(				\
-			".set	mips3"		"\t\t# __readq" "\n\t"	\
+			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
 			"ld	%L0, %1"	"\n\t"		\
 			"dsra32	%M0, %L0, 0"	"\n\t"		\
 			"sll	%L0, %L0, 0"	"\n\t"		\
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index d44622cd74be..46dfc3c1fd49 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:"	__LL	"%1, %2	# local_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 			__SC	"%0, %2				\n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:"	__LL	"%1, %2	# local_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 			__SC	"%0, %2				\n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:"	__LL	"%1, %2	# local_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 			__SC	"%0, %2				\n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:"	__LL	"%1, %2	# local_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 			__SC	"%0, %2				\n"
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2dbc7a8cec1a..fc946c835995 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
 
 	__asm__ __volatile__(
 	"	.set	push				\n"
-	"	.set	mips3				\n"
+	"	.set	arch=r4000			\n"
 	"1:	ll	%0, %1	# set_value_reg32	\n"
 	"	and	%0, %2				\n"
 	"	or	%0, %3				\n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
 
 	__asm__ __volatile__(
 	"	.set	push				\n"
-	"	.set	mips3				\n"
+	"	.set	arch=r4000			\n"
 	"1:	ll	%0, %1		# set_reg32	\n"
 	"	or	%0, %2				\n"
 	"	sc	%0, %1				\n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
 
 	__asm__ __volatile__(
 	"	.set	push				\n"
-	"	.set	mips3				\n"
+	"	.set	arch=r4000			\n"
 	"1:	ll	%0, %1		# clear_reg32	\n"
 	"	and	%0, %2				\n"
 	"	sc	%0, %1				\n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
 
 	__asm__ __volatile__(
 	"	.set	push				\n"
-	"	.set	mips3				\n"
+	"	.set	arch=r4000			\n"
 	"1:	ll	%0, %1		# toggle_reg32	\n"
 	"	xor	%0, %2				\n"
 	"	sc	%0, %1				\n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_read_reg32(address, tmp)				\
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
 	"	.set	pop				\n"	\
 	: "=r" (tmp), "=m" (*address)				\
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_write_reg32(address, tmp)			\
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"	sc	%0, %1	#custom_write_reg32	\n"	\
 	"	"__beqz"%0, 1b				\n"	\
 	"	nop					\n"	\
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 69c2ada7d4aa..ca64cbe44493 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -36,7 +36,7 @@
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
 	"	.set	noreorder			\n"	\
-	"	.set	mips3\n\t			\n"	\
+	"	.set	arch=r4000			\n"	\
 	"	cache	%0, %1				\n"	\
 	"	.set	pop				\n"	\
 	:							\
@@ -204,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
 	__asm__ __volatile__(					\
 	"	.set	push				\n"	\
 	"	.set	noreorder			\n"	\
-	"	.set	mips3				\n"	\
+	"	.set	arch=r4000			\n"	\
 	"1:	cache	%0, (%1)			\n"	\
 	"2:	.set	pop				\n"	\
 	"	.section __ex_table,\"a\"		\n"	\
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4857e2c8df5a..d301e108d5b8 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -435,7 +435,7 @@
 
 		.macro	RESTORE_SP_AND_RET
 		LONG_L	sp, PT_R29(sp)
-		.set	mips3
+		.set	arch=r4000
 		eret
 		.set	mips0
 		.endm
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index a5bf73d22fcc..290c23b51678 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
 	jr	k0
 
 	RESTORE_ALL
-	.set	mips3
+	.set	arch=r4000
 	eret
 
 /***********************************************************************
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7365cd6be702..a9ce3408be25 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
  */
 NESTED(except_vec3_r4000, 0, sp)
 	.set	push
-	.set	mips3
+	.set	arch=r4000
 	.set	noat
 	mfc0	k1, CP0_CAUSE
 	li	k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
 	nop
 	nop
 #endif
-	.set	mips3
+	.set	arch=r4000
 	wait
 	/* end of rollback region (the region size must be power of two) */
 1:
@@ -577,7 +577,7 @@ isrdhwr:
 	ori	k1, _THREAD_MASK
 	xori	k1, _THREAD_MASK
 	LONG_L	v1, TI_TP_VALUE(k1)
-	.set	mips3
+	.set	arch=r4000
 	eret
 	.set	mips0
 #endif
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 9f904eda5de5..837ff27950bc 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
 	if (!need_resched())
 		__asm__(
 		"	.set	push		\n"
-		"	.set	mips3		\n"
+		"	.set	arch=r4000	\n"
 		"	wait			\n"
 		"	.set	pop		\n");
 	local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
 	if (!need_resched())
 		__asm__(
 		"	.set	push		\n"
-		"	.set	mips3		\n"
+		"	.set	arch=r4000	\n"
 		"	.set	noat		\n"
 		"	mfc0	$1, $12		\n"
 		"	sync			\n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
 	unsigned long c0status = read_c0_status() | 1;	/* irqs on */
 
 	__asm__(
-	"	.set	mips3			\n"
+	"	.set	arch=r4000		\n"
 	"	cache	0x14, 0(%0)		\n"
 	"	cache	0x14, 32(%0)		\n"
 	"	sync				\n"
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 752b50a69264..0cfa7a56a153 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -31,7 +31,7 @@
 	.endm
 
 	.set	noreorder
-	.set	mips3
+	.set	arch=r4000
 
 LEAF(_save_fp_context)
 	cfc1	t1, fcr31
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index f938ecd22af0..abacac7c33ef 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -294,7 +294,7 @@ LEAF(_init_fpu)
 1:	.set	pop
 #endif /* CONFIG_CPU_MIPS32_R2 */
 #else
-	.set	mips3
+	.set	arch=r4000
 	dmtc1	t1, $f0
 	dmtc1	t1, $f2
 	dmtc1	t1, $f4
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b79d13f95bf0..4a4f9dda5658 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	li	%[err], 0			\n"
 		"1:	ll	%[old], (%[addr])		\n"
 		"	move	%[tmp], %[new]			\n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__ (
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	li	%[err], 0			\n"
 		"1:	ll	%[old], (%[addr])		\n"
 		"	move	%[tmp], %[new]			\n"
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 396b2967ad85..7e980767679c 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -49,7 +49,7 @@ void msp7120_reset(void)
 	/* Cache the reset code of this function */
 	__asm__ __volatile__ (
 		"	.set	push			\n"
-		"	.set	mips3			\n"
+		"	.set	arch=r4000		\n"
 		"	la	%0,startpoint		\n"
 		"	la	%1,endpoint		\n"
 		"	.set	pop			\n"