Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--	arch/mips/include/asm/bitops.h		| 128
-rw-r--r--	arch/mips/include/asm/compat.h		|   2
-rw-r--r--	arch/mips/include/asm/io.h		|   1
-rw-r--r--	arch/mips/include/asm/irqflags.h	| 207
-rw-r--r--	arch/mips/include/asm/thread_info.h	|   6
5 files changed, 116 insertions(+), 228 deletions(-)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b4..46ac73abd5e 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+		volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		: "=&r" (temp), "+m" (*m)
 		: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		: "=&r" (temp), "+m" (*m)
 		: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		: "=&r" (temp), "+m" (*m)
 		: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 
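For readers without the rest of the series at hand, here is a minimal sketch of what the out-of-line slow path declared above could look like on the bitops.c side. The bodies are reconstructed from the raw_local_irq_{save,restore}() fallbacks that the hunks above delete; the exact file path, the symbol exports and the remaining helpers are assumptions, not part of this diff.

/*
 * Sketch of two of the out-of-line helpers (arch/mips/lib/bitops.c is
 * assumed here).  Reconstructed from the removed inline fallbacks; the
 * real file may differ in detail.
 */
#include <linux/bitops.h>	/* SZLONG_LOG, SZLONG_MASK via asm/bitops.h */
#include <linux/irqflags.h>
#include <linux/export.h>

void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	int bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);	/* plain read-modify-write, so mask IRQs */
	*a |= mask;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);

int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	int bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;
	int res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	res = (mask & *a) != 0;		/* return the old bit value */
	*a |= mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit);

The remaining __mips_*_bit() variants would follow the same pattern, differing only in the update applied (&= ~mask, ^= mask) and in whether the old value is returned.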
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd..3c5d1464b7b 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c..ff2e0345e01 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909..9f3384c789d 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1e						\n"
-	"	mtc0	$1,$12						\n"
-#endif
-	"	irq_enable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm						\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm						\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm						\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1					\n"
-	"andi	\\flags, 0x400					\n"
-	"ori	$1, 0x400					\n"
-	"xori	$1, 0x400					\n"
-	"or	\\flags, $1					\n"
-	"mtc0	\\flags, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm						\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	"	.macro	arch_local_irq_enable				\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei							\n"
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1e						\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_enable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	"	.macro	arch_local_save_flags flags			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1					\n"
+#else
+	"	mfc0	\\flags, $12					\n"
+#endif
+	"	.set	pop						\n"
+	"	.endm						\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
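For the configurations that now take the out-of-line path, a rough sketch of one of the mips-atomic.c functions is shown below. The asm body mirrors the non-MIPSR2 arch_local_irq_save macro deleted above, and the preempt_{dis,en}able() wrapping follows the comment added in this hunk; everything else (SMTC handling, the exact file contents, the symbol export) is an assumption rather than part of this patch.

/*
 * Hedged sketch of the out-of-line arch_local_irq_save() referenced by
 * the "#else" branch above.  Reconstructed from the removed non-R2 asm
 * macro; the real mips-atomic.c may differ.
 */
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <asm/hazards.h>

unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();		/* per the comment added in the header */
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
	"	mfc0	%[flags], $12					\n"
	"	ori	$1, %[flags], 0x1f				\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");
	preempt_enable();

	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);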
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e9175..18806a52061 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)