Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                        |   2
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c |   1
-rw-r--r--  arch/mips/fw/arc/misc.c                  |   1
-rw-r--r--  arch/mips/include/asm/Kbuild             |   1
-rw-r--r--  arch/mips/include/asm/bitops.h           | 128
-rw-r--r--  arch/mips/include/asm/compat.h           |   2
-rw-r--r--  arch/mips/include/asm/delay.h            |   6
-rw-r--r--  arch/mips/include/asm/io.h               |   1
-rw-r--r--  arch/mips/include/asm/irqflags.h         | 207
-rw-r--r--  arch/mips/include/asm/pgtable-64.h       |  15
-rw-r--r--  arch/mips/include/asm/processor.h        |   2
-rw-r--r--  arch/mips/include/asm/ptrace.h           |   6
-rw-r--r--  arch/mips/include/asm/thread_info.h      |   6
-rw-r--r--  arch/mips/include/asm/unistd.h           |   1
-rw-r--r--  arch/mips/jz4740/serial.h                |   3
-rw-r--r--  arch/mips/kernel/entry.S                 |   6
-rw-r--r--  arch/mips/kernel/linux32.c               |  21
-rw-r--r--  arch/mips/kernel/mips_ksyms.c            |   2
-rw-r--r--  arch/mips/kernel/process.c               |  62
-rw-r--r--  arch/mips/kernel/scall64-n32.S           |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S           |   2
-rw-r--r--  arch/mips/kernel/setup.c                 |  26
-rw-r--r--  arch/mips/kernel/smp-cmp.c               |   2
-rw-r--r--  arch/mips/kernel/syscall.c               |  53
-rw-r--r--  arch/mips/lib/Makefile                   |   5
-rw-r--r--  arch/mips/lib/bitops.c                   | 179
-rw-r--r--  arch/mips/lib/delay.c                    |   6
-rw-r--r--  arch/mips/lib/dump_tlb.c                 |   4
-rw-r--r--  arch/mips/lib/mips-atomic.c              | 176
-rw-r--r--  arch/mips/mm/tlb-r4k.c                   |   1
-rw-r--r--  arch/mips/mm/tlbex.c                     |  56
-rw-r--r--  arch/mips/mti-malta/malta-platform.c     |   3
32 files changed, 606 insertions(+), 382 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dba9390d37cf..4183e62f178c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -40,6 +40,8 @@ config MIPS
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_REL
 	select MODULES_USE_ELF_RELA if 64BIT
+	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 menu "Machine selection"
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index d38246e33ddb..9f883bf76953 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index 7cf80ca2c1d2..f9f5307434c2 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -11,6 +11,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #include <asm/bcache.h>
 
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index e69de29bb2d1..533053d12ced 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -0,0 +1 @@
+# MIPS headers
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b45..46ac73abd5ee 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+			      volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+			       volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
92 : "=&r" (temp), "+m" (*m) 109 : "=&r" (temp), "+m" (*m)
93 : "ir" (1UL << bit)); 110 : "ir" (1UL << bit));
94 } while (unlikely(!temp)); 111 } while (unlikely(!temp));
95 } else { 112 } else
96 volatile unsigned long *a = addr; 113 __mips_set_bit(nr, addr);
97 unsigned long mask;
98 unsigned long flags;
99
100 a += nr >> SZLONG_LOG;
101 mask = 1UL << bit;
102 raw_local_irq_save(flags);
103 *a |= mask;
104 raw_local_irq_restore(flags);
105 }
106} 114}
107 115
108/* 116/*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
153 : "=&r" (temp), "+m" (*m) 161 : "=&r" (temp), "+m" (*m)
154 : "ir" (~(1UL << bit))); 162 : "ir" (~(1UL << bit)));
155 } while (unlikely(!temp)); 163 } while (unlikely(!temp));
156 } else { 164 } else
157 volatile unsigned long *a = addr; 165 __mips_clear_bit(nr, addr);
158 unsigned long mask;
159 unsigned long flags;
160
161 a += nr >> SZLONG_LOG;
162 mask = 1UL << bit;
163 raw_local_irq_save(flags);
164 *a &= ~mask;
165 raw_local_irq_restore(flags);
166 }
167} 166}
168 167
169/* 168/*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
220 : "=&r" (temp), "+m" (*m) 219 : "=&r" (temp), "+m" (*m)
221 : "ir" (1UL << bit)); 220 : "ir" (1UL << bit));
222 } while (unlikely(!temp)); 221 } while (unlikely(!temp));
223 } else { 222 } else
224 volatile unsigned long *a = addr; 223 __mips_change_bit(nr, addr);
225 unsigned long mask;
226 unsigned long flags;
227
228 a += nr >> SZLONG_LOG;
229 mask = 1UL << bit;
230 raw_local_irq_save(flags);
231 *a ^= mask;
232 raw_local_irq_restore(flags);
233 }
234} 224}
235 225
236/* 226/*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 
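[Annotation — not part of the patch. The bitops.h hunks above replace each
open-coded IRQ-masking fallback with a call to an out-of-line __mips_* helper
(added in arch/mips/lib/bitops.c further down). A minimal userland sketch of
the word/mask arithmetic those helpers share is below; LOG and MASK stand in
for the kernel's SZLONG_LOG and SZLONG_MASK, and since userland has no
raw_local_irq_save()/restore(), a real program would need a lock around the
read-modify-write.]

	#include <stdio.h>

	#define LOG  6		/* log2(bits per unsigned long), 64-bit assumed */
	#define MASK 63UL

	static int test_and_set(unsigned long nr, volatile unsigned long *addr)
	{
		volatile unsigned long *a = addr + (nr >> LOG);	/* word holding the bit */
		unsigned long mask = 1UL << (nr & MASK);	/* bit within that word */
		unsigned long res = *a & mask;			/* old value */

		/* kernel brackets these two steps with IRQ masking */
		*a |= mask;
		return res != 0;
	}

	int main(void)
	{
		unsigned long map[2] = { 0, 0 };

		printf("%d\n", test_and_set(70, map));	/* 0: bit was clear */
		printf("%d\n", test_and_set(70, map));	/* 1: bit already set */
		printf("%#lx\n", map[1]);		/* 0x40: bit 6 of word 1 */
		return 0;
	}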
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd4..3c5d1464b7bd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h
index e7cd78277c23..dc0a5f77a35c 100644
--- a/arch/mips/include/asm/delay.h
+++ b/arch/mips/include/asm/delay.h
@@ -13,9 +13,9 @@
 
 #include <linux/param.h>
 
-extern void __delay(unsigned int loops);
-extern void __ndelay(unsigned int ns);
-extern void __udelay(unsigned int us);
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
 
 #define ndelay(ns) __ndelay(ns)
 #define udelay(us) __udelay(us)
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c7..ff2e0345e013 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909c..9f3384c789d7 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable			\n"
-	"	.set	push					\n"
-	"	.set	reorder					\n"
-	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400				\n"
-	"	xori	$1, 0x400				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1e					\n"
-	"	mtc0	$1,$12					\n"
-#endif
-	"	irq_enable_hazard				\n"
-	"	.set	pop					\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable			\n"
 	"	.set	push					\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1				\n"
-	"	ori	$1, 0x400				\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1f					\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1,$12					\n"
-#endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
106 : "memory"); 36 : "memory");
107} 37}
108 38
109__asm__(
110 " .macro arch_local_save_flags flags \n"
111 " .set push \n"
112 " .set reorder \n"
113#ifdef CONFIG_MIPS_MT_SMTC
114 " mfc0 \\flags, $2, 1 \n"
115#else
116 " mfc0 \\flags, $12 \n"
117#endif
118 " .set pop \n"
119 " .endm \n");
120
121static inline unsigned long arch_local_save_flags(void)
122{
123 unsigned long flags;
124 asm volatile("arch_local_save_flags %0" : "=r" (flags));
125 return flags;
126}
127 39
128__asm__( 40__asm__(
129 " .macro arch_local_irq_save result \n" 41 " .macro arch_local_irq_save result \n"
130 " .set push \n" 42 " .set push \n"
131 " .set reorder \n" 43 " .set reorder \n"
132 " .set noat \n" 44 " .set noat \n"
133#ifdef CONFIG_MIPS_MT_SMTC
134 " mfc0 \\result, $2, 1 \n"
135 " ori $1, \\result, 0x400 \n"
136 " .set noreorder \n"
137 " mtc0 $1, $2, 1 \n"
138 " andi \\result, \\result, 0x400 \n"
139#elif defined(CONFIG_CPU_MIPSR2)
140 " di \\result \n" 45 " di \\result \n"
141 " andi \\result, 1 \n" 46 " andi \\result, 1 \n"
142#else
143 " mfc0 \\result, $12 \n"
144 " ori $1, \\result, 0x1f \n"
145 " xori $1, 0x1f \n"
146 " .set noreorder \n"
147 " mtc0 $1, $12 \n"
148#endif
149 " irq_disable_hazard \n" 47 " irq_disable_hazard \n"
150 " .set pop \n" 48 " .set pop \n"
151 " .endm \n"); 49 " .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags		\n"
 	"	.set	push					\n"
 	"	.set	noreorder				\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1					\n"
-	"andi	\\flags, 0x400					\n"
-	"ori	$1, 0x400					\n"
-	"xori	$1, 0x400					\n"
-	"or	\\flags, $1					\n"
-	"mtc0	\\flags, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f				\n"
 	"	di						\n"
 	"	ei						\n"
 	"1:							\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12					\n"
 	"	ins	$1, \\flags, 0, 1			\n"
 	"	mtc0	$1, $12					\n"
-#else
-	"	mfc0	$1, $12					\n"
-	"	andi	\\flags, 1				\n"
-	"	ori	$1, 0x1f				\n"
-	"	xori	$1, 0x1f				\n"
-	"	or	\\flags, $1				\n"
-	"	mtc0	\\flags, $12				\n"
 #endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
232 : "0" (flags) 106 : "0" (flags)
233 : "memory"); 107 : "memory");
234} 108}
109#else
110/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
111void arch_local_irq_disable(void);
112unsigned long arch_local_irq_save(void);
113void arch_local_irq_restore(unsigned long flags);
114void __arch_local_irq_restore(unsigned long flags);
115#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
116
117
118__asm__(
119 " .macro arch_local_irq_enable \n"
120 " .set push \n"
121 " .set reorder \n"
122 " .set noat \n"
123#ifdef CONFIG_MIPS_MT_SMTC
124 " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
125 " ori $1, 0x400 \n"
126 " xori $1, 0x400 \n"
127 " mtc0 $1, $2, 1 \n"
128#elif defined(CONFIG_CPU_MIPSR2)
129 " ei \n"
130#else
131 " mfc0 $1,$12 \n"
132 " ori $1,0x1f \n"
133 " xori $1,0x1e \n"
134 " mtc0 $1,$12 \n"
135#endif
136 " irq_enable_hazard \n"
137 " .set pop \n"
138 " .endm");
139
140extern void smtc_ipi_replay(void);
141
142static inline void arch_local_irq_enable(void)
143{
144#ifdef CONFIG_MIPS_MT_SMTC
145 /*
146 * SMTC kernel needs to do a software replay of queued
147 * IPIs, at the cost of call overhead on each local_irq_enable()
148 */
149 smtc_ipi_replay();
150#endif
151 __asm__ __volatile__(
152 "arch_local_irq_enable"
153 : /* no outputs */
154 : /* no inputs */
155 : "memory");
156}
157
158
159__asm__(
160 " .macro arch_local_save_flags flags \n"
161 " .set push \n"
162 " .set reorder \n"
163#ifdef CONFIG_MIPS_MT_SMTC
164 " mfc0 \\flags, $2, 1 \n"
165#else
166 " mfc0 \\flags, $12 \n"
167#endif
168 " .set pop \n"
169 " .endm \n");
170
171static inline unsigned long arch_local_save_flags(void)
172{
173 unsigned long flags;
174 asm volatile("arch_local_save_flags %0" : "=r" (flags));
175 return flags;
176}
177
235 178
236static inline int arch_irqs_disabled_flags(unsigned long flags) 179static inline int arch_irqs_disabled_flags(unsigned long flags)
237{ 180{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
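[Annotation — not part of the patch. After this restructure, only MIPSR2
kernels without SMTC keep the inline di/ei-based save/restore; everything
else moves out of line into arch/mips/lib/mips-atomic.c. A toy userland model
of the pair that stays inline — "di %0; andi %0, 1" returns the old IE bit,
and the "ins" restore variant re-inserts that single bit — with a plain
variable standing in for the CP0 Status register:]

	#include <stdio.h>

	static unsigned long status = 0x1;	/* pretend CP0 Status, IE set */

	static unsigned long irq_save(void)
	{
		unsigned long flags = status & 1;	/* andi %0, 1 */
		status &= ~1UL;				/* di: clear IE */
		return flags;
	}

	static void irq_restore(unsigned long flags)
	{
		status = (status & ~1UL) | (flags & 1);	/* ins $1, \flags, 0, 1 */
	}

	int main(void)
	{
		unsigned long f = irq_save();
		printf("saved IE=%lu, status now %#lx\n", f, status);
		irq_restore(f);
		printf("status restored to %#lx\n", status);
		return 0;
	}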
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c26e18250079..f5b521d5a67d 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -9,6 +9,7 @@
 #ifndef _ASM_PGTABLE_64_H
 #define _ASM_PGTABLE_64_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
 
 #include <asm/addrspace.h>
@@ -172,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
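[Annotation — not part of the patch. The old pmd_bad() macro flagged any pmd
with non-page-aligned low bits; a huge-page leaf entry legitimately carries
flag bits there, so the new inline exempts it. A toy model with made-up bit
values — only the shape of the test matches the patch:]

	#include <stdio.h>

	#define PAGE_MASK	(~0xfffUL)	/* 4K pages assumed */
	#define _PAGE_HUGE	0x10UL		/* illustrative flag bit */

	static int pmd_bad(unsigned long pmdval, int hugetlb_enabled)
	{
		if (hugetlb_enabled && (pmdval & _PAGE_HUGE))
			return 0;	/* huge-page leaf: low bits are flags */
		return (pmdval & ~PAGE_MASK) != 0;	/* table ptr must be aligned */
	}

	int main(void)
	{
		printf("%d\n", pmd_bad(0x1234000, 1));	/* 0: aligned table pointer */
		printf("%d\n", pmd_bad(0x1234010, 0));	/* 1: stray low bits */
		printf("%d\n", pmd_bad(0x1234010, 1));	/* 0: same bits, huge page */
		return 0;
	}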
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 5e33fabe354d..d28c41e0887c 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -310,8 +310,6 @@ struct task_struct;
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
-extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 /*
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 4f5da948a777..cec5e125f7e4 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -61,4 +61,10 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
 		die(str, regs);
 }
 
+#define current_pt_regs()						\
+({									\
+	unsigned long sp = (unsigned long)__builtin_frame_address(0);	\
+	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;	\
+})
+
 #endif /* _ASM_PTRACE_H */
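[Annotation — not part of the patch. The current_pt_regs() macro rounds the
current stack pointer up to the end of the THREAD_SIZE-aligned kernel stack,
steps back over the 32-byte pad, then back over one struct pt_regs. A userland
demonstration of the same arithmetic; PT_REGS_SIZE is a stand-in value, since
the real macro gets it from pointer arithmetic on struct pt_regs:]

	#include <stdio.h>

	#define THREAD_SIZE	0x4000UL	/* 16K kernel stack, power of two */
	#define PT_REGS_SIZE	0x150UL		/* illustrative only */

	int main(void)
	{
		unsigned long sp = 0x80cd3a60UL;	/* address inside the stack */
		unsigned long top = (sp | (THREAD_SIZE - 1)) + 1;	/* stack end */
		unsigned long regs = top - 32 - PT_REGS_SIZE;	/* pad, then pt_regs */

		printf("stack top %#lx, pt_regs at %#lx\n", top, regs);
		return 0;
	}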
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e91754..18806a52061c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 9e47cc11aa26..b306e2081cad 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -20,6 +20,7 @@
 #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_EXECVE
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/mips/jz4740/serial.h b/arch/mips/jz4740/serial.h
index b9fe3ade0289..8eb715bb1ea8 100644
--- a/arch/mips/jz4740/serial.h
+++ b/arch/mips/jz4740/serial.h
@@ -14,6 +14,9 @@
  */
 
 #ifndef __MIPS_JZ4740_SERIAL_H__
+#define __MIPS_JZ4740_SERIAL_H__
+
+struct uart_port;
 
 void jz4740_serial_out(struct uart_port *p, int offset, int value);
 
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index a6c133212003..3320cb4ac1d4 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -65,6 +65,12 @@ need_resched:
 	b	need_resched
 #endif
 
+FEXPORT(ret_from_kernel_thread)
+	jal	schedule_tail		# a0 = struct task_struct *prev
+	move	a0, s1
+	jal	s0
+	j	syscall_exit
+
 FEXPORT(ret_from_fork)
 	jal	schedule_tail		# a0 = struct task_struct *prev
 
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 3a21acedf882..8796dbc7e358 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -3,7 +3,6 @@
  *
  * Copyright (C) 2000 Silicon Graphics, Inc.
  * Written by Ulf Carlsson (ulfc@engr.sgi.com)
- * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com)
  */
 #include <linux/compiler.h>
 #include <linux/mm.h>
@@ -77,26 +76,6 @@ out:
 	return error;
 }
 
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
-{
-	int error;
-	struct filename *filename;
-
-	filename = getname(compat_ptr(regs.regs[4]));
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = compat_do_execve(filename->name, compat_ptr(regs.regs[5]),
-				 compat_ptr(regs.regs[6]), &regs);
-	putname(filename);
-
-out:
-	return error;
-}
-
 #define RLIM_INFINITY32 0x7fffffff
 #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
 
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 3fc1691110dc..2d9304c2b54c 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 
-EXPORT_SYMBOL(kernel_thread);
-
 /*
  * Functions that operate on entire pages.  Mostly used by memory management.
  */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e9a5fd7277f4..d13720ac656f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -84,6 +84,7 @@ void __noreturn cpu_idle(void)
 }
 
 asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
 
 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 {
@@ -113,7 +114,7 @@ void flush_thread(void)
 }
 
 int copy_thread(unsigned long clone_flags, unsigned long usp,
-	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+	unsigned long arg, struct task_struct *p, struct pt_regs *regs)
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
@@ -136,19 +137,30 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childregs = (struct pt_regs *) childksp - 1;
 	/* Put the stack after the struct pt_regs. */
 	childksp = (unsigned long) childregs;
+	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		unsigned long status = p->thread.cp0_status;
+		memset(childregs, 0, sizeof(struct pt_regs));
+		ti->addr_limit = KERNEL_DS;
+		p->thread.reg16 = usp; /* fn */
+		p->thread.reg17 = arg;
+		p->thread.reg29 = childksp;
+		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+			 ((status & (ST0_KUC | ST0_IEC)) << 2);
+#else
+		status |= ST0_EXL;
+#endif
+		childregs->cp0_status = status;
+		return 0;
+	}
 	*childregs = *regs;
 	childregs->regs[7] = 0;	/* Clear error flag */
-
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
+	childregs->regs[29] = usp;
+	ti->addr_limit = USER_DS;
 
-	if (childregs->cp0_status & ST0_CU0) {
-		childregs->regs[28] = (unsigned long) ti;
-		childregs->regs[29] = childksp;
-		ti->addr_limit = KERNEL_DS;
-	} else {
-		childregs->regs[29] = usp;
-		ti->addr_limit = USER_DS;
-	}
 	p->thread.reg29 = (unsigned long) childregs;
 	p->thread.reg31 = (unsigned long) ret_from_fork;
 
@@ -156,7 +168,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	 * New tasks lose permission to use the fpu. This accelerates context
 	 * switching for most programs since they don't use the fpu.
 	 */
-	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
 
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -222,35 +233,6 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
 }
 
 /*
- * Create a kernel thread
- */
-static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
-{
-	do_exit(fn(arg));
-}
-
-long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-
-	regs.regs[4] = (unsigned long) arg;
-	regs.regs[5] = (unsigned long) fn;
-	regs.cp0_epc = (unsigned long) kernel_thread_helper;
-	regs.cp0_status = read_c0_status();
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-	regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
-			  ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
-#else
-	regs.cp0_status |= ST0_EXL;
-#endif
-
-	/* Ok, create the new process.. */
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-
-/*
  *
  */
 struct mips_frame_info {
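[Annotation — not part of the patch. With GENERIC_KERNEL_THREAD, kernel
threads are set up in copy_thread(): the thread function lands in saved
register s0 (reg16) and its argument in s1 (reg17), and the new
ret_from_kernel_thread stub in entry.S does "move a0, s1; jal s0" after
schedule_tail(). A userland analogue of that hand-off, with a struct standing
in for the saved register set:]

	#include <stdio.h>

	struct thread {			/* stand-in for the saved registers */
		int (*reg16)(void *);	/* s0: thread function */
		void *reg17;		/* s1: its argument */
	};

	static int worker(void *arg)
	{
		printf("kernel thread running with arg %s\n", (const char *)arg);
		return 0;
	}

	static void ret_from_kernel_thread(struct thread *t)
	{
		t->reg16(t->reg17);	/* what the asm stub does */
	}

	int main(void)
	{
		struct thread t = { .reg16 = worker, .reg17 = "hello" };
		ret_from_kernel_thread(&t);
		return 0;
	}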
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f6ba8381ee01..d27ca340d46d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -167,7 +167,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_getsockopt
 	PTR	sys_clone			/* 6055 */
 	PTR	sys_fork
-	PTR	sys32_execve
+	PTR	compat_sys_execve
 	PTR	sys_exit
 	PTR	compat_sys_wait4
 	PTR	sys_kill			/* 6060 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 53c2d7245764..9601be6afa3d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -203,7 +203,7 @@ sys_call_table:
 	PTR	sys_creat
 	PTR	sys_link
 	PTR	sys_unlink			/* 4010 */
-	PTR	sys32_execve
+	PTR	compat_sys_execve
 	PTR	sys_chdir
 	PTR	compat_sys_time
 	PTR	sys_mknod
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a53f8ec37aac..290dc6a1d7a3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", };
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
 	int x = boot_mem_map.nr_map;
-	struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+	int i;
 
 	/* Sanity check */
 	if (start + size < start) {
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
 	}
 
 	/*
-	 * Try to merge with previous entry if any.  This is far less than
-	 * perfect but is sufficient for most real world cases.
+	 * Try to merge with existing entry, if any.
 	 */
-	if (x && prev->addr + prev->size == start && prev->type == type) {
-		prev->size += size;
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+		unsigned long top;
+
+		if (entry->type != type)
+			continue;
+
+		if (start + size < entry->addr)
+			continue; /* no overlap */
+
+		if (entry->addr + entry->size < start)
+			continue; /* no overlap */
+
+		top = max(entry->addr + entry->size, start + size);
+		entry->addr = min(entry->addr, start);
+		entry->size = top - entry->addr;
+
 		return;
 	}
 
-	if (x == BOOT_MEM_MAP_MAX) {
+	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
 		pr_err("Ooops! Too many entries in the memory map!\n");
 		return;
 	}
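[Annotation — not part of the patch. Where the old code only merged a new
range with the immediately previous entry, the loop above coalesces with any
existing entry of the same type that the new range touches or overlaps. A
standalone userland sketch of that interval merge:]

	#include <stdio.h>

	struct region { unsigned long addr, size; };

	static unsigned long max(unsigned long a, unsigned long b) { return a > b ? a : b; }
	static unsigned long min(unsigned long a, unsigned long b) { return a < b ? a : b; }

	/* Returns 1 and widens *e if [start, start+size) was merged into it. */
	static int try_merge(struct region *e, unsigned long start, unsigned long size)
	{
		if (start + size < e->addr || e->addr + e->size < start)
			return 0;			/* disjoint: no merge */
		unsigned long top = max(e->addr + e->size, start + size);
		e->addr = min(e->addr, start);
		e->size = top - e->addr;
		return 1;
	}

	int main(void)
	{
		struct region e = { 0x1000, 0x1000 };	/* [0x1000, 0x2000) */
		try_merge(&e, 0x1800, 0x1000);		/* overlaps: -> [0x1000, 0x2800) */
		printf("addr=%#lx size=%#lx\n", e.addr, e.size);
		return 0;
	}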
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index afc379ca3753..06cd0c610f44 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -97,7 +97,7 @@ static void cmp_init_secondary(void)
 
 	/* Enable per-cpu interrupts: platform specific */
 
-	c->core = (read_c0_ebase() >> 1) & 0xff;
+	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
 #endif
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 2bd561bc05ae..c611e2df7767 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -127,28 +127,6 @@ _sys_clone(nabi_no_regargs struct pt_regs regs)
 		 parent_tidptr, child_tidptr);
 }
 
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
-{
-	int error;
-	struct filename *filename;
-
-	filename = getname((const char __user *) (long)regs.regs[4]);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = do_execve(filename->name,
-			  (const char __user *const __user *) (long)regs.regs[5],
-			  (const char __user *const __user *) (long)regs.regs[6],
-			  &regs);
-	putname(filename);
-
-out:
-	return error;
-}
-
 SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
 {
 	struct thread_info *ti = task_thread_info(current);
@@ -313,34 +291,3 @@ asmlinkage void bad_stack(void)
 {
 	do_exit(SIGSEGV);
 }
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	register unsigned long __a0 asm("$4") = (unsigned long) filename;
-	register unsigned long __a1 asm("$5") = (unsigned long) argv;
-	register unsigned long __a2 asm("$6") = (unsigned long) envp;
-	register unsigned long __a3 asm("$7");
-	unsigned long __v0;
-
-	__asm__ volatile ("					\n"
-	"	.set	noreorder				\n"
-	"	li	$2, %5		# __NR_execve		\n"
-	"	syscall						\n"
-	"	move	%0, $2					\n"
-	"	.set	reorder					\n"
-	: "=&r" (__v0), "=r" (__a3)
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
-	  "memory");
-
-	if (__a3 == 0)
-		return __v0;
-
-	return -__v0;
-}
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e841c73..eeddc58802e1 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 000000000000..239a9c957b02
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 5995969e8c42..dc81ca8dc0dd 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -15,13 +15,17 @@
 #include <asm/compiler.h>
 #include <asm/war.h>
 
-inline void __delay(unsigned int loops)
+void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ (
 	"	.set	noreorder				\n"
 	"	.align	3					\n"
 	"1:	bnez	%0, 1b					\n"
+#if __SIZEOF_LONG__ == 4
 	"	subu	%0, 1					\n"
+#else
+	"	dsubu	%0, 1					\n"
+#endif
 	"	.set	reorder					\n"
 	: "=r" (loops)
 	: "0" (loops));
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 3f69725556af..a99c1d3fc567 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -50,8 +50,9 @@ static void dump_tlb(int first, int last)
 {
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1;
-	unsigned int s_index, pagemask, c0, c1, i;
+	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
 
+	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
 	asid = s_entryhi & 0xff;
@@ -103,6 +104,7 @@ static void dump_tlb(int first, int last)
 
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
+	write_c0_pagemask(s_pagemask);
 }
 
 void dump_tlb_all(void)
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000000..cd160be3ce4d
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable			\n"
+	"	.set	push					\n"
+	"	.set	noat					\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1				\n"
+	"	ori	$1, 0x400				\n"
+	"	.set	noreorder				\n"
+	"	mtc0	$1, $2, 1				\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12					\n"
+	"	ori	$1,0x1f					\n"
+	"	xori	$1,0x1f					\n"
+	"	.set	noreorder				\n"
+	"	mtc0	$1,$12					\n"
+#endif
+	"	irq_disable_hazard				\n"
+	"	.set	pop					\n"
+	"	.endm						\n");
+
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result		\n"
+	"	.set	push					\n"
+	"	.set	reorder					\n"
+	"	.set	noat					\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1				\n"
+	"	ori	$1, \\result, 0x400			\n"
+	"	.set	noreorder				\n"
+	"	mtc0	$1, $2, 1				\n"
+	"	andi	\\result, \\result, 0x400		\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12				\n"
+	"	ori	$1, \\result, 0x1f			\n"
+	"	xori	$1, 0x1f				\n"
+	"	.set	noreorder				\n"
+	"	mtc0	$1, $12					\n"
+#endif
+	"	irq_disable_hazard				\n"
+	"	.set	pop					\n"
+	"	.endm						\n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags		\n"
+	"	.set	push					\n"
+	"	.set	noreorder				\n"
+	"	.set	noat					\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1					\n"
+	"andi	\\flags, 0x400					\n"
+	"ori	$1, 0x400					\n"
+	"xori	$1, 0x400					\n"
+	"or	\\flags, $1					\n"
+	"mtc0	\\flags, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12					\n"
+	"	andi	\\flags, 1				\n"
+	"	ori	$1, 0x1f				\n"
+	"	xori	$1, 0x1f				\n"
+	"	or	\\flags, $1				\n"
+	"	mtc0	\\flags, $12				\n"
+#endif
+	"	irq_disable_hazard				\n"
+	"	.set	pop					\n"
+	"	.endm						\n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 87b9cfcc30ff..4b9b935a070e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
+		tlbw_use_hazard();
 		write_c0_pagemask(PM_DEFAULT_MASK);
 	} else
 #endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 658a520364ce..2833dcb67b5a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
 	label_leave,
 	label_vmalloc,
 	label_vmalloc_done,
-	label_tlbw_hazard,
-	label_split,
+	label_tlbw_hazard_0,
+	label_split = label_tlbw_hazard_0 + 8,
 	label_tlbl_goaround1,
 	label_tlbl_goaround2,
 	label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
 UASM_L_LA(_split)
 UASM_L_LA(_tlbl_goaround1)
 UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+		return;
+	default:
+		BUG();
+	}
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * For debug purposes.
  */
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		 * This branch uses up a mtc0 hazard nop slot and saves
 		 * two nops after the tlbw instruction.
 		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+		uasm_bgezl_hazard(p, r, hazard_instance);
 		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
+		uasm_bgezl_label(l, p, hazard_instance);
+		hazard_instance++;
 		uasm_i_nop(p);
 		break;
 
 	case CPU_R4600:
 	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5000A:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
 
+	case CPU_R5000:
+	case CPU_R5000A:
+	case CPU_NEVADA:
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		tlbw(p);
+		break;
+
 	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_NEVADA:
-		uasm_i_nop(p); /* QED specifies 2 nops hazard */
-		/*
-		 * This branch uses up a mtc0 hazard nop slot and saves
-		 * a nop after the tlbw instruction.
-		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
-		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
-		break;
-
 	case CPU_RM7000:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
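[Annotation — not part of the patch. The single shared label_tlbw_hazard
broke when build_tlb_write_entry() is expanded more than once, since each
expansion's bgezl needs its own branch target; the patch reserves eight label
ids (label_split = label_tlbw_hazard_0 + 8) and hands them out through a
running instance counter. A minimal model of that allocation scheme:]

	#include <stdio.h>

	enum label_id {
		label_tlbw_hazard_0,
		label_split = label_tlbw_hazard_0 + 8,	/* reserve 8 slots */
	};

	static int hazard_instance;

	int main(void)
	{
		for (int i = 0; i < 3; i++)	/* three expansions */
			printf("expansion %d uses label id %d\n",
			       i, label_tlbw_hazard_0 + hazard_instance++);
		printf("label_split is id %d\n", label_split);
		return 0;
	}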
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 80562b81f0f2..74732177851c 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)						\
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
 	SMC_PORT(0x2F8, 3),
 	{
 		.mapbase	= 0x1f000900,	/* The CBUS UART */
-		.irq		= MIPS_CPU_IRQ_BASE + 2,
+		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
 		.uartclk	= 3686400,	/* Twice the usual clk! */
 		.iotype		= UPIO_MEM32,
 		.flags		= CBUS_UART_FLAGS,