author     Herbert Xu <herbert@gondor.apana.org.au>  2013-09-06 22:53:35 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2013-09-06 22:53:35 -0400
commit     eeca9fad52fc4bfdf42c38bfcf383e932eb3e9d6 (patch)
tree       cc51c880459d41c0e8d7576405bef4c987bc7aa0 /arch/mips/kernel
parent     ff6f83fc9d44db09997937c3475d525a6866fbb4 (diff)
parent     b48a97be8e6c2afdba2f3b61fd88c3c7743fbd73 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Merge upstream tree in order to reinstate crct10dif.
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/asm-offsets.c    |   3
-rw-r--r--  arch/mips/kernel/bmips_vec.S      |   4
-rw-r--r--  arch/mips/kernel/branch.c         |   1
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c   |   2
-rw-r--r--  arch/mips/kernel/cevt-gic.c       |   2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c       |   2
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c    |   2
-rw-r--r--  arch/mips/kernel/cevt-smtc.c      |   2
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c     |   7
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |  26
-rw-r--r--  arch/mips/kernel/head.S           |  43
-rw-r--r--  arch/mips/kernel/irq-gic.c        |  15
-rw-r--r--  arch/mips/kernel/mcount.S         |   4
-rw-r--r--  arch/mips/kernel/octeon_switch.S  |  34
-rw-r--r--  arch/mips/kernel/proc.c           |   4
-rw-r--r--  arch/mips/kernel/process.c        |   9
-rw-r--r--  arch/mips/kernel/prom.c           |   2
-rw-r--r--  arch/mips/kernel/ptrace.c         |  12
-rw-r--r--  arch/mips/kernel/r2300_switch.S   |   7
-rw-r--r--  arch/mips/kernel/r4k_switch.S     |   6
-rw-r--r--  arch/mips/kernel/rtlx.c           |   1
-rw-r--r--  arch/mips/kernel/scall32-o32.S    |   2
-rw-r--r--  arch/mips/kernel/scall64-64.S     |   2
-rw-r--r--  arch/mips/kernel/scall64-n32.S    |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S    |   2
-rw-r--r--  arch/mips/kernel/signal.c         |   5
-rw-r--r--  arch/mips/kernel/smp-bmips.c      |  35
-rw-r--r--  arch/mips/kernel/smp-mt.c         |   6
-rw-r--r--  arch/mips/kernel/smp-up.c         |   6
-rw-r--r--  arch/mips/kernel/smp.c            |   6
-rw-r--r--  arch/mips/kernel/smtc.c           |   2
-rw-r--r--  arch/mips/kernel/spram.c          |  14
-rw-r--r--  arch/mips/kernel/sync-r4k.c       |  12
-rw-r--r--  arch/mips/kernel/traps.c          | 115
-rw-r--r--  arch/mips/kernel/unaligned.c      |   8
-rw-r--r--  arch/mips/kernel/watch.c          |  10
36 files changed, 225 insertions(+), 190 deletions(-)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0845091ba480..0c2e853c3db4 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -82,6 +82,9 @@ void output_task_defines(void)
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
 	OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_CC_STACKPROTECTOR)
+	OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
 	BLANK();
 }
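The new TASK_STACK_CANARY entry uses the standard asm-offsets mechanism: OFFSET() emits the byte offset of a task_struct member as an assembler-visible constant, which is what lets the *_switch.S hunks further down reference the per-task canary by name. A minimal, self-contained sketch of that mechanism (simplified; the real macros live in the kernel's kbuild headers, and task_struct_example is a hypothetical stand-in):

	/* asm-offsets sketch: the compiler emits "->SYM value" markers that the
	 * build then turns into #define lines an assembler file can include. */
	#include <stddef.h>

	struct task_struct_example {		/* hypothetical stand-in */
		unsigned long flags;
		unsigned long stack_canary;
	};

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

	void output_task_defines(void)
	{
		OFFSET(TASK_STACK_CANARY, task_struct_example, stack_canary);
	}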
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 64c4fd62cf08..f739aedcb509 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -28,8 +28,6 @@
 	.set	mips0
 	.endm

-	__CPUINIT
-
 /***********************************************************************
  * Alternate CPU1 startup vector for BMIPS4350
  *
@@ -216,8 +214,6 @@ END(bmips_smp_int_vec)
  * Certain CPUs support extending kseg0 to 1024MB.
  ***********************************************************************/

-	__CPUINIT
-
 LEAF(bmips_enable_xks01)

 #if defined(CONFIG_XKS01)
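Most of the mechanical churn in this merge is the treewide removal of the __CPUINIT/__cpuinit/__cpuinitdata annotations seen here and in the .c files below. Once CPU hotplug became the norm, those markers no longer let the CPU-init sections be discarded after boot, so they were reduced to no-ops and then deleted. Roughly (the historical definitions below are quoted from memory and should be treated as an approximation):

	/* What the removed annotations used to mean, approximately: */
	#define __cpuinit	__section(.cpuinit.text)
	#define __cpuinitdata	__section(.cpuinit.data)
	/* __CPUINIT was the assembler-side equivalent (.section .cpuinit.text). */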
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 46c2ad0703a0..4d78bf445a9c 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -467,5 +467,4 @@ unaligned:
 	printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
 	force_sig(SIGBUS, current);
 	return -EFAULT;
-
 }
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 15f618b40cf6..7976457184b1 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
 static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
 static DEFINE_PER_CPU(char [18], sibyte_hpt_name);

-void __cpuinit sb1480_clockevent_init(void)
+void sb1480_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 730eaf92c018..594cbbf16d62 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev)
 {
 }

-int __cpuinit gic_clockevent_init(void)
+int gic_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 02033eaf8825..50d3f5a8d6bb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -171,7 +171,7 @@ int c0_compare_int_usable(void)
 }

 #ifndef CONFIG_MIPS_MT_SMTC
-int __cpuinit r4k_clockevent_init(void)
+int r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 200f2778bf36..5ea6d6b1de15 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
 static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
 static DEFINE_PER_CPU(char [18], sibyte_hpt_name);

-void __cpuinit sb1250_clockevent_init(void)
+void sb1250_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int irq = K_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 9de5ed7ef1a3..b6cf0a60d896 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 }


-int __cpuinit smtc_clockevent_init(void)
+int smtc_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index de3c25ffd9f9..2d80b5f1aeae 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -6,6 +6,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#include <linux/context_tracking.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
@@ -167,12 +168,16 @@ static inline void check_mult_sh(void)
 	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
 }

-static volatile int daddi_ov __cpuinitdata;
+static volatile int daddi_ov;

 asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
 {
+	enum ctx_state prev_state;
+
+	prev_state = exception_enter();
 	daddi_ov = 1;
 	regs->cp0_epc += 4;
+	exception_exit(prev_state);
 }

 static inline void check_daddi(void)
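The do_daddi_ov() hunk shows the pattern this series applies to every MIPS exception entry point: bracket the handler with exception_enter()/exception_exit() so that, when CONFIG_CONTEXT_TRACKING is enabled, RCU and the tickless code know the CPU has left user mode; early returns inside the handler are converted to jumps to a label that runs exception_exit(). A hedged sketch of the general shape (not any specific handler):

	#include <linux/context_tracking.h>

	asmlinkage void do_example_exception(struct pt_regs *regs)
	{
		enum ctx_state prev_state;

		prev_state = exception_enter();	/* user -> kernel for context tracking */

		/* ... handle the exception ... */

		exception_exit(prev_state);	/* restore the previous context state */
	}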
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c6568bf4b1b0..4c6167a17875 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,7 +27,7 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>

-static int __cpuinitdata mips_fpu_disabled;
+static int mips_fpu_disabled;

 static int __init fpu_disable(char *s)
 {
@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s)

 __setup("nofpu", fpu_disable);

-int __cpuinitdata mips_dsp_disabled;
+int mips_dsp_disabled;

 static int __init dsp_disable(char *s)
 {
@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 #endif
 }

-static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 {
 	switch (isa) {
 	case MIPS_CPU_ISA_M64R2:
@@ -146,8 +146,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
 	case MIPS_CPU_ISA_IV:
 		c->isa_level |= MIPS_CPU_ISA_IV;
 	case MIPS_CPU_ISA_III:
-		c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II |
-			MIPS_CPU_ISA_III;
+		c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
 		break;

 	case MIPS_CPU_ISA_M32R2:
@@ -156,13 +155,11 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
 		c->isa_level |= MIPS_CPU_ISA_M32R1;
 	case MIPS_CPU_ISA_II:
 		c->isa_level |= MIPS_CPU_ISA_II;
-	case MIPS_CPU_ISA_I:
-		c->isa_level |= MIPS_CPU_ISA_I;
 		break;
 	}
 }

-static char unknown_isa[] __cpuinitdata = KERN_ERR \
+static char unknown_isa[] = KERN_ERR \
 	"Unsupported ISA type, c0.config0: %d.";

 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -272,9 +269,6 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_ULRI;
 	if (config3 & MIPS_CONF3_ISA)
 		c->options |= MIPS_CPU_MICROMIPS;
-#ifdef CONFIG_CPU_MICROMIPS
-	write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
-#endif
 	if (config3 & MIPS_CONF3_VZ)
 		c->ases |= MIPS_ASE_VZ;

@@ -296,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 	return config4 & MIPS_CONF_M;
 }

-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
+static void decode_configs(struct cpuinfo_mips *c)
 {
 	int ok;

@@ -332,7 +326,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 	case PRID_IMP_R2000:
 		c->cputype = CPU_R2000;
 		__cpu_name[cpu] = "R2000";
-		set_isa(c, MIPS_CPU_ISA_I);
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
 			     MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
@@ -352,7 +345,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			c->cputype = CPU_R3000;
 			__cpu_name[cpu] = "R3000";
 		}
-		set_isa(c, MIPS_CPU_ISA_I);
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
 			     MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
@@ -455,7 +447,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		break;
 #endif
 	case PRID_IMP_TX39:
-		set_isa(c, MIPS_CPU_ISA_I);
 		c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;

 		if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -959,6 +950,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
 		set_isa(c, MIPS_CPU_ISA_M64R1);
 		c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
 	}
+	c->kscratch_mask = 0xf;
 }

 #ifdef CONFIG_64BIT
@@ -970,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit);
 const char *__cpu_name[NR_CPUS];
 const char *__elf_platform;

-__cpuinit void cpu_probe(void)
+void cpu_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int cpu = smp_processor_id();
@@ -1055,7 +1047,7 @@ __cpuinit void cpu_probe(void)
 #endif
 }

-__cpuinit void cpu_report(void)
+void cpu_report(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;

diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index c61cdaed2b1d..7b6a5b3e3acf 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -28,45 +28,6 @@
 #include <kernel-entry-init.h>

 	/*
-	 * inputs are the text nasid in t1, data nasid in t2.
-	 */
-	.macro	MAPPED_KERNEL_SETUP_TLB
-#ifdef CONFIG_MAPPED_KERNEL
-	/*
-	 * This needs to read the nasid - assume 0 for now.
-	 * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
-	 * 0+DVG in tlblo_1.
-	 */
-	dli	t0, 0xffffffffc0000000
-	dmtc0	t0, CP0_ENTRYHI
-	li	t0, 0x1c000		# Offset of text into node memory
-	dsll	t1, NASID_SHFT		# Shift text nasid into place
-	dsll	t2, NASID_SHFT		# Same for data nasid
-	or	t1, t1, t0		# Physical load address of kernel text
-	or	t2, t2, t0		# Physical load address of kernel data
-	dsrl	t1, 12			# 4K pfn
-	dsrl	t2, 12			# 4K pfn
-	dsll	t1, 6			# Get pfn into place
-	dsll	t2, 6			# Get pfn into place
-	li	t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6)
-	or	t0, t0, t1
-	mtc0	t0, CP0_ENTRYLO0	# physaddr, VG, cach exlwr
-	li	t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6)
-	or	t0, t0, t2
-	mtc0	t0, CP0_ENTRYLO1	# physaddr, DVG, cach exlwr
-	li	t0, 0x1ffe000		# MAPPED_KERN_TLBMASK, TLBPGMASK_16M
-	mtc0	t0, CP0_PAGEMASK
-	li	t0, 0			# KMAP_INX
-	mtc0	t0, CP0_INDEX
-	li	t0, 1
-	mtc0	t0, CP0_WIRED
-	tlbwi
-#else
-	mtc0	zero, CP0_WIRED
-#endif
-	.endm
-
-	/*
 	 * For the moment disable interrupts, mark the kernel mode and
 	 * set ST0_KX so that the CPU does not spit fire when using
 	 * 64-bit addresses. A full initialization of the CPU's status
@@ -197,8 +158,6 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 	j	start_kernel
 	END(kernel_entry)

-	__CPUINIT
-
 #ifdef CONFIG_SMP
 /*
  * SMP slave cpus entry point. Board specific code for bootstrap calls this
@@ -227,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp)
 	j	start_secondary
 	END(smp_bootstrap)
 #endif /* CONFIG_SMP */
-
-	__FINIT
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index c01b307317a9..5b5ddb231f26 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,

 	/* Assumption : cpumask refers to a single CPU */
 	spin_lock_irqsave(&gic_lock, flags);
-	for (;;) {
-		/* Re-route this IRQ */
-		GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

-		/* Update the pcpu_masks */
-		for (i = 0; i < NR_CPUS; i++)
-			clear_bit(irq, pcpu_masks[i].pcpu_mask);
-		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+	/* Re-route this IRQ */
+	GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+	/* Update the pcpu_masks */
+	for (i = 0; i < NR_CPUS; i++)
+		clear_bit(irq, pcpu_masks[i].pcpu_mask);
+	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

-	}
 	cpumask_copy(d->affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);

diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 33d067148e61..a03e93c4a946 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -168,15 +168,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
 #endif

 	/* arg3: Get frame pointer of current stack */
-#ifdef CONFIG_FRAME_POINTER
-	move	a2, fp
-#else /* ! CONFIG_FRAME_POINTER */
 #ifdef CONFIG_64BIT
 	PTR_LA	a2, PT_SIZE(sp)
 #else
 	PTR_LA	a2, (PT_SIZE+8)(sp)
 #endif
-#endif

 	jal	prepare_ftrace_return
 	nop
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 0e23343eb0a9..4204d76af854 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -40,33 +40,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)

-	/* check if we need to save COP2 registers */
-	PTR_L	t2, TASK_THREAD_INFO(a0)
-	LONG_L	t0, ST_OFF(t2)
-	bbit0	t0, 30, 1f
-
-	/* Disable COP2 in the stored process state */
-	li	t1, ST0_CU2
-	xor	t0, t1
-	LONG_S	t0, ST_OFF(t2)
-
-	/* Enable COP2 so we can save it */
-	mfc0	t0, CP0_STATUS
-	or	t0, t1
-	mtc0	t0, CP0_STATUS
-
-	/* Save COP2 */
-	daddu	a0, THREAD_CP2
-	jal	octeon_cop2_save
-	dsubu	a0, THREAD_CP2
-
-	/* Disable COP2 now that we are done */
-	mfc0	t0, CP0_STATUS
-	li	t1, ST0_CU2
-	xor	t0, t1
-	mtc0	t0, CP0_STATUS
-
-1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 	/* Check if we need to store CVMSEG state */
 	mfc0	t0, $11,7	/* CvmMemCtl */
@@ -98,6 +71,13 @@
 	mtc0	t0, $11,7	/* CvmMemCtl */
 #endif
 3:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	PTR_L	t8, __stack_chk_guard
+	LONG_L	t9, TASK_STACK_CANARY(a1)
+	LONG_S	t9, 0(t8)
+#endif
+
 	/*
 	 * The order of restoring the registers takes care of the race
 	 * updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index acb34373679e..8c58d8a84bf3 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -66,9 +66,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "]\n");
 	}
 	if (cpu_has_mips_r) {
-		seq_printf(m, "isa\t\t\t:");
-		if (cpu_has_mips_1)
-			seq_printf(m, "%s", " mips1");
+		seq_printf(m, "isa\t\t\t: mips1");
 		if (cpu_has_mips_2)
 			seq_printf(m, "%s", " mips2");
 		if (cpu_has_mips_3)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c6a041d9d05d..ddc76103e78c 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -201,9 +201,12 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
 	return 1;
 }

-/*
- *
- */
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 struct mips_frame_info {
 	void *func;
 	unsigned long func_size;
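With CONFIG_CC_STACKPROTECTOR the compiler verifies each protected stack frame against the global __stack_chk_guard defined here. MIPS keeps a single global guard, so the UP (!SMP) switch paths patched in octeon_switch.S, r2300_switch.S and r4k_switch.S copy the incoming task's stack_canary into it on every context switch. A rough C rendering of what those three assembly hunks do (illustrative only, not the kernel's actual helper):

	extern unsigned long __stack_chk_guard;

	static inline void switch_stack_canary(struct task_struct *next)
	{
	#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
		/* One global guard on UP: load the next task's canary into it. */
		__stack_chk_guard = next->stack_canary;
	#endif
	}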
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5712bb532245..7e954042f252 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -30,7 +30,7 @@ __init void mips_set_machine_name(const char *name)
 	if (name == NULL)
 		return;

-	strncpy(mips_machine_name, name, sizeof(mips_machine_name));
+	strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
 	pr_info("MIPS: machine is %s\n", mips_get_machine_name());
 }

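The strncpy() to strlcpy() switch matters because strncpy() leaves the destination unterminated when the source is at least as long as the buffer, while strlcpy() always writes a terminating NUL (truncating if necessary). For example:

	char buf[8];

	strncpy(buf, "a-very-long-name", sizeof(buf));	/* buf is NOT NUL-terminated */
	strlcpy(buf, "a-very-long-name", sizeof(buf));	/* buf == "a-very-" plus '\0' */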
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9c6299c733a3..8ae1ebef8b71 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -15,6 +15,7 @@
  * binaries.
  */
 #include <linux/compiler.h>
+#include <linux/context_tracking.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -534,6 +535,8 @@ static inline int audit_arch(void)
  */
 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
 {
+	user_exit();
+
 	/* do the secure computing check first */
 	secure_computing_strict(regs->regs[2]);

@@ -570,6 +573,13 @@ out:
  */
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
+	/*
+	 * We may come here right after calling schedule_user()
+	 * or do_notify_resume(), in which case we can be in RCU
+	 * user mode.
+	 */
+	user_exit();
+
 	audit_syscall_exit(regs);

 	if (!(current->ptrace & PT_PTRACED))
@@ -592,4 +602,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 		send_sig(current->exit_code, current, 1);
 		current->exit_code = 0;
 	}
+
+	user_enter();
 }
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 5266c6ee2b35..38af83f84c4a 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -65,6 +65,13 @@ LEAF(resume)
 	fpu_save_single a0, t0			# clobbers t0

 1:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	PTR_L	t8, __stack_chk_guard
+	LONG_L	t9, TASK_STACK_CANARY(a1)
+	LONG_S	t9, 0(t8)
+#endif
+
 	/*
 	 * The order of restoring the registers takes care of the race
 	 * updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 5e51219990aa..921238a6bd26 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -68,6 +68,12 @@
 	# clobbers t1
 1:

+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	PTR_L	t8, __stack_chk_guard
+	LONG_L	t9, TASK_STACK_CANARY(a1)
+	LONG_S	t9, 0(t8)
+#endif
+
 	/*
 	 * The order of restoring the registers takes care of the race
 	 * updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 6fa198db8999..d763f11e35e2 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -437,7 +437,6 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
 			  size_t count, loff_t * ppos)
 {
 	int minor = iminor(file_inode(file));
-	struct rtlx_channel *rt = &rtlx->channel[minor];

 	/* any space left... */
 	if (!rtlx_write_poll(minor)) {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e9127ec612ef..e774bb1088b5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)

 stack_done:
 	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
-	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	and	t0, t1
 	bnez	t0, syscall_trace_entry	# -> yes

diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 97a5909a61cf..be6627ead619 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)

 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting

-	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
 	and	t0, t1, t0
 	bnez	t0, syscall_trace_entry
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index edcb6594e7b5..cab150789c8d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)

 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting

-	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
 	and	t0, t1, t0
 	bnez	t0, n32_syscall_trace_entry
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 74f485d3c0ef..37605dc8eef7 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
 	PTR	4b, bad_stack
 	.previous

-	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
 	and	t0, t1, t0
 	bnez	t0, trace_a_syscall
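All four syscall entry stubs now test one consolidated _TIF_WORK_SYSCALL_ENTRY mask instead of open-coding TRACE|AUDIT, so new slow-path work (such as the context-tracking hooks added in ptrace.c above) only needs a flag added in one place. Conceptually the mask is just an OR of the relevant thread-info flags; the exact set lives in the MIPS asm/thread_info.h, and the flags shown below are an assumption, not the authoritative definition:

	/* Sketch only - the real definition is in asm/thread_info.h. */
	#define _TIF_WORK_SYSCALL_ENTRY	(_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
					 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)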
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index fd3ef2c2afbc..2f285abc76d5 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -8,6 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/cache.h>
+#include <linux/context_tracking.h>
 #include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -573,6 +574,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 {
 	local_irq_enable();

+	user_exit();
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -581,6 +584,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
+
+	user_enter();
 }

 #ifdef CONFIG_SMP
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 8e393b8443f7..c0bb4d59076a 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -63,7 +63,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);

 static void __init bmips_smp_setup(void)
 {
-	int i;
+	int i, cpu = 1, boot_cpu = 0;

 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
 	/* arbitration priority */
@@ -72,13 +72,22 @@ static void __init bmips_smp_setup(void)
 	/* NBK and weak order flags */
 	set_c0_brcm_config_0(0x30000);

+	/* Find out if we are running on TP0 or TP1 */
+	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+
 	/*
 	 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
 	 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
 	 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+	 *
+	 * If booting from TP1, leave the existing CMT interrupt routing
+	 * such that TP0 responds to SW1 and TP1 responds to SW0.
 	 */
-	change_c0_brcm_cmt_intr(0xf8018000,
-			(0x02 << 27) | (0x03 << 15));
+	if (boot_cpu == 0)
+		change_c0_brcm_cmt_intr(0xf8018000,
+					(0x02 << 27) | (0x03 << 15));
+	else
+		change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));

 	/* single core, 2 threads (2 pipelines) */
 	max_cpus = 2;
@@ -106,9 +115,15 @@ static void __init bmips_smp_setup(void)
 	if (!board_ebase_setup)
 		board_ebase_setup = &bmips_ebase_setup;

+	__cpu_number_map[boot_cpu] = 0;
+	__cpu_logical_map[0] = boot_cpu;
+
 	for (i = 0; i < max_cpus; i++) {
-		__cpu_number_map[i] = 1;
-		__cpu_logical_map[i] = 1;
+		if (i != boot_cpu) {
+			__cpu_number_map[i] = cpu;
+			__cpu_logical_map[cpu] = i;
+			cpu++;
+		}
 		set_cpu_possible(i, 1);
 		set_cpu_present(i, 1);
 	}
@@ -157,7 +172,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
 		bmips_send_ipi_single(cpu, 0);
 	else {
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
-		set_c0_brcm_cmt_ctrl(0x01);
+		/* Reset slave TP1 if booting from TP0 */
+		if (cpu_logical_map(cpu) == 1)
+			set_c0_brcm_cmt_ctrl(0x01);
 #elif defined(CONFIG_CPU_BMIPS5000)
 		if (cpu & 0x01)
 			write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
@@ -381,7 +398,7 @@ struct plat_smp_ops bmips_smp_ops = {
  * UP BMIPS systems as well.
  ***********************************************************************/

-static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
+static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 {
 	memcpy((void *)dst, start, end - start);
 	dma_cache_wback((unsigned long)start, end - start);
@@ -389,7 +406,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
 	instruction_hazard();
 }

-static inline void __cpuinit bmips_nmi_handler_setup(void)
+static inline void bmips_nmi_handler_setup(void)
 {
 	bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
 		&bmips_reset_nmi_vec_end);
@@ -397,7 +414,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void)
 		&bmips_smp_int_vec_end);
 }

-void __cpuinit bmips_ebase_setup(void)
+void bmips_ebase_setup(void)
 {
 	unsigned long new_ebase = ebase;
 	void __iomem __maybe_unused *cbr;
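The bmips_smp_setup() rework lets the kernel boot on either thread of the BMIPS43xx CMT pair: whichever TP we started on becomes logical CPU 0, and the remaining threads are numbered after it through __cpu_number_map/__cpu_logical_map. A small standalone sketch of that numbering logic (plain C, outside the kernel):

	#include <stdio.h>

	#define MAX_CPUS 2

	static int cpu_number_map[MAX_CPUS];	/* physical -> logical */
	static int cpu_logical_map[MAX_CPUS];	/* logical  -> physical */

	static void map_cpus(int boot_cpu)
	{
		int i, cpu = 1;

		cpu_number_map[boot_cpu] = 0;
		cpu_logical_map[0] = boot_cpu;

		for (i = 0; i < MAX_CPUS; i++) {
			if (i != boot_cpu) {
				cpu_number_map[i] = cpu;
				cpu_logical_map[cpu] = i;
				cpu++;
			}
		}
	}

	int main(void)
	{
		map_cpus(1);	/* e.g. the kernel was entered on TP1 */
		printf("logical 0 -> physical %d\n", cpu_logical_map[0]);
		printf("logical 1 -> physical %d\n", cpu_logical_map[1]);
		return 0;
	}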
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3e5164c11cac..57a3f7a2b370 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 		vsmp_send_ipi_single(i, action);
 }

-static void __cpuinit vsmp_init_secondary(void)
+static void vsmp_init_secondary(void)
 {
 #ifdef CONFIG_IRQ_GIC
 	/* This is Malta specific: IPI,performance and timer interrupts */
@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void)
 			 STATUSF_IP6 | STATUSF_IP7);
 }

-static void __cpuinit vsmp_smp_finish(void)
+static void vsmp_smp_finish(void)
 {
 	/* CDFIXME: remove this? */
 	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void)
  * (unsigned long)idle->thread_info the gp
  * assumes a 1:1 mapping of TC => VPE
  */
-static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
+static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
 {
 	struct thread_info *gp = task_thread_info(idle);
 	dvpe();
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 00500fea2750..7fde3e4d978f 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask,
  * After we've done initial boot, this function is called to allow the
  * board code to clean up state, if needed
  */
-static void __cpuinit up_init_secondary(void)
+static void up_init_secondary(void)
 {
 }

-static void __cpuinit up_smp_finish(void)
+static void up_smp_finish(void)
 {
 }

@@ -44,7 +44,7 @@ static void up_cpus_done(void)
 /*
  * Firmware CPU startup hook
  */
-static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
+static void up_boot_secondary(int cpu, struct task_struct *idle)
 {
 }

diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e7862ab46cc..5c208ed8f856 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu)
 struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);

-__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
 {
 	if (mp_ops)
 		printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
-asmlinkage __cpuinit void start_secondary(void)
+asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;

@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void)
 	cpu_set(0, cpu_callin_map);
 }

-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	mp_ops->boot_secondary(cpu, tidle);

diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 75a4fd709841..dfc1b911be04 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus)
  * (unsigned long)idle->thread_info the gp
  *
  */
-void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
+void smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
 	extern u32 kernelsp[NR_CPUS];
 	unsigned long flags;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d896e20..93f86817f20a 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -37,7 +37,7 @@
 /*
  * Different semantics to the set_c0_* function built by __BUILD_SET_C0
  */
-static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
+static unsigned int bis_c0_errctl(unsigned int set)
 {
 	unsigned int res;
 	res = read_c0_errctl();
@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
 	return res;
 }

-static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
+static void ispram_store_tag(unsigned int offset, unsigned int data)
 {
 	unsigned int errctl;

@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
 }


-static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
+static unsigned int ispram_load_tag(unsigned int offset)
 {
 	unsigned int data;
 	unsigned int errctl;
@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
 	return data;
 }

-static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
+static void dspram_store_tag(unsigned int offset, unsigned int data)
 {
 	unsigned int errctl;

@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
 }


-static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
+static unsigned int dspram_load_tag(unsigned int offset)
 {
 	unsigned int data;
 	unsigned int errctl;
@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
 	return data;
 }

-static __cpuinit void probe_spram(char *type,
+static void probe_spram(char *type,
 	unsigned int base,
 	unsigned int (*read)(unsigned int),
 	void (*write)(unsigned int, unsigned int))
@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type,
 		offset += 2 * SPRAM_TAG_STRIDE;
 	}
 }
-void __cpuinit spram_config(void)
+void spram_config(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config0;
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 1ff43d5ac2c4..84536bf4a154 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -20,15 +20,15 @@
 #include <asm/barrier.h>
 #include <asm/mipsregs.h>

-static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
+static atomic_t count_start_flag = ATOMIC_INIT(0);
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_t count_reference = ATOMIC_INIT(0);

 #define COUNTON 100
 #define NR_LOOPS 5

-void __cpuinit synchronise_count_master(int cpu)
+void synchronise_count_master(int cpu)
 {
 	int i;
 	unsigned long flags;
@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu)
 	printk("done.\n");
 }

-void __cpuinit synchronise_count_slave(int cpu)
+void synchronise_count_slave(int cpu)
 {
 	int i;
 	unsigned int initcount;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a75ae40184aa..aec3408edd4b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -13,6 +13,7 @@
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/context_tracking.h>
 #include <linux/kexec.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -89,7 +90,7 @@ void (*board_nmi_handler_setup)(void);
 void (*board_ejtag_handler_setup)(void);
 void (*board_bind_eic_interrupt)(int irq, int regset);
 void (*board_ebase_setup)(void);
-void __cpuinitdata(*board_cache_error_setup)(void);
+void(*board_cache_error_setup)(void);

 static void show_raw_backtrace(unsigned long reg29)
 {
@@ -264,7 +265,7 @@ static void __show_regs(const struct pt_regs *regs)

 	printk("Status: %08x	", (uint32_t) regs->cp0_status);

-	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
+	if (cpu_has_3kex) {
 		if (regs->cp0_status & ST0_KUO)
 			printk("KUo ");
 		if (regs->cp0_status & ST0_IEO)
@@ -277,7 +278,7 @@ static void __show_regs(const struct pt_regs *regs)
 			printk("KUc ");
 		if (regs->cp0_status & ST0_IEC)
 			printk("IEc ");
-	} else {
+	} else if (cpu_has_4kex) {
 		if (regs->cp0_status & ST0_KX)
 			printk("KX ");
 		if (regs->cp0_status & ST0_SX)
@@ -423,7 +424,9 @@ asmlinkage void do_be(struct pt_regs *regs)
 	const struct exception_table_entry *fixup = NULL;
 	int data = regs->cp0_cause & 4;
 	int action = MIPS_BE_FATAL;
+	enum ctx_state prev_state;

+	prev_state = exception_enter();
 	/* XXX For now. Fixme, this searches the wrong table ... */
 	if (data && !user_mode(regs))
 		fixup = search_dbe_tables(exception_epc(regs));
@@ -436,11 +439,11 @@

 	switch (action) {
 	case MIPS_BE_DISCARD:
-		return;
+		goto out;
 	case MIPS_BE_FIXUP:
 		if (fixup) {
 			regs->cp0_epc = fixup->nextinsn;
-			return;
+			goto out;
 		}
 		break;
 	default:
@@ -455,10 +458,13 @@
 	       field, regs->cp0_epc, field, regs->regs[31]);
 	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
 	    == NOTIFY_STOP)
-		return;
+		goto out;

 	die_if_kernel("Oops", regs);
 	force_sig(SIGBUS, current);
+
+out:
+	exception_exit(prev_state);
 }

 /*
@@ -673,8 +679,10 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)

 asmlinkage void do_ov(struct pt_regs *regs)
 {
+	enum ctx_state prev_state;
 	siginfo_t info;

+	prev_state = exception_enter();
 	die_if_kernel("Integer overflow", regs);

 	info.si_code = FPE_INTOVF;
@@ -682,6 +690,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	info.si_errno = 0;
 	info.si_addr = (void __user *) regs->cp0_epc;
 	force_sig_info(SIGFPE, &info, current);
+	exception_exit(prev_state);
 }

 int process_fpemu_return(int sig, void __user *fault_addr)
@@ -713,11 +722,13 @@ int process_fpemu_return(int sig, void __user *fault_addr)
  */
 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 {
+	enum ctx_state prev_state;
 	siginfo_t info = {0};

+	prev_state = exception_enter();
 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
 	    == NOTIFY_STOP)
-		return;
+		goto out;
 	die_if_kernel("FP exception in kernel code", regs);

 	if (fcr31 & FPU_CSR_UNI_X) {
@@ -753,7 +764,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		/* If something went wrong, signal */
 		process_fpemu_return(sig, fault_addr);

-		return;
+		goto out;
 	} else if (fcr31 & FPU_CSR_INV_X)
 		info.si_code = FPE_FLTINV;
 	else if (fcr31 & FPU_CSR_DIV_X)
@@ -770,6 +781,9 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	info.si_errno = 0;
 	info.si_addr = (void __user *) regs->cp0_epc;
 	force_sig_info(SIGFPE, &info, current);
+
+out:
+	exception_exit(prev_state);
 }

 static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
@@ -835,9 +849,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
 	unsigned int opcode, bcode;
+	enum ctx_state prev_state;
 	unsigned long epc;
 	u16 instr[2];

+	prev_state = exception_enter();
 	if (get_isa16_mode(regs->cp0_epc)) {
 		/* Calculate EPC. */
 		epc = exception_epc(regs);
@@ -852,7 +868,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
 			goto out_sigsegv;
 		bcode = (instr[0] >> 6) & 0x3f;
 		do_trap_or_bp(regs, bcode, "Break");
-		return;
+		goto out;
 		}
 	} else {
 		if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
@@ -876,12 +892,12 @@ asmlinkage void do_bp(struct pt_regs *regs)
 	switch (bcode) {
 	case BRK_KPROBE_BP:
 		if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
-			return;
+			goto out;
 		else
 			break;
 	case BRK_KPROBE_SSTEPBP:
 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
-			return;
+			goto out;
 		else
 			break;
 	default:
@@ -889,18 +905,24 @@ asmlinkage void do_bp(struct pt_regs *regs)
 	}

 	do_trap_or_bp(regs, bcode, "Break");
+
+out:
+	exception_exit(prev_state);
 	return;

out_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }

 asmlinkage void do_tr(struct pt_regs *regs)
 {
 	u32 opcode, tcode = 0;
+	enum ctx_state prev_state;
 	u16 instr[2];
 	unsigned long epc = msk_isa16_mode(exception_epc(regs));

+	prev_state = exception_enter();
 	if (get_isa16_mode(regs->cp0_epc)) {
 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -918,10 +940,14 @@ asmlinkage void do_tr(struct pt_regs *regs)
 	}

 	do_trap_or_bp(regs, tcode, "Trap");
+
+out:
+	exception_exit(prev_state);
 	return;

out_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }

 asmlinkage void do_ri(struct pt_regs *regs)
@@ -929,17 +955,19 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
 	unsigned long old_epc = regs->cp0_epc;
 	unsigned long old31 = regs->regs[31];
+	enum ctx_state prev_state;
 	unsigned int opcode = 0;
 	int status = -1;

+	prev_state = exception_enter();
 	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
 	    == NOTIFY_STOP)
-		return;
+		goto out;

 	die_if_kernel("Reserved instruction in kernel code", regs);

 	if (unlikely(compute_return_epc(regs) < 0))
-		return;
+		goto out;

 	if (get_isa16_mode(regs->cp0_epc)) {
 		unsigned short mmop[2] = { 0 };
@@ -974,6 +1002,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
 		regs->regs[31] = old31;
 		force_sig(status, current);
 	}
+
+out:
+	exception_exit(prev_state);
 }

 /*
@@ -1025,21 +1056,16 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 {
 	struct pt_regs *regs = data;

-	switch (action) {
-	default:
-		die_if_kernel("Unhandled kernel unaligned access or invalid "
+	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
 			      "instruction", regs);
-		/* Fall through */
-
-	case CU2_EXCEPTION:
-		force_sig(SIGILL, current);
-	}
+	force_sig(SIGILL, current);

 	return NOTIFY_OK;
 }

 asmlinkage void do_cpu(struct pt_regs *regs)
 {
+	enum ctx_state prev_state;
 	unsigned int __user *epc;
 	unsigned long old_epc, old31;
 	unsigned int opcode;
@@ -1047,10 +1073,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	int status;
 	unsigned long __maybe_unused flags;

-	die_if_kernel("do_cpu invoked from kernel context!", regs);
-
+	prev_state = exception_enter();
 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

+	if (cpid != 2)
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
+
 	switch (cpid) {
 	case 0:
 		epc = (unsigned int __user *)exception_epc(regs);
@@ -1060,7 +1088,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 		status = -1;

 		if (unlikely(compute_return_epc(regs) < 0))
-			return;
+			goto out;

 		if (get_isa16_mode(regs->cp0_epc)) {
 			unsigned short mmop[2] = { 0 };
@@ -1093,7 +1121,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 			force_sig(status, current);
 		}

-		return;
+		goto out;

 	case 3:
 		/*
@@ -1131,19 +1159,26 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 			mt_ase_fp_affinity();
 		}

-		return;
+		goto out;

 	case 2:
 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
-		return;
+		goto out;
 	}

 	force_sig(SIGILL, current);
+
+out:
+	exception_exit(prev_state);
 }

 asmlinkage void do_mdmx(struct pt_regs *regs)
 {
+	enum ctx_state prev_state;
+
+	prev_state = exception_enter();
 	force_sig(SIGILL, current);
+	exception_exit(prev_state);
 }

 /*
@@ -1151,8 +1186,10 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
  */
 asmlinkage void do_watch(struct pt_regs *regs)
 {
+	enum ctx_state prev_state;
 	u32 cause;

+	prev_state = exception_enter();
 	/*
 	 * Clear WP (bit 22) bit of cause register so we don't loop
 	 * forever.
@@ -1174,13 +1211,16 @@ asmlinkage void do_watch(struct pt_regs *regs)
 		mips_clear_watch_registers();
 		local_irq_enable();
 	}
+	exception_exit(prev_state);
 }

 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
+	enum ctx_state prev_state;

+	prev_state = exception_enter();
 	show_regs(regs);

 	if (multi_match) {
@@ -1627,7 +1667,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
 }

 extern void tlb_init(void);
-extern void flush_tlb_handlers(void);

 /*
  * Timer interrupt
@@ -1642,7 +1681,7 @@ int cp0_compare_irq_shift;
 int cp0_perfcount_irq;
 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

-static int __cpuinitdata noulri;
+static int noulri;

 static int __init ulri_disable(char *s)
 {
@@ -1653,7 +1692,7 @@ static int __init ulri_disable(char *s)
 }
 __setup("noulri", ulri_disable);

-void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
+void per_cpu_trap_init(bool is_boot_cpu)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
@@ -1770,7 +1809,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 }

 /* Install CPU exception handler */
-void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, void *addr, unsigned long size)
1774{ 1813{
1775#ifdef CONFIG_CPU_MICROMIPS 1814#ifdef CONFIG_CPU_MICROMIPS
1776 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); 1815 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -1780,7 +1819,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1780 local_flush_icache_range(ebase + offset, ebase + offset + size); 1819 local_flush_icache_range(ebase + offset, ebase + offset + size);
1781} 1820}
1782 1821
1783static char panic_null_cerr[] __cpuinitdata = 1822static char panic_null_cerr[] =
1784 "Trying to set NULL cache error exception handler"; 1823 "Trying to set NULL cache error exception handler";
1785 1824
1786/* 1825/*
@@ -1788,7 +1827,7 @@ static char panic_null_cerr[] __cpuinitdata =
1788 * This is suitable only for the cache error exception which is the only 1827 * This is suitable only for the cache error exception which is the only
1789 * exception handler that is being run uncached. 1828 * exception handler that is being run uncached.
1790 */ 1829 */
1791void __cpuinit set_uncached_handler(unsigned long offset, void *addr, 1830void set_uncached_handler(unsigned long offset, void *addr,
1792 unsigned long size) 1831 unsigned long size)
1793{ 1832{
1794 unsigned long uncached_ebase = CKSEG1ADDR(ebase); 1833 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
@@ -1837,6 +1876,15 @@ void __init trap_init(void)
1837 ebase += (read_c0_ebase() & 0x3ffff000); 1876 ebase += (read_c0_ebase() & 0x3ffff000);
1838 } 1877 }
1839 1878
1879 if (cpu_has_mmips) {
1880 unsigned int config3 = read_c0_config3();
1881
1882 if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
1883 write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
1884 else
1885 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
1886 }
1887
1840 if (board_ebase_setup) 1888 if (board_ebase_setup)
1841 board_ebase_setup(); 1889 board_ebase_setup();
1842 per_cpu_trap_init(true); 1890 per_cpu_trap_init(true);
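The new cpu_has_mmips block in trap_init() above keeps Config3.ISA_OE consistent with how the kernel's exception vectors were built: ISA_OE selects the ISA mode (microMIPS vs. classic MIPS) the core switches to when it takes an exception, so it is set exactly when CONFIG_CPU_MICROMIPS handlers were assembled, and cleared otherwise. The same read-modify-write, shown on its own as an illustrative sketch (accessors and the bit definition come from <asm/mipsregs.h>; IS_ENABLED() is the usual compile-time Kconfig test; the function name is hypothetical):

    #include <linux/kconfig.h>   /* IS_ENABLED(): 1 for =y or =m, else 0 */
    #include <asm/mipsregs.h>    /* read_c0_config3(), MIPS_CONF3_ISA_OE */

    static void example_sync_isa_on_exception(void)     /* hypothetical name */
    {
        unsigned int config3 = read_c0_config3();

        if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
            config3 |= MIPS_CONF3_ISA_OE;    /* vector in microMIPS mode */
        else
            config3 &= ~MIPS_CONF3_ISA_OE;   /* vector in classic MIPS mode */

        write_c0_config3(config3);
    }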
@@ -1956,7 +2004,6 @@ void __init trap_init(void)
1956 set_handler(0x080, &except_vec3_generic, 0x80); 2004 set_handler(0x080, &except_vec3_generic, 0x80);
1957 2005
1958 local_flush_icache_range(ebase, ebase + 0x400); 2006 local_flush_icache_range(ebase, ebase + 0x400);
1959 flush_tlb_handlers();
1960 2007
1961 sort_extable(__start___dbe_table, __stop___dbe_table); 2008 sort_extable(__start___dbe_table, __stop___dbe_table);
1962 2009
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 203d8857070d..c369a5d35527 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -72,6 +72,7 @@
72 * A store crossing a page boundary might be executed only partially. 72 * A store crossing a page boundary might be executed only partially.
73 * Undo the partial store in this case. 73 * Undo the partial store in this case.
74 */ 74 */
75#include <linux/context_tracking.h>
75#include <linux/mm.h> 76#include <linux/mm.h>
76#include <linux/signal.h> 77#include <linux/signal.h>
77#include <linux/smp.h> 78#include <linux/smp.h>
@@ -684,7 +685,8 @@ const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
684/* Recode table from 16-bit STORE register notation to 32-bit GPR. */ 685/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
685const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 686const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
686 687
687void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) 688static void emulate_load_store_microMIPS(struct pt_regs *regs,
689 void __user *addr)
688{ 690{
689 unsigned long value; 691 unsigned long value;
690 unsigned int res; 692 unsigned int res;
@@ -1548,11 +1550,14 @@ sigill:
1548 ("Unhandled kernel unaligned access or invalid instruction", regs); 1550 ("Unhandled kernel unaligned access or invalid instruction", regs);
1549 force_sig(SIGILL, current); 1551 force_sig(SIGILL, current);
1550} 1552}
1553
1551asmlinkage void do_ade(struct pt_regs *regs) 1554asmlinkage void do_ade(struct pt_regs *regs)
1552{ 1555{
1556 enum ctx_state prev_state;
1553 unsigned int __user *pc; 1557 unsigned int __user *pc;
1554 mm_segment_t seg; 1558 mm_segment_t seg;
1555 1559
1560 prev_state = exception_enter();
1556 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1561 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1557 1, regs, regs->cp0_badvaddr); 1562 1, regs, regs->cp0_badvaddr);
1558 /* 1563 /*
@@ -1628,6 +1633,7 @@ sigbus:
1628 /* 1633 /*
1629 * XXX On return from the signal handler we should advance the epc 1634 * XXX On return from the signal handler we should advance the epc
1630 */ 1635 */
1636 exception_exit(prev_state);
1631} 1637}
1632 1638
1633#ifdef CONFIG_DEBUG_FS 1639#ifdef CONFIG_DEBUG_FS
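For unaligned.c the functional additions are the context-tracking wrapper in do_ade() and the static linkage of the microMIPS emulator; the perf_sw_event() accounting of PERF_COUNT_SW_ALIGNMENT_FAULTS visible in the context was already there. That counter is observable from userspace, which is handy for checking whether a workload hits this emulation path at all. A small userspace sketch under the usual assumptions (Linux perf_event_open(2); glibc provides no wrapper, so the syscall stub below is ours):

    /* Count alignment faults taken by this process while some work runs. */
    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0 /* this task */, -1 /* any CPU */, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        /* ... run the code suspected of doing unaligned accesses ... */

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("alignment faults: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }

The same software event is reported by "perf stat -e alignment-faults <cmd>" from the command line.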
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index 7726f6157d9e..2a03abb5bd2c 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void)
100 } 100 }
101} 101}
102 102
103__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) 103void mips_probe_watch_registers(struct cpuinfo_mips *c)
104{ 104{
105 unsigned int t; 105 unsigned int t;
106 106
@@ -111,6 +111,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
111 * disable the register. 111 * disable the register.
112 */ 112 */
113 write_c0_watchlo0(7); 113 write_c0_watchlo0(7);
114 back_to_back_c0_hazard();
114 t = read_c0_watchlo0(); 115 t = read_c0_watchlo0();
115 write_c0_watchlo0(0); 116 write_c0_watchlo0(0);
116 c->watch_reg_masks[0] = t & 7; 117 c->watch_reg_masks[0] = t & 7;
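This and the following watch.c hunks all insert the same thing: back_to_back_c0_hazard() between a CP0 write and the read-back of the same register. The probe works by writing bits and seeing which ones stick, and on cores where an mtc0 is not immediately visible to a following mfc0 the read can return the stale value, making the reported watch_reg_masks wrong. The repeated pattern, shown once as a sketch mirroring mips_probe_watch_registers() (the function name here is made up; the accessors and the barrier are the real <asm/mipsregs.h> and <asm/hazards.h> helpers):

    #include <asm/hazards.h>     /* back_to_back_c0_hazard() */
    #include <asm/mipsregs.h>    /* read_c0_watchlo0(), write_c0_watchlo0() */

    static unsigned int example_probe_watchlo0_mask(void)   /* hypothetical name */
    {
        unsigned int t;

        write_c0_watchlo0(7);           /* try to set the low control bits        */
        back_to_back_c0_hazard();       /* let the CP0 write settle ...           */
        t = read_c0_watchlo0();         /* ... before reading back what stuck     */
        write_c0_watchlo0(0);           /* leave the register disabled            */

        return t & 7;                   /* writable bits => supported match modes */
    }

The same write/hazard/read sequence is what the patch adds for watchlo1..3 and for the watchhi mask probes below.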
@@ -121,12 +122,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
121 c->watch_reg_use_cnt = 1; 122 c->watch_reg_use_cnt = 1;
122 t = read_c0_watchhi0(); 123 t = read_c0_watchhi0();
123 write_c0_watchhi0(t | 0xff8); 124 write_c0_watchhi0(t | 0xff8);
125 back_to_back_c0_hazard();
124 t = read_c0_watchhi0(); 126 t = read_c0_watchhi0();
125 c->watch_reg_masks[0] |= (t & 0xff8); 127 c->watch_reg_masks[0] |= (t & 0xff8);
126 if ((t & 0x80000000) == 0) 128 if ((t & 0x80000000) == 0)
127 return; 129 return;
128 130
129 write_c0_watchlo1(7); 131 write_c0_watchlo1(7);
132 back_to_back_c0_hazard();
130 t = read_c0_watchlo1(); 133 t = read_c0_watchlo1();
131 write_c0_watchlo1(0); 134 write_c0_watchlo1(0);
132 c->watch_reg_masks[1] = t & 7; 135 c->watch_reg_masks[1] = t & 7;
@@ -135,12 +138,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
135 c->watch_reg_use_cnt = 2; 138 c->watch_reg_use_cnt = 2;
136 t = read_c0_watchhi1(); 139 t = read_c0_watchhi1();
137 write_c0_watchhi1(t | 0xff8); 140 write_c0_watchhi1(t | 0xff8);
141 back_to_back_c0_hazard();
138 t = read_c0_watchhi1(); 142 t = read_c0_watchhi1();
139 c->watch_reg_masks[1] |= (t & 0xff8); 143 c->watch_reg_masks[1] |= (t & 0xff8);
140 if ((t & 0x80000000) == 0) 144 if ((t & 0x80000000) == 0)
141 return; 145 return;
142 146
143 write_c0_watchlo2(7); 147 write_c0_watchlo2(7);
148 back_to_back_c0_hazard();
144 t = read_c0_watchlo2(); 149 t = read_c0_watchlo2();
145 write_c0_watchlo2(0); 150 write_c0_watchlo2(0);
146 c->watch_reg_masks[2] = t & 7; 151 c->watch_reg_masks[2] = t & 7;
@@ -149,12 +154,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
149 c->watch_reg_use_cnt = 3; 154 c->watch_reg_use_cnt = 3;
150 t = read_c0_watchhi2(); 155 t = read_c0_watchhi2();
151 write_c0_watchhi2(t | 0xff8); 156 write_c0_watchhi2(t | 0xff8);
157 back_to_back_c0_hazard();
152 t = read_c0_watchhi2(); 158 t = read_c0_watchhi2();
153 c->watch_reg_masks[2] |= (t & 0xff8); 159 c->watch_reg_masks[2] |= (t & 0xff8);
154 if ((t & 0x80000000) == 0) 160 if ((t & 0x80000000) == 0)
155 return; 161 return;
156 162
157 write_c0_watchlo3(7); 163 write_c0_watchlo3(7);
164 back_to_back_c0_hazard();
158 t = read_c0_watchlo3(); 165 t = read_c0_watchlo3();
159 write_c0_watchlo3(0); 166 write_c0_watchlo3(0);
160 c->watch_reg_masks[3] = t & 7; 167 c->watch_reg_masks[3] = t & 7;
@@ -163,6 +170,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
163 c->watch_reg_use_cnt = 4; 170 c->watch_reg_use_cnt = 4;
164 t = read_c0_watchhi3(); 171 t = read_c0_watchhi3();
165 write_c0_watchhi3(t | 0xff8); 172 write_c0_watchhi3(t | 0xff8);
173 back_to_back_c0_hazard();
166 t = read_c0_watchhi3(); 174 t = read_c0_watchhi3();
167 c->watch_reg_masks[3] |= (t & 0xff8); 175 c->watch_reg_masks[3] |= (t & 0xff8);
168 if ((t & 0x80000000) == 0) 176 if ((t & 0x80000000) == 0)