Diffstat (limited to 'arch/mips/kernel')
 arch/mips/kernel/Makefile    |   3
 arch/mips/kernel/cpu-probe.c |   5
 arch/mips/kernel/irq-gic.c   | 295
 arch/mips/kernel/smp-cmp.c   | 265
 arch/mips/kernel/smp-mt.c    |  96
 arch/mips/kernel/smp.c       |   4
 arch/mips/kernel/smtc.c      |   3
 arch/mips/kernel/sync-r4k.c  | 159
 arch/mips/kernel/traps.c     | 111
 9 files changed, 847 insertions(+), 94 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 67d97fb02c38..d0ca4d41bb74 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
 binfmt_irix-objs	:= irixelf.o irixinv.o irixioctl.o irixsig.o	\
 			   irix5sys.o sysirix.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_MIPS_MT) += mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
+obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)	+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC)		+= irq-gic.o
 
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
 obj-$(CONFIG_64BIT)		+= scall64-64.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index add717dccf77..a742a967169a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -169,6 +169,7 @@ static inline void check_wait(void)
 
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_1004K:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
@@ -717,6 +718,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
 	case PRID_IMP_74K:
 		c->cputype = CPU_74K;
 		break;
+	case PRID_IMP_1004K:
+		c->cputype = CPU_1004K;
+		break;
 	}
 
 	spram_config();
@@ -884,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
 	case CPU_24K:	name = "MIPS 24K";	break;
 	case CPU_25KF:	name = "MIPS 25Kf";	break;
 	case CPU_34K:	name = "MIPS 34K";	break;
+	case CPU_1004K:	name = "MIPS 1004K";	break;
 	case CPU_74K:	name = "MIPS 74K";	break;
 	case CPU_VR4111: name = "NEC VR4111";	break;
 	case CPU_VR4121: name = "NEC VR4121";	break;
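
The cpu-probe.c hunks above wire the new 1004K core into three independent tables: the wait-instruction selection in check_wait(), the PRID-to-cputype switch in cpu_probe_mips(), and the name lookup in cpu_to_name(). A minimal userspace sketch of the classification step follows; the PRID_IMP_* values here are assumptions modelled on asm/cpu.h, not taken from this diff.

#include <stdio.h>

#define PRID_IMP_MASK	0x0000ff00
#define PRID_IMP_34K	0x00009500	/* assumed values; see asm/cpu.h */
#define PRID_IMP_74K	0x00009700
#define PRID_IMP_1004K	0x00009900

static const char *cpu_name_from_prid(unsigned int prid)
{
	/* Mirror of the cpu_probe_mips() switch: mask off the revision
	 * field and dispatch on the implementation number alone. */
	switch (prid & PRID_IMP_MASK) {
	case PRID_IMP_34K:
		return "MIPS 34K";
	case PRID_IMP_74K:
		return "MIPS 74K";
	case PRID_IMP_1004K:
		return "MIPS 1004K";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", cpu_name_from_prid(0x00009904));	/* rev byte ignored */
	return 0;
}
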
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 000000000000..f0a4bb19e096
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,295 @@
+#undef DEBUG
+
+#include <linux/bitmap.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/irq.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+
+static unsigned long _gic_base;
+static unsigned int _irqbase, _mapsize, numvpes, numintrs;
+static struct gic_intr_map *_intrmap;
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#define gic_wedgeb2bok 0	/*
+				 * Can GIC handle b2b writes to wedge register?
+				 */
+#if gic_wedgeb2bok == 0
+static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
+#endif
+
+void gic_send_ipi(unsigned int intr)
+{
+#if gic_wedgeb2bok == 0
+	unsigned long flags;
+#endif
+	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
+		 read_c0_status());
+	if (!gic_wedgeb2bok)
+		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+	if (!gic_wedgeb2bok) {
+		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
+		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+	}
+}
+
+/* This is Malta specific and needs to be exported */
+static void vpe_local_setup(unsigned int numvpes)
+{
+	int i;
+	unsigned long timer_interrupt = 5, perf_interrupt = 5;
+	unsigned int vpe_ctl;
+
+	/*
+	 * Setup the default performance counter timer interrupts
+	 * for all VPEs
+	 */
+	for (i = 0; i < numvpes; i++) {
+		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+		/* Are Interrupts locally routable? */
+		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+				 GIC_MAP_TO_PIN_MSK | timer_interrupt);
+
+		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+				 GIC_MAP_TO_PIN_MSK | perf_interrupt);
+	}
+}
+
+unsigned int gic_get_int(void)
+{
+	unsigned int i;
+	unsigned long *pending, *intrmask, *pcpu_mask;
+	unsigned long *pending_abs, *intrmask_abs;
+
+	/* Get per-cpu bitmaps */
+	pending = pending_regs[smp_processor_id()].pending;
+	intrmask = intrmask_regs[smp_processor_id()].intrmask;
+	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+							 GIC_SH_PEND_31_0_OFS);
+	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+							  GIC_SH_MASK_31_0_OFS);
+
+	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+		GICREAD(*pending_abs, pending[i]);
+		GICREAD(*intrmask_abs, intrmask[i]);
+		pending_abs++;
+		intrmask_abs++;
+	}
+
+	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+	i = find_first_bit(pending, GIC_NUM_INTRS);
+
+	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
+
+	return i;
+}
+
+static unsigned int gic_irq_startup(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+	return 0;
+}
+
+static void gic_irq_ack(unsigned int irq)
+{
+#if gic_wedgeb2bok == 0
+	unsigned long flags;
+#endif
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+
+	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
+		if (!gic_wedgeb2bok)
+			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
+		if (!gic_wedgeb2bok) {
+			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
+			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+		}
+	}
+}
+
+static void gic_mask_irq(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+}
+
+static void gic_unmask_irq(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+}
+
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(gic_lock);
+
+static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+	cpumask_t tmp = CPU_MASK_NONE;
+	unsigned long flags;
+	int i;
+
+	pr_debug(KERN_DEBUG "%s called\n", __func__);
+	irq -= _irqbase;
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	if (cpus_empty(tmp))
+		return;
+
+	/* Assumption : cpumask refers to a single CPU */
+	spin_lock_irqsave(&gic_lock, flags);
+	for (;;) {
+		/* Re-route this IRQ */
+		GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+		/*
+		 * FIXME: assumption that _intrmap is ordered and has no holes
+		 */
+
+		/* Update the intr_map */
+		_intrmap[irq].cpunum = first_cpu(tmp);
+
+		/* Update the pcpu_masks */
+		for (i = 0; i < NR_CPUS; i++)
+			clear_bit(irq, pcpu_masks[i].pcpu_mask);
+		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+	}
+	irq_desc[irq].affinity = cpumask;
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+	.name		= "MIPS GIC",
+	.startup	= gic_irq_startup,
+	.ack		= gic_irq_ack,
+	.mask		= gic_mask_irq,
+	.mask_ack	= gic_mask_irq,
+	.unmask		= gic_unmask_irq,
+	.eoi		= gic_unmask_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= gic_set_affinity,
+#endif
+};
+
+static void __init setup_intr(unsigned int intr, unsigned int cpu,
+	unsigned int pin, unsigned int polarity, unsigned int trigtype)
+{
+	/* Setup Intr to Pin mapping */
+	if (pin & GIC_MAP_TO_NMI_MSK) {
+		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+		/* FIXME: hack to route NMI to all cpu's */
+		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+			GICWRITE(GIC_REG_ADDR(SHARED,
+					GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+				 0xffffffff);
+		}
+	} else {
+		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+			 GIC_MAP_TO_PIN_MSK | pin);
+		/* Setup Intr to CPU mapping */
+		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+	}
+
+	/* Setup Intr Polarity */
+	GIC_SET_POLARITY(intr, polarity);
+
+	/* Setup Intr Trigger Type */
+	GIC_SET_TRIGGER(intr, trigtype);
+
+	/* Init Intr Masks */
+	GIC_SET_INTR_MASK(intr, 0);
+}
+
+static void __init gic_basic_init(void)
+{
+	unsigned int i, cpu;
+
+	/* Setup defaults */
+	for (i = 0; i < GIC_NUM_INTRS; i++) {
+		GIC_SET_POLARITY(i, GIC_POL_POS);
+		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+		GIC_SET_INTR_MASK(i, 0);
+	}
+
+	/* Setup specifics */
+	for (i = 0; i < _mapsize; i++) {
+		cpu = _intrmap[i].cpunum;
+		if (cpu == X)
+			continue;
+
+		setup_intr(_intrmap[i].intrnum,
+			   _intrmap[i].cpunum,
+			   _intrmap[i].pin,
+			   _intrmap[i].polarity,
+			   _intrmap[i].trigtype);
+		/* Initialise per-cpu Interrupt software masks */
+		if (_intrmap[i].ipiflag)
+			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
+	}
+
+	vpe_local_setup(numvpes);
+
+	for (i = _irqbase; i < (_irqbase + numintrs); i++)
+		set_irq_chip(i, &gic_irq_controller);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+		     unsigned long gic_addrspace_size,
+		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
+		     unsigned int irqbase)
+{
+	unsigned int gicconfig;
+
+	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+						    gic_addrspace_size);
+	_irqbase = irqbase;
+	_intrmap = intr_map;
+	_mapsize = intr_map_size;
+
+	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+		   GIC_SH_CONFIG_NUMINTRS_SHF;
+	numintrs = ((numintrs + 1) * 8);
+
+	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+		  GIC_SH_CONFIG_NUMVPES_SHF;
+
+	pr_debug("%s called\n", __func__);
+
+	gic_basic_init();
+}
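
The core of the new controller is gic_get_int(): it snapshots the shared pending and mask registers into per-CPU bitmaps, ANDs them together, then ANDs in this CPU's pcpu_mask so each VPE only considers interrupts routed to it, and finally takes the lowest set bit. A self-contained sketch of that selection logic, with the GICREAD register traffic replaced by plain words (single-word bitmaps for brevity):

#include <stdio.h>

#define GIC_NUM_INTRS	32	/* one word, for the sketch */

/* Pick the lowest-numbered interrupt that is pending, unmasked and
 * routed to this CPU -- the same bitmap_and/find_first_bit dance as
 * gic_get_int(), collapsed to a single word. */
static int pick_int(unsigned int pending, unsigned int intrmask,
		    unsigned int pcpu_mask)
{
	unsigned int candidates = pending & intrmask & pcpu_mask;
	int i;

	for (i = 0; i < GIC_NUM_INTRS; i++)
		if (candidates & (1u << i))
			return i;
	return GIC_NUM_INTRS;	/* find_first_bit's "nothing found" value */
}

int main(void)
{
	/* irq 3 and 5 pending, 5 masked off, both routed here: picks 3 */
	printf("%d\n", pick_int(0x28, ~0x20u, ~0u));
	return 0;
}
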
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000000..ca476c4f62a5
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,265 @@
+/*
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ *    Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+
+/*
+ * Crude manipulation of the CPU masks to control which
+ * which CPU's are brought online during initialisation
+ *
+ * Beware... this needs to be called after CPU discovery
+ * but before CPU bringup
+ */
+static int __init allowcpus(char *str)
+{
+	cpumask_t cpu_allow_map;
+	char buf[256];
+	int len;
+
+	cpus_clear(cpu_allow_map);
+	if (cpulist_parse(str, cpu_allow_map) == 0) {
+		cpu_set(0, cpu_allow_map);
+		cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
+		len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+		buf[len] = '\0';
+		pr_debug("Allowable CPUs: %s\n", buf);
+		return 1;
+	} else
+		return 0;
+}
+__setup("allowcpus=", allowcpus);
+
+static void ipi_call_function(unsigned int cpu)
+{
+	unsigned int action = 0;
+
+	pr_debug("CPU%d: %s cpu %d status %08x\n",
+		 smp_processor_id(), __func__, cpu, read_c0_status());
+
+	switch (cpu) {
+	case 0:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
+		break;
+	case 1:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
+		break;
+	case 2:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
+		break;
+	case 3:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
+		break;
+	}
+	gic_send_ipi(action);
+}
+
+
+static void ipi_resched(unsigned int cpu)
+{
+	unsigned int action = 0;
+
+	pr_debug("CPU%d: %s cpu %d status %08x\n",
+		 smp_processor_id(), __func__, cpu, read_c0_status());
+
+	switch (cpu) {
+	case 0:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
+		break;
+	case 1:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
+		break;
+	case 2:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
+		break;
+	case 3:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
+		break;
+	}
+	gic_send_ipi(action);
+}
+
+/*
+ * FIXME: This isn't restricted to CMP
+ * The SMVP kernel could use GIC interrupts if available
+ */
+void cmp_send_ipi_single(int cpu, unsigned int action)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		ipi_call_function(cpu);
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+		ipi_resched(cpu);
+		break;
+	}
+
+	local_irq_restore(flags);
+}
+
+static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		cmp_send_ipi_single(i, action);
+}
+
+static void cmp_init_secondary(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+
+	/* Assume GIC is present */
+	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
+				 STATUSF_IP7);
+
+	/* Enable per-cpu interrupts: platform specific */
+
+	c->core = (read_c0_ebase() >> 1) & 0xff;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+	c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+	/* CDFIXME: remove this? */
+	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	local_irq_enable();
+}
+
+static void cmp_cpus_done(void)
+{
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ */
+static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+	struct thread_info *gp = task_thread_info(idle);
+	unsigned long sp = __KSTK_TOS(idle);
+	unsigned long pc = (unsigned long)&smp_bootstrap;
+	unsigned long a0 = 0;
+
+	pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+		 __func__, cpu);
+
+#if 0
+	/* Needed? */
+	flush_icache_range((unsigned long)gp,
+			   (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+	amon_cpu_start(cpu, pc, sp, gp, a0);
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+	int i;
+	int ncpu = 0;
+
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	for (i = 1; i < NR_CPUS; i++) {
+		if (amon_cpu_avail(i)) {
+			cpu_set(i, phys_cpu_present_map);
+			__cpu_number_map[i] = ++ncpu;
+			__cpu_logical_map[ncpu] = i;
+		}
+	}
+
+	if (cpu_has_mipsmt) {
+		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+
+		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+		smp_num_siblings = nvpe;
+	}
+	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+		 smp_processor_id(), __func__, max_cpus);
+
+	/*
+	 * FIXME: some of these options are per-system, some per-core and
+	 * some per-cpu
+	 */
+	mips_mt_set_cpuoptions();
+}
+
+struct plat_smp_ops cmp_smp_ops = {
+	.send_ipi_single	= cmp_send_ipi_single,
+	.send_ipi_mask		= cmp_send_ipi_mask,
+	.init_secondary		= cmp_init_secondary,
+	.smp_finish		= cmp_smp_finish,
+	.cpus_done		= cmp_cpus_done,
+	.boot_secondary		= cmp_boot_secondary,
+	.smp_setup		= cmp_smp_setup,
+	.prepare_cpus		= cmp_prepare_cpus,
+};
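
smp-cmp.c attaches to the generic MIPS SMP layer only through the plat_smp_ops table at the bottom of the file; smp.c calls through mp_ops rather than hard-coding one bring-up method, which is what lets CMP, SMVP and SMTC coexist in one tree. A reduced model of that dispatch pattern, with illustrative names rather than the kernel's:

#include <stdio.h>

/* Illustrative stand-in for struct plat_smp_ops: boot code selects one
 * table (CMP, SMVP, SMTC, ...) and everything else calls indirectly. */
struct smp_ops {
	void (*send_ipi_single)(int cpu, unsigned int action);
	void (*boot_secondary)(int cpu);
};

static void cmp_ipi(int cpu, unsigned int action)
{
	printf("GIC IPI to cpu %d, action %u\n", cpu, action);
}

static void cmp_boot(int cpu)
{
	printf("amon releases cpu %d\n", cpu);
}

static struct smp_ops cmp_ops = {
	.send_ipi_single = cmp_ipi,
	.boot_secondary	 = cmp_boot,
};

static struct smp_ops *mp_ops;	/* set once, early in boot */

int main(void)
{
	mp_ops = &cmp_ops;		/* register_smp_ops() equivalent */
	mp_ops->boot_secondary(1);
	mp_ops->send_ipi_single(1, 2);
	return 0;
}
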
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e9c393a41775..87a1816c1f45 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -36,63 +36,7 @@
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
 
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-	printk("vpe %d tc %d\n", vpe, tc);
-
-	settc(tc);
-
-	printk("  c0 status  0x%lx\n", read_vpe_c0_status());
-	printk("  vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk("  vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
-	printk("  tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-	printk("  tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk("  tcbind 0x%lx\n", read_tc_c0_tcbind());
-	printk("  tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-static void ipi_resched_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-	smp_call_function_interrupt();
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
 {
 	write_vpe_c0_status(
 		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -109,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
 	write_vpe_c0_count(read_c0_count());
 }
 
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	unsigned int ncpu)
 {
 	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -135,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
 	if (tc != 0)
-		smp_copy_vpe_config();
+		smvp_copy_vpe_config();
 
 	return ncpu;
 }
 
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
 {
 	unsigned long tmp;
 
@@ -207,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
 
 static void __cpuinit vsmp_init_secondary(void)
 {
-	/* Enable per-cpu interrupts */
+	extern int gic_present;
 
 	/* This is Malta specific: IPI,performance and timer inetrrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+	if (gic_present)
+		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+				 STATUSF_IP6 | STATUSF_IP7);
+	else
+		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+				 STATUSF_IP6 | STATUSF_IP7);
 }
 
 static void __cpuinit vsmp_smp_finish(void)
 {
+	/* CDFIXME: remove this? */
 	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
 #ifdef CONFIG_MIPS_MT_FPAFF
@@ -276,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
  */
 static void __init vsmp_smp_setup(void)
 {
@@ -309,8 +258,8 @@ static void __init vsmp_smp_setup(void)
 	for (tc = 0; tc <= ntc; tc++) {
 		settc(tc);
 
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+		smvp_tc_init(tc, mvpconf0);
+		ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
 	}
 
 	/* Release config state */
@@ -324,21 +273,6 @@ static void __init vsmp_smp_setup(void)
 static void __init vsmp_prepare_cpus(unsigned int max_cpus)
 {
 	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
-
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
-
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 struct plat_smp_ops vsmp_smp_ops = {
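
Two things happen in the smp-mt.c diff: the per-CPU IPI plumbing moves out to platform code, and vsmp_init_secondary() now programs the Status interrupt mask through change_c0_status(), picking IP3/IP4 when a GIC is present and the classic IP0/IP1 software interrupts otherwise. change_c0_status(change, new) is a field update, clear-then-set, not a blind OR; a sketch of its effect on a fake register (mask values mirror asm/mipsregs.h, and the helper is modelled here as a plain function):

#include <stdio.h>

#define ST0_IM		0x0000ff00u	/* interrupt-mask field of c0_status */
#define STATUSF_IP3	0x00000800u
#define STATUSF_IP4	0x00001000u

static unsigned int c0_status = 0x0000ff01u;	/* pretend register */

/* change_c0_status(change, new): clear the 'change' field, then OR in
 * the new bits -- other Status fields are left untouched. */
static void change_c0_status(unsigned int change, unsigned int newbits)
{
	c0_status = (c0_status & ~change) | newbits;
}

int main(void)
{
	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4);
	printf("status=%08x\n", c0_status);	/* prints 00001801 */
	return 0;
}
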
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a80..33780cc61ce9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -35,6 +35,7 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/r4k-timer.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
 
 	cpu_set(cpu, cpu_callin_map);
 
+	synchronise_count_slave();
+
 	cpu_idle();
 }
 
@@ -287,6 +290,7 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	mp_ops->cpus_done();
+	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 4705b3c11e5f..3e863186cd22 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -331,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	/* In general, all TCs should have the same cpu_data indications */
 	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
 	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
-	if (cpu_data[0].cputype == CPU_34K)
+	if (cpu_data[0].cputype == CPU_34K ||
+	    cpu_data[0].cputype == CPU_1004K)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
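
The smtc.c change extends the shared-FPU rule to the 1004K: secondary TCs copy CPU0's feature indications and then have MIPS_CPU_FPU cleared, so only TC/CPU 0 claims the single FPU context. A toy illustration of that copy-then-strip step (the option bit value is made up for the sketch):

#include <stdio.h>

#define MIPS_CPU_FPU 0x00000001	/* made-up option bit for the sketch */

struct cpuinfo {
	unsigned long options;
};

int main(void)
{
	struct cpuinfo cpu_data[2] = { { MIPS_CPU_FPU } };

	/* Secondary TC inherits CPU0's indications... */
	cpu_data[1] = cpu_data[0];
	/* ...but gives up the FPU: TC/CPU 0 stays sole owner. */
	cpu_data[1].options &= ~MIPS_CPU_FPU;

	printf("cpu0 fpu=%d cpu1 fpu=%d\n",
	       !!(cpu_data[0].options & MIPS_CPU_FPU),
	       !!(cpu_data[1].options & MIPS_CPU_FPU));
	return 0;
}
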
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000000..9021108eb9c1
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,159 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * value. This can cause a small timewarp for CPU0. All other CPU's should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ *
+ * FIXME: broken for SMTC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/r4k-timer.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+#include <asm/cpumask.h>
+#include <asm/mipsregs.h>
+
+static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON	100
+#define NR_LOOPS	5
+
+void __init synchronise_count_master(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int nslaves;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	pr_info("Checking COUNT synchronization across %u CPUs: ",
+		num_online_cpus());
+
+	local_irq_save(flags);
+
+	/*
+	 * Notify the slaves that it's time to start
+	 */
+	atomic_set(&count_start_flag, 1);
+	smp_wmb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	/*
+	 * We loop a few times to get a primed instruction cache,
+	 * then the last pass is more or less synchronised and
+	 * the master and slaves each set their cycle counters to a known
+	 * value all at once. This reduces the chance of having random offsets
+	 * between the processors, and guarantees that the maximum
+	 * delay between the cycle counters is never bigger than
+	 * the latency of information-passing (cachelines) between
+	 * two CPUs.
+	 */
+
+	nslaves = num_online_cpus()-1;
+	for (i = 0; i < NR_LOOPS; i++) {
+		/* slaves loop on '!= ncpus' */
+		while (atomic_read(&count_count_start) != nslaves)
+			mb();
+		atomic_set(&count_count_stop, 0);
+		smp_wmb();
+
+		/* this lets the slaves write their count register */
+		atomic_inc(&count_count_start);
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		/*
+		 * Wait for all slaves to leave the synchronization point:
+		 */
+		while (atomic_read(&count_count_stop) != nslaves)
+			mb();
+		atomic_set(&count_count_start, 0);
+		smp_wmb();
+		atomic_inc(&count_count_stop);
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+
+	/*
+	 * i386 code reported the skew here, but the
+	 * count registers were almost certainly out of sync
+	 * so no point in alarming people
+	 */
+	printk("done.\n");
+}
+
+void __init synchronise_count_slave(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int ncpus;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	local_irq_save(flags);
+
+	/*
+	 * Not every cpu is online at the time this gets called,
+	 * so we first wait for the master to say everyone is ready
+	 */
+
+	while (!atomic_read(&count_start_flag))
+		mb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	ncpus = num_online_cpus();
+	for (i = 0; i < NR_LOOPS; i++) {
+		atomic_inc(&count_count_start);
+		while (atomic_read(&count_count_start) != ncpus)
+			mb();
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		atomic_inc(&count_count_stop);
+		while (atomic_read(&count_count_stop) != ncpus)
+			mb();
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+}
+#undef NR_LOOPS
+#endif
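
sync-r4k.c is the reason for the smp.c hooks earlier in this patch: each slave calls synchronise_count_slave() before entering cpu_idle(), and the boot CPU runs synchronise_count_master() once all CPUs are up. The rendezvous uses two counters, count_count_start and count_count_stop, so that on the final of NR_LOOPS iterations every CPU writes its Count register within a cache-line hop of the others. A userspace model of the same handshake with C11 atomics and pthreads (the Count writes become plain stores; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_SLAVES 2
#define NR_LOOPS  5

static atomic_int count_start_flag;
static atomic_int count_count_start;
static atomic_int count_count_stop;
static int count[NR_SLAVES + 1];	/* simulated c0_count per "cpu" */

static void *slave(void *arg)
{
	int cpu = (int)(long)arg, i;
	int ncpus = NR_SLAVES + 1;

	while (!atomic_load(&count_start_flag))
		;				/* wait for the master's go */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&count_count_start, 1);
		while (atomic_load(&count_count_start) != ncpus)
			;			/* slaves loop on '!= ncpus' */
		if (i == NR_LOOPS - 1)
			count[cpu] = 12345;	/* write_c0_count(initcount) */
		atomic_fetch_add(&count_count_stop, 1);
		while (atomic_load(&count_count_stop) != ncpus)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_SLAVES];
	int i;

	for (i = 0; i < NR_SLAVES; i++)
		pthread_create(&t[i], NULL, slave, (void *)(long)(i + 1));

	atomic_store(&count_start_flag, 1);	/* notify the slaves */

	for (i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&count_count_start) != NR_SLAVES)
			;
		atomic_store(&count_count_stop, 0);
		atomic_fetch_add(&count_count_start, 1);  /* release slaves */
		if (i == NR_LOOPS - 1)
			count[0] = 12345;
		while (atomic_load(&count_count_stop) != NR_SLAVES)
			;
		atomic_store(&count_count_start, 0);
		atomic_fetch_add(&count_count_stop, 1);
	}

	for (i = 0; i < NR_SLAVES; i++)
		pthread_join(t[i], NULL);
	for (i = 0; i <= NR_SLAVES; i++)
		printf("cpu%d count=%d\n", i, count[i]);
	return 0;
}
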
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d51f4e98455f..88185cd40c3b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/bootmem.h>
 #include <linux/interrupt.h>
+#include <linux/ptrace.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
-	unsigned long *sp = (unsigned long *)reg29;
+	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 	unsigned long addr;
 
 	printk("Call Trace:");
 #ifdef CONFIG_KALLSYMS
 	printk("\n");
 #endif
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (__kernel_text_address(addr))
-			print_ip_sym(addr);
+#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
+	if (IS_KVA01(sp)) {
+		while (!kstack_end(sp)) {
+			addr = *sp++;
+			if (__kernel_text_address(addr))
+				print_ip_sym(addr);
+		}
+		printk("\n");
 	}
-	printk("\n");
 }
 
 #ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
 static void show_code(unsigned int __user *pc)
 {
 	long i;
+	unsigned short __user *pc16 = NULL;
 
 	printk("\nCode:");
 
+	if ((unsigned long)pc & 1)
+		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 	for(i = -3 ; i < 6 ; i++) {
 		unsigned int insn;
-		if (__get_user(insn, pc + i)) {
+		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 			printk(" (Bad address in epc)\n");
 			break;
 		}
-		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 	}
 }
 
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
 
 void show_registers(const struct pt_regs *regs)
 {
+	const int field = 2 * sizeof(unsigned long);
+
 	__show_regs(regs);
 	print_modules();
-	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
-	       current->comm, task_pid_nr(current), current_thread_info(), current);
+	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+	       current->comm, current->pid, current_thread_info(), current,
+	       field, current_thread_info()->tp_value);
+	if (cpu_has_userlocal) {
+		unsigned long tls;
+
+		tls = read_c0_userlocal();
+		if (tls != current_thread_info()->tp_value)
+			printk("*HwTLS: %0*lx\n", field, tls);
+	}
+
 	show_stacktrace(current, regs);
 	show_code((unsigned int __user *) regs->cp0_epc);
 	printk("\n");
@@ -985,6 +1003,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
 	      (regs->cp0_cause & 0x7f) >> 2);
 }
 
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+	l1parity = 0;
+	return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+	l2parity = 0;
+	return 1;
+}
+__setup("nol2par", nol2parity);
+
 /*
  * Some MIPS CPUs can enable/disable for cache parity detection, but do
  * it different ways.
@@ -994,6 +1027,62 @@ static inline void parity_protection_init(void)
 	switch (current_cpu_type()) {
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_74K:
+	case CPU_1004K:
+		{
+#define ERRCTL_PE	0x80000000
+#define ERRCTL_L2P	0x00800000
+			unsigned long errctl;
+			unsigned int l1parity_present, l2parity_present;
+
+			errctl = read_c0_ecc();
+			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+			/* probe L1 parity support */
+			write_c0_ecc(errctl | ERRCTL_PE);
+			back_to_back_c0_hazard();
+			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+			/* probe L2 parity support */
+			write_c0_ecc(errctl|ERRCTL_L2P);
+			back_to_back_c0_hazard();
+			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+			if (l1parity_present && l2parity_present) {
+				if (l1parity)
+					errctl |= ERRCTL_PE;
+				if (l1parity ^ l2parity)
+					errctl |= ERRCTL_L2P;
+			} else if (l1parity_present) {
+				if (l1parity)
+					errctl |= ERRCTL_PE;
+			} else if (l2parity_present) {
+				if (l2parity)
+					errctl |= ERRCTL_L2P;
+			} else {
+				/* No parity available */
+			}
+
+			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+			write_c0_ecc(errctl);
+			back_to_back_c0_hazard();
+			errctl = read_c0_ecc();
+			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+			if (l1parity_present)
+				printk(KERN_INFO "Cache parity protection %sabled\n",
+				       (errctl & ERRCTL_PE) ? "en" : "dis");
+
+			if (l2parity_present) {
+				if (l1parity_present && l1parity)
+					errctl ^= ERRCTL_L2P;
+				printk(KERN_INFO "L2 cache parity protection %sabled\n",
+				       (errctl & ERRCTL_L2P) ? "en" : "dis");
+			}
+		}
+		break;
+
 	case CPU_5KC:
 		write_c0_ecc(0x80000000);
 		back_to_back_c0_hazard();
@@ -1353,7 +1442,6 @@ void __cpuinit per_cpu_trap_init(void)
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);
 
-#ifdef CONFIG_CPU_MIPSR2
 	if (cpu_has_mips_r2) {
 		unsigned int enable = 0x0000000f;
 
@@ -1362,7 +1450,6 @@
 
 		write_c0_hwrena(enable);
 	}
-#endif
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
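
The traps.c parity code discovers what the core supports by writing each ErrCtl enable bit and reading it back after a hazard barrier: a bit that sticks is implemented, a bit that reads back zero is not. Only then do the nol1par/nol2par command-line switches and the L1/L2 interaction rules decide what stays set. The write-then-readback probe idiom, modelled against a fake register that only implements the L1 bit:

#include <stdio.h>

#define ERRCTL_PE	0x80000000u	/* L1 parity enable */
#define ERRCTL_L2P	0x00800000u	/* L2 parity enable */

/* Fake ErrCtl: this "hardware" implements only the L1 bit, so writes
 * to ERRCTL_L2P are silently dropped, as on a core without L2 parity. */
static unsigned int hw_errctl;

static void write_c0_ecc(unsigned int v)
{
	hw_errctl = v & ERRCTL_PE;	/* only PE is wired up */
}

static unsigned int read_c0_ecc(void)
{
	return hw_errctl;
}

int main(void)
{
	unsigned int errctl = read_c0_ecc() & ~(ERRCTL_PE | ERRCTL_L2P);
	unsigned int l1_present, l2_present;

	/* Probe: set the bit, read it back; it sticks only if implemented.
	 * (The kernel puts a back_to_back_c0_hazard() between the two.) */
	write_c0_ecc(errctl | ERRCTL_PE);
	l1_present = read_c0_ecc() & ERRCTL_PE;

	write_c0_ecc(errctl | ERRCTL_L2P);
	l2_present = read_c0_ecc() & ERRCTL_L2P;

	printf("L1 parity %sabled, L2 parity %sabled\n",
	       l1_present ? "en" : "dis", l2_present ? "en" : "dis");
	return 0;
}
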