author     Ralf Baechle <ralf@linux-mips.org>   2006-04-05 04:45:45 -0400
committer  Ralf Baechle <ralf@linux-mips.org>   2006-04-18 22:14:28 -0400
commit     41c594ab65fc89573af296d192aa5235d09717ab (patch)
tree       562462512a320f386bdf49eabfbb26bb3ee761fa
parent     2600990e640e3bef29ed89d565864cf16ee83833 (diff)
[MIPS] MT: Improved multithreading support.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/Kconfig | 6
-rw-r--r--  arch/mips/kernel/Makefile | 4
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 3
-rw-r--r--  arch/mips/kernel/entry.S | 34
-rw-r--r--  arch/mips/kernel/gdb-low.S | 24
-rw-r--r--  arch/mips/kernel/gdb-stub.c | 61
-rw-r--r--  arch/mips/kernel/genex.S | 29
-rw-r--r--  arch/mips/kernel/head.S | 57
-rw-r--r--  arch/mips/kernel/i8259.c | 4
-rw-r--r--  arch/mips/kernel/irq-msc01.c | 9
-rw-r--r--  arch/mips/kernel/irq.c | 13
-rw-r--r--  arch/mips/kernel/mips-mt.c | 449
-rw-r--r--  arch/mips/kernel/process.c | 10
-rw-r--r--  arch/mips/kernel/ptrace.c | 14
-rw-r--r--  arch/mips/kernel/ptrace32.c | 14
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 34
-rw-r--r--  arch/mips/kernel/smp-mt.c (renamed from arch/mips/kernel/smp_mt.c) | 33
-rw-r--r--  arch/mips/kernel/smp.c | 10
-rw-r--r--  arch/mips/kernel/smtc-asm.S | 130
-rw-r--r--  arch/mips/kernel/smtc-proc.c | 93
-rw-r--r--  arch/mips/kernel/smtc.c | 1322
-rw-r--r--  arch/mips/kernel/time.c | 3
-rw-r--r--  arch/mips/kernel/traps.c | 124
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mips/mips-boards/generic/init.c | 1
-rw-r--r--  arch/mips/mips-boards/generic/time.c | 68
-rw-r--r--  arch/mips/mips-boards/malta/Makefile | 1
-rw-r--r--  arch/mips/mips-boards/malta/malta_int.c | 11
-rw-r--r--  arch/mips/mips-boards/malta/malta_smp.c | 128
-rw-r--r--  arch/mips/mips-boards/sim/cmdline.c | 59
-rw-r--r--  arch/mips/mips-boards/sim/sim_cmdline.c | 6
-rw-r--r--  arch/mips/mips-boards/sim/sim_smp.c | 14
-rw-r--r--  arch/mips/mm/fault.c | 13
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 85
-rw-r--r--  arch/mips/mm/tlbex.c | 83
-rw-r--r--  include/asm-mips/asmmacro.h | 47
-rw-r--r--  include/asm-mips/cpu-info.h | 10
-rw-r--r--  include/asm-mips/hazards.h | 2
-rw-r--r--  include/asm-mips/interrupt.h | 65
-rw-r--r--  include/asm-mips/irq.h | 29
-rw-r--r--  include/asm-mips/mips_mt.h | 15
-rw-r--r--  include/asm-mips/mipsmtregs.h | 11
-rw-r--r--  include/asm-mips/mipsregs.h | 133
-rw-r--r--  include/asm-mips/mmu_context.h | 112
-rw-r--r--  include/asm-mips/processor.h | 6
-rw-r--r--  include/asm-mips/ptrace.h | 4
-rw-r--r--  include/asm-mips/r4kcache.h | 128
-rw-r--r--  include/asm-mips/smtc.h | 55
-rw-r--r--  include/asm-mips/smtc_ipi.h | 118
-rw-r--r--  include/asm-mips/smtc_proc.h | 23
-rw-r--r--  include/asm-mips/stackframe.h | 187
51 files changed, 3720 insertions, 176 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a7bac0459f99..f9be549645ea 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1447,6 +1447,10 @@ choice
1447 prompt "MIPS MT options" 1447 prompt "MIPS MT options"
1448 depends on MIPS_MT 1448 depends on MIPS_MT
1449 1449
1450config MIPS_MT_SMTC
1451 bool "SMTC: Use all TCs on all VPEs for SMP"
1452 select SMP
1453
1450config MIPS_MT_SMP 1454config MIPS_MT_SMP
1451 bool "Use 1 TC on each available VPE for SMP" 1455 bool "Use 1 TC on each available VPE for SMP"
1452 select SMP 1456 select SMP
@@ -1613,7 +1617,7 @@ source "mm/Kconfig"
1613 1617
1614config SMP 1618config SMP
1615 bool "Multi-Processing support" 1619 bool "Multi-Processing support"
1616 depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP 1620 depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
1617 ---help--- 1621 ---help---
1618 This enables support for systems with more than one CPU. If you have 1622 This enables support for systems with more than one CPU. If you have
1619 a system with only one CPU, like most personal computers, say N. If 1623 a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9ec01de81c04..34e8a256765c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
34 34
35obj-$(CONFIG_SMP) += smp.o 35obj-$(CONFIG_SMP) += smp.o
36 36
37obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o 37obj-$(CONFIG_MIPS_MT) += mips-mt.o
38obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
39obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
38 40
39obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o 41obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
40obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 42obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6b03c773be..92b28b674d6f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
69 offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); 69 offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
70 offset("#define PT_STATUS ", struct pt_regs, cp0_status); 70 offset("#define PT_STATUS ", struct pt_regs, cp0_status);
71 offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); 71 offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
72#ifdef CONFIG_MIPS_MT_SMTC
73 offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
74#endif /* CONFIG_MIPS_MT_SMTC */
72 size("#define PT_SIZE ", struct pt_regs); 75 size("#define PT_SIZE ", struct pt_regs);
73 linefeed; 76 linefeed;
74} 77}
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index b1939a486d2c..d101d2fb24ca 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -17,6 +17,9 @@
17#include <asm/isadep.h> 17#include <asm/isadep.h>
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
19#include <asm/war.h> 19#include <asm/war.h>
20#ifdef CONFIG_MIPS_MT_SMTC
21#include <asm/mipsmtregs.h>
22#endif
20 23
21#ifdef CONFIG_PREEMPT 24#ifdef CONFIG_PREEMPT
22 .macro preempt_stop 25 .macro preempt_stop
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
75 bnez t0, syscall_exit_work 78 bnez t0, syscall_exit_work
76 79
77FEXPORT(restore_all) # restore full frame 80FEXPORT(restore_all) # restore full frame
81#ifdef CONFIG_MIPS_MT_SMTC
82/* Detect and execute deferred IPI "interrupts" */
83 move a0,sp
84 jal deferred_smtc_ipi
85/* Re-arm any temporarily masked interrupts not explicitly "acked" */
86 mfc0 v0, CP0_TCSTATUS
87 ori v1, v0, TCSTATUS_IXMT
88 mtc0 v1, CP0_TCSTATUS
89 andi v0, TCSTATUS_IXMT
90 ehb
91 mfc0 t0, CP0_TCCONTEXT
92 DMT 9 # dmt t1
93 jal mips_ihb
94 mfc0 t2, CP0_STATUS
95 andi t3, t0, 0xff00
96 or t2, t2, t3
97 mtc0 t2, CP0_STATUS
98 ehb
99 andi t1, t1, VPECONTROL_TE
100 beqz t1, 1f
101 EMT
1021:
103 mfc0 v1, CP0_TCSTATUS
104	 /* We set IXMT above, XOR should clear it here */
105 xori v1, v1, TCSTATUS_IXMT
106 or v1, v0, v1
107 mtc0 v1, CP0_TCSTATUS
108 ehb
109 xor t0, t0, t3
110 mtc0 t0, CP0_TCCONTEXT
111#endif /* CONFIG_MIPS_MT_SMTC */
78 .set noat 112 .set noat
79 RESTORE_TEMP 113 RESTORE_TEMP
80 RESTORE_AT 114 RESTORE_AT
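
The SMTC additions to restore_all above replay, via deferred_smtc_ipi(), any IPIs that arrived while this TC had IXMT set, and then re-arm interrupt lines that were temporarily parked but never explicitly "acked". The queue type itself (struct smtc_ipi_q, declared in include/asm-mips/smtc_ipi.h per the diffstat) is not shown in this excerpt, so the following is only a toy, self-contained C model of "queue while masked, drain on the return path"; the field names are made up and locking is omitted.

#include <stdio.h>

struct toy_ipi {                 /* stands in for struct smtc_ipi   */
	int type;                /* e.g. reschedule vs. call-function */
	struct toy_ipi *next;
};

struct toy_ipi_q {               /* stands in for struct smtc_ipi_q */
	struct toy_ipi *head, *tail;
};

static void ipi_enqueue(struct toy_ipi_q *q, struct toy_ipi *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

static struct toy_ipi *ipi_dequeue(struct toy_ipi_q *q)
{
	struct toy_ipi *p = q->head;

	if (p && !(q->head = p->next))
		q->tail = NULL;
	return p;
}

int main(void)
{
	struct toy_ipi_q q = { NULL, NULL };
	struct toy_ipi a = { 1 }, b = { 2 };
	struct toy_ipi *p;

	/* "IPIs" land while the target TC has IXMT set ... */
	ipi_enqueue(&q, &a);
	ipi_enqueue(&q, &b);
	/* ... and the return path drains them, as restore_all does above. */
	while ((p = ipi_dequeue(&q)))
		printf("replaying deferred IPI of type %d\n", p->type);
	return 0;
}
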
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 235ad9f6bd35..10f28fb9f008 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -283,11 +283,33 @@
283 */ 283 */
284 284
2853: 2853:
286#ifdef CONFIG_MIPS_MT_SMTC
287 /* Read-modify write of Status must be atomic */
288 mfc0 t2, CP0_TCSTATUS
289 ori t1, t2, TCSTATUS_IXMT
290 mtc0 t1, CP0_TCSTATUS
291 andi t2, t2, TCSTATUS_IXMT
292 ehb
293 DMT 9 # dmt t1
294 jal mips_ihb
295 nop
296#endif /* CONFIG_MIPS_MT_SMTC */
286 mfc0 t0, CP0_STATUS 297 mfc0 t0, CP0_STATUS
287 ori t0, 0x1f 298 ori t0, 0x1f
288 xori t0, 0x1f 299 xori t0, 0x1f
289 mtc0 t0, CP0_STATUS 300 mtc0 t0, CP0_STATUS
290 301#ifdef CONFIG_MIPS_MT_SMTC
302 andi t1, t1, VPECONTROL_TE
303 beqz t1, 9f
304 nop
305 EMT # emt
3069:
307 mfc0 t1, CP0_TCSTATUS
308 xori t1, t1, TCSTATUS_IXMT
309 or t1, t1, t2
310 mtc0 t1, CP0_TCSTATUS
311 ehb
312#endif /* CONFIG_MIPS_MT_SMTC */
291 LONG_L v0, GDB_FR_STATUS(sp) 313 LONG_L v0, GDB_FR_STATUS(sp)
292 LONG_L v1, GDB_FR_EPC(sp) 314 LONG_L v1, GDB_FR_EPC(sp)
293 mtc0 v0, CP0_STATUS 315 mtc0 v0, CP0_STATUS
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d4f88e0af24c..6ecbdc1fefd1 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
140#include <asm/system.h> 140#include <asm/system.h>
141#include <asm/gdb-stub.h> 141#include <asm/gdb-stub.h>
142#include <asm/inst.h> 142#include <asm/inst.h>
143#include <asm/smp.h>
143 144
144/* 145/*
145 * external low-level support routines 146 * external low-level support routines
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
669 local_irq_restore(flags); 670 local_irq_restore(flags);
670} 671}
671 672
673/*
 674 * GDB stub needs to call kgdb_wait on all processors with interrupts
 675 * disabled, so it uses its own special variant.
676 */
677static int kgdb_smp_call_kgdb_wait(void)
678{
679#ifdef CONFIG_SMP
680 struct call_data_struct data;
681 int i, cpus = num_online_cpus() - 1;
682 int cpu = smp_processor_id();
683
684 /*
685 * Can die spectacularly if this CPU isn't yet marked online
686 */
687 BUG_ON(!cpu_online(cpu));
688
689 if (!cpus)
690 return 0;
691
692 if (spin_is_locked(&smp_call_lock)) {
693 /*
694 * Some other processor is trying to make us do something
695 * but we're not going to respond... give up
696 */
697 return -1;
698 }
699
700 /*
701 * We will continue here, accepting the fact that
702 * the kernel may deadlock if another CPU attempts
703 * to call smp_call_function now...
704 */
705
706 data.func = kgdb_wait;
707 data.info = NULL;
708 atomic_set(&data.started, 0);
709 data.wait = 0;
710
711 spin_lock(&smp_call_lock);
712 call_data = &data;
713 mb();
714
715 /* Send a message to all other CPUs and wait for them to respond */
716 for (i = 0; i < NR_CPUS; i++)
717 if (cpu_online(i) && i != cpu)
718 core_send_ipi(i, SMP_CALL_FUNCTION);
719
720 /* Wait for response */
721 /* FIXME: lock-up detection, backtrace on lock-up */
722 while (atomic_read(&data.started) != cpus)
723 barrier();
724
725 call_data = NULL;
726 spin_unlock(&smp_call_lock);
727#endif
728
729 return 0;
730}
672 731
673/* 732/*
674 * This function does all command processing for interfacing to gdb. It 733 * This function does all command processing for interfacing to gdb. It
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
718 /* 777 /*
719 * force other cpus to enter kgdb 778 * force other cpus to enter kgdb
720 */ 779 */
721 smp_call_function(kgdb_wait, NULL, 0, 0); 780 kgdb_smp_call_kgdb_wait();
722 781
723 /* 782 /*
724 * If we're in breakpoint() increment the PC 783 * If we're in breakpoint() increment the PC
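
kgdb_smp_call_kgdb_wait() above clones the smp_call_function() handshake so the debugger can herd the other CPUs even with interrupts disabled: publish a call_data descriptor, send an IPI to every other online CPU, then spin until each one has bumped the started counter. Below is a user-space sketch of just that handshake, using threads and C11 atomics in place of CPUs and IPIs; the structure and names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

struct call_data_struct {
	void (*func)(void *);
	void *info;
	atomic_int started;
};

static struct call_data_struct *_Atomic call_data;

static void kgdb_wait_stub(void *arg)
{
	(void)arg;	/* the real kgdb_wait() parks the CPU here */
}

static void *ipi_target(void *unused)
{
	struct call_data_struct *cd;

	(void)unused;
	while ((cd = call_data) == NULL)
		;				/* wait for the "IPI"          */
	atomic_fetch_add(&cd->started, 1);	/* check in, then do the work  */
	cd->func(cd->info);
	return NULL;
}

int main(void)
{
	static struct call_data_struct data = { kgdb_wait_stub, NULL, 0 };
	pthread_t tid[NCPUS - 1];
	int i;

	for (i = 0; i < NCPUS - 1; i++)
		pthread_create(&tid[i], NULL, ipi_target, NULL);

	call_data = &data;			/* "send" the IPIs             */
	while (atomic_load(&data.started) != NCPUS - 1)
		;				/* initiator spins, as above   */
	call_data = NULL;			/* mirror the patch's cleanup  */

	printf("all %d other CPUs checked in\n", NCPUS - 1);
	for (i = 0; i < NCPUS - 1; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
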
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 04418b6568b0..ff7af369f286 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13 13
14#include <asm/asm.h> 14#include <asm/asm.h>
15#include <asm/asmmacro.h>
15#include <asm/cacheops.h> 16#include <asm/cacheops.h>
16#include <asm/regdef.h> 17#include <asm/regdef.h>
17#include <asm/fpregdef.h> 18#include <asm/fpregdef.h>
@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
171 SAVE_AT 172 SAVE_AT
172 .set push 173 .set push
173 .set noreorder 174 .set noreorder
175#ifdef CONFIG_MIPS_MT_SMTC
176 /*
177 * To keep from blindly blocking *all* interrupts
178 * during service by SMTC kernel, we also want to
179 * pass the IM value to be cleared.
180 */
181EXPORT(except_vec_vi_mori)
182 ori a0, $0, 0
183#endif /* CONFIG_MIPS_MT_SMTC */
174EXPORT(except_vec_vi_lui) 184EXPORT(except_vec_vi_lui)
175 lui v0, 0 /* Patched */ 185 lui v0, 0 /* Patched */
176 j except_vec_vi_handler 186 j except_vec_vi_handler
@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
187NESTED(except_vec_vi_handler, 0, sp) 197NESTED(except_vec_vi_handler, 0, sp)
188 SAVE_TEMP 198 SAVE_TEMP
189 SAVE_STATIC 199 SAVE_STATIC
200#ifdef CONFIG_MIPS_MT_SMTC
201 /*
202 * SMTC has an interesting problem that interrupts are level-triggered,
203 * and the CLI macro will clear EXL, potentially causing a duplicate
204 * interrupt service invocation. So we need to clear the associated
205 * IM bit of Status prior to doing CLI, and restore it after the
206 * service routine has been invoked - we must assume that the
207 * service routine will have cleared the state, and any active
 208	 * level represents a new or otherwise unserviced event...
209 */
210 mfc0 t1, CP0_STATUS
211 and t0, a0, t1
212 mfc0 t2, CP0_TCCONTEXT
213 or t0, t0, t2
214 mtc0 t0, CP0_TCCONTEXT
215 xor t1, t1, t0
216 mtc0 t1, CP0_STATUS
217 ehb
218#endif /* CONFIG_MIPS_MT_SMTC */
190 CLI 219 CLI
191 move a0, sp 220 move a0, sp
192 jalr v0 221 jalr v0
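
The genex.S comment above (together with the i8259 and msc01 EOI hooks further down) describes the SMTC scheme for level-triggered interrupts: before EXL is cleared, the line's IM bit is moved out of Status and parked in TCContext, and it is restored either by the controller's mask-and-ack handler (via irq_hwmask[]) or, as a backstop, by the restore_all code in entry.S. Here is a tiny self-contained C model of that bit bookkeeping, with plain variables standing in for the CP0 registers and all per-TC state and hazard handling omitted.

#include <stdio.h>

static unsigned int status_im;	/* stands in for the Status.IM bits  */
static unsigned int tccontext;	/* stands in for the TCContext stash */

/* Vectored-interrupt prologue: park the line so that clearing EXL
 * cannot immediately re-take the still-asserted level interrupt. */
static void vi_prologue(unsigned int im_bit)
{
	tccontext |= status_im & im_bit;
	status_im &= ~im_bit;
}

/* Controller EOI (or the entry.S backstop): put the IM bit back. */
static void irq_eoi(unsigned int im_bit)
{
	status_im |= tccontext & im_bit;
	tccontext &= ~im_bit;
}

int main(void)
{
	status_im = 0xff00;		/* all eight IM bits enabled        */
	vi_prologue(1 << 10);		/* take the line wired to IM2/IP2   */
	printf("in service: IM=%04x parked=%04x\n", status_im, tccontext);
	irq_eoi(1 << 10);
	printf("after EOI:  IM=%04x parked=%04x\n", status_im, tccontext);
	return 0;
}
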
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2e9122a4213a..bdf6f6eff721 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -18,6 +18,7 @@
18#include <linux/threads.h> 18#include <linux/threads.h>
19 19
20#include <asm/asm.h> 20#include <asm/asm.h>
21#include <asm/asmmacro.h>
21#include <asm/regdef.h> 22#include <asm/regdef.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/mipsregs.h> 24#include <asm/mipsregs.h>
@@ -82,12 +83,33 @@
82 */ 83 */
83 .macro setup_c0_status set clr 84 .macro setup_c0_status set clr
84 .set push 85 .set push
86#ifdef CONFIG_MIPS_MT_SMTC
87 /*
88 * For SMTC, we need to set privilege and disable interrupts only for
89 * the current TC, using the TCStatus register.
90 */
91 mfc0 t0, CP0_TCSTATUS
92 /* Fortunately CU 0 is in the same place in both registers */
93 /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
94 li t1, ST0_CU0 | 0x08001c00
95 or t0, t1
96 /* Clear TKSU, leave IXMT */
97 xori t0, 0x00001800
98 mtc0 t0, CP0_TCSTATUS
99 ehb
100 /* We need to leave the global IE bit set, but clear EXL...*/
101 mfc0 t0, CP0_STATUS
102 or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
103 xor t0, ST0_EXL | ST0_ERL | \clr
104 mtc0 t0, CP0_STATUS
105#else
85 mfc0 t0, CP0_STATUS 106 mfc0 t0, CP0_STATUS
86 or t0, ST0_CU0|\set|0x1f|\clr 107 or t0, ST0_CU0|\set|0x1f|\clr
87 xor t0, 0x1f|\clr 108 xor t0, 0x1f|\clr
88 mtc0 t0, CP0_STATUS 109 mtc0 t0, CP0_STATUS
89 .set noreorder 110 .set noreorder
90 sll zero,3 # ehb 111 sll zero,3 # ehb
112#endif
91 .set pop 113 .set pop
92 .endm 114 .endm
93 115
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
134 156
135 ARC64_TWIDDLE_PC 157 ARC64_TWIDDLE_PC
136 158
159#ifdef CONFIG_MIPS_MT_SMTC
160 /*
161 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
162 * We still need to enable interrupts globally in Status,
163 * and clear EXL/ERL.
164 *
165 * TCContext is used to track interrupt levels under
166 * service in SMTC kernel. Clear for boot TC before
167 * allowing any interrupts.
168 */
169 mtc0 zero, CP0_TCCONTEXT
170
171 mfc0 t0, CP0_STATUS
172 ori t0, t0, 0xff1f
173 xori t0, t0, 0x001e
174 mtc0 t0, CP0_STATUS
175#endif /* CONFIG_MIPS_MT_SMTC */
176
137 PTR_LA t0, __bss_start # clear .bss 177 PTR_LA t0, __bss_start # clear .bss
138 LONG_S zero, (t0) 178 LONG_S zero, (t0)
139 PTR_LA t1, __bss_stop - LONGSIZE 179 PTR_LA t1, __bss_stop - LONGSIZE
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
166 * function after setting up the stack and gp registers. 206 * function after setting up the stack and gp registers.
167 */ 207 */
168NESTED(smp_bootstrap, 16, sp) 208NESTED(smp_bootstrap, 16, sp)
209#ifdef CONFIG_MIPS_MT_SMTC
210 /*
211 * Read-modify-writes of Status must be atomic, and this
212 * is one case where CLI is invoked without EXL being
213 * necessarily set. The CLI and setup_c0_status will
214 * in fact be redundant for all but the first TC of
215 * each VPE being booted.
216 */
217 DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
218 jal mips_ihb
219#endif /* CONFIG_MIPS_MT_SMTC */
169 setup_c0_status_sec 220 setup_c0_status_sec
170 smp_slave_setup 221 smp_slave_setup
222#ifdef CONFIG_MIPS_MT_SMTC
223 andi t2, t2, VPECONTROL_TE
224 beqz t2, 2f
225 EMT # emt
2262:
227#endif /* CONFIG_MIPS_MT_SMTC */
171 j start_secondary 228 j start_secondary
172 END(smp_bootstrap) 229 END(smp_bootstrap)
173#endif /* CONFIG_SMP */ 230#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b974ac9057f6..2125ba5f1d9b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -187,6 +187,10 @@ handle_real_irq:
187 outb(cached_21,0x21); 187 outb(cached_21,0x21);
188 outb(0x60+irq,0x20); /* 'Specific EOI' to master */ 188 outb(0x60+irq,0x20); /* 'Specific EOI' to master */
189 } 189 }
190#ifdef CONFIG_MIPS_MT_SMTC
191 if (irq_hwmask[irq] & ST0_IM)
192 set_c0_status(irq_hwmask[irq] & ST0_IM);
193#endif /* CONFIG_MIPS_MT_SMTC */
190 spin_unlock_irqrestore(&i8259A_lock, flags); 194 spin_unlock_irqrestore(&i8259A_lock, flags);
191 return; 195 return;
192 196
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 3f653c7cfbf3..97ebdc754b9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
76 mask_msc_irq(irq); 76 mask_msc_irq(irq);
77 if (!cpu_has_veic) 77 if (!cpu_has_veic)
78 MSCIC_WRITE(MSC01_IC_EOI, 0); 78 MSCIC_WRITE(MSC01_IC_EOI, 0);
79#ifdef CONFIG_MIPS_MT_SMTC
80 /* This actually needs to be a call into platform code */
81 if (irq_hwmask[irq] & ST0_IM)
82 set_c0_status(irq_hwmask[irq] & ST0_IM);
83#endif /* CONFIG_MIPS_MT_SMTC */
79} 84}
80 85
81/* 86/*
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
92 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); 97 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
93 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); 98 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
94 } 99 }
100#ifdef CONFIG_MIPS_MT_SMTC
101 if (irq_hwmask[irq] & ST0_IM)
102 set_c0_status(irq_hwmask[irq] & ST0_IM);
103#endif /* CONFIG_MIPS_MT_SMTC */
95} 104}
96 105
97/* 106/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index e0efc4f2f93e..3dce742e716f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
38 38
39atomic_t irq_err_count; 39atomic_t irq_err_count;
40 40
41#ifdef CONFIG_MIPS_MT_SMTC
42/*
43 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
44 * in do_IRQ. These are passed in setup_irq_smtc() and stored
45 * in this table.
46 */
47unsigned long irq_hwmask[NR_IRQS];
48#endif /* CONFIG_MIPS_MT_SMTC */
49
41#undef do_IRQ 50#undef do_IRQ
42 51
43/* 52/*
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
49{ 58{
50 irq_enter(); 59 irq_enter();
51 60
61 __DO_IRQ_SMTC_HOOK();
52 __do_IRQ(irq, regs); 62 __do_IRQ(irq, regs);
53 63
54 irq_exit(); 64 irq_exit();
@@ -129,6 +139,9 @@ void __init init_IRQ(void)
129 irq_desc[i].depth = 1; 139 irq_desc[i].depth = 1;
130 irq_desc[i].handler = &no_irq_type; 140 irq_desc[i].handler = &no_irq_type;
131 spin_lock_init(&irq_desc[i].lock); 141 spin_lock_init(&irq_desc[i].lock);
142#ifdef CONFIG_MIPS_MT_SMTC
143 irq_hwmask[i] = 0;
144#endif /* CONFIG_MIPS_MT_SMTC */
132 } 145 }
133 146
134 arch_init_irq(); 147 arch_init_irq();
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000000..02237a685ec7
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,449 @@
1/*
2 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
3 * Copyright (C) 2005 Mips Technologies, Inc
4 */
5
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/cpumask.h>
9#include <linux/interrupt.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/mmu_context.h>
17#include <asm/smp.h>
18#include <asm/mipsmtregs.h>
19#include <asm/r4kcache.h>
20#include <asm/cacheflush.h>
21
22/*
23 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
24 */
25
26cpumask_t mt_fpu_cpumask;
27
28#ifdef CONFIG_MIPS_MT_FPAFF
29
30#include <linux/cpu.h>
31#include <linux/delay.h>
32#include <asm/uaccess.h>
33
34unsigned long mt_fpemul_threshold = 0;
35
36/*
37 * Replacement functions for the sys_sched_setaffinity() and
38 * sys_sched_getaffinity() system calls, so that we can integrate
39 * FPU affinity with the user's requested processor affinity.
40 * This code is 98% identical with the sys_sched_setaffinity()
41 * and sys_sched_getaffinity() system calls, and should be
42 * updated when kernel/sched.c changes.
43 */
44
45/*
46 * find_process_by_pid - find a process with a matching PID value.
47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
48 * cloned here.
49 */
50static inline task_t *find_process_by_pid(pid_t pid)
51{
52 return pid ? find_task_by_pid(pid) : current;
53}
54
55
56/*
57 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
58 */
59asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
60 unsigned long __user *user_mask_ptr)
61{
62 cpumask_t new_mask;
63 cpumask_t effective_mask;
64 int retval;
65 task_t *p;
66
67 if (len < sizeof(new_mask))
68 return -EINVAL;
69
70 if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
71 return -EFAULT;
72
73 lock_cpu_hotplug();
74 read_lock(&tasklist_lock);
75
76 p = find_process_by_pid(pid);
77 if (!p) {
78 read_unlock(&tasklist_lock);
79 unlock_cpu_hotplug();
80 return -ESRCH;
81 }
82
83 /*
84 * It is not safe to call set_cpus_allowed with the
85 * tasklist_lock held. We will bump the task_struct's
86 * usage count and drop tasklist_lock before invoking
87 * set_cpus_allowed.
88 */
89 get_task_struct(p);
90
91 retval = -EPERM;
92 if ((current->euid != p->euid) && (current->euid != p->uid) &&
93 !capable(CAP_SYS_NICE)) {
94 read_unlock(&tasklist_lock);
95 goto out_unlock;
96 }
97
98 /* Record new user-specified CPU set for future reference */
99 p->thread.user_cpus_allowed = new_mask;
100
101 /* Unlock the task list */
102 read_unlock(&tasklist_lock);
103
104 /* Compute new global allowed CPU set if necessary */
105 if( (p->thread.mflags & MF_FPUBOUND)
106 && cpus_intersects(new_mask, mt_fpu_cpumask)) {
107 cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
108 retval = set_cpus_allowed(p, effective_mask);
109 } else {
110 p->thread.mflags &= ~MF_FPUBOUND;
111 retval = set_cpus_allowed(p, new_mask);
112 }
113
114
115out_unlock:
116 put_task_struct(p);
117 unlock_cpu_hotplug();
118 return retval;
119}
120
121/*
122 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
123 */
124asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
125 unsigned long __user *user_mask_ptr)
126{
127 unsigned int real_len;
128 cpumask_t mask;
129 int retval;
130 task_t *p;
131
132 real_len = sizeof(mask);
133 if (len < real_len)
134 return -EINVAL;
135
136 lock_cpu_hotplug();
137 read_lock(&tasklist_lock);
138
139 retval = -ESRCH;
140 p = find_process_by_pid(pid);
141 if (!p)
142 goto out_unlock;
143
144 retval = 0;
145
146 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
147
148out_unlock:
149 read_unlock(&tasklist_lock);
150 unlock_cpu_hotplug();
151 if (retval)
152 return retval;
153 if (copy_to_user(user_mask_ptr, &mask, real_len))
154 return -EFAULT;
155 return real_len;
156}
157
158#endif /* CONFIG_MIPS_MT_FPAFF */
159
160/*
161 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 162 * Takes an argument which is taken to be a pre-call MVPControl value.
163 */
164
165void mips_mt_regdump(unsigned long mvpctl)
166{
167 unsigned long flags;
168 unsigned long vpflags;
169 unsigned long mvpconf0;
170 int nvpe;
171 int ntc;
172 int i;
173 int tc;
174 unsigned long haltval;
175 unsigned long tcstatval;
176#ifdef CONFIG_MIPS_MT_SMTC
177 void smtc_soft_dump(void);
 178#endif /* CONFIG_MIPS_MT_SMTC */
179
180 local_irq_save(flags);
181 vpflags = dvpe();
182 printk("=== MIPS MT State Dump ===\n");
183 printk("-- Global State --\n");
184 printk(" MVPControl Passed: %08lx\n", mvpctl);
185 printk(" MVPControl Read: %08lx\n", vpflags);
186 printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
187 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
188 ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
189 printk("-- per-VPE State --\n");
190 for(i = 0; i < nvpe; i++) {
191 for(tc = 0; tc < ntc; tc++) {
192 settc(tc);
193 if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
194 printk(" VPE %d\n", i);
195 printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
196 printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
197 printk(" VPE%d.Status : %08lx\n",
198 i, read_vpe_c0_status());
199 printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
200 printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
201 printk(" VPE%d.Config7 : %08lx\n",
202 i, read_vpe_c0_config7());
203 break; /* Next VPE */
204 }
205 }
206 }
207 printk("-- per-TC State --\n");
208 for(tc = 0; tc < ntc; tc++) {
209 settc(tc);
210 if(read_tc_c0_tcbind() == read_c0_tcbind()) {
211 /* Are we dumping ourself? */
212 haltval = 0; /* Then we're not halted, and mustn't be */
213 tcstatval = flags; /* And pre-dump TCStatus is flags */
214 printk(" TC %d (current TC with VPE EPC above)\n", tc);
215 } else {
216 haltval = read_tc_c0_tchalt();
217 write_tc_c0_tchalt(1);
218 tcstatval = read_tc_c0_tcstatus();
219 printk(" TC %d\n", tc);
220 }
221 printk(" TCStatus : %08lx\n", tcstatval);
222 printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
223 printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
224 printk(" TCHalt : %08lx\n", haltval);
225 printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
226 if (!haltval)
227 write_tc_c0_tchalt(0);
228 }
229#ifdef CONFIG_MIPS_MT_SMTC
230 smtc_soft_dump();
 231#endif /* CONFIG_MIPS_MT_SMTC */
232 printk("===========================\n");
233 evpe(vpflags);
234 local_irq_restore(flags);
235}
236
237static int mt_opt_norps = 0;
238static int mt_opt_rpsctl = -1;
239static int mt_opt_nblsu = -1;
240static int mt_opt_forceconfig7 = 0;
241static int mt_opt_config7 = -1;
242
243static int __init rps_disable(char *s)
244{
245 mt_opt_norps = 1;
246 return 1;
247}
248__setup("norps", rps_disable);
249
250static int __init rpsctl_set(char *str)
251{
252 get_option(&str, &mt_opt_rpsctl);
253 return 1;
254}
255__setup("rpsctl=", rpsctl_set);
256
257static int __init nblsu_set(char *str)
258{
259 get_option(&str, &mt_opt_nblsu);
260 return 1;
261}
262__setup("nblsu=", nblsu_set);
263
264static int __init config7_set(char *str)
265{
266 get_option(&str, &mt_opt_config7);
267 mt_opt_forceconfig7 = 1;
268 return 1;
269}
270__setup("config7=", config7_set);
271
272/* Experimental cache flush control parameters that should go away some day */
273int mt_protiflush = 0;
274int mt_protdflush = 0;
275int mt_n_iflushes = 1;
276int mt_n_dflushes = 1;
277
278static int __init set_protiflush(char *s)
279{
280 mt_protiflush = 1;
281 return 1;
282}
283__setup("protiflush", set_protiflush);
284
285static int __init set_protdflush(char *s)
286{
287 mt_protdflush = 1;
288 return 1;
289}
290__setup("protdflush", set_protdflush);
291
292static int __init niflush(char *s)
293{
294 get_option(&s, &mt_n_iflushes);
295 return 1;
296}
297__setup("niflush=", niflush);
298
299static int __init ndflush(char *s)
300{
301 get_option(&s, &mt_n_dflushes);
302 return 1;
303}
304__setup("ndflush=", ndflush);
305#ifdef CONFIG_MIPS_MT_FPAFF
306static int fpaff_threshold = -1;
307
308static int __init fpaff_thresh(char *str)
309{
310 get_option(&str, &fpaff_threshold);
311 return 1;
312}
313
314__setup("fpaff=", fpaff_thresh);
315#endif /* CONFIG_MIPS_MT_FPAFF */
316
317static unsigned int itc_base = 0;
318
319static int __init set_itc_base(char *str)
320{
321 get_option(&str, &itc_base);
322 return 1;
323}
324
325__setup("itcbase=", set_itc_base);
326
327void mips_mt_set_cpuoptions(void)
328{
329 unsigned int oconfig7 = read_c0_config7();
330 unsigned int nconfig7 = oconfig7;
331
332 if (mt_opt_norps) {
 333		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
334 }
335 if (mt_opt_rpsctl >= 0) {
336 printk("34K return prediction stack override set to %d.\n",
337 mt_opt_rpsctl);
338 if (mt_opt_rpsctl)
339 nconfig7 |= (1 << 2);
340 else
341 nconfig7 &= ~(1 << 2);
342 }
343 if (mt_opt_nblsu >= 0) {
344 printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
345 if (mt_opt_nblsu)
346 nconfig7 |= (1 << 5);
347 else
348 nconfig7 &= ~(1 << 5);
349 }
350 if (mt_opt_forceconfig7) {
351 printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
352 nconfig7 = mt_opt_config7;
353 }
354 if (oconfig7 != nconfig7) {
355 __asm__ __volatile("sync");
356 write_c0_config7(nconfig7);
357 ehb ();
358 printk("Config7: 0x%08x\n", read_c0_config7());
359 }
360
361 /* Report Cache management debug options */
362 if (mt_protiflush)
363 printk("I-cache flushes single-threaded\n");
364 if (mt_protdflush)
365 printk("D-cache flushes single-threaded\n");
366 if (mt_n_iflushes != 1)
367 printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
368 if (mt_n_dflushes != 1)
369 printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
370
371#ifdef CONFIG_MIPS_MT_FPAFF
372 /* FPU Use Factor empirically derived from experiments on 34K */
373#define FPUSEFACTOR 333
374
375 if (fpaff_threshold >= 0) {
376 mt_fpemul_threshold = fpaff_threshold;
377 } else {
378 mt_fpemul_threshold =
379 (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
380 }
381 printk("FPU Affinity set after %ld emulations\n",
382 mt_fpemul_threshold);
383#endif /* CONFIG_MIPS_MT_FPAFF */
384
385 if (itc_base != 0) {
386 /*
387 * Configure ITC mapping. This code is very
388 * specific to the 34K core family, which uses
389 * a special mode bit ("ITC") in the ErrCtl
390 * register to enable access to ITC control
391 * registers via cache "tag" operations.
392 */
393 unsigned long ectlval;
394 unsigned long itcblkgrn;
395
396 /* ErrCtl register is known as "ecc" to Linux */
397 ectlval = read_c0_ecc();
398 write_c0_ecc(ectlval | (0x1 << 26));
399 ehb();
400#define INDEX_0 (0x80000000)
401#define INDEX_8 (0x80000008)
402 /* Read "cache tag" for Dcache pseudo-index 8 */
403 cache_op(Index_Load_Tag_D, INDEX_8);
404 ehb();
405 itcblkgrn = read_c0_dtaglo();
406 itcblkgrn &= 0xfffe0000;
407 /* Set for 128 byte pitch of ITC cells */
408 itcblkgrn |= 0x00000c00;
409 /* Stage in Tag register */
410 write_c0_dtaglo(itcblkgrn);
411 ehb();
412 /* Write out to ITU with CACHE op */
413 cache_op(Index_Store_Tag_D, INDEX_8);
414 /* Now set base address, and turn ITC on with 0x1 bit */
415 write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
416 ehb();
417 /* Write out to ITU with CACHE op */
418 cache_op(Index_Store_Tag_D, INDEX_0);
419 write_c0_ecc(ectlval);
420 ehb();
421 printk("Mapped %ld ITC cells starting at 0x%08x\n",
422 ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
423 }
424}
425
426/*
427 * Function to protect cache flushes from concurrent execution
428 * depends on MP software model chosen.
429 */
430
431void mt_cflush_lockdown(void)
432{
433#ifdef CONFIG_MIPS_MT_SMTC
434 void smtc_cflush_lockdown(void);
435
436 smtc_cflush_lockdown();
437#endif /* CONFIG_MIPS_MT_SMTC */
438 /* FILL IN VSMP and AP/SP VERSIONS HERE */
439}
440
441void mt_cflush_release(void)
442{
443#ifdef CONFIG_MIPS_MT_SMTC
444 void smtc_cflush_release(void);
445
446 smtc_cflush_release();
447#endif /* CONFIG_MIPS_MT_SMTC */
448 /* FILL IN VSMP and AP/SP VERSIONS HERE */
449}
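
The default mt_fpemul_threshold computed above falls out of the BogoMIPS-style calibration: loops_per_jiffy/(500000/HZ) is roughly the BogoMIPS figure, so the default is about FPUSEFACTOR * BogoMIPS / HZ floating-point emulation exceptions before a task is considered FPU-bound and restricted to FPU-equipped VPEs. A quick check of the arithmetic with assumed numbers (the HZ and loops_per_jiffy values below are illustrative, not taken from any particular board):

#include <stdio.h>

#define FPUSEFACTOR 333		/* empirically derived constant from mips-mt.c */

int main(void)
{
	unsigned long hz = 100;			/* assumed tick rate        */
	unsigned long loops_per_jiffy = 250000;	/* assumed: ~50 BogoMIPS    */
	unsigned long threshold =
		(FPUSEFACTOR * (loops_per_jiffy / (500000 / hz))) / hz;

	printf("BogoMIPS ~ %lu, mt_fpemul_threshold ~ %lu emulations\n",
	       loops_per_jiffy / (500000 / hz), threshold);
	return 0;
}
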
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c66db5e5ab62..8b393df460a2 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,6 +41,10 @@
41#include <asm/elf.h> 41#include <asm/elf.h>
42#include <asm/isadep.h> 42#include <asm/isadep.h>
43#include <asm/inst.h> 43#include <asm/inst.h>
44#ifdef CONFIG_MIPS_MT_SMTC
45#include <asm/mipsmtregs.h>
46extern void smtc_idle_loop_hook(void);
47#endif /* CONFIG_MIPS_MT_SMTC */
44 48
45/* 49/*
46 * The idle thread. There's no useful work to be done, so just try to conserve 50 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
51{ 55{
52 /* endless idle loop with no priority at all */ 56 /* endless idle loop with no priority at all */
53 while (1) { 57 while (1) {
54 while (!need_resched()) 58 while (!need_resched()) {
59#ifdef CONFIG_MIPS_MT_SMTC
60 smtc_idle_loop_hook();
61#endif /* CONFIG_MIPS_MT_SMTC */
55 if (cpu_wait) 62 if (cpu_wait)
56 (*cpu_wait)(); 63 (*cpu_wait)();
64 }
57 preempt_enable_no_resched(); 65 preempt_enable_no_resched();
58 schedule(); 66 schedule();
59 preempt_disable(); 67 preempt_disable();
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f838b36cc765..f3106d0771b0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
248 break; 248 break;
249 case FPC_EIR: { /* implementation / version register */ 249 case FPC_EIR: { /* implementation / version register */
250 unsigned int flags; 250 unsigned int flags;
251#ifdef CONFIG_MIPS_MT_SMTC
252 unsigned int irqflags;
253 unsigned int mtflags;
254#endif /* CONFIG_MIPS_MT_SMTC */
251 255
252 if (!cpu_has_fpu) 256 if (!cpu_has_fpu)
253 break; 257 break;
254 258
259#ifdef CONFIG_MIPS_MT_SMTC
260 /* Read-modify-write of Status must be atomic */
261 local_irq_save(irqflags);
262 mtflags = dmt();
263#endif /* CONFIG_MIPS_MT_SMTC */
264
255 preempt_disable(); 265 preempt_disable();
256 if (cpu_has_mipsmt) { 266 if (cpu_has_mipsmt) {
257 unsigned int vpflags = dvpe(); 267 unsigned int vpflags = dvpe();
@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
266 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); 276 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
267 write_c0_status(flags); 277 write_c0_status(flags);
268 } 278 }
279#ifdef CONFIG_MIPS_MT_SMTC
280 emt(mtflags);
281 local_irq_restore(irqflags);
282#endif /* CONFIG_MIPS_MT_SMTC */
269 preempt_enable(); 283 preempt_enable();
270 break; 284 break;
271 } 285 }
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 0d5cf97af727..8704dc0496ea 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
173 break; 173 break;
174 case FPC_EIR: { /* implementation / version register */ 174 case FPC_EIR: { /* implementation / version register */
175 unsigned int flags; 175 unsigned int flags;
176#ifdef CONFIG_MIPS_MT_SMTC
177 unsigned int irqflags;
178 unsigned int mtflags;
179#endif /* CONFIG_MIPS_MT_SMTC */
176 180
177 if (!cpu_has_fpu) { 181 if (!cpu_has_fpu) {
178 tmp = 0; 182 tmp = 0;
179 break; 183 break;
180 } 184 }
181 185
186#ifdef CONFIG_MIPS_MT_SMTC
187 /* Read-modify-write of Status must be atomic */
188 local_irq_save(irqflags);
189 mtflags = dmt();
190#endif /* CONFIG_MIPS_MT_SMTC */
191
182 preempt_disable(); 192 preempt_disable();
183 if (cpu_has_mipsmt) { 193 if (cpu_has_mipsmt) {
184 unsigned int vpflags = dvpe(); 194 unsigned int vpflags = dvpe();
@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
193 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); 203 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
194 write_c0_status(flags); 204 write_c0_status(flags);
195 } 205 }
206#ifdef CONFIG_MIPS_MT_SMTC
207 emt(mtflags);
208 local_irq_restore(irqflags);
209#endif /* CONFIG_MIPS_MT_SMTC */
196 preempt_enable(); 210 preempt_enable();
197 break; 211 break;
198 } 212 }
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd19a9c8..0b1b54acee9f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -88,7 +88,18 @@
88 88
89 PTR_ADDIU t0, $28, _THREAD_SIZE - 32 89 PTR_ADDIU t0, $28, _THREAD_SIZE - 32
90 set_saved_sp t0, t1, t2 90 set_saved_sp t0, t1, t2
91 91#ifdef CONFIG_MIPS_MT_SMTC
92 /* Read-modify-writes of Status must be atomic on a VPE */
93 mfc0 t2, CP0_TCSTATUS
94 ori t1, t2, TCSTATUS_IXMT
95 mtc0 t1, CP0_TCSTATUS
96 andi t2, t2, TCSTATUS_IXMT
97 ehb
98 DMT 8 # dmt t0
99 move t1,ra
100 jal mips_ihb
101 move ra,t1
102#endif /* CONFIG_MIPS_MT_SMTC */
92 mfc0 t1, CP0_STATUS /* Do we really need this? */ 103 mfc0 t1, CP0_STATUS /* Do we really need this? */
93 li a3, 0xff01 104 li a3, 0xff01
94 and t1, a3 105 and t1, a3
@@ -97,6 +108,18 @@
97 and a2, a3 108 and a2, a3
98 or a2, t1 109 or a2, t1
99 mtc0 a2, CP0_STATUS 110 mtc0 a2, CP0_STATUS
111#ifdef CONFIG_MIPS_MT_SMTC
112 ehb
113 andi t0, t0, VPECONTROL_TE
114 beqz t0, 1f
115 emt
1161:
117 mfc0 t1, CP0_TCSTATUS
118 xori t1, t1, TCSTATUS_IXMT
119 or t1, t1, t2
120 mtc0 t1, CP0_TCSTATUS
121 ehb
122#endif /* CONFIG_MIPS_MT_SMTC */
100 move v0, a0 123 move v0, a0
101 jr ra 124 jr ra
102 END(resume) 125 END(resume)
@@ -131,10 +154,19 @@ LEAF(_restore_fp)
131#define FPU_DEFAULT 0x00000000 154#define FPU_DEFAULT 0x00000000
132 155
133LEAF(_init_fpu) 156LEAF(_init_fpu)
157#ifdef CONFIG_MIPS_MT_SMTC
158 /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
159 mfc0 t0, CP0_TCSTATUS
160 /* Bit position is the same for Status, TCStatus */
161 li t1, ST0_CU1
162 or t0, t1
163 mtc0 t0, CP0_TCSTATUS
164#else /* Normal MIPS CU1 enable */
134 mfc0 t0, CP0_STATUS 165 mfc0 t0, CP0_STATUS
135 li t1, ST0_CU1 166 li t1, ST0_CU1
136 or t0, t1 167 or t0, t1
137 mtc0 t0, CP0_STATUS 168 mtc0 t0, CP0_STATUS
169#endif /* CONFIG_MIPS_MT_SMTC */
138 fpu_enable_hazard 170 fpu_enable_hazard
139 171
140 li t1, FPU_DEFAULT 172 li t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp-mt.c
index 993b8bf56aaf..19b8e4b31b79 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -1,8 +1,4 @@
1/* 1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * Elizabeth Clarke (beth@mips.com)
5 *
6 * This program is free software; you can distribute it and/or modify it 2 * This program is free software; you can distribute it and/or modify it
7 * under the terms of the GNU General Public License (Version 2) as 3 * under the terms of the GNU General Public License (Version 2) as
8 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
@@ -16,6 +12,10 @@
16 * with this program; if not, write to the Free Software Foundation, Inc., 12 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
18 * 14 *
15 * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
16 * Elizabeth Clarke (beth@mips.com)
17 * Ralf Baechle (ralf@linux-mips.org)
18 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
19 */ 19 */
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25 25
26#include <asm/atomic.h> 26#include <asm/atomic.h>
27#include <asm/cacheflush.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
29#include <asm/system.h> 30#include <asm/system.h>
@@ -33,8 +34,8 @@
33#include <asm/time.h> 34#include <asm/time.h>
34#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
35#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
36#include <asm/cacheflush.h> 37#include <asm/mips_mt.h>
37#include <asm/mips-boards/maltaint.h> 38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
38 39
39#define MIPS_CPU_IPI_RESCHED_IRQ 0 40#define MIPS_CPU_IPI_RESCHED_IRQ 0
40#define MIPS_CPU_IPI_CALL_IRQ 1 41#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
66 if (!cpu_has_mipsmt) 67 if (!cpu_has_mipsmt)
67 return; 68 return;
68 69
70 /* Enable VPC */
69 set_c0_mvpcontrol(MVPCONTROL_VPC); 71 set_c0_mvpcontrol(MVPCONTROL_VPC);
70 72
71 back_to_back_c0_hazard(); 73 back_to_back_c0_hazard();
@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
106 108
107static void ipi_resched_dispatch (struct pt_regs *regs) 109static void ipi_resched_dispatch (struct pt_regs *regs)
108{ 110{
109 do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); 111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
110} 112}
111 113
112static void ipi_call_dispatch (struct pt_regs *regs) 114static void ipi_call_dispatch (struct pt_regs *regs)
113{ 115{
114 do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs); 116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
115} 117}
116 118
117irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs) 119irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -155,6 +157,8 @@ void plat_smp_setup(void)
155 dvpe(); 157 dvpe();
156 dmt(); 158 dmt();
157 159
160 mips_mt_set_cpuoptions();
161
158 /* Put MVPE's into 'configuration state' */ 162 /* Put MVPE's into 'configuration state' */
159 set_c0_mvpcontrol(MVPCONTROL_VPC); 163 set_c0_mvpcontrol(MVPCONTROL_VPC);
160 164
@@ -189,11 +193,13 @@ void plat_smp_setup(void)
189 193
190 if (i != 0) { 194 if (i != 0) {
191 write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); 195 write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
192 write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
193 196
194 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 197 /* set config to be the same as vpe0, particularly kseg0 coherency alg */
195 write_vpe_c0_config( read_c0_config()); 198 write_vpe_c0_config( read_c0_config());
196 199
200 /* make sure there are no software interrupts pending */
201 write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
202
197 /* Propagate Config7 */ 203 /* Propagate Config7 */
198 write_vpe_c0_config7(read_c0_config7()); 204 write_vpe_c0_config7(read_c0_config7());
199 } 205 }
@@ -233,16 +239,16 @@ void plat_smp_setup(void)
233 /* We'll wait until starting the secondaries before starting MVPE */ 239 /* We'll wait until starting the secondaries before starting MVPE */
234 240
235 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 241 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
242}
236 243
244void __init plat_prepare_cpus(unsigned int max_cpus)
245{
237 /* set up ipi interrupts */ 246 /* set up ipi interrupts */
238 if (cpu_has_vint) { 247 if (cpu_has_vint) {
239 set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); 248 set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
240 set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 249 set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
241 } 250 }
242}
243 251
244void __init plat_prepare_cpus(unsigned int max_cpus)
245{
246 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 252 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
247 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 253 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
248 254
@@ -287,7 +293,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
287 /* global pointer */ 293 /* global pointer */
288 write_tc_gpr_gp((unsigned long)gp); 294 write_tc_gpr_gp((unsigned long)gp);
289 295
290 flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1)); 296 flush_icache_range((unsigned long)gp,
297 (unsigned long)(gp + sizeof(struct thread_info)));
291 298
292 /* finally out of configuration and into chaos */ 299 /* finally out of configuration and into chaos */
293 clear_c0_mvpcontrol(MVPCONTROL_VPC); 300 clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 72a287aa937e..d42f358754ad 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,10 @@
38#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
39#include <asm/smp.h> 39#include <asm/smp.h>
40 40
41#ifdef CONFIG_MIPS_MT_SMTC
42#include <asm/mipsmtregs.h>
43#endif /* CONFIG_MIPS_MT_SMTC */
44
41cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ 45cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
42volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 46volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
43cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ 47cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
85{ 89{
86 unsigned int cpu; 90 unsigned int cpu;
87 91
92#ifdef CONFIG_MIPS_MT_SMTC
93 /* Only do cpu_probe for first TC of CPU */
94 if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
95#endif /* CONFIG_MIPS_MT_SMTC */
88 cpu_probe(); 96 cpu_probe();
89 cpu_report(); 97 cpu_report();
90 per_cpu_trap_init(); 98 per_cpu_trap_init();
@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
179 if (wait) 187 if (wait)
180 while (atomic_read(&data.finished) != cpus) 188 while (atomic_read(&data.finished) != cpus)
181 barrier(); 189 barrier();
190 call_data = NULL;
182 spin_unlock(&smp_call_lock); 191 spin_unlock(&smp_call_lock);
183 192
184 return 0; 193 return 0;
185} 194}
186 195
196
187void smp_call_function_interrupt(void) 197void smp_call_function_interrupt(void)
188{ 198{
189 void (*func) (void *info) = call_data->func; 199 void (*func) (void *info) = call_data->func;
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644
index 000000000000..c9d65196d917
--- /dev/null
+++ b/arch/mips/kernel/smtc-asm.S
@@ -0,0 +1,130 @@
1/*
2 * Assembly Language Functions for MIPS MT SMTC support
3 */
4
5/*
6 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
7
8#include <asm/regdef.h>
9#include <asm/asmmacro.h>
10#include <asm/stackframe.h>
11#include <asm/stackframe.h>
12
13/*
14 * "Software Interrupt" linkage.
15 *
16 * This is invoked when an "Interrupt" is sent from one TC to another,
 17 * where the TC to be interrupted is halted, has its Restart address
18 * and Status values saved by the "remote control" thread, then modified
 19 * to cause execution to begin here, in kernel mode. This code then
20 * disguises the TC state as that of an exception and transfers
21 * control to the general exception or vectored interrupt handler.
22 */
23 .set noreorder
24
25/*
26The __smtc_ipi_vector would use k0 and k1 as temporaries and
271) Set EXL (this is per-VPE, so this can't be done by proxy!)
282) Restore the K/CU and IXMT bits to the pre "exception" state
29 (EXL means no interrupts and access to the kernel map).
303) Set EPC to be the saved value of TCRestart.
314) Jump to the exception handler entry point passed by the sender.
32
33CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
34*/
35
36/*
37 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
38 * state of pre-halt thread, then save everything and call
 39 * through some function pointer to imaginary_exception, which
40 * will parse a register value or memory message queue to
41 * deliver things like interprocessor interrupts. On return
42 * from that function, jump to the global ret_from_irq code
43 * to invoke the scheduler and return as appropriate.
44 */
45
46#define PT_PADSLOT4 (PT_R0-8)
47#define PT_PADSLOT5 (PT_R0-4)
48
49 .text
50 .align 5
51FEXPORT(__smtc_ipi_vector)
52 .set noat
53 /* Disable thread scheduling to make Status update atomic */
54 DMT 27 # dmt k1
55 ehb
56 /* Set EXL */
57 mfc0 k0,CP0_STATUS
58 ori k0,k0,ST0_EXL
59 mtc0 k0,CP0_STATUS
60 ehb
61 /* Thread scheduling now inhibited by EXL. Restore TE state. */
62 andi k1,k1,VPECONTROL_TE
63 beqz k1,1f
64 emt
651:
66 /*
67 * The IPI sender has put some information on the anticipated
68 * kernel stack frame. If we were in user mode, this will be
69 * built above the saved kernel SP. If we were already in the
70 * kernel, it will be built above the current CPU SP.
71 *
72 * Were we in kernel mode, as indicated by CU0?
73 */
74 sll k1,k0,3
75 .set noreorder
76 bltz k1,2f
77 move k1,sp
78 .set reorder
79 /*
80 * If previously in user mode, set CU0 and use kernel stack.
81 */
82 li k1,ST0_CU0
83 or k1,k1,k0
84 mtc0 k1,CP0_STATUS
85 ehb
86 get_saved_sp
87 /* Interrupting TC will have pre-set values in slots in the new frame */
882: subu k1,k1,PT_SIZE
89 /* Load TCStatus Value */
90 lw k0,PT_TCSTATUS(k1)
91 /* Write it to TCStatus to restore CU/KSU/IXMT state */
92 mtc0 k0,$2,1
93 ehb
94 lw k0,PT_EPC(k1)
95 mtc0 k0,CP0_EPC
 96	 /* SAVE_ALL will redundantly recompute the SP, but use it for now */
97 SAVE_ALL
98 CLI
99 move a0,sp
 100	 /* Function to be invoked is passed in stack pad slot 5 */
101 lw t0,PT_PADSLOT5(sp)
102 /* Argument from sender passed in stack pad slot 4 */
103 lw a1,PT_PADSLOT4(sp)
104 jalr t0
105 nop
106 j ret_from_irq
107 nop
108
109/*
110 * Called from idle loop to provoke processing of queued IPIs
111 * First IPI message in queue passed as argument.
112 */
113
114LEAF(self_ipi)
115 /* Before anything else, block interrupts */
116 mfc0 t0,CP0_TCSTATUS
117 ori t1,t0,TCSTATUS_IXMT
118 mtc0 t1,CP0_TCSTATUS
119 ehb
120 /* We know we're in kernel mode, so prepare stack frame */
121 subu t1,sp,PT_SIZE
122 sw ra,PT_EPC(t1)
123 sw a0,PT_PADSLOT4(t1)
124 la t2,ipi_decode
125 sw t2,PT_PADSLOT5(t1)
126 /* Save pre-disable value of TCStatus */
127 sw t0,PT_TCSTATUS(t1)
128 j __smtc_ipi_vector
129 nop
130END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
new file mode 100644
index 000000000000..6f3709996172
--- /dev/null
+++ b/arch/mips/kernel/smtc-proc.c
@@ -0,0 +1,93 @@
1/*
2 * /proc hooks for SMTC kernel
3 * Copyright (C) 2005 Mips Technologies, Inc
4 */
5
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/cpumask.h>
9#include <linux/interrupt.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/mmu_context.h>
17#include <asm/smp.h>
18#include <asm/mipsregs.h>
19#include <asm/cacheflush.h>
20#include <linux/proc_fs.h>
21
22#include <asm/smtc_proc.h>
23
24/*
25 * /proc diagnostic and statistics hooks
26 */
27
28/*
29 * Statistics gathered
30 */
31unsigned long selfipis[NR_CPUS];
32
33struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
34
35static struct proc_dir_entry *smtc_stats;
36
37atomic_t smtc_fpu_recoveries;
38
39static int proc_read_smtc(char *page, char **start, off_t off,
40 int count, int *eof, void *data)
41{
42 int totalen = 0;
43 int len;
44 int i;
45 extern unsigned long ebase;
46
47 len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
48 totalen += len;
49 page += len;
50 len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
51 totalen += len;
52 page += len;
53 len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
54 totalen += len;
55 page += len;
56 len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
57 totalen += len;
58 page += len;
59 for (i=0; i < NR_CPUS; i++) {
60 len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
61 totalen += len;
62 page += len;
63 }
64 len = sprintf(page, "Self-IPIs by CPU:\n");
65 totalen += len;
66 page += len;
67 for(i = 0; i < NR_CPUS; i++) {
68 len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
69 totalen += len;
70 page += len;
71 }
72 len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
73 atomic_read(&smtc_fpu_recoveries));
74 totalen += len;
75 page += len;
76
77 return totalen;
78}
79
80void init_smtc_stats(void)
81{
82 int i;
83
84 for (i=0; i<NR_CPUS; i++) {
85 smtc_cpu_stats[i].timerints = 0;
86 smtc_cpu_stats[i].selfipis = 0;
87 }
88
89 atomic_set(&smtc_fpu_recoveries, 0);
90
91 smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
92 proc_read_smtc, NULL);
93}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
new file mode 100644
index 000000000000..2e8e52c135e6
--- /dev/null
+++ b/arch/mips/kernel/smtc.c
@@ -0,0 +1,1322 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */
2
3#include <linux/kernel.h>
4#include <linux/sched.h>
5#include <linux/cpumask.h>
6#include <linux/interrupt.h>
7
8#include <asm/cpu.h>
9#include <asm/processor.h>
10#include <asm/atomic.h>
11#include <asm/system.h>
12#include <asm/hardirq.h>
13#include <asm/hazards.h>
14#include <asm/mmu_context.h>
15#include <asm/smp.h>
16#include <asm/mipsregs.h>
17#include <asm/cacheflush.h>
18#include <asm/time.h>
19#include <asm/addrspace.h>
20#include <asm/smtc.h>
21#include <asm/smtc_ipi.h>
22#include <asm/smtc_proc.h>
23
24/*
25 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
26 */
27
28/*
29 * MIPSCPU_INT_BASE is identically defined in both
30 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
31 * but as yet there's no properly organized include structure that
32 * will ensure that the right *int.h file will be included for a
33 * given platform build.
34 */
35
36#define MIPSCPU_INT_BASE 16
37
38#define MIPS_CPU_IPI_IRQ 1
39
40#define LOCK_MT_PRA() \
41 local_irq_save(flags); \
42 mtflags = dmt()
43
44#define UNLOCK_MT_PRA() \
45 emt(mtflags); \
46 local_irq_restore(flags)
47
48#define LOCK_CORE_PRA() \
49 local_irq_save(flags); \
50 mtflags = dvpe()
51
52#define UNLOCK_CORE_PRA() \
53 evpe(mtflags); \
54 local_irq_restore(flags)
55
56/*
57 * Data structures purely associated with SMTC parallelism
58 */
59
60
61/*
62 * Table for tracking ASIDs whose lifetime is prolonged.
63 */
64
65asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
66
67/*
68 * Clock interrupt "latch" buffers, per "CPU"
69 */
70
71unsigned int ipi_timer_latch[NR_CPUS];
72
73/*
 74 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
75 */
76
77#define IPIBUF_PER_CPU 4
78
79struct smtc_ipi_q IPIQ[NR_CPUS];
80struct smtc_ipi_q freeIPIq;
81
82
83/* Forward declarations */
84
85void ipi_decode(struct pt_regs *, struct smtc_ipi *);
86void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
87void setup_cross_vpe_interrupts(void);
88void init_smtc_stats(void);
89
90/* Global SMTC Status */
91
92unsigned int smtc_status = 0;
93
94/* Boot command line configuration overrides */
95
96static int vpelimit = 0;
97static int tclimit = 0;
98static int ipibuffers = 0;
99static int nostlb = 0;
100static int asidmask = 0;
101unsigned long smtc_asid_mask = 0xff;
102
103static int __init maxvpes(char *str)
104{
105 get_option(&str, &vpelimit);
106 return 1;
107}
108
109static int __init maxtcs(char *str)
110{
111 get_option(&str, &tclimit);
112 return 1;
113}
114
115static int __init ipibufs(char *str)
116{
117 get_option(&str, &ipibuffers);
118 return 1;
119}
120
121static int __init stlb_disable(char *s)
122{
123 nostlb = 1;
124 return 1;
125}
126
127static int __init asidmask_set(char *str)
128{
129 get_option(&str, &asidmask);
130 switch(asidmask) {
131 case 0x1:
132 case 0x3:
133 case 0x7:
134 case 0xf:
135 case 0x1f:
136 case 0x3f:
137 case 0x7f:
138 case 0xff:
139 smtc_asid_mask = (unsigned long)asidmask;
140 break;
141 default:
142 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
143 }
144 return 1;
145}
146
147__setup("maxvpes=", maxvpes);
148__setup("maxtcs=", maxtcs);
149__setup("ipibufs=", ipibufs);
150__setup("nostlb", stlb_disable);
151__setup("asidmask=", asidmask_set);
152
153/* Enable additional debug checks before going into CPU idle loop */
154#define SMTC_IDLE_HOOK_DEBUG
155
156#ifdef SMTC_IDLE_HOOK_DEBUG
157
158static int hang_trig = 0;
159
160static int __init hangtrig_enable(char *s)
161{
162 hang_trig = 1;
163 return 1;
164}
165
166
167__setup("hangtrig", hangtrig_enable);
168
169#define DEFAULT_BLOCKED_IPI_LIMIT 32
170
171static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
172
173static int __init tintq(char *str)
174{
175 get_option(&str, &timerq_limit);
176 return 1;
177}
178
179__setup("tintq=", tintq);
180
181int imstuckcount[2][8];
182/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
183int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
184int tcnoprog[NR_CPUS];
185static atomic_t idle_hook_initialized = {0};
186static int clock_hang_reported[NR_CPUS];
187
188#endif /* SMTC_IDLE_HOOK_DEBUG */
189
 190/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
191
192void __init sanitize_tlb_entries(void)
193{
194 printk("Deprecated sanitize_tlb_entries() invoked\n");
195}
196
197
198/*
199 * Configure shared TLB - VPC configuration bit must be set by caller
200 */
201
202void smtc_configure_tlb(void)
203{
204 int i,tlbsiz,vpes;
205 unsigned long mvpconf0;
206 unsigned long config1val;
207
208 /* Set up ASID preservation table */
209 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
210 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
211 smtc_live_asid[vpes][i] = 0;
212 }
213 }
214 mvpconf0 = read_c0_mvpconf0();
215
216 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
217 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
218 /* If we have multiple VPEs, try to share the TLB */
219 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
220 /*
221 * If TLB sizing is programmable, shared TLB
222 * size is the total available complement.
223 * Otherwise, we have to take the sum of all
224 * static VPE TLB entries.
225 */
226 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
227 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
228 /*
229 * If there's more than one VPE, there had better
230 * be more than one TC, because we need one to bind
231 * to each VPE in turn to be able to read
232 * its configuration state!
233 */
234 settc(1);
235 /* Stop the TC from doing anything foolish */
236 write_tc_c0_tchalt(TCHALT_H);
237 mips_ihb();
238 /* No need to un-Halt - that happens later anyway */
239 for (i=0; i < vpes; i++) {
240 write_tc_c0_tcbind(i);
241 /*
242 * To be 100% sure we're really getting the right
243 * information, we exit the configuration state
244 * and do an IHB after each rebinding.
245 */
246 write_c0_mvpcontrol(
247 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
248 mips_ihb();
249 /*
250 * Only count if the MMU Type indicated is TLB
251 */
252 if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
253 config1val = read_vpe_c0_config1();
254 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
255 }
256
257 /* Put core back in configuration state */
258 write_c0_mvpcontrol(
259 read_c0_mvpcontrol() | MVPCONTROL_VPC );
260 mips_ihb();
261 }
262 }
263 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
264
265 /*
266 * Set up kernel data structures to use the software total,
267 * rather than reading the per-VPE Config1 value. The values
268 * for "CPU 0" get copied to all the other CPUs as part
269 * of their initialization in smtc_cpu_setup().
270 */
271
272 tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
273 cpu_data[0].tlbsize = tlbsiz;
274 smtc_status |= SMTC_TLB_SHARED;
275
276 printk("TLB of %d entry pairs shared by %d VPEs\n",
277 tlbsiz, vpes);
278 } else {
279 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
280 }
281 }
282}
283
284
285/*
286 * Incrementally build the CPU map out of constituent MIPS MT cores,
287 * using the specified available VPEs and TCs. Platform code needs
288 * to ensure that each MIPS MT core invokes this routine on reset,
289 * one at a time(!).
290 *
291 * This version of the build_cpu_map and prepare_cpus routines assumes
292 * that *all* TCs of a MIPS MT core will be used for Linux, and that
293 * they will be spread across *all* available VPEs (to minimise the
294 * loss of efficiency due to exception service serialization).
295 * An improved version would pick up configuration information and
296 * possibly leave some TCs/VPEs as "slave" processors.
297 *
298 * Use c0_MVPConf0 to find out how many TCs are available, setting up
299 * phys_cpu_present_map and the logical/physical mappings.
300 */
301
302int __init mipsmt_build_cpu_map(int start_cpu_slot)
303{
304 int i, ntcs;
305
306 /*
307 * The CPU map isn't actually used for anything at this point,
308 * so it's not clear what else we should do apart from setting
309 * everything up so that "logical" = "physical".
310 */
311 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
312 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
313 cpu_set(i, phys_cpu_present_map);
314 __cpu_number_map[i] = i;
315 __cpu_logical_map[i] = i;
316 }
317 /* Initialize map of CPUs with FPUs */
318 cpus_clear(mt_fpu_cpumask);
319
320 /* One of those TCs is the one booting, and not a secondary... */
321 printk("%i available secondary CPU TC(s)\n", i - 1);
322
323 return i;
324}
325
326/*
327 * Common setup before any secondaries are started
328 * Make sure all CPUs are in a sensible state before we boot any of the
329 * secondaries.
330 *
331 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
332 * as possible across the available VPEs.
333 */
334
335static void smtc_tc_setup(int vpe, int tc, int cpu)
336{
337 settc(tc);
338 write_tc_c0_tchalt(TCHALT_H);
339 mips_ihb();
340 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
341 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
342 | TCSTATUS_A);
343 write_tc_c0_tccontext(0);
344 /* Bind tc to vpe */
345 write_tc_c0_tcbind(vpe);
346 /* In general, all TCs should have the same cpu_data indications */
347 memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
348 /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
349 if (cpu_data[0].cputype == CPU_34K)
350 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
351 cpu_data[cpu].vpe_id = vpe;
352 cpu_data[cpu].tc_id = tc;
353}
354
355
356void mipsmt_prepare_cpus(void)
357{
358 int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
359 unsigned long flags;
360 unsigned long val;
361 int nipi;
362 struct smtc_ipi *pipi;
363
364 /* disable interrupts so we can disable MT */
365 local_irq_save(flags);
366 /* disable MT so we can configure */
367 dvpe();
368 dmt();
369
370 freeIPIq.lock = SPIN_LOCK_UNLOCKED;
371
372 /*
373 * We probably don't have as many VPEs as we do SMP "CPUs",
374 * but it's possible - and in any case we'll never use more!
375 */
376 for (i=0; i<NR_CPUS; i++) {
377 IPIQ[i].head = IPIQ[i].tail = NULL;
378 IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
379 IPIQ[i].depth = 0;
380 ipi_timer_latch[i] = 0;
381 }
382
383 /* cpu_data index starts at zero */
384 cpu = 0;
385 cpu_data[cpu].vpe_id = 0;
386 cpu_data[cpu].tc_id = 0;
387 cpu++;
388
389 /* Report on boot-time options */
390 mips_mt_set_cpuoptions ();
391 if (vpelimit > 0)
392 printk("Limit of %d VPEs set\n", vpelimit);
393 if (tclimit > 0)
394 printk("Limit of %d TCs set\n", tclimit);
395 if (nostlb) {
396 printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
397 }
398 if (asidmask)
399 printk("ASID mask value override to 0x%x\n", asidmask);
400
401 /* Temporary */
402#ifdef SMTC_IDLE_HOOK_DEBUG
403 if (hang_trig)
404 printk("Logic Analyser Trigger on suspected TC hang\n");
405#endif /* SMTC_IDLE_HOOK_DEBUG */
406
407 /* Put MVPE's into 'configuration state' */
408 write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
409
410 val = read_c0_mvpconf0();
411 nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
412 if (vpelimit > 0 && nvpe > vpelimit)
413 nvpe = vpelimit;
414 ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
415 if (ntc > NR_CPUS)
416 ntc = NR_CPUS;
417 if (tclimit > 0 && ntc > tclimit)
418 ntc = tclimit;
419 tcpervpe = ntc / nvpe;
420 slop = ntc % nvpe; /* Residual TCs, < NVPE */
421
422 /* Set up shared TLB */
423 smtc_configure_tlb();
424
425 for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
426 /*
427 * Set the MVP bits.
428 */
429 settc(tc);
430 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
431 if (vpe != 0)
432 printk(", ");
433 printk("VPE %d: TC", vpe);
434 for (i = 0; i < tcpervpe; i++) {
435 /*
436 * TC 0 is bound to VPE 0 at reset,
437 * and is presumably executing this
438 * code. Leave it alone!
439 */
440 if (tc != 0) {
441 smtc_tc_setup(vpe,tc, cpu);
442 cpu++;
443 }
444 printk(" %d", tc);
445 tc++;
446 }
447 if (slop) {
448 if (tc != 0) {
449 smtc_tc_setup(vpe,tc, cpu);
450 cpu++;
451 }
452 printk(" %d", tc);
453 tc++;
454 slop--;
455 }
456 if (vpe != 0) {
457 /*
458 * Clear any stale software interrupts from VPE's Cause
459 */
460 write_vpe_c0_cause(0);
461
462 /*
463 * Clear ERL/EXL of VPEs other than 0
464 * and set restricted interrupt enable/mask.
465 */
466 write_vpe_c0_status((read_vpe_c0_status()
467 & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
468 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
469 | ST0_IE));
470 /*
471 * set config to be the same as vpe0,
472 * particularly kseg0 coherency alg
473 */
474 write_vpe_c0_config(read_c0_config());
475 /* Clear any pending timer interrupt */
476 write_vpe_c0_compare(0);
477 /* Propagate Config7 */
478 write_vpe_c0_config7(read_c0_config7());
479 }
480 /* enable multi-threading within VPE */
481 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
482 /* enable the VPE */
483 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
484 }
485
486 /*
487 * Pull any physically present but unused TCs out of circulation.
488 */
489 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
490 cpu_clear(tc, phys_cpu_present_map);
491 cpu_clear(tc, cpu_present_map);
492 tc++;
493 }
494
495 /* release config state */
496 write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
497
498 printk("\n");
499
500 /* Set up coprocessor affinity CPU mask(s) */
501
502 for (tc = 0; tc < ntc; tc++) {
503 if(cpu_data[tc].options & MIPS_CPU_FPU)
504 cpu_set(tc, mt_fpu_cpumask);
505 }
506
507 /* set up ipi interrupts... */
508
509 /* If we have multiple VPEs running, set up the cross-VPE interrupt */
510
511 if (nvpe > 1)
512 setup_cross_vpe_interrupts();
513
514 /* Set up queue of free IPI "messages". */
515 nipi = NR_CPUS * IPIBUF_PER_CPU;
516 if (ipibuffers > 0)
517 nipi = ipibuffers;
518
519 pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
520 if (pipi == NULL)
521 panic("kmalloc of IPI message buffers failed\n");
522 else
523 printk("IPI buffer pool of %d buffers\n", nipi);
524 for (i = 0; i < nipi; i++) {
525 smtc_ipi_nq(&freeIPIq, pipi);
526 pipi++;
527 }
528
529 /* Arm multithreading and enable other VPEs - but all TCs are Halted */
530 emt(EMT_ENABLE);
531 evpe(EVPE_ENABLE);
532 local_irq_restore(flags);
533 /* Initialize SMTC /proc statistics/diagnostics */
534 init_smtc_stats();
535}
536
537
538/*
539 * Set up the PC, SP, and GP of a secondary processor and start it
540 * running!
541 * smp_bootstrap is the place to resume from,
542 * __KSTK_TOS(idle) is apparently the stack pointer,
543 * and (unsigned long)idle->thread_info is the gp.
544 *
545 */
546void smtc_boot_secondary(int cpu, struct task_struct *idle)
547{
548 extern u32 kernelsp[NR_CPUS];
549 long flags;
550 int mtflags;
551
552 LOCK_MT_PRA();
553 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
554 dvpe();
555 }
556 settc(cpu_data[cpu].tc_id);
557
558 /* pc */
559 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
560
561 /* stack pointer */
562 kernelsp[cpu] = __KSTK_TOS(idle);
563 write_tc_gpr_sp(__KSTK_TOS(idle));
564
565 /* global pointer */
566 write_tc_gpr_gp((unsigned long)idle->thread_info);
567
568 smtc_status |= SMTC_MTC_ACTIVE;
569 write_tc_c0_tchalt(0);
570 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
571 evpe(EVPE_ENABLE);
572 }
573 UNLOCK_MT_PRA();
574}
575
576void smtc_init_secondary(void)
577{
578 /*
579 * Start timer on secondary VPEs if necessary.
580 * mips_timer_setup should already have been invoked by init/main
581 * on "boot" TC. Like the per_cpu_trap_init() hack, this assumes that
582 * SMTC init code assigns TCs consecutively and in ascending order
583 * across available VPEs.
584 */
585 if(((read_c0_tcbind() & TCBIND_CURTC) != 0)
586 && ((read_c0_tcbind() & TCBIND_CURVPE)
587 != cpu_data[smp_processor_id() - 1].vpe_id)){
588 write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
589 }
590
591 local_irq_enable();
592}
593
594void smtc_smp_finish(void)
595{
596 printk("TC %d going on-line as CPU %d\n",
597 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
598}
599
600void smtc_cpus_done(void)
601{
602}
603
604/*
605 * Support for SMTC-optimized driver IRQ registration
606 */
607
608/*
609 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
610 * in do_IRQ. These are passed in setup_irq_smtc() and stored
611 * in this table.
612 */
613
614int setup_irq_smtc(unsigned int irq, struct irqaction * new,
615 unsigned long hwmask)
616{
617 irq_hwmask[irq] = hwmask;
618
619 return setup_irq(irq, new);
620}
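/*
 * A sketch of the intended usage, assuming a hypothetical CPU interrupt
 * line MIPSCPU_INT_FOO and handler foo_interrupt() (the timer and IPI
 * registrations made elsewhere in this patch follow the same pattern):
 *
 *	static struct irqaction foo_irqaction;
 *
 *	foo_irqaction.handler = foo_interrupt;
 *	foo_irqaction.flags = SA_INTERRUPT;
 *	foo_irqaction.name = "foo";
 *
 *	The hwmask argument is the Status.IM bit that do_IRQ may need to
 *	mask while servicing this IRQ:
 *
 *	setup_irq_smtc(MIPSCPU_INT_BASE + MIPSCPU_INT_FOO, &foo_irqaction,
 *		       0x100 << MIPSCPU_INT_FOO);
 */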
621
622/*
623 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
624 * Within a VPE one TC can interrupt another by different approaches.
625 * The easiest to get right would probably be to make all TCs except
626 * the target IXMT and set a software interrupt, but an IXMT-based
627 * scheme requires that a handler must run before a new IPI could
628 * be sent, which would break the "broadcast" loops in MIPS MT.
629 * A more gonzo approach within a VPE is to halt the TC, extract
630 * its Restart, Status, and a couple of GPRs, and program the Restart
631 * address to emulate an interrupt.
632 *
633 * Within a VPE, one can be confident that the target TC isn't in
634 * a critical EXL state when halted, since the write to the Halt
635 * register could not have issued on the writing thread if the
636 * halting thread had EXL set. So k0 and k1 of the target TC
637 * can be used by the injection code. Across VPEs, one can't
638 * be certain that the target TC isn't in a critical exception
639 * state. So we try a two-step process of sending a software
640 * interrupt to the target VPE, which either handles the event
641 * itself (if it was the target) or injects the event within
642 * the VPE.
643 */
644
645void smtc_ipi_qdump(void)
646{
647 int i;
648
649 for (i = 0; i < NR_CPUS ;i++) {
650 printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
651 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
652 IPIQ[i].depth);
653 }
654}
655
656/*
657 * The standard atomic.h primitives don't quite do what we want
658 * here: We need an atomic add-and-return-previous-value (which
659 * could be done with atomic_add_return and a decrement) and an
660 * atomic set/zero-and-return-previous-value (which can't really
661 * be done with the atomic.h primitives). And since this is
662 * MIPS MT, we can assume that we have LL/SC.
663 */
664static __inline__ int atomic_postincrement(unsigned int *pv)
665{
666 unsigned long result;
667
668 unsigned long temp;
669
670 __asm__ __volatile__(
671 "1: ll %0, %2 \n"
672 " addu %1, %0, 1 \n"
673 " sc %1, %2 \n"
674 " beqz %1, 1b \n"
675 " sync \n"
676 : "=&r" (result), "=&r" (temp), "=m" (*pv)
677 : "m" (*pv)
678 : "memory");
679
680 return result;
681}
682
683/* No longer used in IPI dispatch, but retained for future recycling */
684
685static __inline__ int atomic_postclear(unsigned int *pv)
686{
687 unsigned long result;
688
689 unsigned long temp;
690
691 __asm__ __volatile__(
692 "1: ll %0, %2 \n"
693 " or %1, $0, $0 \n"
694 " sc %1, %2 \n"
695 " beqz %1, 1b \n"
696 " sync \n"
697 : "=&r" (result), "=&r" (temp), "=m" (*pv)
698 : "m" (*pv)
699 : "memory");
700
701 return result;
702}
703
704
705void smtc_send_ipi(int cpu, int type, unsigned int action)
706{
707 int tcstatus;
708 struct smtc_ipi *pipi;
709 long flags;
710 int mtflags;
711
712 if (cpu == smp_processor_id()) {
713 printk("Cannot Send IPI to self!\n");
714 return;
715 }
716 /* Set up a descriptor, to be delivered either promptly or queued */
717 pipi = smtc_ipi_dq(&freeIPIq);
718 if (pipi == NULL) {
719 bust_spinlocks(1);
720 mips_mt_regdump(dvpe());
721 panic("IPI Msg. Buffers Depleted\n");
722 }
723 pipi->type = type;
724 pipi->arg = (void *)action;
725 pipi->dest = cpu;
726 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
727 /* If not on same VPE, enqueue and send cross-VPE interrupt */
728 smtc_ipi_nq(&IPIQ[cpu], pipi);
729 LOCK_CORE_PRA();
730 settc(cpu_data[cpu].tc_id);
731 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
732 UNLOCK_CORE_PRA();
733 } else {
734 /*
735 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
736 * since ASID shootdown on the other VPE may
737 * collide with this operation.
738 */
739 LOCK_CORE_PRA();
740 settc(cpu_data[cpu].tc_id);
741 /* Halt the targeted TC */
742 write_tc_c0_tchalt(TCHALT_H);
743 mips_ihb();
744
745 /*
746 * Inspect TCStatus - if IXMT is set, we have to queue
747 * a message. Otherwise, we set up the "interrupt"
748 * of the other TC
749 */
750 tcstatus = read_tc_c0_tcstatus();
751
752 if ((tcstatus & TCSTATUS_IXMT) != 0) {
753 /*
754 * Spin-waiting here can deadlock,
755 * so we queue the message for the target TC.
756 */
757 write_tc_c0_tchalt(0);
758 UNLOCK_CORE_PRA();
759 /* Try to reduce redundant timer interrupt messages */
760 if(type == SMTC_CLOCK_TICK) {
761 if(atomic_postincrement(&ipi_timer_latch[cpu])!=0) {
762 smtc_ipi_nq(&freeIPIq, pipi);
763 return;
764 }
765 }
766 smtc_ipi_nq(&IPIQ[cpu], pipi);
767 } else {
768 post_direct_ipi(cpu, pipi);
769 write_tc_c0_tchalt(0);
770 UNLOCK_CORE_PRA();
771 }
772 }
773}
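/*
 * The expected caller shape, paraphrasing the platform hook added by this
 * patch in arch/mips/mips-boards/malta/malta_smp.c:
 *
 *	void core_send_ipi(int cpu, unsigned int action)
 *	{
 *		smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 *	}
 */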
774
775/*
776 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
777 */
778void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
779{
780 struct pt_regs *kstack;
781 unsigned long tcstatus;
782 unsigned long tcrestart;
783 extern u32 kernelsp[NR_CPUS];
784 extern void __smtc_ipi_vector(void);
785
786 /* Extract Status, EPC from halted TC */
787 tcstatus = read_tc_c0_tcstatus();
788 tcrestart = read_tc_c0_tcrestart();
789 /* If TCRestart indicates a WAIT instruction, advance the PC */
790 if ((tcrestart & 0x80000000)
791 && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
792 tcrestart += 4;
793 }
794 /*
795 * Save on TC's future kernel stack
796 *
797 * CU bit of Status is indicator that TC was
798 * already running on a kernel stack...
799 */
800 if(tcstatus & ST0_CU0) {
801 /* Note that this "- 1" is pointer arithmetic */
802 kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
803 } else {
804 kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
805 }
806
807 kstack->cp0_epc = (long)tcrestart;
808 /* Save TCStatus */
809 kstack->cp0_tcstatus = tcstatus;
810 /* Pass token of operation to be performed in the kernel stack pad area */
811 kstack->pad0[4] = (unsigned long)pipi;
812 /* Pass address of function to be called likewise */
813 kstack->pad0[5] = (unsigned long)&ipi_decode;
814 /* Set interrupt exempt and kernel mode */
815 tcstatus |= TCSTATUS_IXMT;
816 tcstatus &= ~TCSTATUS_TKSU;
817 write_tc_c0_tcstatus(tcstatus);
818 ehb();
819 /* Set TC Restart address to be SMTC IPI vector */
820 write_tc_c0_tcrestart(__smtc_ipi_vector);
821}
822
823void ipi_resched_interrupt(struct pt_regs *regs)
824{
825 /* Return from interrupt should be enough to cause scheduler check */
826}
827
828
829void ipi_call_interrupt(struct pt_regs *regs)
830{
831 /* Invoke generic function invocation code in smp.c */
832 smp_call_function_interrupt();
833}
834
835void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
836{
837 void *arg_copy = pipi->arg;
838 int type_copy = pipi->type;
839 int dest_copy = pipi->dest;
840
841 smtc_ipi_nq(&freeIPIq, pipi);
842 switch (type_copy) {
843 case SMTC_CLOCK_TICK:
844 /* Invoke Clock "Interrupt" */
845 ipi_timer_latch[dest_copy] = 0;
846#ifdef SMTC_IDLE_HOOK_DEBUG
847 clock_hang_reported[dest_copy] = 0;
848#endif /* SMTC_IDLE_HOOK_DEBUG */
849 local_timer_interrupt(0, NULL, regs);
850 break;
851 case LINUX_SMP_IPI:
852 switch ((int)arg_copy) {
853 case SMP_RESCHEDULE_YOURSELF:
854 ipi_resched_interrupt(regs);
855 break;
856 case SMP_CALL_FUNCTION:
857 ipi_call_interrupt(regs);
858 break;
859 default:
860 printk("Impossible SMTC IPI Argument 0x%x\n",
861 (int)arg_copy);
862 break;
863 }
864 break;
865 default:
866 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
867 break;
868 }
869}
870
871void deferred_smtc_ipi(struct pt_regs *regs)
872{
873 struct smtc_ipi *pipi;
874 unsigned long flags;
875/* DEBUG */
876 int q = smp_processor_id();
877
878 /*
879 * Test is not atomic, but much faster than a dequeue,
880 * and the vast majority of invocations will have a null queue.
881 */
882 if(IPIQ[q].head != NULL) {
883 while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
884 /* ipi_decode() should be called with interrupts off */
885 local_irq_save(flags);
886 ipi_decode(regs, pipi);
887 local_irq_restore(flags);
888 }
889 }
890}
891
892/*
893 * Send clock tick to all TCs except the one executing the function
894 */
895
896void smtc_timer_broadcast(int vpe)
897{
898 int cpu;
899 int myTC = cpu_data[smp_processor_id()].tc_id;
900 int myVPE = cpu_data[smp_processor_id()].vpe_id;
901
902 smtc_cpu_stats[smp_processor_id()].timerints++;
903
904 for_each_online_cpu(cpu) {
905 if (cpu_data[cpu].vpe_id == myVPE &&
906 cpu_data[cpu].tc_id != myTC)
907 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
908 }
909}
910
911/*
912 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
913 * set via cross-VPE MTTR manipulation of the Cause register. It would be
914 * in some regards preferable to have external logic for "doorbell" hardware
915 * interrupts.
916 */
917
918static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
919
920static irqreturn_t ipi_interrupt(int irq, void *dev_idm, struct pt_regs *regs)
921{
922 int my_vpe = cpu_data[smp_processor_id()].vpe_id;
923 int my_tc = cpu_data[smp_processor_id()].tc_id;
924 int cpu;
925 struct smtc_ipi *pipi;
926 unsigned long tcstatus;
927 int sent;
928 long flags;
929 unsigned int mtflags;
930 unsigned int vpflags;
931
932 /*
933 * So long as cross-VPE interrupts are done via
934 * MFTR/MTTR read-modify-writes of Cause, we need
935 * to stop other VPEs whenever the local VPE does
936 * anything similar.
937 */
938 local_irq_save(flags);
939 vpflags = dvpe();
940 clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
941 set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
942 irq_enable_hazard();
943 evpe(vpflags);
944 local_irq_restore(flags);
945
946 /*
947 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
948 * queued for TCs on this VPE other than the current one.
949 * Return-from-interrupt should cause us to drain the queue
950 * for the current TC, so we ought not to have to do it explicitly here.
951 */
952
953 for_each_online_cpu(cpu) {
954 if (cpu_data[cpu].vpe_id != my_vpe)
955 continue;
956
957 pipi = smtc_ipi_dq(&IPIQ[cpu]);
958 if (pipi != NULL) {
959 if (cpu_data[cpu].tc_id != my_tc) {
960 sent = 0;
961 LOCK_MT_PRA();
962 settc(cpu_data[cpu].tc_id);
963 write_tc_c0_tchalt(TCHALT_H);
964 mips_ihb();
965 tcstatus = read_tc_c0_tcstatus();
966 if ((tcstatus & TCSTATUS_IXMT) == 0) {
967 post_direct_ipi(cpu, pipi);
968 sent = 1;
969 }
970 write_tc_c0_tchalt(0);
971 UNLOCK_MT_PRA();
972 if (!sent) {
973 smtc_ipi_req(&IPIQ[cpu], pipi);
974 }
975 } else {
976 /*
977 * ipi_decode() should be called
978 * with interrupts off
979 */
980 local_irq_save(flags);
981 ipi_decode(regs, pipi);
982 local_irq_restore(flags);
983 }
984 }
985 }
986
987 return IRQ_HANDLED;
988}
989
990static void ipi_irq_dispatch(struct pt_regs *regs)
991{
992 do_IRQ(cpu_ipi_irq, regs);
993}
994
995static struct irqaction irq_ipi;
996
997void setup_cross_vpe_interrupts(void)
998{
999 if (!cpu_has_vint)
1000 panic("SMTC Kernel requires Vectored Interrupt support");
1001
1002 set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1003
1004 irq_ipi.handler = ipi_interrupt;
1005 irq_ipi.flags = SA_INTERRUPT;
1006 irq_ipi.name = "SMTC_IPI";
1007
1008 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1009
1010 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
1011}
1012
1013/*
1014 * SMTC-specific hacks invoked from elsewhere in the kernel.
1015 */
1016
1017void smtc_idle_loop_hook(void)
1018{
1019#ifdef SMTC_IDLE_HOOK_DEBUG
1020 int im;
1021 int flags;
1022 int mtflags;
1023 int bit;
1024 int vpe;
1025 int tc;
1026 int hook_ntcs;
1027 /*
1028 * printk within DMT-protected regions can deadlock,
1029 * so buffer diagnostic messages for later output.
1030 */
1031 char *pdb_msg;
1032 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1033
1034 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1035 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1036 int mvpconf0;
1037 /* Tedious stuff to just do once */
1038 mvpconf0 = read_c0_mvpconf0();
1039 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1040 if (hook_ntcs > NR_CPUS)
1041 hook_ntcs = NR_CPUS;
1042 for (tc = 0; tc < hook_ntcs; tc++) {
1043 tcnoprog[tc] = 0;
1044 clock_hang_reported[tc] = 0;
1045 }
1046 for (vpe = 0; vpe < 2; vpe++)
1047 for (im = 0; im < 8; im++)
1048 imstuckcount[vpe][im] = 0;
1049 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1050 atomic_set(&idle_hook_initialized, 1000);
1051 } else {
1052 /* Someone else is initializing in parallel - let 'em finish */
1053 while (atomic_read(&idle_hook_initialized) < 1000)
1054 ;
1055 }
1056 }
1057
1058 /* Have we stupidly left IXMT set somewhere? */
1059 if (read_c0_tcstatus() & 0x400) {
1060 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1061 ehb();
1062 printk("Dangling IXMT in cpu_idle()\n");
1063 }
1064
1065 /* Have we stupidly left an IM bit turned off? */
1066#define IM_LIMIT 2000
1067 local_irq_save(flags);
1068 mtflags = dmt();
1069 pdb_msg = &id_ho_db_msg[0];
1070 im = read_c0_status();
1071 vpe = cpu_data[smp_processor_id()].vpe_id;
1072 for (bit = 0; bit < 8; bit++) {
1073 /*
1074 * In current prototype, I/O interrupts
1075 * are masked for VPE > 0
1076 */
1077 if (vpemask[vpe][bit]) {
1078 if (!(im & (0x100 << bit)))
1079 imstuckcount[vpe][bit]++;
1080 else
1081 imstuckcount[vpe][bit] = 0;
1082 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1083 set_c0_status(0x100 << bit);
1084 ehb();
1085 imstuckcount[vpe][bit] = 0;
1086 pdb_msg += sprintf(pdb_msg,
1087 "Dangling IM %d fixed for VPE %d\n", bit,
1088 vpe);
1089 }
1090 }
1091 }
1092
1093 /*
1094 * Now that we limit outstanding timer IPIs, check for hung TC
1095 */
1096 for (tc = 0; tc < NR_CPUS; tc++) {
1097 /* Don't check ourself - we'll dequeue IPIs just below */
1098 if ((tc != smp_processor_id()) &&
1099 ipi_timer_latch[tc] > timerq_limit) {
1100 if (clock_hang_reported[tc] == 0) {
1101 pdb_msg += sprintf(pdb_msg,
1102 "TC %d looks hung with timer latch at %d\n",
1103 tc, ipi_timer_latch[tc]);
1104 clock_hang_reported[tc]++;
1105 }
1106 }
1107 }
1108 emt(mtflags);
1109 local_irq_restore(flags);
1110 if (pdb_msg != &id_ho_db_msg[0])
1111 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1112#endif /* SMTC_IDLE_HOOK_DEBUG */
1113 /*
1114 * To the extent that we've ever turned interrupts off,
1115 * we may have accumulated deferred IPIs. This is subtle.
1116 * If we use the smtc_ipi_qdepth() macro, we'll get an
1117 * exact number - but we'll also disable interrupts
1118 * and create a window of failure where a new IPI gets
1119 * queued after we test the depth but before we re-enable
1120 * interrupts. So long as IXMT never gets set, however,
1121 * we should be OK: If we pick up something and dispatch
1122 * it here, that's great. If we see nothing, but concurrent
1123 * with this operation, another TC sends us an IPI, IXMT
1124 * is clear, and we'll handle it as a real pseudo-interrupt
1125 * and not a pseudo-pseudo interrupt.
1126 */
1127 if (IPIQ[smp_processor_id()].depth > 0) {
1128 struct smtc_ipi *pipi;
1129 extern void self_ipi(struct smtc_ipi *);
1130
1131 if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
1132 self_ipi(pipi);
1133 smtc_cpu_stats[smp_processor_id()].selfipis++;
1134 }
1135 }
1136}
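/*
 * Assumed call-site sketch: the cpu_idle() loop in
 * arch/mips/kernel/process.c (also modified by this patch) is expected to
 * invoke the hook on each idle iteration, roughly:
 *
 *	while (!need_resched()) {
 *	#ifdef CONFIG_MIPS_MT_SMTC
 *		smtc_idle_loop_hook();
 *	#endif
 *		if (cpu_wait)
 *			(*cpu_wait)();
 *	}
 */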
1137
1138void smtc_soft_dump(void)
1139{
1140 int i;
1141
1142 printk("Counter Interrupts taken per CPU (TC)\n");
1143 for (i=0; i < NR_CPUS; i++) {
1144 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1145 }
1146 printk("Self-IPI invocations:\n");
1147 for (i=0; i < NR_CPUS; i++) {
1148 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1149 }
1150 smtc_ipi_qdump();
1151 printk("Timer IPI Backlogs:\n");
1152 for (i=0; i < NR_CPUS; i++) {
1153 printk("%d: %d\n", i, ipi_timer_latch[i]);
1154 }
1155 printk("%d Recoveries of \"stolen\" FPU\n",
1156 atomic_read(&smtc_fpu_recoveries));
1157}
1158
1159
1160/*
1161 * TLB management routines special to SMTC
1162 */
1163
1164void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1165{
1166 unsigned long flags, mtflags, tcstat, prevhalt, asid;
1167 int tlb, i;
1168
1169 /*
1170 * It would be nice to be able to use a spinlock here,
1171 * but this is invoked from within TLB flush routines
1172 * that protect themselves with DVPE, so if a lock is
1173 * held by another TC, it'll never be freed.
1174 *
1175 * DVPE/DMT must not be done with interrupts enabled,
1176 * so even though most callers will already have disabled
1177 * them, let's be really careful...
1178 */
1179
1180 local_irq_save(flags);
1181 if (smtc_status & SMTC_TLB_SHARED) {
1182 mtflags = dvpe();
1183 tlb = 0;
1184 } else {
1185 mtflags = dmt();
1186 tlb = cpu_data[cpu].vpe_id;
1187 }
1188 asid = asid_cache(cpu);
1189
1190 do {
1191 if (!((asid += ASID_INC) & ASID_MASK) ) {
1192 if (cpu_has_vtag_icache)
1193 flush_icache_all();
1194 /* Traverse all online CPUs (hack requires contiguous range) */
1195 for (i = 0; i < num_online_cpus(); i++) {
1196 /*
1197 * We don't need to worry about our own CPU, nor those of
1198 * CPUs who don't share our TLB.
1199 */
1200 if ((i != smp_processor_id()) &&
1201 ((smtc_status & SMTC_TLB_SHARED) ||
1202 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1203 settc(cpu_data[i].tc_id);
1204 prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1205 if (!prevhalt) {
1206 write_tc_c0_tchalt(TCHALT_H);
1207 mips_ihb();
1208 }
1209 tcstat = read_tc_c0_tcstatus();
1210 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1211 if (!prevhalt)
1212 write_tc_c0_tchalt(0);
1213 }
1214 }
1215 if (!asid) /* fix version if needed */
1216 asid = ASID_FIRST_VERSION;
1217 local_flush_tlb_all(); /* start new asid cycle */
1218 }
1219 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1220
1221 /*
1222 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1223 */
1224 for (i = 0; i < num_online_cpus(); i++) {
1225 if ((smtc_status & SMTC_TLB_SHARED) ||
1226 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1227 cpu_context(i, mm) = asid_cache(i) = asid;
1228 }
1229
1230 if (smtc_status & SMTC_TLB_SHARED)
1231 evpe(mtflags);
1232 else
1233 emt(mtflags);
1234 local_irq_restore(flags);
1235}
1236
1237/*
1238 * Invoked from macros defined in mmu_context.h
1239 * which must already have disabled interrupts
1240 * and done a DVPE or DMT as appropriate.
1241 */
1242
1243void smtc_flush_tlb_asid(unsigned long asid)
1244{
1245 int entry;
1246 unsigned long ehi;
1247
1248 entry = read_c0_wired();
1249
1250 /* Traverse all non-wired entries */
1251 while (entry < current_cpu_data.tlbsize) {
1252 write_c0_index(entry);
1253 ehb();
1254 tlb_read();
1255 ehb();
1256 ehi = read_c0_entryhi();
1257 if((ehi & ASID_MASK) == asid) {
1258 /*
1259 * Invalidate only entries with specified ASID,
1260 * making sure all entries differ.
1261 */
1262 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1263 write_c0_entrylo0(0);
1264 write_c0_entrylo1(0);
1265 mtc0_tlbw_hazard();
1266 tlb_write_indexed();
1267 }
1268 entry++;
1269 }
1270 write_c0_index(PARKED_INDEX);
1271 tlbw_use_hazard();
1272}
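/*
 * A minimal sketch of the calling convention described above (assumed
 * shape only; the real invocations are the mmu_context.h macros):
 *
 *	local_irq_save(flags);
 *	mtflags = (smtc_status & SMTC_TLB_SHARED) ? dvpe() : dmt();
 *	smtc_flush_tlb_asid(asid);
 *	if (smtc_status & SMTC_TLB_SHARED)
 *		evpe(mtflags);
 *	else
 *		emt(mtflags);
 *	local_irq_restore(flags);
 */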
1273
1274/*
1275 * Support for single-threading cache flush operations.
1276 */
1277
1278int halt_state_save[NR_CPUS];
1279
1280/*
1281 * To really, really be sure that nothing is being done
1282 * by other TCs, halt them all. This code assumes that
1283 * a DVPE has already been done, so while their Halted
1284 * state is theoretically architecturally unstable, in
1285 * practice, it's not going to change while we're looking
1286 * at it.
1287 */
1288
1289void smtc_cflush_lockdown(void)
1290{
1291 int cpu;
1292
1293 for_each_online_cpu(cpu) {
1294 if (cpu != smp_processor_id()) {
1295 settc(cpu_data[cpu].tc_id);
1296 halt_state_save[cpu] = read_tc_c0_tchalt();
1297 write_tc_c0_tchalt(TCHALT_H);
1298 }
1299 }
1300 mips_ihb();
1301}
1302
1303/* It would be cheating to change the cpu_online states during a flush! */
1304
1305void smtc_cflush_release(void)
1306{
1307 int cpu;
1308
1309 /*
1310 * Start with a hazard barrier to ensure
1311 * that all CACHE ops have played through.
1312 */
1313 mips_ihb();
1314
1315 for_each_online_cpu(cpu) {
1316 if (cpu != smp_processor_id()) {
1317 settc(cpu_data[cpu].tc_id);
1318 write_tc_c0_tchalt(halt_state_save[cpu]);
1319 }
1320 }
1321 mips_ihb();
1322}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 5e51a2d8f3f0..13ff4da598cd 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -116,8 +116,7 @@ static void c0_timer_ack(void)
116 write_c0_compare(expirelo); 116 write_c0_compare(expirelo);
117 117
118 /* Check to see if we have missed any timer interrupts. */ 118 /* Check to see if we have missed any timer interrupts. */
119 count = read_c0_count(); 119 while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
120 if ((count - expirelo) < 0x7fffffff) {
121 /* missed_timer_count++; */ 120 /* missed_timer_count++; */
122 expirelo = count + cycles_per_jiffy; 121 expirelo = count + cycles_per_jiffy;
123 write_c0_compare(expirelo); 122 write_c0_compare(expirelo);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 081e6ed5bb62..6336fe8008ec 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
280NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) 280NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
281{ 281{
282 static int die_counter; 282 static int die_counter;
283#ifdef CONFIG_MIPS_MT_SMTC
284 unsigned long dvpret = dvpe();
285#endif /* CONFIG_MIPS_MT_SMTC */
283 286
284 console_verbose(); 287 console_verbose();
285 spin_lock_irq(&die_lock); 288 spin_lock_irq(&die_lock);
289 bust_spinlocks(1);
290#ifdef CONFIG_MIPS_MT_SMTC
291 mips_mt_regdump(dvpret);
292#endif /* CONFIG_MIPS_MT_SMTC */
286 printk("%s[#%d]:\n", str, ++die_counter); 293 printk("%s[#%d]:\n", str, ++die_counter);
287 show_registers(regs); 294 show_registers(regs);
288 spin_unlock_irq(&die_lock); 295 spin_unlock_irq(&die_lock);
@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
757 764
758 case 2: 765 case 2:
759 case 3: 766 case 3:
767 die_if_kernel("do_cpu invoked from kernel context!", regs);
760 break; 768 break;
761 } 769 }
762 770
@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
794 802
795asmlinkage void do_mt(struct pt_regs *regs) 803asmlinkage void do_mt(struct pt_regs *regs)
796{ 804{
805 int subcode;
806
807 die_if_kernel("MIPS MT Thread exception in kernel", regs);
808
809 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
810 >> VPECONTROL_EXCPT_SHIFT;
811 switch (subcode) {
812 case 0:
813 printk(KERN_ERR "Thread Underflow\n");
814 break;
815 case 1:
816 printk(KERN_ERR "Thread Overflow\n");
817 break;
818 case 2:
819 printk(KERN_ERR "Invalid YIELD Qualifier\n");
820 break;
821 case 3:
822 printk(KERN_ERR "Gating Storage Exception\n");
823 break;
824 case 4:
825 printk(KERN_ERR "YIELD Scheduler Exception\n");
826 break;
827 case 5:
828 printk(KERN_ERR "Gating Storage Scheduler Exception\n");
829 break;
830 default:
831 printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
832 subcode);
833 break;
834 }
797 die_if_kernel("MIPS MT Thread exception in kernel", regs); 835 die_if_kernel("MIPS MT Thread exception in kernel", regs);
798 836
799 force_sig(SIGILL, current); 837 force_sig(SIGILL, current);
@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
929 */ 967 */
930void nmi_exception_handler(struct pt_regs *regs) 968void nmi_exception_handler(struct pt_regs *regs)
931{ 969{
970#ifdef CONFIG_MIPS_MT_SMTC
971 unsigned long dvpret = dvpe();
972 bust_spinlocks(1);
973 printk("NMI taken!!!!\n");
974 mips_mt_regdump(dvpret);
975#else
976 bust_spinlocks(1);
932 printk("NMI taken!!!!\n"); 977 printk("NMI taken!!!!\n");
978#endif /* CONFIG_MIPS_MT_SMTC */
933 die("NMI", regs); 979 die("NMI", regs);
934 while(1) ; 980 while(1) ;
935} 981}
@@ -1007,7 +1053,7 @@ again:
1007 return set; 1053 return set;
1008} 1054}
1009 1055
1010void mips_srs_free (int set) 1056void mips_srs_free(int set)
1011{ 1057{
1012 struct shadow_registers *sr = &shadow_registers; 1058 struct shadow_registers *sr = &shadow_registers;
1013 1059
@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
1027 if (addr == NULL) { 1073 if (addr == NULL) {
1028 handler = (unsigned long) do_default_vi; 1074 handler = (unsigned long) do_default_vi;
1029 srs = 0; 1075 srs = 0;
1030 } 1076 } else
1031 else
1032 handler = (unsigned long) addr; 1077 handler = (unsigned long) addr;
1033 vi_handlers[n] = (unsigned long) addr; 1078 vi_handlers[n] = (unsigned long) addr;
1034 1079
@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
1040 if (cpu_has_veic) { 1085 if (cpu_has_veic) {
1041 if (board_bind_eic_interrupt) 1086 if (board_bind_eic_interrupt)
1042 board_bind_eic_interrupt (n, srs); 1087 board_bind_eic_interrupt (n, srs);
1043 } 1088 } else if (cpu_has_vint) {
1044 else if (cpu_has_vint) {
1045 /* SRSMap is only defined if shadow sets are implemented */ 1089 /* SRSMap is only defined if shadow sets are implemented */
1046 if (mips_srs_max() > 1) 1090 if (mips_srs_max() > 1)
1047 change_c0_srsmap (0xf << n*4, srs << n*4); 1091 change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
1055 1099
1056 extern char except_vec_vi, except_vec_vi_lui; 1100 extern char except_vec_vi, except_vec_vi_lui;
1057 extern char except_vec_vi_ori, except_vec_vi_end; 1101 extern char except_vec_vi_ori, except_vec_vi_end;
1102#ifdef CONFIG_MIPS_MT_SMTC
1103 /*
1104 * We need to provide the SMTC vectored interrupt handler
1105 * not only with the address of the handler, but with the
1106 * Status.IM bit to be masked before going there.
1107 */
1108 extern char except_vec_vi_mori;
1109 const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
1110#endif /* CONFIG_MIPS_MT_SMTC */
1058 const int handler_len = &except_vec_vi_end - &except_vec_vi; 1111 const int handler_len = &except_vec_vi_end - &except_vec_vi;
1059 const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1112 const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
1060 const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1113 const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
1068 } 1121 }
1069 1122
1070 memcpy (b, &except_vec_vi, handler_len); 1123 memcpy (b, &except_vec_vi, handler_len);
1124#ifdef CONFIG_MIPS_MT_SMTC
1125 if (n > 7)
1126 printk("Vector index %d exceeds SMTC maximum\n", n);
1127 w = (u32 *)(b + mori_offset);
1128 *w = (*w & 0xffff0000) | (0x100 << n);
1129#endif /* CONFIG_MIPS_MT_SMTC */
1071 w = (u32 *)(b + lui_offset); 1130 w = (u32 *)(b + lui_offset);
1072 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1131 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1073 w = (u32 *)(b + ori_offset); 1132 w = (u32 *)(b + ori_offset);
@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
1090 return (void *)old_handler; 1149 return (void *)old_handler;
1091} 1150}
1092 1151
1093void *set_vi_handler (int n, void *addr) 1152void *set_vi_handler(int n, void *addr)
1094{ 1153{
1095 return set_vi_srs_handler(n, addr, 0); 1154 return set_vi_srs_handler(n, addr, 0);
1096} 1155}
@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
1108extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); 1167extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
1109extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); 1168extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
1110 1169
1170#ifdef CONFIG_SMP
1171static int smp_save_fp_context(struct sigcontext *sc)
1172{
1173 return cpu_has_fpu
1174 ? _save_fp_context(sc)
1175 : fpu_emulator_save_context(sc);
1176}
1177
1178static int smp_restore_fp_context(struct sigcontext *sc)
1179{
1180 return cpu_has_fpu
1181 ? _restore_fp_context(sc)
1182 : fpu_emulator_restore_context(sc);
1183}
1184#endif
1185
1111static inline void signal_init(void) 1186static inline void signal_init(void)
1112{ 1187{
1188#ifdef CONFIG_SMP
1189 /* For now just do the cpu_has_fpu check when the functions are invoked */
1190 save_fp_context = smp_save_fp_context;
1191 restore_fp_context = smp_restore_fp_context;
1192#else
1113 if (cpu_has_fpu) { 1193 if (cpu_has_fpu) {
1114 save_fp_context = _save_fp_context; 1194 save_fp_context = _save_fp_context;
1115 restore_fp_context = _restore_fp_context; 1195 restore_fp_context = _restore_fp_context;
@@ -1117,6 +1197,7 @@ static inline void signal_init(void)
1117 save_fp_context = fpu_emulator_save_context; 1197 save_fp_context = fpu_emulator_save_context;
1118 restore_fp_context = fpu_emulator_restore_context; 1198 restore_fp_context = fpu_emulator_restore_context;
1119 } 1199 }
1200#endif
1120} 1201}
1121 1202
1122#ifdef CONFIG_MIPS32_COMPAT 1203#ifdef CONFIG_MIPS32_COMPAT
@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void)
1153{ 1234{
1154 unsigned int cpu = smp_processor_id(); 1235 unsigned int cpu = smp_processor_id();
1155 unsigned int status_set = ST0_CU0; 1236 unsigned int status_set = ST0_CU0;
1237#ifdef CONFIG_MIPS_MT_SMTC
1238 int secondaryTC = 0;
1239 int bootTC = (cpu == 0);
1240
1241 /*
1242 * Only do per_cpu_trap_init() for first TC of Each VPE.
1243 * Note that this hack assumes that the SMTC init code
1244 * assigns TCs consecutively and in ascending order.
1245 */
1246
1247 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1248 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1249 secondaryTC = 1;
1250#endif /* CONFIG_MIPS_MT_SMTC */
1156 1251
1157 /* 1252 /*
1158 * Disable coprocessors and select 32-bit or 64-bit addressing 1253 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void)
1175 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ 1270 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
1176#endif 1271#endif
1177 1272
1273#ifdef CONFIG_MIPS_MT_SMTC
1274 if (!secondaryTC) {
1275#endif /* CONFIG_MIPS_MT_SMTC */
1276
1178 /* 1277 /*
1179 * Interrupt handling. 1278 * Interrupt handling.
1180 */ 1279 */
@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void)
1191 } else 1290 } else
1192 set_c0_cause(CAUSEF_IV); 1291 set_c0_cause(CAUSEF_IV);
1193 } 1292 }
1293#ifdef CONFIG_MIPS_MT_SMTC
1294 }
1295#endif /* CONFIG_MIPS_MT_SMTC */
1194 1296
1195 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1297 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1196 TLBMISS_HANDLER_SETUP(); 1298 TLBMISS_HANDLER_SETUP();
@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void)
1200 BUG_ON(current->mm); 1302 BUG_ON(current->mm);
1201 enter_lazy_tlb(&init_mm, current); 1303 enter_lazy_tlb(&init_mm, current);
1202 1304
1203 cpu_cache_init(); 1305#ifdef CONFIG_MIPS_MT_SMTC
1204 tlb_init(); 1306 if (bootTC) {
1307#endif /* CONFIG_MIPS_MT_SMTC */
1308 cpu_cache_init();
1309 tlb_init();
1310#ifdef CONFIG_MIPS_MT_SMTC
1311 }
1312#endif /* CONFIG_MIPS_MT_SMTC */
1205} 1313}
1206 1314
1207/* Install CPU exception handler */ 1315/* Install CPU exception handler */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 2ad0cedf29fe..14fa00e3cdfa 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -2,7 +2,7 @@
2#include <asm/asm-offsets.h> 2#include <asm/asm-offsets.h>
3#include <asm-generic/vmlinux.lds.h> 3#include <asm-generic/vmlinux.lds.h>
4 4
5#undef mips /* CPP really sucks for this job */ 5#undef mips
6#define mips mips 6#define mips mips
7OUTPUT_ARCH(mips) 7OUTPUT_ARCH(mips)
8ENTRY(kernel_entry) 8ENTRY(kernel_entry)
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c
index eab5a705e989..17dfe6a8cab9 100644
--- a/arch/mips/mips-boards/generic/init.c
+++ b/arch/mips/mips-boards/generic/init.c
@@ -220,7 +220,6 @@ void __init kgdb_config (void)
220 generic_putDebugChar (*s++); 220 generic_putDebugChar (*s++);
221 } 221 }
222 222
223 kgdb_enabled = 1;
224 /* Breakpoint is invoked after interrupts are initialised */ 223 /* Breakpoint is invoked after interrupts are initialised */
225 } 224 }
226} 225}
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 93f3bf2c2b22..a9f6124b3a22 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -30,6 +30,7 @@
30#include <linux/mc146818rtc.h> 30#include <linux/mc146818rtc.h>
31 31
32#include <asm/mipsregs.h> 32#include <asm/mipsregs.h>
33#include <asm/mipsmtregs.h>
33#include <asm/ptrace.h> 34#include <asm/ptrace.h>
34#include <asm/hardirq.h> 35#include <asm/hardirq.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
@@ -50,16 +51,23 @@ unsigned long cpu_khz;
50static char display_string[] = " LINUX ON ATLAS "; 51static char display_string[] = " LINUX ON ATLAS ";
51#endif 52#endif
52#if defined(CONFIG_MIPS_MALTA) 53#if defined(CONFIG_MIPS_MALTA)
54#if defined(CONFIG_MIPS_MT_SMTC)
55static char display_string[] = " SMTC LINUX ON MALTA ";
56#else
53static char display_string[] = " LINUX ON MALTA "; 57static char display_string[] = " LINUX ON MALTA ";
58#endif /* CONFIG_MIPS_MT_SMTC */
54#endif 59#endif
55#if defined(CONFIG_MIPS_SEAD) 60#if defined(CONFIG_MIPS_SEAD)
56static char display_string[] = " LINUX ON SEAD "; 61static char display_string[] = " LINUX ON SEAD ";
57#endif 62#endif
58static unsigned int display_count = 0; 63static unsigned int display_count;
59#define MAX_DISPLAY_COUNT (sizeof(display_string) - 8) 64#define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)
60 65
61static unsigned int timer_tick_count=0; 66#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
67
68static unsigned int timer_tick_count;
62static int mips_cpu_timer_irq; 69static int mips_cpu_timer_irq;
70extern void smtc_timer_broadcast(int);
63 71
64static inline void scroll_display_message(void) 72static inline void scroll_display_message(void)
65{ 73{
@@ -75,15 +83,55 @@ static void mips_timer_dispatch (struct pt_regs *regs)
75 do_IRQ (mips_cpu_timer_irq, regs); 83 do_IRQ (mips_cpu_timer_irq, regs);
76} 84}
77 85
86/*
87 * Redeclare until I get around to mopping up the timer code insanity on MIPS.
88 */
78extern int null_perf_irq(struct pt_regs *regs); 89extern int null_perf_irq(struct pt_regs *regs);
79 90
80extern int (*perf_irq)(struct pt_regs *regs); 91extern int (*perf_irq)(struct pt_regs *regs);
81 92
82irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 93irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
83{ 94{
84 int r2 = cpu_has_mips_r2;
85 int cpu = smp_processor_id(); 95 int cpu = smp_processor_id();
96 int r2 = cpu_has_mips_r2;
97
98#ifdef CONFIG_MIPS_MT_SMTC
99 /*
100 * In an SMTC system, one Count/Compare set exists per VPE.
101 * Which TC within a VPE gets the interrupt is essentially
102 * random - we only know that it shouldn't be one with
103 * IXMT set. Whichever TC gets the interrupt needs to
104 * send special interprocessor interrupts to the other
105 * TCs to make sure that they schedule, etc.
106 *
107 * That code is specific to the SMTC kernel, not to
108 * a particular platform, so it's invoked from
109 * the general MIPS timer_interrupt routine.
110 */
111
112 /*
113 * DVPE is necessary so long as cross-VPE interrupts
114 * are done via read-modify-write of Cause register.
115 */
116 int vpflags = dvpe();
117 write_c0_compare (read_c0_count() - 1);
118 clear_c0_cause(CPUCTR_IMASKBIT);
119 evpe(vpflags);
120
121 if (cpu_data[cpu].vpe_id == 0) {
122 timer_interrupt(irq, dev_id, regs);
123 scroll_display_message();
124 } else
125 write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
126 smtc_timer_broadcast(cpu_data[cpu].vpe_id);
86 127
128 if (cpu != 0)
129 /*
130 * Other CPUs should do profiling and process accounting
131 */
132 local_timer_interrupt(irq, dev_id, regs);
133
134#else /* CONFIG_MIPS_MT_SMTC */
87 if (cpu == 0) { 135 if (cpu == 0) {
88 /* 136 /*
89 * CPU 0 handles the global timer interrupt job and process 137 * CPU 0 handles the global timer interrupt job and process
@@ -107,12 +155,14 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
107 * More support needs to be added to kernel/time for 155 * More support needs to be added to kernel/time for
108 * counter/timer interrupts on multiple CPU's 156 * counter/timer interrupts on multiple CPU's
109 */ 157 */
110 write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ)); 158 write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
159
111 /* 160 /*
112 * other CPUs should do profiling and process accounting 161 * Other CPUs should do profiling and process accounting
113 */ 162 */
114 local_timer_interrupt (irq, dev_id, regs); 163 local_timer_interrupt(irq, dev_id, regs);
115 } 164 }
165#endif /* CONFIG_MIPS_MT_SMTC */
116 166
117out: 167out:
118 return IRQ_HANDLED; 168 return IRQ_HANDLED;
@@ -126,7 +176,7 @@ static unsigned int __init estimate_cpu_frequency(void)
126 unsigned int prid = read_c0_prid() & 0xffff00; 176 unsigned int prid = read_c0_prid() & 0xffff00;
127 unsigned int count; 177 unsigned int count;
128 178
129#ifdef CONFIG_MIPS_SEAD 179#if defined(CONFIG_MIPS_SEAD) || defined(CONFIG_MIPS_SIM)
130 /* 180 /*
131 * The SEAD board doesn't have a real time clock, so we can't 181 * The SEAD board doesn't have a real time clock, so we can't
132 * really calculate the timer frequency 182 * really calculate the timer frequency
@@ -211,7 +261,11 @@ void __init mips_timer_setup(struct irqaction *irq)
211 261
212 /* we are using the cpu counter for timer interrupts */ 262 /* we are using the cpu counter for timer interrupts */
213 irq->handler = mips_timer_interrupt; /* we use our own handler */ 263 irq->handler = mips_timer_interrupt; /* we use our own handler */
264#ifdef CONFIG_MIPS_MT_SMTC
265 setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
266#else
214 setup_irq(mips_cpu_timer_irq, irq); 267 setup_irq(mips_cpu_timer_irq, irq);
268#endif /* CONFIG_MIPS_MT_SMTC */
215 269
216#ifdef CONFIG_SMP 270#ifdef CONFIG_SMP
217 /* irq_desc(riptor) is a global resource, when the interrupt overlaps 271 /* irq_desc(riptor) is a global resource, when the interrupt overlaps
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index fd4c143c0e2f..77ee5c6d33c1 100644
--- a/arch/mips/mips-boards/malta/Makefile
+++ b/arch/mips/mips-boards/malta/Makefile
@@ -20,3 +20,4 @@
20# 20#
21 21
22obj-y := malta_int.o malta_setup.o 22obj-y := malta_int.o malta_setup.o
23obj-$(CONFIG_SMP) += malta_smp.o
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 1da8c18b9c8e..64db07d4dbe5 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -118,8 +118,9 @@ static void malta_hw0_irqdispatch(struct pt_regs *regs)
118 int irq; 118 int irq;
119 119
120 irq = get_int(); 120 irq = get_int();
121 if (irq < 0) 121 if (irq < 0) {
122 return; /* interrupt has already been cleared */ 122 return; /* interrupt has already been cleared */
123 }
123 124
124 do_IRQ(MALTA_INT_BASE+irq, regs); 125 do_IRQ(MALTA_INT_BASE+irq, regs);
125} 126}
@@ -324,9 +325,15 @@ void __init arch_init_irq(void)
324 else if (cpu_has_vint) { 325 else if (cpu_has_vint) {
325 set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); 326 set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
326 set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch); 327 set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);
327 328#ifdef CONFIG_MIPS_MT_SMTC
329 setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq,
330 (0x100 << MIPSCPU_INT_I8259A));
331 setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI,
332 &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
333#else /* Not SMTC */
328 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); 334 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
329 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); 335 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
336#endif /* CONFIG_MIPS_MT_SMTC */
330 } 337 }
331 else { 338 else {
332 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); 339 setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smp.c
new file mode 100644
index 000000000000..6c6c8eeedbce
--- /dev/null
+++ b/arch/mips/mips-boards/malta/malta_smp.c
@@ -0,0 +1,128 @@
1/*
2 * Malta Platform-specific hooks for SMP operation
3 */
4
5#include <linux/kernel.h>
6#include <linux/sched.h>
7#include <linux/cpumask.h>
8#include <linux/interrupt.h>
9
10#include <asm/atomic.h>
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/system.h>
14#include <asm/hardirq.h>
15#include <asm/mmu_context.h>
16#include <asm/smp.h>
17#ifdef CONFIG_MIPS_MT_SMTC
18#include <asm/smtc_ipi.h>
19#endif /* CONFIG_MIPS_MT_SMTC */
20
21/* VPE/SMP Prototype implements platform interfaces directly */
22#if !defined(CONFIG_MIPS_MT_SMP)
23
24/*
25 * Cause the specified action to be performed on a targeted "CPU"
26 */
27
28void core_send_ipi(int cpu, unsigned int action)
29{
30/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
31#ifdef CONFIG_MIPS_MT_SMTC
32 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
33#endif /* CONFIG_MIPS_MT_SMTC */
34}
35
36/*
37 * Detect available CPUs/VPEs/TCs and populate phys_cpu_present_map
38 */
39
40void __init prom_build_cpu_map(void)
41{
42 int nextslot;
43
44 /*
45 * As of November, 2004, MIPSsim only simulates one core
46 * at a time. However, that core may be a MIPS MT core
47 * with multiple virtual processors and thread contexts.
48 */
49
50 if (read_c0_config3() & (1<<2)) {
51 nextslot = mipsmt_build_cpu_map(1);
52 }
53}
54
55/*
56 * Platform "CPU" startup hook
57 */
58
59void prom_boot_secondary(int cpu, struct task_struct *idle)
60{
61#ifdef CONFIG_MIPS_MT_SMTC
62 smtc_boot_secondary(cpu, idle);
63#endif /* CONFIG_MIPS_MT_SMTC */
64}
65
66/*
67 * Post-config but pre-boot cleanup entry point
68 */
69
70void prom_init_secondary(void)
71{
72#ifdef CONFIG_MIPS_MT_SMTC
73 void smtc_init_secondary(void);
74 int myvpe;
75
76 /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
77 myvpe = read_c0_tcbind() & TCBIND_CURVPE;
78 if (myvpe != 0) {
79 /* Ideally, this should be done only once per VPE, but... */
80 clear_c0_status(STATUSF_IP2);
81 set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
82 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
83 | STATUSF_IP7);
84 }
85
86 smtc_init_secondary();
87#endif /* CONFIG_MIPS_MT_SMTC */
88}
89
90/*
91 * Platform SMP pre-initialization
92 *
93 * As noted above, we can assume a single CPU for now
94 * but it may be multithreaded.
95 */
96
97void plat_smp_setup(void)
98{
99 if (read_c0_config3() & (1<<2))
100 mipsmt_build_cpu_map(0);
101}
102
103void __init plat_prepare_cpus(unsigned int max_cpus)
104{
105 if (read_c0_config3() & (1<<2))
106 mipsmt_prepare_cpus();
107}
108
109/*
110 * SMP initialization finalization entry point
111 */
112
113void prom_smp_finish(void)
114{
115#ifdef CONFIG_MIPS_MT_SMTC
116 smtc_smp_finish();
117#endif /* CONFIG_MIPS_MT_SMTC */
118}
119
120/*
121 * Hook for after all CPUs are online
122 */
123
124void prom_cpus_done(void)
125{
126}
127
128#endif /* CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/mips-boards/sim/cmdline.c b/arch/mips/mips-boards/sim/cmdline.c
deleted file mode 100644
index fef9fbd8e710..000000000000
--- a/arch/mips/mips-boards/sim/cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 *
18 * Kernel command line creation using the prom monitor (YAMON) argc/argv.
19 */
20#include <linux/init.h>
21#include <linux/string.h>
22
23#include <asm/bootinfo.h>
24
25extern int prom_argc;
26extern int *_prom_argv;
27
28/*
29 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
30 * This macro take care of sign extension.
31 */
32#define prom_argv(index) ((char *)(((int *)(int)_prom_argv)[(index)]))
33
34char arcs_cmdline[CL_SIZE];
35
36char * __init prom_getcmdline(void)
37{
38 return &(arcs_cmdline[0]);
39}
40
41
42void __init prom_init_cmdline(void)
43{
44 char *cp;
45 int actr;
46
47 actr = 1; /* Always ignore argv[0] */
48
49 cp = &(arcs_cmdline[0]);
50 while(actr < prom_argc) {
51 strcpy(cp, prom_argv(actr));
52 cp += strlen(prom_argv(actr));
53 *cp++ = ' ';
54 actr++;
55 }
56 if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
57 --cp;
58 *cp = '\0';
59}
diff --git a/arch/mips/mips-boards/sim/sim_cmdline.c b/arch/mips/mips-boards/sim/sim_cmdline.c
index 9df37c6fca36..c63021a5dc6c 100644
--- a/arch/mips/mips-boards/sim/sim_cmdline.c
+++ b/arch/mips/mips-boards/sim/sim_cmdline.c
@@ -26,8 +26,10 @@ char * __init prom_getcmdline(void)
26 return arcs_cmdline; 26 return arcs_cmdline;
27} 27}
28 28
29
30void __init prom_init_cmdline(void) 29void __init prom_init_cmdline(void)
31{ 30{
32 /* nothing to do */ 31 char *cp;
32 cp = arcs_cmdline;
33 /* Get boot line from environment? */
34 *cp = '\0';
33} 35}
diff --git a/arch/mips/mips-boards/sim/sim_smp.c b/arch/mips/mips-boards/sim/sim_smp.c
index a9f0c2bfe4ad..b7084e7c4bf9 100644
--- a/arch/mips/mips-boards/sim/sim_smp.c
+++ b/arch/mips/mips-boards/sim/sim_smp.c
@@ -44,8 +44,6 @@
44void core_send_ipi(int cpu, unsigned int action) 44void core_send_ipi(int cpu, unsigned int action)
45{ 45{
46#ifdef CONFIG_MIPS_MT_SMTC 46#ifdef CONFIG_MIPS_MT_SMTC
47 void smtc_send_ipi(int, int, unsigned int);
48
49 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 47 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
50#endif /* CONFIG_MIPS_MT_SMTC */ 48#endif /* CONFIG_MIPS_MT_SMTC */
51/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 49/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
@@ -59,15 +57,8 @@ void core_send_ipi(int cpu, unsigned int action)
59void __init prom_build_cpu_map(void) 57void __init prom_build_cpu_map(void)
60{ 58{
61#ifdef CONFIG_MIPS_MT_SMTC 59#ifdef CONFIG_MIPS_MT_SMTC
62 extern int mipsmt_build_cpu_map(int startslot);
63 int nextslot; 60 int nextslot;
64 61
65 cpus_clear(phys_cpu_present_map);
66
67 /* Register the boot CPU */
68
69 smp_prepare_boot_cpu();
70
71 /* 62 /*
72 * As of November, 2004, MIPSsim only simulates one core 63 * As of November, 2004, MIPSsim only simulates one core
73 * at a time. However, that core may be a MIPS MT core 64 * at a time. However, that core may be a MIPS MT core
@@ -87,8 +78,6 @@ void __init prom_build_cpu_map(void)
87void prom_boot_secondary(int cpu, struct task_struct *idle) 78void prom_boot_secondary(int cpu, struct task_struct *idle)
88{ 79{
89#ifdef CONFIG_MIPS_MT_SMTC 80#ifdef CONFIG_MIPS_MT_SMTC
90 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
91
92 smtc_boot_secondary(cpu, idle); 81 smtc_boot_secondary(cpu, idle);
93#endif /* CONFIG_MIPS_MT_SMTC */ 82#endif /* CONFIG_MIPS_MT_SMTC */
94} 83}
@@ -113,7 +102,6 @@ void prom_init_secondary(void)
113void prom_prepare_cpus(unsigned int max_cpus) 102void prom_prepare_cpus(unsigned int max_cpus)
114{ 103{
115#ifdef CONFIG_MIPS_MT_SMTC 104#ifdef CONFIG_MIPS_MT_SMTC
116 void mipsmt_prepare_cpus(int c);
117 /* 105 /*
118 * As noted above, we can assume a single CPU for now 106 * As noted above, we can assume a single CPU for now
119 * but it may be multithreaded. 107 * but it may be multithreaded.
@@ -132,8 +120,6 @@ void prom_prepare_cpus(unsigned int max_cpus)
132void prom_smp_finish(void) 120void prom_smp_finish(void)
133{ 121{
134#ifdef CONFIG_MIPS_MT_SMTC 122#ifdef CONFIG_MIPS_MT_SMTC
135 void smtc_smp_finish(void);
136
137 smtc_smp_finish(); 123 smtc_smp_finish();
138#endif /* CONFIG_MIPS_MT_SMTC */ 124#endif /* CONFIG_MIPS_MT_SMTC */
139} 125}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 2d9624fd10ec..e3a617224868 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -157,7 +157,6 @@ no_context:
157 * Oops. The kernel tried to access some bad page. We'll have to 157 * Oops. The kernel tried to access some bad page. We'll have to
158 * terminate things with extreme prejudice. 158 * terminate things with extreme prejudice.
159 */ 159 */
160
161 bust_spinlocks(1); 160 bust_spinlocks(1);
162 161
163 printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " 162 printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
@@ -188,11 +187,20 @@ do_sigbus:
188 /* Kernel mode? Handle exceptions or die */ 187 /* Kernel mode? Handle exceptions or die */
189 if (!user_mode(regs)) 188 if (!user_mode(regs))
190 goto no_context; 189 goto no_context;
191 190 else
192 /* 191 /*
193 * Send a sigbus, regardless of whether we were in kernel 192 * Send a sigbus, regardless of whether we were in kernel
194 * or user mode. 193 * or user mode.
195 */ 194 */
195#if 0
196 printk("do_page_fault() #3: sending SIGBUS to %s for "
197 "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
198 tsk->comm,
199 write ? "write access to" : "read access from",
200 field, address,
201 field, (unsigned long) regs->cp0_epc,
202 field, (unsigned long) regs->regs[31]);
203#endif
196 tsk->thread.cp0_badvaddr = address; 204 tsk->thread.cp0_badvaddr = address;
197 info.si_signo = SIGBUS; 205 info.si_signo = SIGBUS;
198 info.si_errno = 0; 206 info.si_errno = 0;
@@ -201,7 +209,6 @@ do_sigbus:
201 force_sig_info(SIGBUS, &info, tsk); 209 force_sig_info(SIGBUS, &info, tsk);
202 210
203 return; 211 return;
204
205vmalloc_fault: 212vmalloc_fault:
206 { 213 {
207 /* 214 /*
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index a865f2394cb0..9dca099ba16b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void);
32 "nop; nop; nop; nop; nop; nop;\n\t" \ 32 "nop; nop; nop; nop; nop; nop;\n\t" \
33 ".set reorder\n\t") 33 ".set reorder\n\t")
34 34
35/* Atomicity and interruptibility */
36#ifdef CONFIG_MIPS_MT_SMTC
37
38#include <asm/smtc.h>
39#include <asm/mipsmtregs.h>
40
41#define ENTER_CRITICAL(flags) \
42 { \
43 unsigned int mvpflags; \
44 local_irq_save(flags);\
45 mvpflags = dvpe()
46#define EXIT_CRITICAL(flags) \
47 evpe(mvpflags); \
48 local_irq_restore(flags); \
49 }
50#else
51
52#define ENTER_CRITICAL(flags) local_irq_save(flags)
53#define EXIT_CRITICAL(flags) local_irq_restore(flags)
54
55#endif /* CONFIG_MIPS_MT_SMTC */
56
35void local_flush_tlb_all(void) 57void local_flush_tlb_all(void)
36{ 58{
37 unsigned long flags; 59 unsigned long flags;
38 unsigned long old_ctx; 60 unsigned long old_ctx;
39 int entry; 61 int entry;
40 62
41 local_irq_save(flags); 63 ENTER_CRITICAL(flags);
42 /* Save old context and create impossible VPN2 value */ 64 /* Save old context and create impossible VPN2 value */
43 old_ctx = read_c0_entryhi(); 65 old_ctx = read_c0_entryhi();
44 write_c0_entrylo0(0); 66 write_c0_entrylo0(0);
@@ -57,7 +79,7 @@ void local_flush_tlb_all(void)
57 } 79 }
58 tlbw_use_hazard(); 80 tlbw_use_hazard();
59 write_c0_entryhi(old_ctx); 81 write_c0_entryhi(old_ctx);
60 local_irq_restore(flags); 82 EXIT_CRITICAL(flags);
61} 83}
62 84
63/* All entries common to a mm share an asid. To effectively flush 85/* All entries common to a mm share an asid. To effectively flush
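The ENTER_CRITICAL()/EXIT_CRITICAL() pair introduced above is deliberately asymmetric as text: the SMTC flavour opens a brace and declares mvpflags in the prologue so the MVPControl state saved by dvpe() lives exactly as long as the bracketed TLB operation. A compilable sketch of that pairing, with printf stubs standing in for the real interrupt and VPE controls:

/* Userspace sketch of how the ENTER_CRITICAL()/EXIT_CRITICAL() text
 * nests around a critical section.  The stubs only print; no CP0
 * registers are touched here. */
#include <stdio.h>

static unsigned int dvpe(void)        { puts("dvpe: other VPEs stopped"); return 1; }
static void evpe(unsigned int flags)  { printf("evpe: restored %u\n", flags); }
static void local_irq_restore(unsigned long flags) { (void)flags; puts("irqs back on"); }
#define local_irq_save(flags) do { puts("irqs off"); (flags) = 0; } while (0)

#define ENTER_CRITICAL(flags)			\
	{					\
		unsigned int mvpflags;		\
		local_irq_save(flags);		\
		mvpflags = dvpe()
#define EXIT_CRITICAL(flags)			\
		evpe(mvpflags);			\
		local_irq_restore(flags);	\
	}

int main(void)
{
	unsigned long flags;

	ENTER_CRITICAL(flags);
	puts("  ... touch shared TLB state ...");
	EXIT_CRITICAL(flags);
	return 0;
}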
@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 unsigned long flags; 109 unsigned long flags;
88 int size; 110 int size;
89 111
112 ENTER_CRITICAL(flags);
90 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 113 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
91 size = (size + 1) >> 1; 114 size = (size + 1) >> 1;
92 local_irq_save(flags); 115 local_irq_save(flags);
@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
120 } else { 143 } else {
121 drop_mmu_context(mm, cpu); 144 drop_mmu_context(mm, cpu);
122 } 145 }
123 local_irq_restore(flags); 146 EXIT_CRITICAL(flags);
124 } 147 }
125} 148}
126 149
@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
129 unsigned long flags; 152 unsigned long flags;
130 int size; 153 int size;
131 154
155 ENTER_CRITICAL(flags);
132 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 156 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
133 size = (size + 1) >> 1; 157 size = (size + 1) >> 1;
134 local_irq_save(flags);
135 if (size <= current_cpu_data.tlbsize / 2) { 158 if (size <= current_cpu_data.tlbsize / 2) {
136 int pid = read_c0_entryhi(); 159 int pid = read_c0_entryhi();
137 160
@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
162 } else { 185 } else {
163 local_flush_tlb_all(); 186 local_flush_tlb_all();
164 } 187 }
165 local_irq_restore(flags); 188 EXIT_CRITICAL(flags);
166} 189}
167 190
168void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 191void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
175 198
176 newpid = cpu_asid(cpu, vma->vm_mm); 199 newpid = cpu_asid(cpu, vma->vm_mm);
177 page &= (PAGE_MASK << 1); 200 page &= (PAGE_MASK << 1);
178 local_irq_save(flags); 201 ENTER_CRITICAL(flags);
179 oldpid = read_c0_entryhi(); 202 oldpid = read_c0_entryhi();
180 write_c0_entryhi(page | newpid); 203 write_c0_entryhi(page | newpid);
181 mtc0_tlbw_hazard(); 204 mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
194 217
195 finish: 218 finish:
196 write_c0_entryhi(oldpid); 219 write_c0_entryhi(oldpid);
197 local_irq_restore(flags); 220 EXIT_CRITICAL(flags);
198 } 221 }
199} 222}
200 223
@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
207 unsigned long flags; 230 unsigned long flags;
208 int oldpid, idx; 231 int oldpid, idx;
209 232
210 local_irq_save(flags); 233 ENTER_CRITICAL(flags);
211 oldpid = read_c0_entryhi(); 234 oldpid = read_c0_entryhi();
212 page &= (PAGE_MASK << 1); 235 page &= (PAGE_MASK << 1);
213 write_c0_entryhi(page); 236 write_c0_entryhi(page);
@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page)
226 } 249 }
227 write_c0_entryhi(oldpid); 250 write_c0_entryhi(oldpid);
228 251
229 local_irq_restore(flags); 252 EXIT_CRITICAL(flags);
230} 253}
231 254
232/* 255/*
@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
249 if (current->active_mm != vma->vm_mm) 272 if (current->active_mm != vma->vm_mm)
250 return; 273 return;
251 274
252 local_irq_save(flags); 275 ENTER_CRITICAL(flags);
253 276
254 pid = read_c0_entryhi() & ASID_MASK; 277 pid = read_c0_entryhi() & ASID_MASK;
255 address &= (PAGE_MASK << 1); 278 address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
277 else 300 else
278 tlb_write_indexed(); 301 tlb_write_indexed();
279 tlbw_use_hazard(); 302 tlbw_use_hazard();
280 local_irq_restore(flags); 303 EXIT_CRITICAL(flags);
281} 304}
282 305
283#if 0 306#if 0
@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
291 pte_t *ptep; 314 pte_t *ptep;
292 int idx; 315 int idx;
293 316
294 local_irq_save(flags); 317 ENTER_CRITICAL(flags);
295 address &= (PAGE_MASK << 1); 318 address &= (PAGE_MASK << 1);
296 asid = read_c0_entryhi() & ASID_MASK; 319 asid = read_c0_entryhi() & ASID_MASK;
297 write_c0_entryhi(address | asid); 320 write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
310 else 333 else
311 tlb_write_indexed(); 334 tlb_write_indexed();
312 tlbw_use_hazard(); 335 tlbw_use_hazard();
313 local_irq_restore(flags); 336 EXIT_CRITICAL(flags);
314} 337}
315#endif 338#endif
316 339
@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
322 unsigned long old_pagemask; 345 unsigned long old_pagemask;
323 unsigned long old_ctx; 346 unsigned long old_ctx;
324 347
325 local_irq_save(flags); 348 ENTER_CRITICAL(flags);
326 /* Save old context and create impossible VPN2 value */ 349 /* Save old context and create impossible VPN2 value */
327 old_ctx = read_c0_entryhi(); 350 old_ctx = read_c0_entryhi();
328 old_pagemask = read_c0_pagemask(); 351 old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
342 BARRIER; 365 BARRIER;
343 write_c0_pagemask(old_pagemask); 366 write_c0_pagemask(old_pagemask);
344 local_flush_tlb_all(); 367 local_flush_tlb_all();
345 local_irq_restore(flags); 368 EXIT_CRITICAL(flags);
346} 369}
347 370
348/* 371/*
@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
362 unsigned long old_pagemask; 385 unsigned long old_pagemask;
363 unsigned long old_ctx; 386 unsigned long old_ctx;
364 387
365 local_irq_save(flags); 388 ENTER_CRITICAL(flags);
366 /* Save old context and create impossible VPN2 value */ 389 /* Save old context and create impossible VPN2 value */
367 old_ctx = read_c0_entryhi(); 390 old_ctx = read_c0_entryhi();
368 old_pagemask = read_c0_pagemask(); 391 old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
386 write_c0_entryhi(old_ctx); 409 write_c0_entryhi(old_ctx);
387 write_c0_pagemask(old_pagemask); 410 write_c0_pagemask(old_pagemask);
388out: 411out:
389 local_irq_restore(flags); 412 EXIT_CRITICAL(flags);
390 return ret; 413 return ret;
391} 414}
392 415
416extern void __init sanitize_tlb_entries(void);
393static void __init probe_tlb(unsigned long config) 417static void __init probe_tlb(unsigned long config)
394{ 418{
395 struct cpuinfo_mips *c = &current_cpu_data; 419 struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config)
402 */ 426 */
403 if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) 427 if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
404 return; 428 return;
429#ifdef CONFIG_MIPS_MT_SMTC
430 /*
431 * If TLB is shared in SMTC system, total size already
432 * has been calculated and written into cpu_data tlbsize
433 */
434 if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
435 return;
436#endif /* CONFIG_MIPS_MT_SMTC */
405 437
406 reg = read_c0_config1(); 438 reg = read_c0_config1();
407 if (!((config >> 7) & 3)) 439 if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config)
410 c->tlbsize = ((reg >> 25) & 0x3f) + 1; 442 c->tlbsize = ((reg >> 25) & 0x3f) + 1;
411} 443}
412 444
445static int __initdata ntlb = 0;
446static int __init set_ntlb(char *str)
447{
448 get_option(&str, &ntlb);
449 return 1;
450}
451
452__setup("ntlb=", set_ntlb);
453
413void __init tlb_init(void) 454void __init tlb_init(void)
414{ 455{
415 unsigned int config = read_c0_config(); 456 unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@ void __init tlb_init(void)
432 473
433 /* Did I tell you that ARC SUCKS? */ 474 /* Did I tell you that ARC SUCKS? */
434 475
476 if (ntlb) {
477 if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
478 int wired = current_cpu_data.tlbsize - ntlb;
479 write_c0_wired(wired);
480 write_c0_index(wired-1);
481 printk ("Restricting TLB to %d entries\n", ntlb);
482 } else
483 printk("Ignoring invalid argument ntlb=%d\n", ntlb);
484 }
485
435 build_tlb_refill_handler(); 486 build_tlb_refill_handler();
436} 487}
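The new "ntlb=" boot parameter works by wiring down the low end of the TLB so that only ntlb entries remain eligible for random replacement. A small sketch of the arithmetic, using invented sizes (a real core reports its tlbsize via Config1 in probe_tlb() above):

/* Userspace sketch of the "ntlb=" restriction: wiring the low entries
 * leaves only `ntlb` slots for tlbwr.  All values are illustrative. */
#include <stdio.h>

int main(void)
{
	int tlbsize = 64;	/* what probe_tlb() would have found */
	int ntlb    = 16;	/* e.g. booted with "ntlb=16"        */

	if (ntlb > 1 && ntlb <= tlbsize) {
		int wired = tlbsize - ntlb;
		printf("c0_wired = %d, entries %d..%d left for tlbwr\n",
		       wired, wired, tlbsize - 1);
	} else {
		printf("Ignoring invalid argument ntlb=%d\n", ntlb);
	}
	return 0;
}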
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index c5eea6ae12ca..053dbacac56b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -7,6 +7,16 @@
7 * 7 *
8 * Copyright (C) 2004,2005 by Thiemo Seufer 8 * Copyright (C) 2004,2005 by Thiemo Seufer
9 * Copyright (C) 2005 Maciej W. Rozycki 9 * Copyright (C) 2005 Maciej W. Rozycki
10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
11 *
12 * ... and the days got worse and worse and now you see
13 * I've gone completely out of my mind.
14 *
15 * They're coming to take me away haha
16 * they're coming to take me away hoho hihi haha
17 * to the funny farm where code is beautiful all the time ...
18 *
19 * (Condolences to Napoleon XIV)
10 */ 20 */
11 21
12#include <stdarg.h> 22#include <stdarg.h>
@@ -68,6 +78,7 @@ enum fields
68 BIMM = 0x040, 78 BIMM = 0x040,
69 JIMM = 0x080, 79 JIMM = 0x080,
70 FUNC = 0x100, 80 FUNC = 0x100,
81 SET = 0x200
71}; 82};
72 83
73#define OP_MASK 0x2f 84#define OP_MASK 0x2f
@@ -86,6 +97,8 @@ enum fields
86#define JIMM_SH 0 97#define JIMM_SH 0
87#define FUNC_MASK 0x2f 98#define FUNC_MASK 0x2f
88#define FUNC_SH 0 99#define FUNC_SH 0
100#define SET_MASK 0x7
101#define SET_SH 0
89 102
90enum opcode { 103enum opcode {
91 insn_invalid, 104 insn_invalid,
@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = {
129 { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM }, 142 { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
130 { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM }, 143 { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
131 { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD }, 144 { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
132 { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD }, 145 { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
133 { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD }, 146 { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
134 { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE }, 147 { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
135 { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, 148 { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
136 { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, 149 { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = {
145 { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM }, 158 { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
146 { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM }, 159 { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
147 { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM }, 160 { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
148 { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD }, 161 { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
149 { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD }, 162 { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
150 { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM }, 163 { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
151 { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 }, 164 { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
152 { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM }, 165 { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg)
242 return arg & FUNC_MASK; 255 return arg & FUNC_MASK;
243} 256}
244 257
258static __init u32 build_set(u32 arg)
259{
260 if (arg & ~SET_MASK)
261 printk(KERN_WARNING "TLB synthesizer field overflow\n");
262
263 return arg & SET_MASK;
264}
265
245/* 266/*
246 * The order of opcode arguments is implicitly left to right, 267 * The order of opcode arguments is implicitly left to right,
247 * starting with RS and ending with FUNC or IMM. 268 * starting with RS and ending with FUNC or IMM.
@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
273 if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); 294 if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
274 if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); 295 if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
275 if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); 296 if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
297 if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
276 va_end(ap); 298 va_end(ap);
277 299
278 **buf = op; 300 **buf = op;
@@ -358,8 +380,8 @@ I_u1s2(_bgezl);
358I_u1s2(_bltz); 380I_u1s2(_bltz);
359I_u1s2(_bltzl); 381I_u1s2(_bltzl);
360I_u1u2s3(_bne); 382I_u1u2s3(_bne);
361I_u1u2(_dmfc0); 383I_u1u2u3(_dmfc0);
362I_u1u2(_dmtc0); 384I_u1u2u3(_dmtc0);
363I_u2u1s3(_daddiu); 385I_u2u1s3(_daddiu);
364I_u3u1u2(_daddu); 386I_u3u1u2(_daddu);
365I_u2u1u3(_dsll); 387I_u2u1u3(_dsll);
@@ -376,8 +398,8 @@ I_u2s3u1(_ll);
376I_u2s3u1(_lld); 398I_u2s3u1(_lld);
377I_u1s2(_lui); 399I_u1s2(_lui);
378I_u2s3u1(_lw); 400I_u2s3u1(_lw);
379I_u1u2(_mfc0); 401I_u1u2u3(_mfc0);
380I_u1u2(_mtc0); 402I_u1u2u3(_mtc0);
381I_u2u1u3(_ori); 403I_u2u1u3(_ori);
382I_0(_rfe); 404I_0(_rfe);
383I_u2s3u1(_sc); 405I_u2s3u1(_sc);
@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail)
451# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh) 473# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
452# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh) 474# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
453# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh) 475# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
454# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd) 476# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
455# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd) 477# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
456# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val) 478# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
457# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd) 479# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
458# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd) 480# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail)
464# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh) 486# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
465# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh) 487# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
466# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh) 488# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
467# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd) 489# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
468# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd) 490# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
469# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val) 491# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
470# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd) 492# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
471# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd) 493# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
670#define K1 27 692#define K1 27
671 693
672/* Some CP0 registers */ 694/* Some CP0 registers */
673#define C0_INDEX 0 695#define C0_INDEX 0, 0
674#define C0_ENTRYLO0 2 696#define C0_ENTRYLO0 2, 0
675#define C0_ENTRYLO1 3 697#define C0_TCBIND 2, 2
676#define C0_CONTEXT 4 698#define C0_ENTRYLO1 3, 0
677#define C0_BADVADDR 8 699#define C0_CONTEXT 4, 0
678#define C0_ENTRYHI 10 700#define C0_BADVADDR 8, 0
679#define C0_EPC 14 701#define C0_ENTRYHI 10, 0
680#define C0_XCONTEXT 20 702#define C0_EPC 14, 0
703#define C0_XCONTEXT 20, 0
681 704
682#ifdef CONFIG_64BIT 705#ifdef CONFIG_64BIT
683# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT) 706# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
@@ -951,12 +974,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
951 /* No i_nop needed here, since the next insn doesn't touch TMP. */ 974 /* No i_nop needed here, since the next insn doesn't touch TMP. */
952 975
953#ifdef CONFIG_SMP 976#ifdef CONFIG_SMP
977# ifdef CONFIG_MIPS_MT_SMTC
978 /*
979 * SMTC uses TCBind value as "CPU" index
980 */
981 i_mfc0(p, ptr, C0_TCBIND);
982 i_dsrl(p, ptr, ptr, 19);
983# else
954 /* 984 /*
955 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 985 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
956 * stored in CONTEXT. 986 * stored in CONTEXT.
957 */ 987 */
958 i_dmfc0(p, ptr, C0_CONTEXT); 988 i_dmfc0(p, ptr, C0_CONTEXT);
959 i_dsrl(p, ptr, ptr, 23); 989 i_dsrl(p, ptr, ptr, 23);
990# endif
960 i_LA_mostly(p, tmp, pgdc); 991 i_LA_mostly(p, tmp, pgdc);
961 i_daddu(p, ptr, ptr, tmp); 992 i_daddu(p, ptr, ptr, tmp);
962 i_dmfc0(p, tmp, C0_BADVADDR); 993 i_dmfc0(p, tmp, C0_BADVADDR);
@@ -1014,9 +1045,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
1014 1045
1015 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ 1046 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
1016#ifdef CONFIG_SMP 1047#ifdef CONFIG_SMP
1048#ifdef CONFIG_MIPS_MT_SMTC
1049 /*
1050 * SMTC uses TCBind value as "CPU" index
1051 */
1052 i_mfc0(p, ptr, C0_TCBIND);
1053 i_LA_mostly(p, tmp, pgdc);
1054 i_srl(p, ptr, ptr, 19);
1055#else
1056 /*
1057 * smp_processor_id() << 3 is stored in CONTEXT.
1058 */
1017 i_mfc0(p, ptr, C0_CONTEXT); 1059 i_mfc0(p, ptr, C0_CONTEXT);
1018 i_LA_mostly(p, tmp, pgdc); 1060 i_LA_mostly(p, tmp, pgdc);
1019 i_srl(p, ptr, ptr, 23); 1061 i_srl(p, ptr, ptr, 23);
1062#endif
1020 i_addu(p, ptr, tmp, ptr); 1063 i_addu(p, ptr, tmp, ptr);
1021#else 1064#else
1022 i_LA_mostly(p, ptr, pgdc); 1065 i_LA_mostly(p, ptr, pgdc);
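The point of the new SET field is that mfc0/mtc0 can now carry a 3-bit select, which is what lets the synthesized refill handler read C0_TCBIND, i.e. register $2 select 2, as the SMTC "CPU" index. A sketch of the resulting instruction word, assuming the standard MIPS32 mfc0 encoding; the choice of $27 (k1) as the destination register is only an example:

/* Userspace sketch: build an mfc0-with-select word the way the TLB
 * synthesizer does, ORing a 3-bit sel into bits 2:0. */
#include <stdio.h>

#define SET_MASK 0x7

static unsigned int build_set(unsigned int arg)
{
	if (arg & ~SET_MASK)
		printf("TLB synthesizer field overflow\n");
	return arg & SET_MASK;
}

static unsigned int mfc0(unsigned int rt, unsigned int rd, unsigned int sel)
{
	/* cop0 major opcode, "mf" minor op, then rt/rd/sel fields */
	return 0x40000000u | (rt << 16) | (rd << 11) | build_set(sel);
}

int main(void)
{
	/* mfc0 $27, $2, 2  --  read C0_TCBIND into k1 */
	printf("0x%08x\n", mfc0(27, 2, 2));	/* prints 0x401b1002 */
	return 0;
}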
diff --git a/include/asm-mips/asmmacro.h b/include/asm-mips/asmmacro.h
index 30b18ea6cb11..f54aa147ec19 100644
--- a/include/asm-mips/asmmacro.h
+++ b/include/asm-mips/asmmacro.h
@@ -17,7 +17,26 @@
17#ifdef CONFIG_64BIT 17#ifdef CONFIG_64BIT
18#include <asm/asmmacro-64.h> 18#include <asm/asmmacro-64.h>
19#endif 19#endif
20#ifdef CONFIG_MIPS_MT_SMTC
21#include <asm/mipsmtregs.h>
22#endif
20 23
24#ifdef CONFIG_MIPS_MT_SMTC
25 .macro local_irq_enable reg=t0
26 mfc0 \reg, CP0_TCSTATUS
27 ori \reg, \reg, TCSTATUS_IXMT
28 xori \reg, \reg, TCSTATUS_IXMT
29 mtc0 \reg, CP0_TCSTATUS
30 ehb
31 .endm
32
33 .macro local_irq_disable reg=t0
34 mfc0 \reg, CP0_TCSTATUS
35 ori \reg, \reg, TCSTATUS_IXMT
36 mtc0 \reg, CP0_TCSTATUS
37 ehb
38 .endm
39#else
21 .macro local_irq_enable reg=t0 40 .macro local_irq_enable reg=t0
22 mfc0 \reg, CP0_STATUS 41 mfc0 \reg, CP0_STATUS
23 ori \reg, \reg, 1 42 ori \reg, \reg, 1
@@ -32,6 +51,7 @@
32 mtc0 \reg, CP0_STATUS 51 mtc0 \reg, CP0_STATUS
33 irq_disable_hazard 52 irq_disable_hazard
34 .endm 53 .endm
54#endif /* CONFIG_MIPS_MT_SMTC */
35 55
36#ifdef CONFIG_CPU_SB1 56#ifdef CONFIG_CPU_SB1
37 .macro fpu_enable_hazard 57 .macro fpu_enable_hazard
@@ -48,4 +68,31 @@
48 .endm 68 .endm
49#endif 69#endif
50 70
71/*
72 * Temporary until all versions of gas have MT ASE support
73 */
74 .macro DMT reg=0
75 .word (0x41600bc1 | (\reg << 16))
76 .endm
77
78 .macro EMT reg=0
79 .word (0x41600be1 | (\reg << 16))
80 .endm
81
82 .macro DVPE reg=0
83 .word (0x41600001 | (\reg << 16))
84 .endm
85
86 .macro EVPE reg=0
87 .word (0x41600021 | (\reg << 16))
88 .endm
89
90 .macro MFTR rt=0, rd=0, u=0, sel=0
91 .word (0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
92 .endm
93
94 .macro MTTR rt=0, rd=0, u=0, sel=0
95 .word (0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
96 .endm
97
51#endif /* _ASM_ASMMACRO_H */ 98#endif /* _ASM_ASMMACRO_H */
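The SMTC variants of local_irq_enable/local_irq_disable above never touch Status.IE; they flip only TCStatus.IXMT (bit 10), so one TC can mask its own interrupts without affecting its siblings. The ori/xori pair is the usual trick for clearing a single bit whether or not it was already set. A plain C model of the bit manipulation, with a made-up TCStatus value:

/* Userspace sketch of the IXMT handling above; no real CP0 access. */
#include <stdio.h>

#define TCSTATUS_IXMT (1u << 10)	/* per-TC interrupt inhibit */

int main(void)
{
	unsigned int tcstatus = 0x00008001;	/* made-up TCStatus, IXMT clear */

	unsigned int masked   = tcstatus | TCSTATUS_IXMT;		  /* "ori": disable      */
	unsigned int unmasked = (masked | TCSTATUS_IXMT) ^ TCSTATUS_IXMT; /* "ori; xori": enable */

	printf("disable: %08x -> %08x\n", tcstatus, masked);
	printf("enable:  %08x -> %08x\n", masked, unmasked);
	return 0;
}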
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index 140be1c67da7..6572ac703662 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -73,6 +73,16 @@ struct cpuinfo_mips {
73 struct cache_desc dcache; /* Primary D or combined I/D cache */ 73 struct cache_desc dcache; /* Primary D or combined I/D cache */
74 struct cache_desc scache; /* Secondary cache */ 74 struct cache_desc scache; /* Secondary cache */
75 struct cache_desc tcache; /* Tertiary/split secondary cache */ 75 struct cache_desc tcache; /* Tertiary/split secondary cache */
76#if defined(CONFIG_MIPS_MT_SMTC)
77 /*
78 * In the MIPS MT "SMTC" model, each TC is considered
79 * to be a "CPU" for the purposes of scheduling, but
80 * exception resources, ASID spaces, etc, are common
81 * to all TCs within the same VPE.
82 */
83 int vpe_id; /* Virtual Processor number */
84 int tc_id; /* Thread Context number */
85#endif /* CONFIG_MIPS_MT_SMTC */
76 void *data; /* Additional data */ 86 void *data; /* Additional data */
77} __attribute__((aligned(SMP_CACHE_BYTES))); 87} __attribute__((aligned(SMP_CACHE_BYTES)));
78 88
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index feb29a793888..dadc05188db7 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -284,6 +284,8 @@ do { \
284#define instruction_hazard() do { } while (0) 284#define instruction_hazard() do { } while (0)
285#endif 285#endif
286 286
287extern void mips_ihb(void);
288
287#endif /* __ASSEMBLY__ */ 289#endif /* __ASSEMBLY__ */
288 290
289#endif /* _ASM_HAZARDS_H */ 291#endif /* _ASM_HAZARDS_H */
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index 774348734fa0..4bb9c06f4410 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -19,7 +19,12 @@ __asm__ (
19 " .set push \n" 19 " .set push \n"
20 " .set reorder \n" 20 " .set reorder \n"
21 " .set noat \n" 21 " .set noat \n"
22#ifdef CONFIG_CPU_MIPSR2 22#ifdef CONFIG_MIPS_MT_SMTC
23 " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
24 " ori $1, 0x400 \n"
25 " xori $1, 0x400 \n"
26 " mtc0 $1, $2, 1 \n"
27#elif defined(CONFIG_CPU_MIPSR2)
23 " ei \n" 28 " ei \n"
24#else 29#else
25 " mfc0 $1,$12 \n" 30 " mfc0 $1,$12 \n"
@@ -62,7 +67,12 @@ __asm__ (
62 " .macro local_irq_disable\n" 67 " .macro local_irq_disable\n"
63 " .set push \n" 68 " .set push \n"
64 " .set noat \n" 69 " .set noat \n"
65#ifdef CONFIG_CPU_MIPSR2 70#ifdef CONFIG_MIPS_MT_SMTC
71 " mfc0 $1, $2, 1 \n"
72 " ori $1, 0x400 \n"
73 " .set noreorder \n"
74 " mtc0 $1, $2, 1 \n"
75#elif defined(CONFIG_CPU_MIPSR2)
66 " di \n" 76 " di \n"
67#else 77#else
68 " mfc0 $1,$12 \n" 78 " mfc0 $1,$12 \n"
@@ -88,7 +98,11 @@ __asm__ (
88 " .macro local_save_flags flags \n" 98 " .macro local_save_flags flags \n"
89 " .set push \n" 99 " .set push \n"
90 " .set reorder \n" 100 " .set reorder \n"
101#ifdef CONFIG_MIPS_MT_SMTC
102 " mfc0 \\flags, $2, 1 \n"
103#else
91 " mfc0 \\flags, $12 \n" 104 " mfc0 \\flags, $12 \n"
105#endif
92 " .set pop \n" 106 " .set pop \n"
93 " .endm \n"); 107 " .endm \n");
94 108
@@ -102,7 +116,13 @@ __asm__ (
102 " .set push \n" 116 " .set push \n"
103 " .set reorder \n" 117 " .set reorder \n"
104 " .set noat \n" 118 " .set noat \n"
105#ifdef CONFIG_CPU_MIPSR2 119#ifdef CONFIG_MIPS_MT_SMTC
120 " mfc0 \\result, $2, 1 \n"
121 " ori $1, \\result, 0x400 \n"
122 " .set noreorder \n"
123 " mtc0 $1, $2, 1 \n"
124 " andi \\result, \\result, 0x400 \n"
125#elif defined(CONFIG_CPU_MIPSR2)
106 " di \\result \n" 126 " di \\result \n"
107 " andi \\result, 1 \n" 127 " andi \\result, 1 \n"
108#else 128#else
@@ -128,7 +148,14 @@ __asm__ (
128 " .set push \n" 148 " .set push \n"
129 " .set noreorder \n" 149 " .set noreorder \n"
130 " .set noat \n" 150 " .set noat \n"
131#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 151#ifdef CONFIG_MIPS_MT_SMTC
152 "mfc0 $1, $2, 1 \n"
153 "andi \\flags, 0x400 \n"
154 "ori $1, 0x400 \n"
155 "xori $1, 0x400 \n"
156 "or \\flags, $1 \n"
157 "mtc0 \\flags, $2, 1 \n"
158#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
132 /* 159 /*
133 * Slow, but doesn't suffer from a relativly unlikely race 160 * Slow, but doesn't suffer from a relativly unlikely race
134 * condition we're having since days 1. 161 * condition we're having since days 1.
@@ -167,11 +194,29 @@ do { \
167 : "memory"); \ 194 : "memory"); \
168} while(0) 195} while(0)
169 196
170#define irqs_disabled() \ 197static inline int irqs_disabled(void)
171({ \ 198{
172 unsigned long flags; \ 199#ifdef CONFIG_MIPS_MT_SMTC
173 local_save_flags(flags); \ 200 /*
174 !(flags & 1); \ 201 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
175}) 202 */
203 unsigned long __result;
204
205 __asm__ __volatile__(
206 " .set noreorder \n"
207 " mfc0 %0, $2, 1 \n"
208 " andi %0, 0x400 \n"
209 " slt %0, $0, %0 \n"
210 " .set reorder \n"
211 : "=r" (__result));
212
213 return __result;
214#else
215 unsigned long flags;
216 local_save_flags(flags);
217
218 return !(flags & 1);
219#endif
220}
176 221
177#endif /* _ASM_INTERRUPT_H */ 222#endif /* _ASM_INTERRUPT_H */
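The SMTC local_irq_restore sequence above merges the saved IXMT bit back into whatever TCStatus has become in the meantime: keep only bit 0x400 from the saved flags, clear IXMT in the live value with the ori/xori pair, then OR the two together and write the result back. The same logic in plain C, with invented register values:

/* Userspace sketch of the SMTC local_irq_restore merge. */
#include <stdio.h>

#define IXMT 0x400u

static unsigned int restore(unsigned int tcstatus, unsigned int saved_flags)
{
	unsigned int f = saved_flags & IXMT;		/* andi \flags, 0x400 */
	unsigned int t = (tcstatus | IXMT) ^ IXMT;	/* ori; xori          */
	return t | f;					/* or   \flags, $1    */
}

int main(void)
{
	printf("%08x\n", restore(0x00008400, 0x400));	/* stays masked  */
	printf("%08x\n", restore(0x00008400, 0x000));	/* gets unmasked */
	return 0;
}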
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index d7aecca3b95f..dde677f02bc0 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -11,6 +11,9 @@
11 11
12#include <linux/config.h> 12#include <linux/config.h>
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14
15#include <asm/mipsmtregs.h>
16
14#include <irq.h> 17#include <irq.h>
15 18
16#ifdef CONFIG_I8259 19#ifdef CONFIG_I8259
@@ -26,6 +29,23 @@ struct pt_regs;
26 29
27extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); 30extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);
28 31
32#ifdef CONFIG_MIPS_MT_SMTC
33/*
34 * Clear interrupt mask handling "backstop" if irq_hwmask
35 * entry so indicates. This implies that the ack() or end()
36 * functions will take over re-enabling the low-level mask.
37 * Otherwise it will be done on return from exception.
38 */
39#define __DO_IRQ_SMTC_HOOK() \
40do { \
41 if (irq_hwmask[irq] & 0x0000ff00) \
42 write_c0_tccontext(read_c0_tccontext() & \
43 ~(irq_hwmask[irq] & 0x0000ff00)); \
44} while (0)
45#else
46#define __DO_IRQ_SMTC_HOOK() do { } while (0)
47#endif
48
29#ifdef CONFIG_PREEMPT 49#ifdef CONFIG_PREEMPT
30 50
31/* 51/*
@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);
39#define do_IRQ(irq, regs) \ 59#define do_IRQ(irq, regs) \
40do { \ 60do { \
41 irq_enter(); \ 61 irq_enter(); \
62 __DO_IRQ_SMTC_HOOK(); \
42 __do_IRQ((irq), (regs)); \ 63 __do_IRQ((irq), (regs)); \
43 irq_exit(); \ 64 irq_exit(); \
44} while (0) 65} while (0)
@@ -48,4 +69,12 @@ do { \
48extern void arch_init_irq(void); 69extern void arch_init_irq(void);
49extern void spurious_interrupt(struct pt_regs *regs); 70extern void spurious_interrupt(struct pt_regs *regs);
50 71
72#ifdef CONFIG_MIPS_MT_SMTC
73struct irqaction;
74
75extern unsigned long irq_hwmask[];
76extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
77 unsigned long hwmask);
78#endif /* CONFIG_MIPS_MT_SMTC */
79
51#endif /* _ASM_IRQ_H */ 80#endif /* _ASM_IRQ_H */
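__DO_IRQ_SMTC_HOOK() consults the per-IRQ irq_hwmask[] entry set up by setup_irq_smtc(): if any bits in 0x0000ff00 are set, the corresponding "backstop" bits are cleared from TCContext, which signals that the handler's own ack()/end() will re-enable the low-level mask rather than the return-from-exception path. A small model of that test; the array size, IRQ number, and mask values are illustrative only:

/* Userspace sketch of the SMTC backstop-clearing hook. */
#include <stdio.h>

static unsigned int tccontext = 0x0000ff00;		/* all backstops armed   */
static unsigned int irq_hwmask[8] = { [3] = 0x00000800 };	/* made-up entry */

static void do_irq_smtc_hook(unsigned int irq)
{
	if (irq_hwmask[irq] & 0x0000ff00)
		tccontext &= ~(irq_hwmask[irq] & 0x0000ff00);
}

int main(void)
{
	do_irq_smtc_hook(3);
	printf("TCContext now %08x\n", tccontext);	/* 0000f700 */
	return 0;
}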
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
new file mode 100644
index 000000000000..c31a312b9783
--- /dev/null
+++ b/include/asm-mips/mips_mt.h
@@ -0,0 +1,15 @@
1/*
2 * Definitions and declarations for MIPS MT support
3 * that are common between SMTC, VSMP, and/or AP/SP
4 * kernel models.
5 */
6#ifndef __ASM_MIPS_MT_H
7#define __ASM_MIPS_MT_H
8
9extern cpumask_t mt_fpu_cpumask;
10extern unsigned long mt_fpemul_threshold;
11
12extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
13extern void mips_mt_set_cpuoptions(void);
14
15#endif /* __ASM_MIPS_MT_H */
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index a5ac1a62f4f4..f637ce70758f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,7 +165,7 @@
165 165
166#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
167 167
168extern void mips_mt_regdump(void); 168extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
169 169
170static inline unsigned int dvpe(void) 170static inline unsigned int dvpe(void)
171{ 171{
@@ -282,8 +282,11 @@ static inline void ehb(void)
282 \ 282 \
283 __asm__ __volatile__( \ 283 __asm__ __volatile__( \
284 " .set push \n" \ 284 " .set push \n" \
285 " .set noat \n" \
285 " .set mips32r2 \n" \ 286 " .set mips32r2 \n" \
286 " mftgpr %0," #rt " \n" \ 287 " # mftgpr $1," #rt " \n" \
288 " .word 0x41000820 | (" #rt " << 16) \n" \
289 " move %0, $1 \n" \
287 " .set pop \n" \ 290 " .set pop \n" \
288 : "=r" (__res)); \ 291 : "=r" (__res)); \
289 \ 292 \
@@ -295,9 +298,7 @@ static inline void ehb(void)
295 unsigned long __res; \ 298 unsigned long __res; \
296 \ 299 \
297 __asm__ __volatile__( \ 300 __asm__ __volatile__( \
298 ".set noat\n\t" \ 301 " mftr %0, " #rt ", " #u ", " #sel " \n" \
299 "mftr\t%0, " #rt ", " #u ", " #sel "\n\t" \
300 ".set at\n\t" \
301 : "=r" (__res)); \ 302 : "=r" (__res)); \
302 \ 303 \
303 __res; \ 304 __res; \
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index e85a42e2ea0c..a2ef579f6b1a 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -861,7 +861,19 @@ do { \
861#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) 861#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
862 862
863#define read_c0_status() __read_32bit_c0_register($12, 0) 863#define read_c0_status() __read_32bit_c0_register($12, 0)
864#ifdef CONFIG_MIPS_MT_SMTC
865#define write_c0_status(val) \
866do { \
867 __write_32bit_c0_register($12, 0, val); \
868 __ehb(); \
869} while (0)
870#else
871/*
872 * Legacy non-SMTC code, which may be hazardous
873 * but which might not support EHB
874 */
864#define write_c0_status(val) __write_32bit_c0_register($12, 0, val) 875#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
876#endif /* CONFIG_MIPS_MT_SMTC */
865 877
866#define read_c0_cause() __read_32bit_c0_register($13, 0) 878#define read_c0_cause() __read_32bit_c0_register($13, 0)
867#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) 879#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1004,6 +1016,9 @@ do { \
1004#define read_c0_taglo() __read_32bit_c0_register($28, 0) 1016#define read_c0_taglo() __read_32bit_c0_register($28, 0)
1005#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) 1017#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val)
1006 1018
1019#define read_c0_dtaglo() __read_32bit_c0_register($28, 2)
1020#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val)
1021
1007#define read_c0_taghi() __read_32bit_c0_register($29, 0) 1022#define read_c0_taghi() __read_32bit_c0_register($29, 0)
1008#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) 1023#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val)
1009 1024
@@ -1357,6 +1372,11 @@ static inline void tlb_write_random(void)
1357/* 1372/*
1358 * Manipulate bits in a c0 register. 1373 * Manipulate bits in a c0 register.
1359 */ 1374 */
1375#ifndef CONFIG_MIPS_MT_SMTC
1376/*
1377 * SMTC Linux requires shutting-down microthread scheduling
1378 * during CP0 register read-modify-write sequences.
1379 */
1360#define __BUILD_SET_C0(name) \ 1380#define __BUILD_SET_C0(name) \
1361static inline unsigned int \ 1381static inline unsigned int \
1362set_c0_##name(unsigned int set) \ 1382set_c0_##name(unsigned int set) \
@@ -1395,6 +1415,119 @@ change_c0_##name(unsigned int change, unsigned int new) \
1395 return res; \ 1415 return res; \
1396} 1416}
1397 1417
1418#else /* SMTC versions that manage MT scheduling */
1419
1420#include <asm/interrupt.h>
1421
1422/*
1423 * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
1424 * header file recursion.
1425 */
1426static inline unsigned int __dmt(void)
1427{
1428 int res;
1429
1430 __asm__ __volatile__(
1431 " .set push \n"
1432 " .set mips32r2 \n"
1433 " .set noat \n"
1434 " .word 0x41610BC1 # dmt $1 \n"
1435 " ehb \n"
1436 " move %0, $1 \n"
1437 " .set pop \n"
1438 : "=r" (res));
1439
1440 instruction_hazard();
1441
1442 return res;
1443}
1444
1445#define __VPECONTROL_TE_SHIFT 15
1446#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
1447
1448#define __EMT_ENABLE __VPECONTROL_TE
1449
1450static inline void __emt(unsigned int previous)
1451{
1452 if ((previous & __EMT_ENABLE))
1453 __asm__ __volatile__(
1454 " .set noreorder \n"
1455 " .set mips32r2 \n"
1456 " .word 0x41600be1 # emt \n"
1457 " ehb \n"
1458 " .set mips0 \n"
1459 " .set reorder \n");
1460}
1461
1462static inline void __ehb(void)
1463{
1464 __asm__ __volatile__(
1465 " ehb \n");
1466}
1467
1468/*
1469 * Note that local_irq_save/restore affect TC-specific IXMT state,
1470 * not Status.IE as in non-SMTC kernel.
1471 */
1472
1473#define __BUILD_SET_C0(name) \
1474static inline unsigned int \
1475set_c0_##name(unsigned int set) \
1476{ \
1477 unsigned int res; \
1478 unsigned int omt; \
1479 unsigned int flags; \
1480 \
1481 local_irq_save(flags); \
1482 omt = __dmt(); \
1483 res = read_c0_##name(); \
1484 res |= set; \
1485 write_c0_##name(res); \
1486 __emt(omt); \
1487 local_irq_restore(flags); \
1488 \
1489 return res; \
1490} \
1491 \
1492static inline unsigned int \
1493clear_c0_##name(unsigned int clear) \
1494{ \
1495 unsigned int res; \
1496 unsigned int omt; \
1497 unsigned int flags; \
1498 \
1499 local_irq_save(flags); \
1500 omt = __dmt(); \
1501 res = read_c0_##name(); \
1502 res &= ~clear; \
1503 write_c0_##name(res); \
1504 __emt(omt); \
1505 local_irq_restore(flags); \
1506 \
1507 return res; \
1508} \
1509 \
1510static inline unsigned int \
1511change_c0_##name(unsigned int change, unsigned int new) \
1512{ \
1513 unsigned int res; \
1514 unsigned int omt; \
1515 unsigned int flags; \
1516 \
1517 local_irq_save(flags); \
1518 \
1519 omt = __dmt(); \
1520 res = read_c0_##name(); \
1521 res &= ~change; \
1522 res |= (new & change); \
1523 write_c0_##name(res); \
1524 __emt(omt); \
1525 local_irq_restore(flags); \
1526 \
1527 return res; \
1528}
1529#endif
1530
1398__BUILD_SET_C0(status) 1531__BUILD_SET_C0(status)
1399__BUILD_SET_C0(cause) 1532__BUILD_SET_C0(cause)
1400__BUILD_SET_C0(config) 1533__BUILD_SET_C0(config)
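The SMTC versions of set_c0_*/clear_c0_*/change_c0_* above make the CP0 read-modify-write single-threaded: __dmt() halts thread scheduling and returns the previous VPEControl.TE, and __emt() turns scheduling back on only if it had been on. A stubbed C sketch of that save/conditional-restore pattern; the interrupt save/restore and the real CP0 access are omitted, and the register values are made up:

/* Userspace sketch of the dmt/emt bracketing around a CP0 RMW. */
#include <stdio.h>

#define VPECONTROL_TE (1u << 15)

static unsigned int vpecontrol = VPECONTROL_TE;	/* MT currently enabled */
static unsigned int c0_status  = 0x1000ff00;	/* made-up Status value */

static unsigned int __dmt(void)
{
	unsigned int prev = vpecontrol;
	vpecontrol &= ~VPECONTROL_TE;		/* stop other threads   */
	return prev;
}

static void __emt(unsigned int previous)
{
	if (previous & VPECONTROL_TE)
		vpecontrol |= VPECONTROL_TE;	/* only if it was on    */
}

static unsigned int set_c0_status(unsigned int set)
{
	unsigned int omt = __dmt();
	unsigned int res = c0_status | set;	/* the RMW step         */
	c0_status = res;
	__emt(omt);
	return res;
}

int main(void)
{
	printf("status -> %08x\n", set_c0_status(0x1));
	return 0;
}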
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 61cf22588137..6e09f4c87211 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -17,6 +17,10 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20#ifdef CONFIG_MIPS_MT_SMTC
21#include <asm/mipsmtregs.h>
22#include <asm/smtc.h>
23#endif /* SMTC */
20 24
21/* 25/*
22 * For the fast tlb miss handlers, we keep a per cpu array of pointers 26 * For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[];
54#define ASID_INC 0x1 58#define ASID_INC 0x1
55#define ASID_MASK 0xfff 59#define ASID_MASK 0xfff
56 60
61/* SMTC/34K debug hack - but maybe we'll keep it */
62#elif defined(CONFIG_MIPS_MT_SMTC)
63
64#define ASID_INC 0x1
65extern unsigned long smtc_asid_mask;
66#define ASID_MASK (smtc_asid_mask)
67#define HW_ASID_MASK 0xff
68/* End SMTC/34K debug hack */
57#else /* FIXME: not correct for R6000 */ 69#else /* FIXME: not correct for R6000 */
58 70
59#define ASID_INC 0x1 71#define ASID_INC 0x1
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
76#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 88#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
77#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) 89#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
78 90
91#ifndef CONFIG_MIPS_MT_SMTC
92/* Normal, classic MIPS get_new_mmu_context */
79static inline void 93static inline void
80get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 94get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
81{ 95{
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
91 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 105 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
92} 106}
93 107
108#else /* CONFIG_MIPS_MT_SMTC */
109
110#define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu))
111
112#endif /* CONFIG_MIPS_MT_SMTC */
113
94/* 114/*
95 * Initialize the context related info for a new mm_struct 115 * Initialize the context related info for a new mm_struct
96 * instance. 116 * instance.
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
111{ 131{
112 unsigned int cpu = smp_processor_id(); 132 unsigned int cpu = smp_processor_id();
113 unsigned long flags; 133 unsigned long flags;
114 134#ifdef CONFIG_MIPS_MT_SMTC
135 unsigned long oldasid;
136 unsigned long mtflags;
137 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
115 local_irq_save(flags); 138 local_irq_save(flags);
139 mtflags = dvpe();
140#else /* Not SMTC */
141 local_irq_save(flags);
142#endif /* CONFIG_MIPS_MT_SMTC */
116 143
117 /* Check if our ASID is of an older version and thus invalid */ 144 /* Check if our ASID is of an older version and thus invalid */
118 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 145 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
119 get_new_mmu_context(next, cpu); 146 get_new_mmu_context(next, cpu);
120 147#ifdef CONFIG_MIPS_MT_SMTC
148 /*
149 * If the EntryHi ASID being replaced happens to be
150 * the value flagged at ASID recycling time as having
151 * an extended life, clear the bit showing it being
152 * in use by this "CPU", and if that's the last bit,
153 * free up the ASID value for use and flush any old
154 * instances of it from the TLB.
155 */
156 oldasid = (read_c0_entryhi() & ASID_MASK);
157 if(smtc_live_asid[mytlb][oldasid]) {
158 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
159 if(smtc_live_asid[mytlb][oldasid] == 0)
160 smtc_flush_tlb_asid(oldasid);
161 }
162 /*
163 * Tread softly on EntryHi, and so long as we support
164 * having ASID_MASK smaller than the hardware maximum,
165 * make sure no "soft" bits become "hard"...
166 */
167 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
168 | (cpu_context(cpu, next) & ASID_MASK));
169 ehb(); /* Make sure it propagates to TCStatus */
170 evpe(mtflags);
171#else
121 write_c0_entryhi(cpu_context(cpu, next)); 172 write_c0_entryhi(cpu_context(cpu, next));
173#endif /* CONFIG_MIPS_MT_SMTC */
122 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 174 TLBMISS_HANDLER_SETUP_PGD(next->pgd);
123 175
124 /* 176 /*
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
151 unsigned long flags; 203 unsigned long flags;
152 unsigned int cpu = smp_processor_id(); 204 unsigned int cpu = smp_processor_id();
153 205
206#ifdef CONFIG_MIPS_MT_SMTC
207 unsigned long oldasid;
208 unsigned long mtflags;
209 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
210#endif /* CONFIG_MIPS_MT_SMTC */
211
154 local_irq_save(flags); 212 local_irq_save(flags);
155 213
156 /* Unconditionally get a new ASID. */ 214 /* Unconditionally get a new ASID. */
157 get_new_mmu_context(next, cpu); 215 get_new_mmu_context(next, cpu);
158 216
217#ifdef CONFIG_MIPS_MT_SMTC
218 /* See comments for similar code above */
219 mtflags = dvpe();
220 oldasid = read_c0_entryhi() & ASID_MASK;
221 if(smtc_live_asid[mytlb][oldasid]) {
222 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
223 if(smtc_live_asid[mytlb][oldasid] == 0)
224 smtc_flush_tlb_asid(oldasid);
225 }
226 /* See comments for similar code above */
227 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
228 (cpu_context(cpu, next) & ASID_MASK));
229 ehb(); /* Make sure it propagates to TCStatus */
230 evpe(mtflags);
231#else
159 write_c0_entryhi(cpu_context(cpu, next)); 232 write_c0_entryhi(cpu_context(cpu, next));
233#endif /* CONFIG_MIPS_MT_SMTC */
160 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 234 TLBMISS_HANDLER_SETUP_PGD(next->pgd);
161 235
162 /* mark mmu ownership change */ 236 /* mark mmu ownership change */
@@ -174,17 +248,49 @@ static inline void
174drop_mmu_context(struct mm_struct *mm, unsigned cpu) 248drop_mmu_context(struct mm_struct *mm, unsigned cpu)
175{ 249{
176 unsigned long flags; 250 unsigned long flags;
251#ifdef CONFIG_MIPS_MT_SMTC
252 unsigned long oldasid;
253 /* Can't use spinlock because called from TLB flush within DVPE */
254 unsigned int prevvpe;
255 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
256#endif /* CONFIG_MIPS_MT_SMTC */
177 257
178 local_irq_save(flags); 258 local_irq_save(flags);
179 259
180 if (cpu_isset(cpu, mm->cpu_vm_mask)) { 260 if (cpu_isset(cpu, mm->cpu_vm_mask)) {
181 get_new_mmu_context(mm, cpu); 261 get_new_mmu_context(mm, cpu);
262#ifdef CONFIG_MIPS_MT_SMTC
263 /* See comments for similar code above */
264 prevvpe = dvpe();
265 oldasid = (read_c0_entryhi() & ASID_MASK);
266 if(smtc_live_asid[mytlb][oldasid]) {
267 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
268 if(smtc_live_asid[mytlb][oldasid] == 0)
269 smtc_flush_tlb_asid(oldasid);
270 }
271 /* See comments for similar code above */
272 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
273 | cpu_asid(cpu, mm));
274 ehb(); /* Make sure it propagates to TCStatus */
275 evpe(prevvpe);
276#else /* not CONFIG_MIPS_MT_SMTC */
182 write_c0_entryhi(cpu_asid(cpu, mm)); 277 write_c0_entryhi(cpu_asid(cpu, mm));
278#endif /* CONFIG_MIPS_MT_SMTC */
183 } else { 279 } else {
184 /* will get a new context next time */ 280 /* will get a new context next time */
281#ifndef CONFIG_MIPS_MT_SMTC
185 cpu_context(cpu, mm) = 0; 282 cpu_context(cpu, mm) = 0;
283#else /* SMTC */
284 int i;
285
286 /* SMTC shares the TLB (and ASIDs) across VPEs */
287 for (i = 0; i < num_online_cpus(); i++) {
288 if((smtc_status & SMTC_TLB_SHARED)
289 || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
290 cpu_context(i, mm) = 0;
291 }
292#endif /* CONFIG_MIPS_MT_SMTC */
186 } 293 }
187
188 local_irq_restore(flags); 294 local_irq_restore(flags);
189} 295}
190 296
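The smtc_live_asid[][] bookkeeping referenced in switch_mm()/activate_mm()/drop_mmu_context() above tracks, per TLB and per ASID, a bitmask of the TCs still using that ASID; when the last user lets go, the stale entries can be flushed from the shared TLB. A compilable sketch of the drop path, with the flush reduced to a printf (types and sizes follow the smtc.h declarations later in this patch):

/* Userspace sketch of the live-ASID reference counting. */
#include <stdio.h>

#define MAX_SMTC_TLBS  2
#define MAX_SMTC_ASIDS 256

typedef char asiduse;	/* NR_CPUS <= 8 in this sketch */

static asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

static void smtc_flush_tlb_asid(unsigned long asid)
{
	printf("flushing stale entries for ASID %lu\n", asid);
}

static void drop_asid(int mytlb, unsigned long oldasid, int cpu)
{
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
}

int main(void)
{
	smtc_live_asid[0][5] = 0x3;	/* ASID 5 live on cpus 0 and 1 */
	drop_asid(0, 5, 0);		/* still live on cpu 1         */
	drop_asid(0, 5, 1);		/* last user -> flush          */
	return 0;
}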
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 39d2bd50fece..786651340de1 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -12,6 +12,7 @@
12#define _ASM_PROCESSOR_H 12#define _ASM_PROCESSOR_H
13 13
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/cpumask.h>
15#include <linux/threads.h> 16#include <linux/threads.h>
16 17
17#include <asm/cachectl.h> 18#include <asm/cachectl.h>
@@ -107,6 +108,10 @@ struct mips_dsp_state {
107 108
108#define INIT_DSP {{0,},} 109#define INIT_DSP {{0,},}
109 110
111#define INIT_CPUMASK { \
112 {0,} \
113}
114
110typedef struct { 115typedef struct {
111 unsigned long seg; 116 unsigned long seg;
112} mm_segment_t; 117} mm_segment_t;
@@ -142,6 +147,7 @@ struct thread_struct {
142#define MF_LOGADE 2 /* Log address errors to syslog */ 147#define MF_LOGADE 2 /* Log address errors to syslog */
143#define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ 148#define MF_32BIT_REGS 4 /* also implies 16/32 fprs */
144#define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ 149#define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */
150#define MF_FPUBOUND 0x10 /* thread bound to FPU-full CPU set */
145 unsigned long mflags; 151 unsigned long mflags;
146 unsigned long irix_trampoline; /* Wheee... */ 152 unsigned long irix_trampoline; /* Wheee... */
147 unsigned long irix_oldctx; 153 unsigned long irix_oldctx;
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 95c5839ac465..fa9d8713c12a 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -45,6 +45,10 @@ struct pt_regs {
45 unsigned long cp0_badvaddr; 45 unsigned long cp0_badvaddr;
46 unsigned long cp0_cause; 46 unsigned long cp0_cause;
47 unsigned long cp0_epc; 47 unsigned long cp0_epc;
48#ifdef CONFIG_MIPS_MT_SMTC
49 unsigned long cp0_tcstatus;
50 unsigned long smtc_pad;
51#endif /* CONFIG_MIPS_MT_SMTC */
48}; 52};
49 53
50/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 54/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index 2f2eb95387f6..3c8e3c8d1a9a 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -15,6 +15,7 @@
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/cacheops.h> 16#include <asm/cacheops.h>
17#include <asm/cpu-features.h> 17#include <asm/cpu-features.h>
18#include <asm/mipsmtregs.h>
18 19
19/* 20/*
20 * This macro return a properly sign-extended address suitable as base address 21 * This macro return a properly sign-extended address suitable as base address
@@ -39,14 +40,118 @@
39 : \ 40 : \
40 : "i" (op), "R" (*(unsigned char *)(addr))) 41 : "i" (op), "R" (*(unsigned char *)(addr)))
41 42
43#ifdef CONFIG_MIPS_MT
44/*
45 * Temporary hacks for SMTC debug. Optionally force single-threaded
46 * execution during I-cache flushes.
47 */
48
49#define PROTECT_CACHE_FLUSHES 1
50
51#ifdef PROTECT_CACHE_FLUSHES
52
53extern int mt_protiflush;
54extern int mt_protdflush;
55extern void mt_cflush_lockdown(void);
56extern void mt_cflush_release(void);
57
58#define BEGIN_MT_IPROT \
59 unsigned long flags = 0; \
60 unsigned long mtflags = 0; \
61 if(mt_protiflush) { \
62 local_irq_save(flags); \
63 ehb(); \
64 mtflags = dvpe(); \
65 mt_cflush_lockdown(); \
66 }
67
68#define END_MT_IPROT \
69 if(mt_protiflush) { \
70 mt_cflush_release(); \
71 evpe(mtflags); \
72 local_irq_restore(flags); \
73 }
74
75#define BEGIN_MT_DPROT \
76 unsigned long flags = 0; \
77 unsigned long mtflags = 0; \
78 if(mt_protdflush) { \
79 local_irq_save(flags); \
80 ehb(); \
81 mtflags = dvpe(); \
82 mt_cflush_lockdown(); \
83 }
84
85#define END_MT_DPROT \
86 if(mt_protdflush) { \
87 mt_cflush_release(); \
88 evpe(mtflags); \
89 local_irq_restore(flags); \
90 }
91
92#else
93
94#define BEGIN_MT_IPROT
95#define BEGIN_MT_DPROT
96#define END_MT_IPROT
97#define END_MT_DPROT
98
99#endif /* PROTECT_CACHE_FLUSHES */
100
101#define __iflush_prologue \
102 unsigned long redundance; \
103 extern int mt_n_iflushes; \
104 BEGIN_MT_IPROT \
105 for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
106
107#define __iflush_epilogue \
108 END_MT_IPROT \
109 }
110
111#define __dflush_prologue \
112 unsigned long redundance; \
113 extern int mt_n_dflushes; \
114 BEGIN_MT_DPROT \
115 for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
116
117#define __dflush_epilogue \
118 END_MT_DPROT \
119 }
120
121#define __inv_dflush_prologue __dflush_prologue
122#define __inv_dflush_epilogue __dflush_epilogue
123#define __sflush_prologue {
124#define __sflush_epilogue }
125#define __inv_sflush_prologue __sflush_prologue
126#define __inv_sflush_epilogue __sflush_epilogue
127
128#else /* CONFIG_MIPS_MT */
129
130#define __iflush_prologue {
131#define __iflush_epilogue }
132#define __dflush_prologue {
133#define __dflush_epilogue }
134#define __inv_dflush_prologue {
135#define __inv_dflush_epilogue }
136#define __sflush_prologue {
137#define __sflush_epilogue }
138#define __inv_sflush_prologue {
139#define __inv_sflush_epilogue }
140
141#endif /* CONFIG_MIPS_MT */
142
42static inline void flush_icache_line_indexed(unsigned long addr) 143static inline void flush_icache_line_indexed(unsigned long addr)
43{ 144{
145 __iflush_prologue
44 cache_op(Index_Invalidate_I, addr); 146 cache_op(Index_Invalidate_I, addr);
147 __iflush_epilogue
45} 148}
46 149
47static inline void flush_dcache_line_indexed(unsigned long addr) 150static inline void flush_dcache_line_indexed(unsigned long addr)
48{ 151{
152 __dflush_prologue
49 cache_op(Index_Writeback_Inv_D, addr); 153 cache_op(Index_Writeback_Inv_D, addr);
154 __dflush_epilogue
50} 155}
51 156
52static inline void flush_scache_line_indexed(unsigned long addr) 157static inline void flush_scache_line_indexed(unsigned long addr)
@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr)
56 161
57static inline void flush_icache_line(unsigned long addr) 162static inline void flush_icache_line(unsigned long addr)
58{ 163{
164 __iflush_prologue
59 cache_op(Hit_Invalidate_I, addr); 165 cache_op(Hit_Invalidate_I, addr);
166 __iflush_epilogue
60} 167}
61 168
62static inline void flush_dcache_line(unsigned long addr) 169static inline void flush_dcache_line(unsigned long addr)
63{ 170{
171 __dflush_prologue
64 cache_op(Hit_Writeback_Inv_D, addr); 172 cache_op(Hit_Writeback_Inv_D, addr);
173 __dflush_epilogue
65} 174}
66 175
67static inline void invalidate_dcache_line(unsigned long addr) 176static inline void invalidate_dcache_line(unsigned long addr)
68{ 177{
178 __dflush_prologue
69 cache_op(Hit_Invalidate_D, addr); 179 cache_op(Hit_Invalidate_D, addr);
180 __dflush_epilogue
70} 181}
71 182
72static inline void invalidate_scache_line(unsigned long addr) 183static inline void invalidate_scache_line(unsigned long addr)
@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void) \
239 current_cpu_data.desc.waybit; \ 350 current_cpu_data.desc.waybit; \
240 unsigned long ws, addr; \ 351 unsigned long ws, addr; \
241 \ 352 \
353 __##pfx##flush_prologue \
354 \
242 for (ws = 0; ws < ws_end; ws += ws_inc) \ 355 for (ws = 0; ws < ws_end; ws += ws_inc) \
243 for (addr = start; addr < end; addr += lsize * 32) \ 356 for (addr = start; addr < end; addr += lsize * 32) \
244 cache##lsize##_unroll32(addr|ws,indexop); \ 357 cache##lsize##_unroll32(addr|ws,indexop); \
358 \
359 __##pfx##flush_epilogue \
245} \ 360} \
246 \ 361 \
247static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ 362static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
249 unsigned long start = page; \ 364 unsigned long start = page; \
250 unsigned long end = page + PAGE_SIZE; \ 365 unsigned long end = page + PAGE_SIZE; \
251 \ 366 \
367 __##pfx##flush_prologue \
368 \
252 do { \ 369 do { \
253 cache##lsize##_unroll32(start,hitop); \ 370 cache##lsize##_unroll32(start,hitop); \
254 start += lsize * 32; \ 371 start += lsize * 32; \
255 } while (start < end); \ 372 } while (start < end); \
373 \
374 __##pfx##flush_epilogue \
256} \ 375} \
257 \ 376 \
258static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 377static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
265 current_cpu_data.desc.waybit; \ 384 current_cpu_data.desc.waybit; \
266 unsigned long ws, addr; \ 385 unsigned long ws, addr; \
267 \ 386 \
387 __##pfx##flush_prologue \
388 \
268 for (ws = 0; ws < ws_end; ws += ws_inc) \ 389 for (ws = 0; ws < ws_end; ws += ws_inc) \
269 for (addr = start; addr < end; addr += lsize * 32) \ 390 for (addr = start; addr < end; addr += lsize * 32) \
270 cache##lsize##_unroll32(addr|ws,indexop); \ 391 cache##lsize##_unroll32(addr|ws,indexop); \
392 \
393 __##pfx##flush_epilogue \
271} 394}
272 395
273__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) 396__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
288 unsigned long lsize = cpu_##desc##_line_size(); \ 411 unsigned long lsize = cpu_##desc##_line_size(); \
289 unsigned long addr = start & ~(lsize - 1); \ 412 unsigned long addr = start & ~(lsize - 1); \
290 unsigned long aend = (end - 1) & ~(lsize - 1); \ 413 unsigned long aend = (end - 1) & ~(lsize - 1); \
414 \
415 __##pfx##flush_prologue \
416 \
291 while (1) { \ 417 while (1) { \
292 prot##cache_op(hitop, addr); \ 418 prot##cache_op(hitop, addr); \
293 if (addr == aend) \ 419 if (addr == aend) \
294 break; \ 420 break; \
295 addr += lsize; \ 421 addr += lsize; \
296 } \ 422 } \
423 \
424 __##pfx##flush_epilogue \
297} 425}
298 426
299__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) 427__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
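A minimal sketch, not part of the patch: under CONFIG_MIPS_MT the __dflush_prologue/__dflush_epilogue pair above wraps each D-cache operation in a loop that repeats it mt_n_dflushes times, bracketed by BEGIN_MT_DPROT/END_MT_DPROT. Written out by hand in C, with the MT-protection brackets omitted for brevity, a wrapped flush would expand roughly as below; example_flush_dcache_line() is an illustrative name only.

	extern int mt_n_dflushes;	/* repeat count for redundant D-cache flushes */

	static inline void example_flush_dcache_line(unsigned long addr)
	{
		unsigned long redundance;

		/* body of the flush is executed once per pass of the prologue loop */
		for (redundance = 0; redundance < mt_n_dflushes; redundance++)
			cache_op(Hit_Writeback_Inv_D, addr);
	}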
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
new file mode 100644
index 000000000000..e1941d1b8726
--- /dev/null
+++ b/include/asm-mips/smtc.h
@@ -0,0 +1,55 @@
1#ifndef _ASM_SMTC_MT_H
2#define _ASM_SMTC_MT_H
3
4/*
5 * Definitions for SMTC multitasking on MIPS MT cores
6 */
7
8#include <asm/mips_mt.h>
9
10/*
11 * System-wide SMTC status information
12 */
13
14extern unsigned int smtc_status;
15
16#define SMTC_TLB_SHARED 0x00000001
17#define SMTC_MTC_ACTIVE 0x00000002
18
19/*
20 * TLB/ASID Management information
21 */
22
23#define MAX_SMTC_TLBS 2
24#define MAX_SMTC_ASIDS 256
25#if NR_CPUS <= 8
26typedef char asiduse;
27#else
28#if NR_CPUS <= 16
29typedef short asiduse;
30#else
31typedef long asiduse;
32#endif
33#endif
34
35extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
36
37void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
38
39void smtc_flush_tlb_asid(unsigned long asid);
40extern int mipsmt_build_cpu_map(int startslot);
41extern void mipsmt_prepare_cpus(void);
42extern void smtc_smp_finish(void);
43extern void smtc_boot_secondary(int cpu, struct task_struct *t);
44
45/*
46 * Sharing the TLB between multiple VPEs means that the
47 * "random" index selection function is not allowed to
48 * select the current value of the Index register. To
49 * avoid additional TLB pressure, the Index registers
50 * are "parked" with a non-Valid value.
51 */
52
53#define PARKED_INDEX ((unsigned int)0x80000000)
54
55#endif /* _ASM_SMTC_MT_H */
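The asiduse type above is sized so that one bit per CPU/TC fits in each smtc_live_asid[][] slot. A minimal sketch of how such a bitmask might be set and tested, assuming only the declarations in this header; the helper names are illustrative, not from the patch.

	#include <asm/smtc.h>

	/* Record that "asid" is live for "cpu" in the given shared TLB. */
	static inline void example_mark_asid_live(int tlb, unsigned long asid, int cpu)
	{
		smtc_live_asid[tlb][asid & (MAX_SMTC_ASIDS - 1)] |= (asiduse)(1 << cpu);
	}

	/* Nonzero if any CPU still has this ASID live in the given TLB. */
	static inline int example_asid_in_use(int tlb, unsigned long asid)
	{
		return smtc_live_asid[tlb][asid & (MAX_SMTC_ASIDS - 1)] != 0;
	}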
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
new file mode 100644
index 000000000000..f22c3e2f993a
--- /dev/null
+++ b/include/asm-mips/smtc_ipi.h
@@ -0,0 +1,118 @@
1/*
2 * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
3 */
4#ifndef __ASM_SMTC_IPI_H
5#define __ASM_SMTC_IPI_H
6
7//#define SMTC_IPI_DEBUG
8
9#ifdef SMTC_IPI_DEBUG
10#include <asm/mipsregs.h>
11#include <asm/mipsmtregs.h>
12#endif /* SMTC_IPI_DEBUG */
13
14/*
15 * An IPI "message"
16 */
17
18struct smtc_ipi {
19 struct smtc_ipi *flink;
20 int type;
21 void *arg;
22 int dest;
23#ifdef SMTC_IPI_DEBUG
24 int sender;
25 long stamp;
26#endif /* SMTC_IPI_DEBUG */
27};
28
29/*
30 * Defined IPI Types
31 */
32
33#define LINUX_SMP_IPI 1
34#define SMTC_CLOCK_TICK 2
35
36/*
37 * A queue of IPI messages
38 */
39
40struct smtc_ipi_q {
41 struct smtc_ipi *head;
42 spinlock_t lock;
43 struct smtc_ipi *tail;
44 int depth;
45};
46
47extern struct smtc_ipi_q IPIQ[NR_CPUS];
48extern struct smtc_ipi_q freeIPIq;
49
50static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
51{
52 long flags;
53
54 spin_lock_irqsave(&q->lock, flags);
55 if (q->head == NULL)
56 q->head = q->tail = p;
57 else
58 q->tail->flink = p;
59 p->flink = NULL;
60 q->tail = p;
61 q->depth++;
62#ifdef SMTC_IPI_DEBUG
63 p->sender = read_c0_tcbind();
64 p->stamp = read_c0_count();
65#endif /* SMTC_IPI_DEBUG */
66 spin_unlock_irqrestore(&q->lock, flags);
67}
68
69static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
70{
71 struct smtc_ipi *p;
72 long flags;
73
74 spin_lock_irqsave(&q->lock, flags);
75 if (q->head == NULL)
76 p = NULL;
77 else {
78 p = q->head;
79 q->head = q->head->flink;
80 q->depth--;
81 /* Arguably unnecessary, but leaves queue cleaner */
82 if (q->head == NULL)
83 q->tail = NULL;
84 }
85 spin_unlock_irqrestore(&q->lock, flags);
86 return p;
87}
88
89static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
90{
91 long flags;
92
93 spin_lock_irqsave(&q->lock, flags);
94 if (q->head == NULL) {
95 q->head = q->tail = p;
96 p->flink = NULL;
97 } else {
98 p->flink = q->head;
99 q->head = p;
100 }
101 q->depth++;
102 spin_unlock_irqrestore(&q->lock, flags);
103}
104
105static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
106{
107 long flags;
108 int retval;
109
110 spin_lock_irqsave(&q->lock, flags);
111 retval = q->depth;
112 spin_unlock_irqrestore(&q->lock, flags);
113 return retval;
114}
115
116extern void smtc_send_ipi(int cpu, int type, unsigned int action);
117
118#endif /* __ASM_SMTC_IPI_H */
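The primitives above give a spinlock-protected singly linked queue: FIFO enqueue (smtc_ipi_nq), dequeue (smtc_ipi_dq) and requeue-at-head (smtc_ipi_req). A minimal sketch of a send path built on them, using the freeIPIq descriptor pool declared above; example_post_ipi() is an illustrative name only, and the real smtc_send_ipi() in smtc.c additionally has to kick the target TC.

	#include <asm/smtc_ipi.h>

	static int example_post_ipi(int cpu, int type, void *arg)
	{
		struct smtc_ipi *pipi = smtc_ipi_dq(&freeIPIq);

		if (pipi == NULL)
			return -1;		/* descriptor pool exhausted */

		pipi->type = type;		/* e.g. LINUX_SMP_IPI or SMTC_CLOCK_TICK */
		pipi->arg  = arg;
		pipi->dest = cpu;
		smtc_ipi_nq(&IPIQ[cpu], pipi);	/* FIFO enqueue on the target's queue */
		return 0;
	}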
diff --git a/include/asm-mips/smtc_proc.h b/include/asm-mips/smtc_proc.h
new file mode 100644
index 000000000000..25da651f1f5f
--- /dev/null
+++ b/include/asm-mips/smtc_proc.h
@@ -0,0 +1,23 @@
1/*
2 * Definitions for SMTC /proc entries
3 * Copyright(C) 2005 MIPS Technologies Inc.
4 */
5#ifndef __ASM_SMTC_PROC_H
6#define __ASM_SMTC_PROC_H
7
8/*
9 * per-"CPU" statistics
10 */
11
12struct smtc_cpu_proc {
13 unsigned long timerints;
14 unsigned long selfipis;
15};
16
17extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
18
19/* Count of recoveries of "stolen" FPU access rights on 34K */
20
21extern atomic_t smtc_fpu_recoveries;
22
23#endif /* __ASM_SMTC_PROC_H */
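A minimal sketch of how the per-CPU counters above might be bumped from a timer path; illustrative only, not taken from smtc-proc.c.

	#include <asm/smtc_proc.h>

	static inline void example_account_timer_tick(int cpu, int was_self_ipi)
	{
		smtc_cpu_stats[cpu].timerints++;
		if (was_self_ipi)
			smtc_cpu_stats[cpu].selfipis++;
	}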
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 2acf3e844f00..c4856a874965 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -14,9 +14,14 @@
14#include <linux/threads.h> 14#include <linux/threads.h>
15 15
16#include <asm/asm.h> 16#include <asm/asm.h>
17#include <asm/asmmacro.h>
17#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
18#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
19 20
21#ifdef CONFIG_MIPS_MT_SMTC
22#include <asm/mipsmtregs.h>
23#endif /* CONFIG_MIPS_MT_SMTC */
24
20 .macro SAVE_AT 25 .macro SAVE_AT
21 .set push 26 .set push
22 .set noat 27 .set noat
@@ -57,13 +62,30 @@
57#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
58 .macro get_saved_sp /* SMP variation */ 63 .macro get_saved_sp /* SMP variation */
59#ifdef CONFIG_32BIT 64#ifdef CONFIG_32BIT
65#ifdef CONFIG_MIPS_MT_SMTC
66 .set mips32
67 mfc0 k0, CP0_TCBIND;
68 .set mips0
69 lui k1, %hi(kernelsp)
70 srl k0, k0, 19
71 /* No need to shift down and up to clear bits 0-1 */
72#else
60 mfc0 k0, CP0_CONTEXT 73 mfc0 k0, CP0_CONTEXT
61 lui k1, %hi(kernelsp) 74 lui k1, %hi(kernelsp)
62 srl k0, k0, 23 75 srl k0, k0, 23
76#endif
63 addu k1, k0 77 addu k1, k0
64 LONG_L k1, %lo(kernelsp)(k1) 78 LONG_L k1, %lo(kernelsp)(k1)
65#endif 79#endif
66#ifdef CONFIG_64BIT 80#ifdef CONFIG_64BIT
81#ifdef CONFIG_MIPS_MT_SMTC
82 .set mips64
83 mfc0 k0, CP0_TCBIND;
84 .set mips0
85 lui k0, %highest(kernelsp)
86 dsrl k1, 19
87 /* No need to shift down and up to clear bits 0-2 */
88#else
67 MFC0 k1, CP0_CONTEXT 89 MFC0 k1, CP0_CONTEXT
68 lui k0, %highest(kernelsp) 90 lui k0, %highest(kernelsp)
69 dsrl k1, 23 91 dsrl k1, 23
@@ -71,20 +93,31 @@
71 dsll k0, k0, 16 93 dsll k0, k0, 16
72 daddiu k0, %hi(kernelsp) 94 daddiu k0, %hi(kernelsp)
73 dsll k0, k0, 16 95 dsll k0, k0, 16
96#endif /* CONFIG_MIPS_MT_SMTC */
74 daddu k1, k1, k0 97 daddu k1, k1, k0
75 LONG_L k1, %lo(kernelsp)(k1) 98 LONG_L k1, %lo(kernelsp)(k1)
76#endif 99#endif /* CONFIG_64BIT */
77 .endm 100 .endm
78 101
79 .macro set_saved_sp stackp temp temp2 102 .macro set_saved_sp stackp temp temp2
80#ifdef CONFIG_32BIT 103#ifdef CONFIG_32BIT
104#ifdef CONFIG_MIPS_MT_SMTC
105 mfc0 \temp, CP0_TCBIND
106 srl \temp, 19
107#else
81 mfc0 \temp, CP0_CONTEXT 108 mfc0 \temp, CP0_CONTEXT
82 srl \temp, 23 109 srl \temp, 23
83#endif 110#endif
111#endif
84#ifdef CONFIG_64BIT 112#ifdef CONFIG_64BIT
113#ifdef CONFIG_MIPS_MT_SMTC
114 mfc0 \temp, CP0_TCBIND
115 dsrl \temp, 19
116#else
85 MFC0 \temp, CP0_CONTEXT 117 MFC0 \temp, CP0_CONTEXT
86 dsrl \temp, 23 118 dsrl \temp, 23
87#endif 119#endif
120#endif
88 LONG_S \stackp, kernelsp(\temp) 121 LONG_S \stackp, kernelsp(\temp)
89 .endm 122 .endm
90#else 123#else
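In the SMTC variants above, the per-CPU kernel stack pointer is indexed by the current TC number taken from CP0_TCBIND rather than by the CPU number kept in CP0_CONTEXT. A minimal C restatement of the 32-bit lookup, assuming (this is not spelled out in the patch itself) that the shift right by 19 leaves the CurTC field pre-scaled as a byte offset into the word-sized kernelsp[] array; the helper name is illustrative only.

	#include <linux/threads.h>

	extern unsigned long kernelsp[NR_CPUS];

	static inline unsigned long example_saved_sp(unsigned int tcbind)
	{
		/* CurTC ends up multiplied by sizeof(long) after the shift */
		unsigned long offset = tcbind >> 19;

		return *(unsigned long *)((char *)kernelsp + offset);
	}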
@@ -122,10 +155,25 @@
122 PTR_SUBU sp, k1, PT_SIZE 155 PTR_SUBU sp, k1, PT_SIZE
123 LONG_S k0, PT_R29(sp) 156 LONG_S k0, PT_R29(sp)
124 LONG_S $3, PT_R3(sp) 157 LONG_S $3, PT_R3(sp)
158 /*
159 * You might think that you don't need to save $0,
160 * but the FPU emulator and gdb remote debug stub
161 * need it to operate correctly
162 */
125 LONG_S $0, PT_R0(sp) 163 LONG_S $0, PT_R0(sp)
126 mfc0 v1, CP0_STATUS 164 mfc0 v1, CP0_STATUS
127 LONG_S $2, PT_R2(sp) 165 LONG_S $2, PT_R2(sp)
128 LONG_S v1, PT_STATUS(sp) 166 LONG_S v1, PT_STATUS(sp)
167#ifdef CONFIG_MIPS_MT_SMTC
168 /*
169 * Ideally, these instructions would be shuffled in
170 * to cover the pipeline delay.
171 */
172 .set mips32
173 mfc0 v1, CP0_TCSTATUS
174 .set mips0
175 LONG_S v1, PT_TCSTATUS(sp)
176#endif /* CONFIG_MIPS_MT_SMTC */
129 LONG_S $4, PT_R4(sp) 177 LONG_S $4, PT_R4(sp)
130 mfc0 v1, CP0_CAUSE 178 mfc0 v1, CP0_CAUSE
131 LONG_S $5, PT_R5(sp) 179 LONG_S $5, PT_R5(sp)
@@ -234,14 +282,36 @@
234 .endm 282 .endm
235 283
236#else 284#else
285/*
286 * For SMTC kernel, global IE should be left set, and interrupts
287 * controlled exclusively via IXMT.
288 */
237 289
290#ifdef CONFIG_MIPS_MT_SMTC
291#define STATMASK 0x1e
292#else
293#define STATMASK 0x1f
294#endif
238 .macro RESTORE_SOME 295 .macro RESTORE_SOME
239 .set push 296 .set push
240 .set reorder 297 .set reorder
241 .set noat 298 .set noat
299#ifdef CONFIG_MIPS_MT_SMTC
300 .set mips32r2
301 /*
302 * This may not really be necessary if ints are already
303 * inhibited here.
304 */
305 mfc0 v0, CP0_TCSTATUS
306 ori v0, TCSTATUS_IXMT
307 mtc0 v0, CP0_TCSTATUS
308 ehb
309 DMT 5 # dmt a1
310 jal mips_ihb
311#endif /* CONFIG_MIPS_MT_SMTC */
242 mfc0 a0, CP0_STATUS 312 mfc0 a0, CP0_STATUS
243 ori a0, 0x1f 313 ori a0, STATMASK
244 xori a0, 0x1f 314 xori a0, STATMASK
245 mtc0 a0, CP0_STATUS 315 mtc0 a0, CP0_STATUS
246 li v1, 0xff00 316 li v1, 0xff00
247 and a0, v1 317 and a0, v1
@@ -250,6 +320,26 @@
250 and v0, v1 320 and v0, v1
251 or v0, a0 321 or v0, a0
252 mtc0 v0, CP0_STATUS 322 mtc0 v0, CP0_STATUS
323#ifdef CONFIG_MIPS_MT_SMTC
324/*
325 * Only after EXL/ERL have been restored to status can we
326 * restore TCStatus.IXMT.
327 */
328 LONG_L v1, PT_TCSTATUS(sp)
329 ehb
330 mfc0 v0, CP0_TCSTATUS
331 andi v1, TCSTATUS_IXMT
332 /* We know that TCStatus.IXMT should be set from above */
333 xori v0, v0, TCSTATUS_IXMT
334 or v0, v0, v1
335 mtc0 v0, CP0_TCSTATUS
336 ehb
337 andi a1, a1, VPECONTROL_TE
338 beqz a1, 1f
339 emt
3401:
341 .set mips0
342#endif /* CONFIG_MIPS_MT_SMTC */
253 LONG_L v1, PT_EPC(sp) 343 LONG_L v1, PT_EPC(sp)
254 MTC0 v1, CP0_EPC 344 MTC0 v1, CP0_EPC
255 LONG_L $31, PT_R31(sp) 345 LONG_L $31, PT_R31(sp)
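The block above restores TCStatus.IXMT only after Status has been rewritten: the currently set IXMT bit is cleared and the bit saved in PT_TCSTATUS is OR-ed back in. A minimal C restatement of that merge, assuming TCSTATUS_IXMT as defined in the accompanying mipsregs.h changes; the helper name is illustrative only.

	static inline unsigned int example_restore_ixmt(unsigned int cur_tcstatus,
							unsigned int saved_tcstatus)
	{
		cur_tcstatus ^= TCSTATUS_IXMT;		/* IXMT is known set here: clear it */
		return cur_tcstatus | (saved_tcstatus & TCSTATUS_IXMT);	/* merge saved state back */
	}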
@@ -302,11 +392,33 @@
302 * Set cp0 enable bit as sign that we're running on the kernel stack 392 * Set cp0 enable bit as sign that we're running on the kernel stack
303 */ 393 */
304 .macro CLI 394 .macro CLI
395#if !defined(CONFIG_MIPS_MT_SMTC)
305 mfc0 t0, CP0_STATUS 396 mfc0 t0, CP0_STATUS
306 li t1, ST0_CU0 | 0x1f 397 li t1, ST0_CU0 | 0x1f
307 or t0, t1 398 or t0, t1
308 xori t0, 0x1f 399 xori t0, 0x1f
309 mtc0 t0, CP0_STATUS 400 mtc0 t0, CP0_STATUS
401#else /* CONFIG_MIPS_MT_SMTC */
402 /*
403 * For SMTC, we need to set privilege
404 * and disable interrupts only for the
405 * current TC, using the TCStatus register.
406 */
407 mfc0 t0,CP0_TCSTATUS
408 /* Fortunately CU 0 is in the same place in both registers */
409 /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
410 li t1, ST0_CU0 | 0x08001c00
411 or t0,t1
412 /* Clear TKSU, leave IXMT */
413 xori t0, 0x00001800
414 mtc0 t0, CP0_TCSTATUS
415 ehb
416 /* We need to leave the global IE bit set, but clear EXL...*/
417 mfc0 t0, CP0_STATUS
418 ori t0, ST0_EXL | ST0_ERL
419 xori t0, ST0_EXL | ST0_ERL
420 mtc0 t0, CP0_STATUS
421#endif /* CONFIG_MIPS_MT_SMTC */
310 irq_disable_hazard 422 irq_disable_hazard
311 .endm 423 .endm
312 424
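The SMTC CLI above masks interrupts for the current TC only, by manipulating TCStatus rather than clearing Status.IE. A minimal C restatement of the TCStatus arithmetic, with the literals mirroring those in the macro; the helper name is illustrative only.

	static inline unsigned int example_smtc_cli_tcstatus(unsigned int tcstatus)
	{
		tcstatus |= ST0_CU0 | 0x08001c00;	/* set TCU0, TMX, TKSU and IXMT */
		tcstatus ^= 0x00001800;			/* clear TKSU again, leave IXMT set */
		return tcstatus;			/* IXMT set: this TC is masked */
	}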
@@ -315,11 +427,35 @@
315 * Set cp0 enable bit as sign that we're running on the kernel stack 427 * Set cp0 enable bit as sign that we're running on the kernel stack
316 */ 428 */
317 .macro STI 429 .macro STI
430#if !defined(CONFIG_MIPS_MT_SMTC)
318 mfc0 t0, CP0_STATUS 431 mfc0 t0, CP0_STATUS
319 li t1, ST0_CU0 | 0x1f 432 li t1, ST0_CU0 | 0x1f
320 or t0, t1 433 or t0, t1
321 xori t0, 0x1e 434 xori t0, 0x1e
322 mtc0 t0, CP0_STATUS 435 mtc0 t0, CP0_STATUS
436#else /* CONFIG_MIPS_MT_SMTC */
437 /*
438 * For SMTC, we need to set privilege
439 * and enable interrupts only for the
440 * current TC, using the TCStatus register.
441 */
442 ehb
443 mfc0 t0,CP0_TCSTATUS
444 /* Fortunately CU 0 is in the same place in both registers */
445 /* Set TCU0, TKSU (for later inversion) and IXMT */
446 li t1, ST0_CU0 | 0x08001c00
447 or t0,t1
448 /* Clear TKSU *and* IXMT */
449 xori t0, 0x00001c00
450 mtc0 t0, CP0_TCSTATUS
451 ehb
452 /* We need to leave the global IE bit set, but clear EXL...*/
453 mfc0 t0, CP0_STATUS
454 ori t0, ST0_EXL
455 xori t0, ST0_EXL
456 mtc0 t0, CP0_STATUS
457 /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
458#endif /* CONFIG_MIPS_MT_SMTC */
323 irq_enable_hazard 459 irq_enable_hazard
324 .endm 460 .endm
325 461
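STI is the symmetric operation: the same bits are set and then TKSU and IXMT are both cleared, so the current TC ends up in kernel mode with interrupts unmasked. Restated the same way, with an illustrative helper name.

	static inline unsigned int example_smtc_sti_tcstatus(unsigned int tcstatus)
	{
		tcstatus |= ST0_CU0 | 0x08001c00;	/* same bit set as in CLI (TCU0, TMX, TKSU, IXMT) */
		tcstatus ^= 0x00001c00;			/* clear TKSU and IXMT */
		return tcstatus;			/* IXMT clear: this TC can take interrupts */
	}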
@@ -328,11 +464,56 @@
328 * Set cp0 enable bit as sign that we're running on the kernel stack 464 * Set cp0 enable bit as sign that we're running on the kernel stack
329 */ 465 */
330 .macro KMODE 466 .macro KMODE
467#ifdef CONFIG_MIPS_MT_SMTC
468 /*
469 * This gets baroque in SMTC. We want to
470 * protect the non-atomic clearing of EXL
471 * with DMT/EMT, but we don't want to take
472 * an interrupt while DMT is still in effect.
473 */
474
475 /* KMODE gets invoked from both reorder and noreorder code */
476 .set push
477 .set mips32r2
478 .set noreorder
479 mfc0 v0, CP0_TCSTATUS
480 andi v1, v0, TCSTATUS_IXMT
481 ori v0, TCSTATUS_IXMT
482 mtc0 v0, CP0_TCSTATUS
483 ehb
484 DMT 2 # dmt v0
485 /*
486 * We don't know a priori if ra is "live"
487 */
488 move t0, ra
489 jal mips_ihb
490 nop /* delay slot */
491 move ra, t0
492#endif /* CONFIG_MIPS_MT_SMTC */
331 mfc0 t0, CP0_STATUS 493 mfc0 t0, CP0_STATUS
332 li t1, ST0_CU0 | 0x1e 494 li t1, ST0_CU0 | 0x1e
333 or t0, t1 495 or t0, t1
334 xori t0, 0x1e 496 xori t0, 0x1e
335 mtc0 t0, CP0_STATUS 497 mtc0 t0, CP0_STATUS
498#ifdef CONFIG_MIPS_MT_SMTC
499 ehb
500 andi v0, v0, VPECONTROL_TE
501 beqz v0, 2f
502 nop /* delay slot */
503 emt
5042:
505 mfc0 v0, CP0_TCSTATUS
506 /* Clear IXMT, then OR in previous value */
507 ori v0, TCSTATUS_IXMT
508 xori v0, TCSTATUS_IXMT
509 or v0, v1, v0
510 mtc0 v0, CP0_TCSTATUS
511 /*
512 * irq_disable_hazard below should expand to EHB
513 * on 24K/34K CPUs
514 */
515 .set pop
516#endif /* CONFIG_MIPS_MT_SMTC */
336 irq_disable_hazard 517 irq_disable_hazard
337 .endm 518 .endm
338 519