path: root/arch/mips/kernel
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile | 5
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 3
-rw-r--r--  arch/mips/kernel/branch.c | 2
-rw-r--r--  arch/mips/kernel/entry.S | 69
-rw-r--r--  arch/mips/kernel/gdb-low.S | 24
-rw-r--r--  arch/mips/kernel/gdb-stub.c | 61
-rw-r--r--  arch/mips/kernel/genex.S | 43
-rw-r--r--  arch/mips/kernel/head.S | 57
-rw-r--r--  arch/mips/kernel/i8259.c | 4
-rw-r--r--  arch/mips/kernel/irq-msc01.c | 9
-rw-r--r--  arch/mips/kernel/irq.c | 18
-rw-r--r--  arch/mips/kernel/kspd.c | 398
-rw-r--r--  arch/mips/kernel/linux32.c | 74
-rw-r--r--  arch/mips/kernel/mips-mt.c | 449
-rw-r--r--  arch/mips/kernel/mips_ksyms.c | 15
-rw-r--r--  arch/mips/kernel/process.c | 21
-rw-r--r--  arch/mips/kernel/ptrace.c | 18
-rw-r--r--  arch/mips/kernel/ptrace32.c | 14
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 34
-rw-r--r--  arch/mips/kernel/rtlx.c | 518
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 13
-rw-r--r--  arch/mips/kernel/scall64-64.S | 2
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mips/kernel/setup.c | 5
-rw-r--r--  arch/mips/kernel/smp-mt.c (renamed from arch/mips/kernel/smp_mt.c) | 44
-rw-r--r--  arch/mips/kernel/smp.c | 12
-rw-r--r--  arch/mips/kernel/smtc-asm.S | 130
-rw-r--r--  arch/mips/kernel/smtc-proc.c | 93
-rw-r--r--  arch/mips/kernel/smtc.c | 1322
-rw-r--r--  arch/mips/kernel/time.c | 3
-rw-r--r--  arch/mips/kernel/traps.c | 212
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mips/kernel/vpe.c | 665
34 files changed, 3793 insertions, 550 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 309d54cceda3..34e8a256765c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,8 +34,11 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
 
 obj-$(CONFIG_SMP) += smp.o
 
-obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
+obj-$(CONFIG_MIPS_MT) += mips-mt.o
+obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
 
+obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6b03c773be..92b28b674d6f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
 offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
 offset("#define PT_STATUS ", struct pt_regs, cp0_status);
 offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
+#ifdef CONFIG_MIPS_MT_SMTC
+offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
+#endif /* CONFIG_MIPS_MT_SMTC */
 size("#define PT_SIZE ", struct pt_regs);
 linefeed;
 }
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 374de839558d..b6232d9033cb 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -184,7 +184,7 @@ int __compute_return_epc(struct pt_regs *regs)
 bit = (insn.i_format.rt >> 2);
 bit += (bit != 0);
 bit += 23;
-switch (insn.i_format.rt) {
+switch (insn.i_format.rt & 3) {
 case 0: /* bc1f */
 case 2: /* bc1fl */
 if (~fcr31 & (1 << bit))
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 83c87fe4ee4f..d101d2fb24ca 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -17,6 +17,9 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif
 
 #ifdef CONFIG_PREEMPT
 .macro preempt_stop
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
 bnez t0, syscall_exit_work
 
 FEXPORT(restore_all) # restore full frame
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Detect and execute deferred IPI "interrupts" */
+move a0,sp
+jal deferred_smtc_ipi
+/* Re-arm any temporarily masked interrupts not explicitly "acked" */
+mfc0 v0, CP0_TCSTATUS
+ori v1, v0, TCSTATUS_IXMT
+mtc0 v1, CP0_TCSTATUS
+andi v0, TCSTATUS_IXMT
+ehb
+mfc0 t0, CP0_TCCONTEXT
+DMT 9 # dmt t1
+jal mips_ihb
+mfc0 t2, CP0_STATUS
+andi t3, t0, 0xff00
+or t2, t2, t3
+mtc0 t2, CP0_STATUS
+ehb
+andi t1, t1, VPECONTROL_TE
+beqz t1, 1f
+EMT
+1:
+mfc0 v1, CP0_TCSTATUS
+/* We set IXMT above, XOR should cler it here */
+xori v1, v1, TCSTATUS_IXMT
+or v1, v0, v1
+mtc0 v1, CP0_TCSTATUS
+ehb
+xor t0, t0, t3
+mtc0 t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC */
 .set noat
 RESTORE_TEMP
 RESTORE_AT
@@ -120,28 +154,17 @@ syscall_exit_work:
 jal do_syscall_trace
 b resume_userspace
 
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+
 /*
- * Common spurious interrupt handler.
+ * MIPS32R2 Instruction Hazard Barrier - must be called
+ *
+ * For C code use the inline version named instruction_hazard().
  */
-LEAF(spurious_interrupt)
-/*
- * Someone tried to fool us by sending an interrupt but we
- * couldn't find a cause for it.
- */
-PTR_LA t1, irq_err_count
-#ifdef CONFIG_SMP
-1: ll t0, (t1)
-addiu t0, 1
-sc t0, (t1)
-#if R10000_LLSC_WAR
-beqzl t0, 1b
-#else
-beqz t0, 1b
-#endif
-#else
-lw t0, (t1)
-addiu t0, 1
-sw t0, (t1)
-#endif
-j ret_from_irq
-END(spurious_interrupt)
+LEAF(mips_ihb)
+.set mips32r2
+jr.hb ra
+nop
+END(mips_ihb)
+
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 235ad9f6bd35..10f28fb9f008 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -283,11 +283,33 @@
 */
 
 3:
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Read-modify write of Status must be atomic */
+mfc0 t2, CP0_TCSTATUS
+ori t1, t2, TCSTATUS_IXMT
+mtc0 t1, CP0_TCSTATUS
+andi t2, t2, TCSTATUS_IXMT
+ehb
+DMT 9 # dmt t1
+jal mips_ihb
+nop
+#endif /* CONFIG_MIPS_MT_SMTC */
 mfc0 t0, CP0_STATUS
 ori t0, 0x1f
 xori t0, 0x1f
 mtc0 t0, CP0_STATUS
-
+#ifdef CONFIG_MIPS_MT_SMTC
+andi t1, t1, VPECONTROL_TE
+beqz t1, 9f
+nop
+EMT # emt
+9:
+mfc0 t1, CP0_TCSTATUS
+xori t1, t1, TCSTATUS_IXMT
+or t1, t1, t2
+mtc0 t1, CP0_TCSTATUS
+ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 LONG_L v0, GDB_FR_STATUS(sp)
 LONG_L v1, GDB_FR_EPC(sp)
 mtc0 v0, CP0_STATUS
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d4f88e0af24c..6ecbdc1fefd1 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
 #include <asm/system.h>
 #include <asm/gdb-stub.h>
 #include <asm/inst.h>
+#include <asm/smp.h>
 
 /*
 * external low-level support routines
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
 local_irq_restore(flags);
 }
 
+/*
+ * GDB stub needs to call kgdb_wait on all processor with interrupts
+ * disabled, so it uses it's own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+struct call_data_struct data;
+int i, cpus = num_online_cpus() - 1;
+int cpu = smp_processor_id();
+
+/*
+ * Can die spectacularly if this CPU isn't yet marked online
+ */
+BUG_ON(!cpu_online(cpu));
+
+if (!cpus)
+return 0;
+
+if (spin_is_locked(&smp_call_lock)) {
+/*
+ * Some other processor is trying to make us do something
+ * but we're not going to respond... give up
+ */
+return -1;
+}
+
+/*
+ * We will continue here, accepting the fact that
+ * the kernel may deadlock if another CPU attempts
+ * to call smp_call_function now...
+ */
+
+data.func = kgdb_wait;
+data.info = NULL;
+atomic_set(&data.started, 0);
+data.wait = 0;
+
+spin_lock(&smp_call_lock);
+call_data = &data;
+mb();
+
+/* Send a message to all other CPUs and wait for them to respond */
+for (i = 0; i < NR_CPUS; i++)
+if (cpu_online(i) && i != cpu)
+core_send_ipi(i, SMP_CALL_FUNCTION);
+
+/* Wait for response */
+/* FIXME: lock-up detection, backtrace on lock-up */
+while (atomic_read(&data.started) != cpus)
+barrier();
+
+call_data = NULL;
+spin_unlock(&smp_call_lock);
+#endif
+
+return 0;
+}
 
 /*
 * This function does all command processing for interfacing to gdb. It
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
 /*
 * force other cpus to enter kgdb
 */
-smp_call_function(kgdb_wait, NULL, 0, 0);
+kgdb_smp_call_kgdb_wait();
 
 /*
 * If we're in breakpoint() increment the PC
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 13f22d1d0e8b..ff7af369f286 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/fpregdef.h>
@@ -122,6 +123,20 @@ handle_vcei:
 .set pop
 END(except_vec3_r4000)
 
+__FINIT
+
+.align 5
+NESTED(handle_int, PT_SIZE, sp)
+SAVE_ALL
+CLI
+
+PTR_LA ra, ret_from_irq
+move a0, sp
+j plat_irq_dispatch
+END(handle_int)
+
+__INIT
+
 /*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
@@ -157,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
 SAVE_AT
 .set push
 .set noreorder
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * To keep from blindly blocking *all* interrupts
+ * during service by SMTC kernel, we also want to
+ * pass the IM value to be cleared.
+ */
+EXPORT(except_vec_vi_mori)
+ori a0, $0, 0
+#endif /* CONFIG_MIPS_MT_SMTC */
 EXPORT(except_vec_vi_lui)
 lui v0, 0 /* Patched */
 j except_vec_vi_handler
@@ -173,6 +197,25 @@ EXPORT(except_vec_vi_end)
 NESTED(except_vec_vi_handler, 0, sp)
 SAVE_TEMP
 SAVE_STATIC
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC has an interesting problem that interrupts are level-triggered,
+ * and the CLI macro will clear EXL, potentially causing a duplicate
+ * interrupt service invocation. So we need to clear the associated
+ * IM bit of Status prior to doing CLI, and restore it after the
+ * service routine has been invoked - we must assume that the
+ * service routine will have cleared the state, and any active
+ * level represents a new or otherwised unserviced event...
+ */
+mfc0 t1, CP0_STATUS
+and t0, a0, t1
+mfc0 t2, CP0_TCCONTEXT
+or t0, t0, t2
+mtc0 t0, CP0_TCCONTEXT
+xor t1, t1, t0
+mtc0 t1, CP0_STATUS
+ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 CLI
 move a0, sp
 jalr v0
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2e9122a4213a..bdf6f6eff721 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -18,6 +18,7 @@
 #include <linux/threads.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
 #include <asm/mipsregs.h>
@@ -82,12 +83,33 @@
 */
 .macro setup_c0_status set clr
 .set push
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * For SMTC, we need to set privilege and disable interrupts only for
+ * the current TC, using the TCStatus register.
+ */
+mfc0 t0, CP0_TCSTATUS
+/* Fortunately CU 0 is in the same place in both registers */
+/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+li t1, ST0_CU0 | 0x08001c00
+or t0, t1
+/* Clear TKSU, leave IXMT */
+xori t0, 0x00001800
+mtc0 t0, CP0_TCSTATUS
+ehb
+/* We need to leave the global IE bit set, but clear EXL...*/
+mfc0 t0, CP0_STATUS
+or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
+xor t0, ST0_EXL | ST0_ERL | \clr
+mtc0 t0, CP0_STATUS
+#else
 mfc0 t0, CP0_STATUS
 or t0, ST0_CU0|\set|0x1f|\clr
 xor t0, 0x1f|\clr
 mtc0 t0, CP0_STATUS
 .set noreorder
 sll zero,3 # ehb
+#endif
 .set pop
 .endm
 
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 
 ARC64_TWIDDLE_PC
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
+ * We still need to enable interrupts globally in Status,
+ * and clear EXL/ERL.
+ *
+ * TCContext is used to track interrupt levels under
+ * service in SMTC kernel. Clear for boot TC before
+ * allowing any interrupts.
+ */
+mtc0 zero, CP0_TCCONTEXT
+
+mfc0 t0, CP0_STATUS
+ori t0, t0, 0xff1f
+xori t0, t0, 0x001e
+mtc0 t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 PTR_LA t0, __bss_start # clear .bss
 LONG_S zero, (t0)
 PTR_LA t1, __bss_stop - LONGSIZE
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 * function after setting up the stack and gp registers.
 */
 NESTED(smp_bootstrap, 16, sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * Read-modify-writes of Status must be atomic, and this
+ * is one case where CLI is invoked without EXL being
+ * necessarily set. The CLI and setup_c0_status will
+ * in fact be redundant for all but the first TC of
+ * each VPE being booted.
+ */
+DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
+jal mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
 setup_c0_status_sec
 smp_slave_setup
+#ifdef CONFIG_MIPS_MT_SMTC
+andi t2, t2, VPECONTROL_TE
+beqz t2, 2f
+EMT # emt
+2:
+#endif /* CONFIG_MIPS_MT_SMTC */
 j start_secondary
 END(smp_bootstrap)
 #endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b974ac9057f6..2125ba5f1d9b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -187,6 +187,10 @@ handle_real_irq:
 outb(cached_21,0x21);
 outb(0x60+irq,0x20); /* 'Specific EOI' to master */
 }
+#ifdef CONFIG_MIPS_MT_SMTC
+if (irq_hwmask[irq] & ST0_IM)
+set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 spin_unlock_irqrestore(&i8259A_lock, flags);
 return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 3f653c7cfbf3..97ebdc754b9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 mask_msc_irq(irq);
 if (!cpu_has_veic)
 MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
+/* This actually needs to be a call into platform code */
+if (irq_hwmask[irq] & ST0_IM)
+set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 }
+#ifdef CONFIG_MIPS_MT_SMTC
+if (irq_hwmask[irq] & ST0_IM)
+set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 3dd76b3d2967..3dce742e716f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
 
 atomic_t irq_err_count;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #undef do_IRQ
 
 /*
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 irq_enter();
 
+__DO_IRQ_SMTC_HOOK();
 __do_IRQ(irq, regs);
 
 irq_exit();
@@ -101,6 +111,11 @@ skip:
 return 0;
 }
 
+asmlinkage void spurious_interrupt(struct pt_regs *regs)
+{
+atomic_inc(&irq_err_count);
+}
+
 #ifdef CONFIG_KGDB
 extern void breakpoint(void);
 extern void set_debug_traps(void);
@@ -124,6 +139,9 @@ void __init init_IRQ(void)
 irq_desc[i].depth = 1;
 irq_desc[i].handler = &no_irq_type;
 spin_lock_init(&irq_desc[i].lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+irq_hwmask[i] = 0;
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 arch_init_irq();
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
new file mode 100644
index 000000000000..f06a144c7881
--- /dev/null
+++ b/arch/mips/kernel/kspd.c
@@ -0,0 +1,398 @@
1/*
2 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
17 */
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/unistd.h>
21#include <linux/file.h>
22#include <linux/fs.h>
23#include <linux/syscalls.h>
24#include <linux/workqueue.h>
25#include <linux/errno.h>
26#include <linux/list.h>
27
28#include <asm/vpe.h>
29#include <asm/rtlx.h>
30#include <asm/kspd.h>
31
32static struct workqueue_struct *workqueue = NULL;
33static struct work_struct work;
34
35extern unsigned long cpu_khz;
36
37struct mtsp_syscall {
38 int cmd;
39 unsigned char abi;
40 unsigned char size;
41};
42
43struct mtsp_syscall_ret {
44 int retval;
45 int errno;
46};
47
48struct mtsp_syscall_generic {
49 int arg0;
50 int arg1;
51 int arg2;
52 int arg3;
53 int arg4;
54 int arg5;
55 int arg6;
56};
57
58static struct list_head kspd_notifylist;
59static int sp_stopping = 0;
60
61/* these should match with those in the SDE kit */
62#define MTSP_SYSCALL_BASE 0
63#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
64#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
65#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
66#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
67#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
68#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
69#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
70#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
71#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
72#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
73
74#define MTSP_O_RDONLY 0x0000
75#define MTSP_O_WRONLY 0x0001
76#define MTSP_O_RDWR 0x0002
77#define MTSP_O_NONBLOCK 0x0004
78#define MTSP_O_APPEND 0x0008
79#define MTSP_O_SHLOCK 0x0010
80#define MTSP_O_EXLOCK 0x0020
81#define MTSP_O_ASYNC 0x0040
82#define MTSP_O_FSYNC O_SYNC
83#define MTSP_O_NOFOLLOW 0x0100
84#define MTSP_O_SYNC 0x0080
85#define MTSP_O_CREAT 0x0200
86#define MTSP_O_TRUNC 0x0400
87#define MTSP_O_EXCL 0x0800
88#define MTSP_O_BINARY 0x8000
89
90#define SP_VPE 1
91
92struct apsp_table {
93 int sp;
94 int ap;
95};
96
97/* we might want to do the mode flags too */
98struct apsp_table open_flags_table[] = {
99 { MTSP_O_RDWR, O_RDWR },
100 { MTSP_O_WRONLY, O_WRONLY },
101 { MTSP_O_CREAT, O_CREAT },
102 { MTSP_O_TRUNC, O_TRUNC },
103 { MTSP_O_NONBLOCK, O_NONBLOCK },
104 { MTSP_O_APPEND, O_APPEND },
105 { MTSP_O_NOFOLLOW, O_NOFOLLOW }
106};
107
108struct apsp_table syscall_command_table[] = {
109 { MTSP_SYSCALL_OPEN, __NR_open },
110 { MTSP_SYSCALL_CLOSE, __NR_close },
111 { MTSP_SYSCALL_READ, __NR_read },
112 { MTSP_SYSCALL_WRITE, __NR_write },
113 { MTSP_SYSCALL_LSEEK32, __NR_lseek }
114};
115
116static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
117{
118 register long int _num __asm__ ("$2") = num;
119 register long int _arg0 __asm__ ("$4") = arg0;
120 register long int _arg1 __asm__ ("$5") = arg1;
121 register long int _arg2 __asm__ ("$6") = arg2;
122 register long int _arg3 __asm__ ("$7") = arg3;
123
124 mm_segment_t old_fs;
125
126 old_fs = get_fs();
127 set_fs(KERNEL_DS);
128
129 __asm__ __volatile__ (
130 " syscall \n"
131 : "=r" (_num), "=r" (_arg3)
132 : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
133
134 set_fs(old_fs);
135
136 /* $a3 is error flag */
137 if (_arg3)
138 return -_num;
139
140 return _num;
141}
142
143static int translate_syscall_command(int cmd)
144{
145 int i;
146 int ret = -1;
147
148 for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
149 if ((cmd == syscall_command_table[i].sp))
150 return syscall_command_table[i].ap;
151 }
152
153 return ret;
154}
155
156static unsigned int translate_open_flags(int flags)
157{
158 int i;
159 unsigned int ret = 0;
160
161 for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table));
162 i++) {
163 if( (flags & open_flags_table[i].sp) ) {
164 ret |= open_flags_table[i].ap;
165 }
166 }
167
168 return ret;
169}
170
171
172static void sp_setfsuidgid( uid_t uid, gid_t gid)
173{
174 current->fsuid = uid;
175 current->fsgid = gid;
176
177 key_fsuid_changed(current);
178 key_fsgid_changed(current);
179}
180
181/*
182 * Expects a request to be on the sysio channel. Reads it. Decides whether
183 * its a linux syscall and runs it, or whatever. Puts the return code back
184 * into the request and sends the whole thing back.
185 */
186void sp_work_handle_request(void)
187{
188 struct mtsp_syscall sc;
189 struct mtsp_syscall_generic generic;
190 struct mtsp_syscall_ret ret;
191 struct kspd_notifications *n;
192 struct timeval tv;
193 struct timezone tz;
194 int cmd;
195
196 char *vcwd;
197 mm_segment_t old_fs;
198 int size;
199
200 ret.retval = -1;
201
202 if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
203 printk(KERN_ERR "Expected request but nothing to read\n");
204 return;
205 }
206
207 size = sc.size;
208
209 if (size) {
210 if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
211 printk(KERN_ERR "Expected request but nothing to read\n");
212 return;
213 }
214 }
215
216 /* Run the syscall at the priviledge of the user who loaded the
217 SP program */
218
219 if (vpe_getuid(SP_VPE))
220 sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
221
222 switch (sc.cmd) {
223 /* needs the flags argument translating from SDE kit to
224 linux */
225 case MTSP_SYSCALL_PIPEFREQ:
226 ret.retval = cpu_khz * 1000;
227 ret.errno = 0;
228 break;
229
230 case MTSP_SYSCALL_GETTOD:
231 memset(&tz, 0, sizeof(tz));
232 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
233 (int)&tz, 0,0)) == 0)
234 ret.retval = tv.tv_sec;
235
236 ret.errno = errno;
237 break;
238
239 case MTSP_SYSCALL_EXIT:
240 list_for_each_entry(n, &kspd_notifylist, list)
241 n->kspd_sp_exit(SP_VPE);
242 sp_stopping = 1;
243
244 printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
245 generic.arg0);
246 break;
247
248 case MTSP_SYSCALL_OPEN:
249 generic.arg1 = translate_open_flags(generic.arg1);
250
251 vcwd = vpe_getcwd(SP_VPE);
252
253 /* change to the cwd of the process that loaded the SP program */
254 old_fs = get_fs();
255 set_fs(KERNEL_DS);
256 sys_chdir(vcwd);
257 set_fs(old_fs);
258
259 sc.cmd = __NR_open;
260
261 /* fall through */
262
263 default:
264 if ((sc.cmd >= __NR_Linux) &&
265 (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
266 cmd = sc.cmd;
267 else
268 cmd = translate_syscall_command(sc.cmd);
269
270 if (cmd >= 0) {
271 ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
272 generic.arg2, generic.arg3);
273 ret.errno = errno;
274 } else
275 printk(KERN_WARNING
276 "KSPD: Unknown SP syscall number %d\n", sc.cmd);
277 break;
278 } /* switch */
279
280 if (vpe_getuid(SP_VPE))
281 sp_setfsuidgid( 0, 0);
282
283 if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
284 < sizeof(struct mtsp_syscall_ret))
285 printk("KSPD: sp_work_handle_request failed to send to SP\n");
286}
287
288static void sp_cleanup(void)
289{
290 struct files_struct *files = current->files;
291 int i, j;
292 struct fdtable *fdt;
293
294 j = 0;
295
296 /*
297 * It is safe to dereference the fd table without RCU or
298 * ->file_lock
299 */
300 fdt = files_fdtable(files);
301 for (;;) {
302 unsigned long set;
303 i = j * __NFDBITS;
304 if (i >= fdt->max_fdset || i >= fdt->max_fds)
305 break;
306 set = fdt->open_fds->fds_bits[j++];
307 while (set) {
308 if (set & 1) {
309 struct file * file = xchg(&fdt->fd[i], NULL);
310 if (file)
311 filp_close(file, files);
312 }
313 i++;
314 set >>= 1;
315 }
316 }
317}
318
319static int channel_open = 0;
320
321/* the work handler */
322static void sp_work(void *data)
323{
324 if (!channel_open) {
325 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
326 printk("KSPD: unable to open sp channel\n");
327 sp_stopping = 1;
328 } else {
329 channel_open++;
330 printk(KERN_DEBUG "KSPD: SP channel opened\n");
331 }
332 } else {
333 /* wait for some data, allow it to sleep */
334 rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
335
336 /* Check we haven't been woken because we are stopping */
337 if (!sp_stopping)
338 sp_work_handle_request();
339 }
340
341 if (!sp_stopping)
342 queue_work(workqueue, &work);
343 else
344 sp_cleanup();
345}
346
347static void startwork(int vpe)
348{
349 sp_stopping = channel_open = 0;
350
351 if (workqueue == NULL) {
352 if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
353 printk(KERN_ERR "unable to start kspd\n");
354 return;
355 }
356
357 INIT_WORK(&work, sp_work, NULL);
358 queue_work(workqueue, &work);
359 } else
360 queue_work(workqueue, &work);
361
362}
363
364static void stopwork(int vpe)
365{
366 sp_stopping = 1;
367
368 printk(KERN_DEBUG "KSPD: SP stopping\n");
369}
370
371void kspd_notify(struct kspd_notifications *notify)
372{
373 list_add(&notify->list, &kspd_notifylist);
374}
375
376static struct vpe_notifications notify;
377static int kspd_module_init(void)
378{
379 INIT_LIST_HEAD(&kspd_notifylist);
380
381 notify.start = startwork;
382 notify.stop = stopwork;
383 vpe_notify(SP_VPE, &notify);
384
385 return 0;
386}
387
388static void kspd_module_exit(void)
389{
390
391}
392
393module_init(kspd_module_init);
394module_exit(kspd_module_exit);
395
396MODULE_DESCRIPTION("MIPS KSPD");
397MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
398MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 3f40c37a9ee6..a7d2bb3cf835 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -356,73 +356,13 @@ asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
 asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf,
 size_t count, u32 unused, u64 a4, u64 a5)
 {
-ssize_t ret;
-struct file * file;
-ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
-loff_t pos;
-
-ret = -EBADF;
-file = fget(fd);
-if (!file)
-goto bad_file;
-if (!(file->f_mode & FMODE_READ))
-goto out;
-pos = merge_64(a4, a5);
-ret = rw_verify_area(READ, file, &pos, count);
-if (ret < 0)
-goto out;
-ret = -EINVAL;
-if (!file->f_op || !(read = file->f_op->read))
-goto out;
-if (pos < 0)
-goto out;
-ret = -ESPIPE;
-if (!(file->f_mode & FMODE_PREAD))
-goto out;
-ret = read(file, buf, count, &pos);
-if (ret > 0)
-dnotify_parent(file->f_dentry, DN_ACCESS);
-out:
-fput(file);
-bad_file:
-return ret;
+return sys_pread64(fd, buf, count, merge_64(a4, a5));
 }
 
 asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf,
 size_t count, u32 unused, u64 a4, u64 a5)
 {
-ssize_t ret;
-struct file * file;
-ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
-loff_t pos;
-
-ret = -EBADF;
-file = fget(fd);
-if (!file)
-goto bad_file;
-if (!(file->f_mode & FMODE_WRITE))
-goto out;
-pos = merge_64(a4, a5);
-ret = rw_verify_area(WRITE, file, &pos, count);
-if (ret < 0)
-goto out;
-ret = -EINVAL;
-if (!file->f_op || !(write = file->f_op->write))
-goto out;
-if (pos < 0)
-goto out;
-
-ret = -ESPIPE;
-if (!(file->f_mode & FMODE_PWRITE))
-goto out;
-
-ret = write(file, buf, count, &pos);
-if (ret > 0)
-dnotify_parent(file->f_dentry, DN_MODIFY);
-out:
-fput(file);
-bad_file:
-return ret;
+return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
 }
 
 asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
@@ -1182,6 +1122,16 @@ asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
 return sys_readahead(fd, merge_64(a2, a3), count);
 }
 
+asmlinkage long sys32_sync_file_range(int fd, int __pad,
+unsigned long a2, unsigned long a3,
+unsigned long a4, unsigned long a5,
+int flags)
+{
+return sys_sync_file_range(fd,
+merge_64(a2, a3), merge_64(a4, a5),
+flags);
+}
+
 /* Argument list sizes for sys_socketcall */
 #define AL(x) ((x) * sizeof(unsigned int))
 static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000000..02237a685ec7
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,449 @@
1/*
2 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
3 * Copyright (C) 2005 Mips Technologies, Inc
4 */
5
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/cpumask.h>
9#include <linux/interrupt.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/mmu_context.h>
17#include <asm/smp.h>
18#include <asm/mipsmtregs.h>
19#include <asm/r4kcache.h>
20#include <asm/cacheflush.h>
21
22/*
23 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
24 */
25
26cpumask_t mt_fpu_cpumask;
27
28#ifdef CONFIG_MIPS_MT_FPAFF
29
30#include <linux/cpu.h>
31#include <linux/delay.h>
32#include <asm/uaccess.h>
33
34unsigned long mt_fpemul_threshold = 0;
35
36/*
37 * Replacement functions for the sys_sched_setaffinity() and
38 * sys_sched_getaffinity() system calls, so that we can integrate
39 * FPU affinity with the user's requested processor affinity.
40 * This code is 98% identical with the sys_sched_setaffinity()
41 * and sys_sched_getaffinity() system calls, and should be
42 * updated when kernel/sched.c changes.
43 */
44
45/*
46 * find_process_by_pid - find a process with a matching PID value.
47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
48 * cloned here.
49 */
50static inline task_t *find_process_by_pid(pid_t pid)
51{
52 return pid ? find_task_by_pid(pid) : current;
53}
54
55
56/*
57 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
58 */
59asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
60 unsigned long __user *user_mask_ptr)
61{
62 cpumask_t new_mask;
63 cpumask_t effective_mask;
64 int retval;
65 task_t *p;
66
67 if (len < sizeof(new_mask))
68 return -EINVAL;
69
70 if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
71 return -EFAULT;
72
73 lock_cpu_hotplug();
74 read_lock(&tasklist_lock);
75
76 p = find_process_by_pid(pid);
77 if (!p) {
78 read_unlock(&tasklist_lock);
79 unlock_cpu_hotplug();
80 return -ESRCH;
81 }
82
83 /*
84 * It is not safe to call set_cpus_allowed with the
85 * tasklist_lock held. We will bump the task_struct's
86 * usage count and drop tasklist_lock before invoking
87 * set_cpus_allowed.
88 */
89 get_task_struct(p);
90
91 retval = -EPERM;
92 if ((current->euid != p->euid) && (current->euid != p->uid) &&
93 !capable(CAP_SYS_NICE)) {
94 read_unlock(&tasklist_lock);
95 goto out_unlock;
96 }
97
98 /* Record new user-specified CPU set for future reference */
99 p->thread.user_cpus_allowed = new_mask;
100
101 /* Unlock the task list */
102 read_unlock(&tasklist_lock);
103
104 /* Compute new global allowed CPU set if necessary */
105 if( (p->thread.mflags & MF_FPUBOUND)
106 && cpus_intersects(new_mask, mt_fpu_cpumask)) {
107 cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
108 retval = set_cpus_allowed(p, effective_mask);
109 } else {
110 p->thread.mflags &= ~MF_FPUBOUND;
111 retval = set_cpus_allowed(p, new_mask);
112 }
113
114
115out_unlock:
116 put_task_struct(p);
117 unlock_cpu_hotplug();
118 return retval;
119}
120
121/*
122 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
123 */
124asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
125 unsigned long __user *user_mask_ptr)
126{
127 unsigned int real_len;
128 cpumask_t mask;
129 int retval;
130 task_t *p;
131
132 real_len = sizeof(mask);
133 if (len < real_len)
134 return -EINVAL;
135
136 lock_cpu_hotplug();
137 read_lock(&tasklist_lock);
138
139 retval = -ESRCH;
140 p = find_process_by_pid(pid);
141 if (!p)
142 goto out_unlock;
143
144 retval = 0;
145
146 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
147
148out_unlock:
149 read_unlock(&tasklist_lock);
150 unlock_cpu_hotplug();
151 if (retval)
152 return retval;
153 if (copy_to_user(user_mask_ptr, &mask, real_len))
154 return -EFAULT;
155 return real_len;
156}
157
158#endif /* CONFIG_MIPS_MT_FPAFF */
159
160/*
161 * Dump new MIPS MT state for the core. Does not leave TCs halted.
162 * Takes an argument which taken to be a pre-call MVPControl value.
163 */
164
165void mips_mt_regdump(unsigned long mvpctl)
166{
167 unsigned long flags;
168 unsigned long vpflags;
169 unsigned long mvpconf0;
170 int nvpe;
171 int ntc;
172 int i;
173 int tc;
174 unsigned long haltval;
175 unsigned long tcstatval;
176#ifdef CONFIG_MIPS_MT_SMTC
177 void smtc_soft_dump(void);
178#endif /* CONFIG_MIPT_MT_SMTC */
179
180 local_irq_save(flags);
181 vpflags = dvpe();
182 printk("=== MIPS MT State Dump ===\n");
183 printk("-- Global State --\n");
184 printk(" MVPControl Passed: %08lx\n", mvpctl);
185 printk(" MVPControl Read: %08lx\n", vpflags);
186 printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
187 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
188 ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
189 printk("-- per-VPE State --\n");
190 for(i = 0; i < nvpe; i++) {
191 for(tc = 0; tc < ntc; tc++) {
192 settc(tc);
193 if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
194 printk(" VPE %d\n", i);
195 printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
196 printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
197 printk(" VPE%d.Status : %08lx\n",
198 i, read_vpe_c0_status());
199 printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
200 printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
201 printk(" VPE%d.Config7 : %08lx\n",
202 i, read_vpe_c0_config7());
203 break; /* Next VPE */
204 }
205 }
206 }
207 printk("-- per-TC State --\n");
208 for(tc = 0; tc < ntc; tc++) {
209 settc(tc);
210 if(read_tc_c0_tcbind() == read_c0_tcbind()) {
211 /* Are we dumping ourself? */
212 haltval = 0; /* Then we're not halted, and mustn't be */
213 tcstatval = flags; /* And pre-dump TCStatus is flags */
214 printk(" TC %d (current TC with VPE EPC above)\n", tc);
215 } else {
216 haltval = read_tc_c0_tchalt();
217 write_tc_c0_tchalt(1);
218 tcstatval = read_tc_c0_tcstatus();
219 printk(" TC %d\n", tc);
220 }
221 printk(" TCStatus : %08lx\n", tcstatval);
222 printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
223 printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
224 printk(" TCHalt : %08lx\n", haltval);
225 printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
226 if (!haltval)
227 write_tc_c0_tchalt(0);
228 }
229#ifdef CONFIG_MIPS_MT_SMTC
230 smtc_soft_dump();
231#endif /* CONFIG_MIPT_MT_SMTC */
232 printk("===========================\n");
233 evpe(vpflags);
234 local_irq_restore(flags);
235}
236
237static int mt_opt_norps = 0;
238static int mt_opt_rpsctl = -1;
239static int mt_opt_nblsu = -1;
240static int mt_opt_forceconfig7 = 0;
241static int mt_opt_config7 = -1;
242
243static int __init rps_disable(char *s)
244{
245 mt_opt_norps = 1;
246 return 1;
247}
248__setup("norps", rps_disable);
249
250static int __init rpsctl_set(char *str)
251{
252 get_option(&str, &mt_opt_rpsctl);
253 return 1;
254}
255__setup("rpsctl=", rpsctl_set);
256
257static int __init nblsu_set(char *str)
258{
259 get_option(&str, &mt_opt_nblsu);
260 return 1;
261}
262__setup("nblsu=", nblsu_set);
263
264static int __init config7_set(char *str)
265{
266 get_option(&str, &mt_opt_config7);
267 mt_opt_forceconfig7 = 1;
268 return 1;
269}
270__setup("config7=", config7_set);
271
272/* Experimental cache flush control parameters that should go away some day */
273int mt_protiflush = 0;
274int mt_protdflush = 0;
275int mt_n_iflushes = 1;
276int mt_n_dflushes = 1;
277
278static int __init set_protiflush(char *s)
279{
280 mt_protiflush = 1;
281 return 1;
282}
283__setup("protiflush", set_protiflush);
284
285static int __init set_protdflush(char *s)
286{
287 mt_protdflush = 1;
288 return 1;
289}
290__setup("protdflush", set_protdflush);
291
292static int __init niflush(char *s)
293{
294 get_option(&s, &mt_n_iflushes);
295 return 1;
296}
297__setup("niflush=", niflush);
298
299static int __init ndflush(char *s)
300{
301 get_option(&s, &mt_n_dflushes);
302 return 1;
303}
304__setup("ndflush=", ndflush);
305#ifdef CONFIG_MIPS_MT_FPAFF
306static int fpaff_threshold = -1;
307
308static int __init fpaff_thresh(char *str)
309{
310 get_option(&str, &fpaff_threshold);
311 return 1;
312}
313
314__setup("fpaff=", fpaff_thresh);
315#endif /* CONFIG_MIPS_MT_FPAFF */
316
317static unsigned int itc_base = 0;
318
319static int __init set_itc_base(char *str)
320{
321 get_option(&str, &itc_base);
322 return 1;
323}
324
325__setup("itcbase=", set_itc_base);
326
327void mips_mt_set_cpuoptions(void)
328{
329 unsigned int oconfig7 = read_c0_config7();
330 unsigned int nconfig7 = oconfig7;
331
332 if (mt_opt_norps) {
333 printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
334 }
335 if (mt_opt_rpsctl >= 0) {
336 printk("34K return prediction stack override set to %d.\n",
337 mt_opt_rpsctl);
338 if (mt_opt_rpsctl)
339 nconfig7 |= (1 << 2);
340 else
341 nconfig7 &= ~(1 << 2);
342 }
343 if (mt_opt_nblsu >= 0) {
344 printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
345 if (mt_opt_nblsu)
346 nconfig7 |= (1 << 5);
347 else
348 nconfig7 &= ~(1 << 5);
349 }
350 if (mt_opt_forceconfig7) {
351 printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
352 nconfig7 = mt_opt_config7;
353 }
354 if (oconfig7 != nconfig7) {
355 __asm__ __volatile("sync");
356 write_c0_config7(nconfig7);
357 ehb ();
358 printk("Config7: 0x%08x\n", read_c0_config7());
359 }
360
361 /* Report Cache management debug options */
362 if (mt_protiflush)
363 printk("I-cache flushes single-threaded\n");
364 if (mt_protdflush)
365 printk("D-cache flushes single-threaded\n");
366 if (mt_n_iflushes != 1)
367 printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
368 if (mt_n_dflushes != 1)
369 printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
370
371#ifdef CONFIG_MIPS_MT_FPAFF
372 /* FPU Use Factor empirically derived from experiments on 34K */
373#define FPUSEFACTOR 333
374
375 if (fpaff_threshold >= 0) {
376 mt_fpemul_threshold = fpaff_threshold;
377 } else {
378 mt_fpemul_threshold =
379 (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
380 }
381 printk("FPU Affinity set after %ld emulations\n",
382 mt_fpemul_threshold);
383#endif /* CONFIG_MIPS_MT_FPAFF */
384
385 if (itc_base != 0) {
386 /*
387 * Configure ITC mapping. This code is very
388 * specific to the 34K core family, which uses
389 * a special mode bit ("ITC") in the ErrCtl
390 * register to enable access to ITC control
391 * registers via cache "tag" operations.
392 */
393 unsigned long ectlval;
394 unsigned long itcblkgrn;
395
396 /* ErrCtl register is known as "ecc" to Linux */
397 ectlval = read_c0_ecc();
398 write_c0_ecc(ectlval | (0x1 << 26));
399 ehb();
400#define INDEX_0 (0x80000000)
401#define INDEX_8 (0x80000008)
402 /* Read "cache tag" for Dcache pseudo-index 8 */
403 cache_op(Index_Load_Tag_D, INDEX_8);
404 ehb();
405 itcblkgrn = read_c0_dtaglo();
406 itcblkgrn &= 0xfffe0000;
407 /* Set for 128 byte pitch of ITC cells */
408 itcblkgrn |= 0x00000c00;
409 /* Stage in Tag register */
410 write_c0_dtaglo(itcblkgrn);
411 ehb();
412 /* Write out to ITU with CACHE op */
413 cache_op(Index_Store_Tag_D, INDEX_8);
414 /* Now set base address, and turn ITC on with 0x1 bit */
415 write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
416 ehb();
417 /* Write out to ITU with CACHE op */
418 cache_op(Index_Store_Tag_D, INDEX_0);
419 write_c0_ecc(ectlval);
420 ehb();
421 printk("Mapped %ld ITC cells starting at 0x%08x\n",
422 ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
423 }
424}
425
426/*
427 * Function to protect cache flushes from concurrent execution
428 * depends on MP software model chosen.
429 */
430
431void mt_cflush_lockdown(void)
432{
433#ifdef CONFIG_MIPS_MT_SMTC
434 void smtc_cflush_lockdown(void);
435
436 smtc_cflush_lockdown();
437#endif /* CONFIG_MIPS_MT_SMTC */
438 /* FILL IN VSMP and AP/SP VERSIONS HERE */
439}
440
441void mt_cflush_release(void)
442{
443#ifdef CONFIG_MIPS_MT_SMTC
444 void smtc_cflush_release(void);
445
446 smtc_cflush_release();
447#endif /* CONFIG_MIPS_MT_SMTC */
448 /* FILL IN VSMP and AP/SP VERSIONS HERE */
449}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index e042f9d2ba31..0a71a4c33716 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -28,21 +28,9 @@ extern long __strnlen_user_asm(const char *s);
 /*
 * String functions
 */
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-#ifdef CONFIG_64BIT
-EXPORT_SYMBOL(strncmp);
-#endif
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
 
 EXPORT_SYMBOL(kernel_thread);
 
@@ -61,6 +49,3 @@ EXPORT_SYMBOL(__strnlen_user_asm);
 EXPORT_SYMBOL(csum_partial);
 
 EXPORT_SYMBOL(invalid_pte_table);
-#ifdef CONFIG_GENERIC_IRQ_PROBE
-EXPORT_SYMBOL(probe_irq_mask);
-#endif
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c66db5e5ab62..199a06e873c6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,6 +41,10 @@
 #include <asm/elf.h>
 #include <asm/isadep.h>
 #include <asm/inst.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+extern void smtc_idle_loop_hook(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
 {
 /* endless idle loop with no priority at all */
 while (1) {
-while (!need_resched())
+while (!need_resched()) {
+#ifdef CONFIG_MIPS_MT_SMTC
+smtc_idle_loop_hook();
+#endif /* CONFIG_MIPS_MT_SMTC */
 if (cpu_wait)
 (*cpu_wait)();
+}
 preempt_enable_no_resched();
 schedule();
 preempt_disable();
@@ -177,6 +185,17 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
 clear_tsk_thread_flag(p, TIF_USEDFPU);
 
+#ifdef CONFIG_MIPS_MT_FPAFF
+/*
+ * FPU affinity support is cleaner if we track the
+ * user-visible CPU affinity from the very beginning.
+ * The generic cpus_allowed mask will already have
+ * been copied from the parent before copy_thread
+ * is invoked.
+ */
+p->thread.user_cpus_allowed = p->cpus_allowed;
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
 if (clone_flags & CLONE_SETTLS)
 ti->tp_value = regs->regs[7];
 
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f838b36cc765..9b4733c12395 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 break;
 case FPC_EIR: { /* implementation / version register */
 unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+unsigned int irqflags;
+unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 if (!cpu_has_fpu)
 break;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Read-modify-write of Status must be atomic */
+local_irq_save(irqflags);
+mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 preempt_disable();
 if (cpu_has_mipsmt) {
 unsigned int vpflags = dvpe();
@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 write_c0_status(flags);
 }
+#ifdef CONFIG_MIPS_MT_SMTC
+emt(mtflags);
+local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 preempt_enable();
 break;
 }
@@ -469,7 +483,7 @@ static inline int audit_arch(void)
 asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 if (unlikely(current->audit_context) && entryexit)
-audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]),
+audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
 regs->regs[2]);
 
 if (!(current->ptrace & PT_PTRACED))
@@ -493,7 +507,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 }
 out:
 if (unlikely(current->audit_context) && !entryexit)
-audit_syscall_entry(current, audit_arch(), regs->regs[2],
+audit_syscall_entry(audit_arch(), regs->regs[2],
 regs->regs[4], regs->regs[5],
 regs->regs[6], regs->regs[7]);
 }
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 0d5cf97af727..8704dc0496ea 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
 break;
 case FPC_EIR: { /* implementation / version register */
 unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+unsigned int irqflags;
+unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 if (!cpu_has_fpu) {
 tmp = 0;
 break;
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Read-modify-write of Status must be atomic */
+local_irq_save(irqflags);
+mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 preempt_disable();
 if (cpu_has_mipsmt) {
 unsigned int vpflags = dvpe();
@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 write_c0_status(flags);
 }
+#ifdef CONFIG_MIPS_MT_SMTC
+emt(mtflags);
+local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 preempt_enable();
 break;
 }
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd19a9c8..0b1b54acee9f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -88,7 +88,18 @@
 
 PTR_ADDIU t0, $28, _THREAD_SIZE - 32
 set_saved_sp t0, t1, t2
-
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Read-modify-writes of Status must be atomic on a VPE */
+mfc0 t2, CP0_TCSTATUS
+ori t1, t2, TCSTATUS_IXMT
+mtc0 t1, CP0_TCSTATUS
+andi t2, t2, TCSTATUS_IXMT
+ehb
+DMT 8 # dmt t0
+move t1,ra
+jal mips_ihb
+move ra,t1
+#endif /* CONFIG_MIPS_MT_SMTC */
 mfc0 t1, CP0_STATUS /* Do we really need this? */
 li a3, 0xff01
 and t1, a3
@@ -97,6 +108,18 @@
 and a2, a3
 or a2, t1
 mtc0 a2, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+ehb
+andi t0, t0, VPECONTROL_TE
+beqz t0, 1f
+emt
+1:
+mfc0 t1, CP0_TCSTATUS
+xori t1, t1, TCSTATUS_IXMT
+or t1, t1, t2
+mtc0 t1, CP0_TCSTATUS
+ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 move v0, a0
 jr ra
 END(resume)
@@ -131,10 +154,19 @@ LEAF(_restore_fp)
 #define FPU_DEFAULT 0x00000000
 
 LEAF(_init_fpu)
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
+mfc0 t0, CP0_TCSTATUS
+/* Bit position is the same for Status, TCStatus */
+li t1, ST0_CU1
+or t0, t1
+mtc0 t0, CP0_TCSTATUS
+#else /* Normal MIPS CU1 enable */
 mfc0 t0, CP0_STATUS
 li t1, ST0_CU1
 or t0, t1
 mtc0 t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
 fpu_enable_hazard
 
 li t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 986a9cf23067..caf777f83289 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -21,45 +21,44 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
 #include <linux/interrupt.h>
-#include <linux/irq.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
-
 #include <asm/mipsmtregs.h>
-#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/vpe.h>
 #include <asm/rtlx.h>
-#include <asm/uaccess.h>
 
 #define RTLX_TARG_VPE 1
 
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
-static struct irqaction irq;
-static int irq_num;
-
-static inline int spacefree(int read, int write, int size)
-{
-if (read == write) {
-/*
- * never fill the buffer completely, so indexes are always
- * equal if empty and only empty, or !equal if data available
- */
-return size - 1;
-}
-
-return ((read + size - write) % size) - 1;
-}
 
 static struct chan_waitqueues {
 wait_queue_head_t rt_queue;
 wait_queue_head_t lx_queue;
+int in_open;
 } channel_wqs[RTLX_CHANNELS];
 
+static struct irqaction irq;
+static int irq_num;
+static struct vpe_notifications notify;
+static int sp_stopping = 0;
+
 extern void *vpe_get_shared(int index);
 
 static void rtlx_dispatch(struct pt_regs *regs)
@@ -67,174 +66,298 @@ static void rtlx_dispatch(struct pt_regs *regs)
67 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs); 66 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
68} 67}
69 68
69
 70/* Interrupt handler may be called before rtlx_init
 71 * has otherwise had a chance to run.
 72 */
70static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs) 73static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
71{ 74{
72 int i; 75 int i;
73 76
74 for (i = 0; i < RTLX_CHANNELS; i++) { 77 for (i = 0; i < RTLX_CHANNELS; i++) {
75 struct rtlx_channel *chan = &rtlx->channel[i]; 78 wake_up(&channel_wqs[i].lx_queue);
76 79 wake_up(&channel_wqs[i].rt_queue);
77 if (chan->lx_read != chan->lx_write)
78 wake_up_interruptible(&channel_wqs[i].lx_queue);
79 } 80 }
80 81
81 return IRQ_HANDLED; 82 return IRQ_HANDLED;
82} 83}
83 84
84/* call when we have the address of the shared structure from the SP side. */ 85static __attribute_used__ void dump_rtlx(void)
85static int rtlx_init(struct rtlx_info *rtlxi)
86{ 86{
87 int i; 87 int i;
88 88
89 if (rtlxi->id != RTLX_ID) { 89 printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
90 printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
91 return -ENOEXEC;
92 }
93 90
94 /* initialise the wait queues */
95 for (i = 0; i < RTLX_CHANNELS; i++) { 91 for (i = 0; i < RTLX_CHANNELS; i++) {
96 init_waitqueue_head(&channel_wqs[i].rt_queue); 92 struct rtlx_channel *chan = &rtlx->channel[i];
97 init_waitqueue_head(&channel_wqs[i].lx_queue);
98 }
99 93
100 /* set up for interrupt handling */ 94 printk(" rt_state %d lx_state %d buffer_size %d\n",
101 memset(&irq, 0, sizeof(struct irqaction)); 95 chan->rt_state, chan->lx_state, chan->buffer_size);
102 96
103 if (cpu_has_vint) 97 printk(" rt_read %d rt_write %d\n",
104 set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); 98 chan->rt_read, chan->rt_write);
99
100 printk(" lx_read %d lx_write %d\n",
101 chan->lx_read, chan->lx_write);
105 102
106 irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; 103 printk(" rt_buffer <%s>\n", chan->rt_buffer);
107 irq.handler = rtlx_interrupt; 104 printk(" lx_buffer <%s>\n", chan->lx_buffer);
108 irq.flags = SA_INTERRUPT; 105 }
109 irq.name = "RTLX"; 106}
110 irq.dev_id = rtlx; 107
111 setup_irq(irq_num, &irq); 108/* call when we have the address of the shared structure from the SP side. */
109static int rtlx_init(struct rtlx_info *rtlxi)
110{
111 if (rtlxi->id != RTLX_ID) {
112 printk(KERN_ERR "no valid RTLX id at 0x%p 0x%x\n", rtlxi, rtlxi->id);
113 return -ENOEXEC;
114 }
112 115
113 rtlx = rtlxi; 116 rtlx = rtlxi;
114 117
115 return 0; 118 return 0;
116} 119}
117 120
118/* only allow one open process at a time to open each channel */ 121/* notifications */
119static int rtlx_open(struct inode *inode, struct file *filp) 122static void starting(int vpe)
123{
124 int i;
125 sp_stopping = 0;
126
127 /* force a reload of rtlx */
128 rtlx=NULL;
129
130 /* wake up any sleeping rtlx_open's */
131 for (i = 0; i < RTLX_CHANNELS; i++)
132 wake_up_interruptible(&channel_wqs[i].lx_queue);
133}
134
135static void stopping(int vpe)
120{ 136{
121 int minor, ret; 137 int i;
138
139 sp_stopping = 1;
140 for (i = 0; i < RTLX_CHANNELS; i++)
141 wake_up_interruptible(&channel_wqs[i].lx_queue);
142}
143
144
145int rtlx_open(int index, int can_sleep)
146{
147 int ret;
122 struct rtlx_channel *chan; 148 struct rtlx_channel *chan;
149 volatile struct rtlx_info **p;
123 150
124 /* assume only 1 device at the mo. */ 151 if (index >= RTLX_CHANNELS) {
125 minor = MINOR(inode->i_rdev); 152 printk(KERN_DEBUG "rtlx_open index out of range\n");
153 return -ENOSYS;
154 }
155
156 if (channel_wqs[index].in_open) {
157 printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
158 return -EBUSY;
159 }
160
161 channel_wqs[index].in_open++;
126 162
127 if (rtlx == NULL) { 163 if (rtlx == NULL) {
128 struct rtlx_info **p;
129 if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) { 164 if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
130 printk(KERN_ERR "vpe_get_shared is NULL. " 165 if (can_sleep) {
131 "Has an SP program been loaded?\n"); 166 DECLARE_WAITQUEUE(wait, current);
132 return -EFAULT; 167
168 /* go to sleep */
169 add_wait_queue(&channel_wqs[index].lx_queue, &wait);
170
171 set_current_state(TASK_INTERRUPTIBLE);
172 while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
173 schedule();
174 set_current_state(TASK_INTERRUPTIBLE);
175 }
176
177 set_current_state(TASK_RUNNING);
178 remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
179
180 /* back running */
181 } else {
182 printk( KERN_DEBUG "No SP program loaded, and device "
183 "opened with O_NONBLOCK\n");
184 channel_wqs[index].in_open = 0;
185 return -ENOSYS;
186 }
133 } 187 }
134 188
135 if (*p == NULL) { 189 if (*p == NULL) {
136 printk(KERN_ERR "vpe_shared %p %p\n", p, *p); 190 if (can_sleep) {
137 return -EFAULT; 191 DECLARE_WAITQUEUE(wait, current);
192
193 /* go to sleep */
194 add_wait_queue(&channel_wqs[index].lx_queue, &wait);
195
196 set_current_state(TASK_INTERRUPTIBLE);
197 while (*p == NULL) {
198 schedule();
199
 200 /* Reset task state to interruptible, otherwise
 201 we'll whizz round here like a very fast loopy
 202 thing. schedule() appears to return with state
 203 set to TASK_RUNNING.
 204
 205 If the loaded SP program, for whatever reason,
 206 doesn't set up the shared structure, *p will never
 207 become non-NULL. So whoever connected to /dev/rt?
 208 (or kspd) will then take up rather a lot of
 209 processor cycles.
 210 */
211
212 set_current_state(TASK_INTERRUPTIBLE);
213 }
214
215 set_current_state(TASK_RUNNING);
216 remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
217
218 /* back running */
219 }
220 else {
221 printk(" *vpe_get_shared is NULL. "
222 "Has an SP program been loaded?\n");
223 channel_wqs[index].in_open = 0;
224 return -ENOSYS;
225 }
138 } 226 }
139 227
140 if ((ret = rtlx_init(*p)) < 0) 228 if ((unsigned int)*p < KSEG0) {
141 return ret; 229 printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
230 "maybe an error code %d\n", (int)*p);
231 channel_wqs[index].in_open = 0;
232 return -ENOSYS;
233 }
234
235 if ((ret = rtlx_init(*p)) < 0) {
236 channel_wqs[index].in_open = 0;
237 return ret;
238 }
142 } 239 }
143 240
144 chan = &rtlx->channel[minor]; 241 chan = &rtlx->channel[index];
145 242
146 if (test_and_set_bit(RTLX_STATE_OPENED, &chan->lx_state)) 243 if (chan->lx_state == RTLX_STATE_OPENED) {
147 return -EBUSY; 244 channel_wqs[index].in_open = 0;
245 return -EBUSY;
246 }
148 247
248 chan->lx_state = RTLX_STATE_OPENED;
249 channel_wqs[index].in_open = 0;
149 return 0; 250 return 0;
150} 251}
151 252
152static int rtlx_release(struct inode *inode, struct file *filp) 253int rtlx_release(int index)
153{ 254{
154 int minor = MINOR(inode->i_rdev); 255 rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
155
156 clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
157 smp_mb__after_clear_bit();
158
159 return 0; 256 return 0;
160} 257}
161 258
162static unsigned int rtlx_poll(struct file *file, poll_table * wait) 259unsigned int rtlx_read_poll(int index, int can_sleep)
163{ 260{
164 int minor; 261 struct rtlx_channel *chan;
165 unsigned int mask = 0;
166 struct rtlx_channel *chan;
167 262
168 minor = MINOR(file->f_dentry->d_inode->i_rdev); 263 if (rtlx == NULL)
169 chan = &rtlx->channel[minor]; 264 return 0;
170 265
171 poll_wait(file, &channel_wqs[minor].rt_queue, wait); 266 chan = &rtlx->channel[index];
172 poll_wait(file, &channel_wqs[minor].lx_queue, wait);
173 267
174 /* data available to read? */ 268 /* data available to read? */
175 if (chan->lx_read != chan->lx_write) 269 if (chan->lx_read == chan->lx_write) {
176 mask |= POLLIN | POLLRDNORM; 270 if (can_sleep) {
271 DECLARE_WAITQUEUE(wait, current);
177 272
178 /* space to write */ 273 /* go to sleep */
179 if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size)) 274 add_wait_queue(&channel_wqs[index].lx_queue, &wait);
180 mask |= POLLOUT | POLLWRNORM;
181 275
182 return mask; 276 set_current_state(TASK_INTERRUPTIBLE);
277 while (chan->lx_read == chan->lx_write) {
278 schedule();
279
280 set_current_state(TASK_INTERRUPTIBLE);
281
282 if (sp_stopping) {
283 set_current_state(TASK_RUNNING);
284 remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
285 return 0;
286 }
287 }
288
289 set_current_state(TASK_RUNNING);
290 remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
291
292 /* back running */
293 }
294 else
295 return 0;
296 }
297
298 return (chan->lx_write + chan->buffer_size - chan->lx_read)
299 % chan->buffer_size;
183} 300}
184 301
185static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count, 302static inline int write_spacefree(int read, int write, int size)
186 loff_t * ppos)
187{ 303{
188 unsigned long failed; 304 if (read == write) {
189 size_t fl = 0L; 305 /*
190 int minor; 306 * Never fill the buffer completely, so indexes are always
191 struct rtlx_channel *lx; 307 * equal if empty and only empty, or !equal if data available
192 DECLARE_WAITQUEUE(wait, current); 308 */
309 return size - 1;
310 }
193 311
194 minor = MINOR(file->f_dentry->d_inode->i_rdev); 312 return ((read + size - write) % size) - 1;
195 lx = &rtlx->channel[minor]; 313}
196 314
197 /* data available? */ 315unsigned int rtlx_write_poll(int index)
198 if (lx->lx_write == lx->lx_read) { 316{
199 if (file->f_flags & O_NONBLOCK) 317 struct rtlx_channel *chan = &rtlx->channel[index];
200 return 0; /* -EAGAIN makes cat whinge */ 318 return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
319}
201 320
202 /* go to sleep */ 321static inline void copy_to(void *dst, void *src, size_t count, int user)
203 add_wait_queue(&channel_wqs[minor].lx_queue, &wait); 322{
204 set_current_state(TASK_INTERRUPTIBLE); 323 if (user)
324 copy_to_user(dst, src, count);
325 else
326 memcpy(dst, src, count);
327}
205 328
206 while (lx->lx_write == lx->lx_read) 329static inline void copy_from(void *dst, void *src, size_t count, int user)
207 schedule(); 330{
331 if (user)
332 copy_from_user(dst, src, count);
333 else
334 memcpy(dst, src, count);
335}
208 336
209 set_current_state(TASK_RUNNING); 337ssize_t rtlx_read(int index, void *buff, size_t count, int user)
210 remove_wait_queue(&channel_wqs[minor].lx_queue, &wait); 338{
339 size_t fl = 0L;
340 struct rtlx_channel *lx;
211 341
212 /* back running */ 342 if (rtlx == NULL)
213 } 343 return -ENOSYS;
344
345 lx = &rtlx->channel[index];
214 346
215 /* find out how much in total */ 347 /* find out how much in total */
216 count = min(count, 348 count = min(count,
217 (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); 349 (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
350 % lx->buffer_size);
218 351
219 /* then how much from the read pointer onwards */ 352 /* then how much from the read pointer onwards */
220 fl = min(count, (size_t)lx->buffer_size - lx->lx_read); 353 fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
221 354
222 failed = copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl); 355 copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
223 if (failed) {
224 count = fl - failed;
225 goto out;
226 }
227 356
228 /* and if there is anything left at the beginning of the buffer */ 357 /* and if there is anything left at the beginning of the buffer */
229 if (count - fl) { 358 if ( count - fl )
230 failed = copy_to_user (buffer + fl, lx->lx_buffer, count - fl); 359 copy_to (buff + fl, lx->lx_buffer, count - fl, user);
231 if (failed) {
232 count -= failed;
233 goto out;
234 }
235 }
236 360
237out:
238 /* update the index */ 361 /* update the index */
239 lx->lx_read += count; 362 lx->lx_read += count;
240 lx->lx_read %= lx->buffer_size; 363 lx->lx_read %= lx->buffer_size;
@@ -242,20 +365,100 @@ out:
242 return count; 365 return count;
243} 366}
244 367
245static ssize_t rtlx_write(struct file *file, const char __user * buffer, 368ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
369{
370 struct rtlx_channel *rt;
371 size_t fl;
372
373 if (rtlx == NULL)
374 return(-ENOSYS);
375
376 rt = &rtlx->channel[index];
377
378 /* total number of bytes to copy */
379 count = min(count,
380 (size_t)write_spacefree(rt->rt_read, rt->rt_write,
381 rt->buffer_size));
382
383 /* first bit from write pointer to the end of the buffer, or count */
384 fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
385
386 copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
387
388 /* if there's any left copy to the beginning of the buffer */
389 if( count - fl )
390 copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
391
392 rt->rt_write += count;
393 rt->rt_write %= rt->buffer_size;
394
395 return(count);
396}
397
398
399static int file_open(struct inode *inode, struct file *filp)
400{
401 int minor = iminor(inode);
402
403 return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
404}
405
406static int file_release(struct inode *inode, struct file *filp)
407{
408 int minor = iminor(inode);
409
410 return rtlx_release(minor);
411}
412
413static unsigned int file_poll(struct file *file, poll_table * wait)
414{
415 int minor;
416 unsigned int mask = 0;
417
418 minor = iminor(file->f_dentry->d_inode);
419
420 poll_wait(file, &channel_wqs[minor].rt_queue, wait);
421 poll_wait(file, &channel_wqs[minor].lx_queue, wait);
422
423 if (rtlx == NULL)
424 return 0;
425
426 /* data available to read? */
427 if (rtlx_read_poll(minor, 0))
428 mask |= POLLIN | POLLRDNORM;
429
430 /* space to write */
431 if (rtlx_write_poll(minor))
432 mask |= POLLOUT | POLLWRNORM;
433
434 return mask;
435}
436
437static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
438 loff_t * ppos)
439{
440 int minor = iminor(file->f_dentry->d_inode);
441
442 /* data available? */
443 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
 444 return 0; /* -EAGAIN makes cat whinge */
445 }
446
447 return rtlx_read(minor, buffer, count, 1);
448}
449
450static ssize_t file_write(struct file *file, const char __user * buffer,
246 size_t count, loff_t * ppos) 451 size_t count, loff_t * ppos)
247{ 452{
248 unsigned long failed;
249 int minor; 453 int minor;
250 struct rtlx_channel *rt; 454 struct rtlx_channel *rt;
251 size_t fl;
252 DECLARE_WAITQUEUE(wait, current); 455 DECLARE_WAITQUEUE(wait, current);
253 456
254 minor = MINOR(file->f_dentry->d_inode->i_rdev); 457 minor = iminor(file->f_dentry->d_inode);
255 rt = &rtlx->channel[minor]; 458 rt = &rtlx->channel[minor];
256 459
257 /* any space left... */ 460 /* any space left... */
258 if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) { 461 if (!rtlx_write_poll(minor)) {
259 462
260 if (file->f_flags & O_NONBLOCK) 463 if (file->f_flags & O_NONBLOCK)
261 return -EAGAIN; 464 return -EAGAIN;
@@ -263,61 +466,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
263 add_wait_queue(&channel_wqs[minor].rt_queue, &wait); 466 add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
264 set_current_state(TASK_INTERRUPTIBLE); 467 set_current_state(TASK_INTERRUPTIBLE);
265 468
266 while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) 469 while (!rtlx_write_poll(minor))
267 schedule(); 470 schedule();
268 471
269 set_current_state(TASK_RUNNING); 472 set_current_state(TASK_RUNNING);
270 remove_wait_queue(&channel_wqs[minor].rt_queue, &wait); 473 remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
271 } 474 }
272 475
273 /* total number of bytes to copy */ 476 return rtlx_write(minor, (void *)buffer, count, 1);
274 count = min(count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
275
276 /* first bit from write pointer to the end of the buffer, or count */
277 fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
278
279 failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
280 if (failed) {
281 count = fl - failed;
282 goto out;
283 }
284
285 /* if there's any left copy to the beginning of the buffer */
286 if (count - fl) {
287 failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
288 if (failed) {
289 count -= failed;
290 goto out;
291 }
292 }
293
294out:
295 rt->rt_write += count;
296 rt->rt_write %= rt->buffer_size;
297
298 return count;
299} 477}
300 478
301static struct file_operations rtlx_fops = { 479static struct file_operations rtlx_fops = {
302 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
303 .open = rtlx_open, 481 .open = file_open,
304 .release = rtlx_release, 482 .release = file_release,
305 .write = rtlx_write, 483 .write = file_write,
306 .read = rtlx_read, 484 .read = file_read,
307 .poll = rtlx_poll 485 .poll = file_poll
486};
487
488static struct irqaction rtlx_irq = {
489 .handler = rtlx_interrupt,
490 .flags = SA_INTERRUPT,
491 .name = "RTLX",
308}; 492};
309 493
494static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
495
310static char register_chrdev_failed[] __initdata = 496static char register_chrdev_failed[] __initdata =
311 KERN_ERR "rtlx_module_init: unable to register device\n"; 497 KERN_ERR "rtlx_module_init: unable to register device\n";
312 498
313static int __init rtlx_module_init(void) 499static int rtlx_module_init(void)
314{ 500{
501 int i;
502
315 major = register_chrdev(0, module_name, &rtlx_fops); 503 major = register_chrdev(0, module_name, &rtlx_fops);
316 if (major < 0) { 504 if (major < 0) {
317 printk(register_chrdev_failed); 505 printk(register_chrdev_failed);
318 return major; 506 return major;
319 } 507 }
320 508
509 /* initialise the wait queues */
510 for (i = 0; i < RTLX_CHANNELS; i++) {
511 init_waitqueue_head(&channel_wqs[i].rt_queue);
512 init_waitqueue_head(&channel_wqs[i].lx_queue);
513 channel_wqs[i].in_open = 0;
514 }
515
516 /* set up notifiers */
517 notify.start = starting;
518 notify.stop = stopping;
519 vpe_notify(RTLX_TARG_VPE, &notify);
520
521 if (cpu_has_vint)
522 set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
523
524 rtlx_irq.dev_id = rtlx;
525 setup_irq(rtlx_irq_num, &rtlx_irq);
526
321 return 0; 527 return 0;
322} 528}
323 529
@@ -330,5 +536,5 @@ module_init(rtlx_module_init);
330module_exit(rtlx_module_exit); 536module_exit(rtlx_module_exit);
331 537
332MODULE_DESCRIPTION("MIPS RTLX"); 538MODULE_DESCRIPTION("MIPS RTLX");
333MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc."); 539MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
334MODULE_LICENSE("GPL"); 540MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2f2dc54b2e26..a0ac0e5f61ad 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -569,8 +569,19 @@ einval: li v0, -EINVAL
569 sys sys_tkill 2 569 sys sys_tkill 2
570 sys sys_sendfile64 5 570 sys sys_sendfile64 5
571 sys sys_futex 6 571 sys sys_futex 6
572#ifdef CONFIG_MIPS_MT_FPAFF
573 /*
574 * For FPU affinity scheduling on MIPS MT processors, we need to
575 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
 576 * in kernel/sched.c. Considered only temporary, we only support these
577 * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
578 */
579 sys mipsmt_sys_sched_setaffinity 3
580 sys mipsmt_sys_sched_getaffinity 3
581#else
572 sys sys_sched_setaffinity 3 582 sys sys_sched_setaffinity 3
573 sys sys_sched_getaffinity 3 /* 4240 */ 583 sys sys_sched_getaffinity 3 /* 4240 */
584#endif /* CONFIG_MIPS_MT_FPAFF */
574 sys sys_io_setup 2 585 sys sys_io_setup 2
575 sys sys_io_destroy 1 586 sys sys_io_destroy 1
576 sys sys_io_getevents 5 587 sys sys_io_getevents 5
@@ -634,6 +645,8 @@ einval: li v0, -EINVAL
634 sys sys_pselect6 6 645 sys sys_pselect6 6
635 sys sys_ppoll 5 646 sys sys_ppoll 5
636 sys sys_unshare 1 647 sys sys_unshare 1
648 sys sys_splice 4
649 sys sys_sync_file_range 7 /* 4305 */
637 .endm 650 .endm
638 651
639 /* We pre-compute the number of _instruction_ bytes needed to 652 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 98bf25df56f3..9ba750887377 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -460,3 +460,5 @@ sys_call_table:
460 PTR sys_pselect6 /* 5260 */ 460 PTR sys_pselect6 /* 5260 */
461 PTR sys_ppoll 461 PTR sys_ppoll
462 PTR sys_unshare 462 PTR sys_unshare
463 PTR sys_splice
464 PTR sys_sync_file_range
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 05a2c0567dae..942aca26f9c4 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -386,3 +386,5 @@ EXPORT(sysn32_call_table)
386 PTR sys_pselect6 386 PTR sys_pselect6
387 PTR sys_ppoll /* 6265 */ 387 PTR sys_ppoll /* 6265 */
388 PTR sys_unshare 388 PTR sys_unshare
389 PTR sys_splice
390 PTR sys_sync_file_range
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 19c4ca481b02..b53a9207f530 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -508,4 +508,6 @@ sys_call_table:
508 PTR sys_pselect6 508 PTR sys_pselect6
509 PTR sys_ppoll 509 PTR sys_ppoll
510 PTR sys_unshare 510 PTR sys_unshare
511 PTR sys_splice
512 PTR sys32_sync_file_range /* 4305 */
511 .size sys_call_table,.-sys_call_table 513 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index dcbfd27071f0..bcf1b10e518f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -529,7 +529,10 @@ void __init setup_arch(char **cmdline_p)
529 529
530int __init fpu_disable(char *s) 530int __init fpu_disable(char *s)
531{ 531{
532 cpu_data[0].options &= ~MIPS_CPU_FPU; 532 int i;
533
534 for (i = 0; i < NR_CPUS; i++)
535 cpu_data[i].options &= ~MIPS_CPU_FPU;
533 536
534 return 1; 537 return 1;
535} 538}
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp-mt.c
index 993b8bf56aaf..57770902b9ae 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -1,8 +1,4 @@
1/* 1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * Elizabeth Clarke (beth@mips.com)
5 *
6 * This program is free software; you can distribute it and/or modify it 2 * This program is free software; you can distribute it and/or modify it
7 * under the terms of the GNU General Public License (Version 2) as 3 * under the terms of the GNU General Public License (Version 2) as
8 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
@@ -16,6 +12,10 @@
16 * with this program; if not, write to the Free Software Foundation, Inc., 12 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
18 * 14 *
15 * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
16 * Elizabeth Clarke (beth@mips.com)
17 * Ralf Baechle (ralf@linux-mips.org)
18 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
19 */ 19 */
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25 25
26#include <asm/atomic.h> 26#include <asm/atomic.h>
27#include <asm/cacheflush.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
29#include <asm/system.h> 30#include <asm/system.h>
@@ -33,8 +34,8 @@
33#include <asm/time.h> 34#include <asm/time.h>
34#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
35#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
36#include <asm/cacheflush.h> 37#include <asm/mips_mt.h>
37#include <asm/mips-boards/maltaint.h> 38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
38 39
39#define MIPS_CPU_IPI_RESCHED_IRQ 0 40#define MIPS_CPU_IPI_RESCHED_IRQ 0
40#define MIPS_CPU_IPI_CALL_IRQ 1 41#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
66 if (!cpu_has_mipsmt) 67 if (!cpu_has_mipsmt)
67 return; 68 return;
68 69
70 /* Enable VPC */
69 set_c0_mvpcontrol(MVPCONTROL_VPC); 71 set_c0_mvpcontrol(MVPCONTROL_VPC);
70 72
71 back_to_back_c0_hazard(); 73 back_to_back_c0_hazard();
@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
106 108
107static void ipi_resched_dispatch (struct pt_regs *regs) 109static void ipi_resched_dispatch (struct pt_regs *regs)
108{ 110{
109 do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); 111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
110} 112}
111 113
112static void ipi_call_dispatch (struct pt_regs *regs) 114static void ipi_call_dispatch (struct pt_regs *regs)
113{ 115{
114 do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs); 116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
115} 117}
116 118
117irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs) 119irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -148,6 +150,11 @@ void plat_smp_setup(void)
148 unsigned long val; 150 unsigned long val;
149 int i, num; 151 int i, num;
150 152
153#ifdef CONFIG_MIPS_MT_FPAFF
154 /* If we have an FPU, enroll ourselves in the FPU-full mask */
155 if (cpu_has_fpu)
156 cpu_set(0, mt_fpu_cpumask);
157#endif /* CONFIG_MIPS_MT_FPAFF */
151 if (!cpu_has_mipsmt) 158 if (!cpu_has_mipsmt)
152 return; 159 return;
153 160
@@ -155,6 +162,8 @@ void plat_smp_setup(void)
155 dvpe(); 162 dvpe();
156 dmt(); 163 dmt();
157 164
165 mips_mt_set_cpuoptions();
166
158 /* Put MVPE's into 'configuration state' */ 167 /* Put MVPE's into 'configuration state' */
159 set_c0_mvpcontrol(MVPCONTROL_VPC); 168 set_c0_mvpcontrol(MVPCONTROL_VPC);
160 169
@@ -189,11 +198,13 @@ void plat_smp_setup(void)
189 198
190 if (i != 0) { 199 if (i != 0) {
191 write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); 200 write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
192 write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
193 201
194 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 202 /* set config to be the same as vpe0, particularly kseg0 coherency alg */
195 write_vpe_c0_config( read_c0_config()); 203 write_vpe_c0_config( read_c0_config());
196 204
205 /* make sure there are no software interrupts pending */
206 write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
207
197 /* Propagate Config7 */ 208 /* Propagate Config7 */
198 write_vpe_c0_config7(read_c0_config7()); 209 write_vpe_c0_config7(read_c0_config7());
199 } 210 }
@@ -233,16 +244,16 @@ void plat_smp_setup(void)
233 /* We'll wait until starting the secondaries before starting MVPE */ 244 /* We'll wait until starting the secondaries before starting MVPE */
234 245
235 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 246 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
247}
236 248
249void __init plat_prepare_cpus(unsigned int max_cpus)
250{
237 /* set up ipi interrupts */ 251 /* set up ipi interrupts */
238 if (cpu_has_vint) { 252 if (cpu_has_vint) {
239 set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); 253 set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
240 set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 254 set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
241 } 255 }
242}
243 256
244void __init plat_prepare_cpus(unsigned int max_cpus)
245{
246 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 257 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
247 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 258 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
248 259
@@ -287,7 +298,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
287 /* global pointer */ 298 /* global pointer */
288 write_tc_gpr_gp((unsigned long)gp); 299 write_tc_gpr_gp((unsigned long)gp);
289 300
290 flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1)); 301 flush_icache_range((unsigned long)gp,
302 (unsigned long)(gp + sizeof(struct thread_info)));
291 303
292 /* finally out of configuration and into chaos */ 304 /* finally out of configuration and into chaos */
293 clear_c0_mvpcontrol(MVPCONTROL_VPC); 305 clear_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -305,6 +317,12 @@ void prom_smp_finish(void)
305{ 317{
306 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 318 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
307 319
320#ifdef CONFIG_MIPS_MT_FPAFF
321 /* If we have an FPU, enroll ourselves in the FPU-full mask */
322 if (cpu_has_fpu)
323 cpu_set(smp_processor_id(), mt_fpu_cpumask);
324#endif /* CONFIG_MIPS_MT_FPAFF */
325
308 local_irq_enable(); 326 local_irq_enable();
309} 327}
310 328
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 78d171bfa331..d42f358754ad 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,10 @@
38#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
39#include <asm/smp.h> 39#include <asm/smp.h>
40 40
41#ifdef CONFIG_MIPS_MT_SMTC
42#include <asm/mipsmtregs.h>
43#endif /* CONFIG_MIPS_MT_SMTC */
44
41cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ 45cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
42volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 46volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
43cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ 47cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
85{ 89{
86 unsigned int cpu; 90 unsigned int cpu;
87 91
92#ifdef CONFIG_MIPS_MT_SMTC
93 /* Only do cpu_probe for first TC of CPU */
94 if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
95#endif /* CONFIG_MIPS_MT_SMTC */
88 cpu_probe(); 96 cpu_probe();
89 cpu_report(); 97 cpu_report();
90 per_cpu_trap_init(); 98 per_cpu_trap_init();
@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
179 if (wait) 187 if (wait)
180 while (atomic_read(&data.finished) != cpus) 188 while (atomic_read(&data.finished) != cpus)
181 barrier(); 189 barrier();
190 call_data = NULL;
182 spin_unlock(&smp_call_lock); 191 spin_unlock(&smp_call_lock);
183 192
184 return 0; 193 return 0;
185} 194}
186 195
196
187void smp_call_function_interrupt(void) 197void smp_call_function_interrupt(void)
188{ 198{
189 void (*func) (void *info) = call_data->func; 199 void (*func) (void *info) = call_data->func;
@@ -446,5 +456,3 @@ subsys_initcall(topology_init);
446 456
447EXPORT_SYMBOL(flush_tlb_page); 457EXPORT_SYMBOL(flush_tlb_page);
448EXPORT_SYMBOL(flush_tlb_one); 458EXPORT_SYMBOL(flush_tlb_one);
449EXPORT_SYMBOL(cpu_data);
450EXPORT_SYMBOL(synchronize_irq);
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644
index 000000000000..c9d65196d917
--- /dev/null
+++ b/arch/mips/kernel/smtc-asm.S
@@ -0,0 +1,130 @@
1/*
2 * Assembly Language Functions for MIPS MT SMTC support
3 */
4
5/*
6 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
7
8#include <asm/regdef.h>
9#include <asm/asmmacro.h>
10#include <asm/stackframe.h>
11#include <asm/stackframe.h>
12
13/*
14 * "Software Interrupt" linkage.
15 *
16 * This is invoked when an "Interrupt" is sent from one TC to another,
 17 * where the TC to be interrupted is halted, has its Restart address
 18 * and Status values saved by the "remote control" thread, then modified
 19 * to cause execution to begin here, in kernel mode. This code then
20 * disguises the TC state as that of an exception and transfers
21 * control to the general exception or vectored interrupt handler.
22 */
23 .set noreorder
24
25/*
26The __smtc_ipi_vector would use k0 and k1 as temporaries and
271) Set EXL (this is per-VPE, so this can't be done by proxy!)
282) Restore the K/CU and IXMT bits to the pre "exception" state
29 (EXL means no interrupts and access to the kernel map).
303) Set EPC to be the saved value of TCRestart.
314) Jump to the exception handler entry point passed by the sender.
32
33CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
34*/
35
36/*
37 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
38 * state of pre-halt thread, then save everything and call
 39 * through some function pointer to imaginary_exception, which
40 * will parse a register value or memory message queue to
41 * deliver things like interprocessor interrupts. On return
42 * from that function, jump to the global ret_from_irq code
43 * to invoke the scheduler and return as appropriate.
44 */
45
46#define PT_PADSLOT4 (PT_R0-8)
47#define PT_PADSLOT5 (PT_R0-4)
48
49 .text
50 .align 5
51FEXPORT(__smtc_ipi_vector)
52 .set noat
53 /* Disable thread scheduling to make Status update atomic */
54 DMT 27 # dmt k1
55 ehb
56 /* Set EXL */
57 mfc0 k0,CP0_STATUS
58 ori k0,k0,ST0_EXL
59 mtc0 k0,CP0_STATUS
60 ehb
61 /* Thread scheduling now inhibited by EXL. Restore TE state. */
62 andi k1,k1,VPECONTROL_TE
63 beqz k1,1f
64 emt
651:
66 /*
67 * The IPI sender has put some information on the anticipated
68 * kernel stack frame. If we were in user mode, this will be
69 * built above the saved kernel SP. If we were already in the
70 * kernel, it will be built above the current CPU SP.
71 *
72 * Were we in kernel mode, as indicated by CU0?
73 */
74 sll k1,k0,3
75 .set noreorder
76 bltz k1,2f
77 move k1,sp
78 .set reorder
79 /*
80 * If previously in user mode, set CU0 and use kernel stack.
81 */
82 li k1,ST0_CU0
83 or k1,k1,k0
84 mtc0 k1,CP0_STATUS
85 ehb
86 get_saved_sp
87 /* Interrupting TC will have pre-set values in slots in the new frame */
882: subu k1,k1,PT_SIZE
89 /* Load TCStatus Value */
90 lw k0,PT_TCSTATUS(k1)
91 /* Write it to TCStatus to restore CU/KSU/IXMT state */
92 mtc0 k0,$2,1
93 ehb
94 lw k0,PT_EPC(k1)
95 mtc0 k0,CP0_EPC
96 /* Save all will redundantly recompute the SP, but use it for now */
97 SAVE_ALL
98 CLI
99 move a0,sp
100 /* Function to be invoked passed stack pad slot 5 */
101 lw t0,PT_PADSLOT5(sp)
102 /* Argument from sender passed in stack pad slot 4 */
103 lw a1,PT_PADSLOT4(sp)
104 jalr t0
105 nop
106 j ret_from_irq
107 nop
108
109/*
110 * Called from idle loop to provoke processing of queued IPIs
111 * First IPI message in queue passed as argument.
112 */
113
114LEAF(self_ipi)
115 /* Before anything else, block interrupts */
116 mfc0 t0,CP0_TCSTATUS
117 ori t1,t0,TCSTATUS_IXMT
118 mtc0 t1,CP0_TCSTATUS
119 ehb
120 /* We know we're in kernel mode, so prepare stack frame */
121 subu t1,sp,PT_SIZE
122 sw ra,PT_EPC(t1)
123 sw a0,PT_PADSLOT4(t1)
124 la t2,ipi_decode
125 sw t2,PT_PADSLOT5(t1)
126 /* Save pre-disable value of TCStatus */
127 sw t0,PT_TCSTATUS(t1)
128 j __smtc_ipi_vector
129 nop
130END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
new file mode 100644
index 000000000000..6f3709996172
--- /dev/null
+++ b/arch/mips/kernel/smtc-proc.c
@@ -0,0 +1,93 @@
1/*
2 * /proc hooks for SMTC kernel
3 * Copyright (C) 2005 Mips Technologies, Inc
4 */
5
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/cpumask.h>
9#include <linux/interrupt.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/mmu_context.h>
17#include <asm/smp.h>
18#include <asm/mipsregs.h>
19#include <asm/cacheflush.h>
20#include <linux/proc_fs.h>
21
22#include <asm/smtc_proc.h>
23
24/*
25 * /proc diagnostic and statistics hooks
26 */
27
28/*
29 * Statistics gathered
30 */
31unsigned long selfipis[NR_CPUS];
32
33struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
34
35static struct proc_dir_entry *smtc_stats;
36
37atomic_t smtc_fpu_recoveries;
38
39static int proc_read_smtc(char *page, char **start, off_t off,
40 int count, int *eof, void *data)
41{
42 int totalen = 0;
43 int len;
44 int i;
45 extern unsigned long ebase;
46
47 len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
48 totalen += len;
49 page += len;
50 len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
51 totalen += len;
52 page += len;
53 len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
54 totalen += len;
55 page += len;
56 len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
57 totalen += len;
58 page += len;
59 for (i=0; i < NR_CPUS; i++) {
60 len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
61 totalen += len;
62 page += len;
63 }
64 len = sprintf(page, "Self-IPIs by CPU:\n");
65 totalen += len;
66 page += len;
67 for(i = 0; i < NR_CPUS; i++) {
68 len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
69 totalen += len;
70 page += len;
71 }
72 len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
73 atomic_read(&smtc_fpu_recoveries));
74 totalen += len;
75 page += len;
76
77 return totalen;
78}
79
80void init_smtc_stats(void)
81{
82 int i;
83
84 for (i=0; i<NR_CPUS; i++) {
85 smtc_cpu_stats[i].timerints = 0;
86 smtc_cpu_stats[i].selfipis = 0;
87 }
88
89 atomic_set(&smtc_fpu_recoveries, 0);
90
91 smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
92 proc_read_smtc, NULL);
93}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
new file mode 100644
index 000000000000..2e8e52c135e6
--- /dev/null
+++ b/arch/mips/kernel/smtc.c
@@ -0,0 +1,1322 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */
2
3#include <linux/kernel.h>
4#include <linux/sched.h>
5#include <linux/cpumask.h>
6#include <linux/interrupt.h>
7
8#include <asm/cpu.h>
9#include <asm/processor.h>
10#include <asm/atomic.h>
11#include <asm/system.h>
12#include <asm/hardirq.h>
13#include <asm/hazards.h>
14#include <asm/mmu_context.h>
15#include <asm/smp.h>
16#include <asm/mipsregs.h>
17#include <asm/cacheflush.h>
18#include <asm/time.h>
19#include <asm/addrspace.h>
20#include <asm/smtc.h>
21#include <asm/smtc_ipi.h>
22#include <asm/smtc_proc.h>
23
24/*
25 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
26 */
27
28/*
29 * MIPSCPU_INT_BASE is identically defined in both
30 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
31 * but as yet there's no properly organized include structure that
32 * will ensure that the right *int.h file will be included for a
33 * given platform build.
34 */
35
36#define MIPSCPU_INT_BASE 16
37
38#define MIPS_CPU_IPI_IRQ 1
39
40#define LOCK_MT_PRA() \
41 local_irq_save(flags); \
42 mtflags = dmt()
43
44#define UNLOCK_MT_PRA() \
45 emt(mtflags); \
46 local_irq_restore(flags)
47
48#define LOCK_CORE_PRA() \
49 local_irq_save(flags); \
50 mtflags = dvpe()
51
52#define UNLOCK_CORE_PRA() \
53 evpe(mtflags); \
54 local_irq_restore(flags)
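/*
 * Illustrative usage note (not part of this patch): the LOCK/UNLOCK
 * macros above are not hygienic - they expand to references to local
 * variables named "flags" and "mtflags", so every caller must declare
 * both before using them, e.g.:
 *
 *	unsigned long flags;
 *	int mtflags;
 *
 *	LOCK_MT_PRA();
 *	... manipulate per-VPE processor resources ...
 *	UNLOCK_MT_PRA();
 */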
55
56/*
57 * Data structures purely associated with SMTC parallelism
58 */
59
60
61/*
62 * Table for tracking ASIDs whose lifetime is prolonged.
63 */
64
65asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
66
67/*
68 * Clock interrupt "latch" buffers, per "CPU"
69 */
70
71unsigned int ipi_timer_latch[NR_CPUS];
72
73/*
 74 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
75 */
76
77#define IPIBUF_PER_CPU 4
78
79struct smtc_ipi_q IPIQ[NR_CPUS];
80struct smtc_ipi_q freeIPIq;
81
82
83/* Forward declarations */
84
85void ipi_decode(struct pt_regs *, struct smtc_ipi *);
86void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
87void setup_cross_vpe_interrupts(void);
88void init_smtc_stats(void);
89
90/* Global SMTC Status */
91
92unsigned int smtc_status = 0;
93
94/* Boot command line configuration overrides */
95
96static int vpelimit = 0;
97static int tclimit = 0;
98static int ipibuffers = 0;
99static int nostlb = 0;
100static int asidmask = 0;
101unsigned long smtc_asid_mask = 0xff;
102
103static int __init maxvpes(char *str)
104{
105 get_option(&str, &vpelimit);
106 return 1;
107}
108
109static int __init maxtcs(char *str)
110{
111 get_option(&str, &tclimit);
112 return 1;
113}
114
115static int __init ipibufs(char *str)
116{
117 get_option(&str, &ipibuffers);
118 return 1;
119}
120
121static int __init stlb_disable(char *s)
122{
123 nostlb = 1;
124 return 1;
125}
126
127static int __init asidmask_set(char *str)
128{
129 get_option(&str, &asidmask);
130 switch(asidmask) {
131 case 0x1:
132 case 0x3:
133 case 0x7:
134 case 0xf:
135 case 0x1f:
136 case 0x3f:
137 case 0x7f:
138 case 0xff:
139 smtc_asid_mask = (unsigned long)asidmask;
140 break;
141 default:
142 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
143 }
144 return 1;
145}
146
147__setup("maxvpes=", maxvpes);
148__setup("maxtcs=", maxtcs);
149__setup("ipibufs=", ipibufs);
150__setup("nostlb", stlb_disable);
151__setup("asidmask=", asidmask_set);
152
153/* Enable additional debug checks before going into CPU idle loop */
154#define SMTC_IDLE_HOOK_DEBUG
155
156#ifdef SMTC_IDLE_HOOK_DEBUG
157
158static int hang_trig = 0;
159
160static int __init hangtrig_enable(char *s)
161{
162 hang_trig = 1;
163 return 1;
164}
165
166
167__setup("hangtrig", hangtrig_enable);
168
169#define DEFAULT_BLOCKED_IPI_LIMIT 32
170
171static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
172
173static int __init tintq(char *str)
174{
175 get_option(&str, &timerq_limit);
176 return 1;
177}
178
179__setup("tintq=", tintq);
180
181int imstuckcount[2][8];
182/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
183int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
184int tcnoprog[NR_CPUS];
185static atomic_t idle_hook_initialized = {0};
186static int clock_hang_reported[NR_CPUS];
187
188#endif /* SMTC_IDLE_HOOK_DEBUG */
189
 190/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
191
192void __init sanitize_tlb_entries(void)
193{
194 printk("Deprecated sanitize_tlb_entries() invoked\n");
195}
196
197
198/*
199 * Configure shared TLB - VPC configuration bit must be set by caller
200 */
201
202void smtc_configure_tlb(void)
203{
204 int i,tlbsiz,vpes;
205 unsigned long mvpconf0;
206 unsigned long config1val;
207
208 /* Set up ASID preservation table */
209 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
210 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
211 smtc_live_asid[vpes][i] = 0;
212 }
213 }
214 mvpconf0 = read_c0_mvpconf0();
215
216 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
217 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
218 /* If we have multiple VPEs, try to share the TLB */
219 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
220 /*
221 * If TLB sizing is programmable, shared TLB
222 * size is the total available complement.
223 * Otherwise, we have to take the sum of all
224 * static VPE TLB entries.
225 */
226 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
227 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
228 /*
229 * If there's more than one VPE, there had better
230 * be more than one TC, because we need one to bind
231 * to each VPE in turn to be able to read
232 * its configuration state!
233 */
234 settc(1);
235 /* Stop the TC from doing anything foolish */
236 write_tc_c0_tchalt(TCHALT_H);
237 mips_ihb();
238 /* No need to un-Halt - that happens later anyway */
239 for (i=0; i < vpes; i++) {
240 write_tc_c0_tcbind(i);
241 /*
242 * To be 100% sure we're really getting the right
243 * information, we exit the configuration state
244 * and do an IHB after each rebinding.
245 */
246 write_c0_mvpcontrol(
247 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
248 mips_ihb();
249 /*
250 * Only count if the MMU Type indicated is TLB
251 */
252 if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
253 config1val = read_vpe_c0_config1();
254 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
255 }
256
257 /* Put core back in configuration state */
258 write_c0_mvpcontrol(
259 read_c0_mvpcontrol() | MVPCONTROL_VPC );
260 mips_ihb();
261 }
262 }
263 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
264
265 /*
266 * Setup kernel data structures to use software total,
267 * rather than read the per-VPE Config1 value. The values
268 * for "CPU 0" gets copied to all the other CPUs as part
269 * of their initialization in smtc_cpu_setup().
270 */
271
272 tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
273 cpu_data[0].tlbsize = tlbsiz;
274 smtc_status |= SMTC_TLB_SHARED;
275
276 printk("TLB of %d entry pairs shared by %d VPEs\n",
277 tlbsiz, vpes);
278 } else {
279 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
280 }
281 }
282}
283
284
285/*
286 * Incrementally build the CPU map out of constituent MIPS MT cores,
 287 * using the specified available VPEs and TCs. Platform code needs
288 * to ensure that each MIPS MT core invokes this routine on reset,
289 * one at a time(!).
290 *
291 * This version of the build_cpu_map and prepare_cpus routines assumes
292 * that *all* TCs of a MIPS MT core will be used for Linux, and that
293 * they will be spread across *all* available VPEs (to minimise the
294 * loss of efficiency due to exception service serialization).
295 * An improved version would pick up configuration information and
296 * possibly leave some TCs/VPEs as "slave" processors.
297 *
298 * Use c0_MVPConf0 to find out how many TCs are available, setting up
299 * phys_cpu_present_map and the logical/physical mappings.
300 */
301
302int __init mipsmt_build_cpu_map(int start_cpu_slot)
303{
304 int i, ntcs;
305
306 /*
307 * The CPU map isn't actually used for anything at this point,
308 * so it's not clear what else we should do apart from set
309 * everything up so that "logical" = "physical".
310 */
311 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
312 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
313 cpu_set(i, phys_cpu_present_map);
314 __cpu_number_map[i] = i;
315 __cpu_logical_map[i] = i;
316 }
317 /* Initialize map of CPUs with FPUs */
318 cpus_clear(mt_fpu_cpumask);
319
 320 /* One of those TCs is the one booting, and not a secondary... */
321 printk("%i available secondary CPU TC(s)\n", i - 1);
322
323 return i;
324}
325
326/*
327 * Common setup before any secondaries are started
 328 * Make sure all CPUs are in a sensible state before we boot any of the
329 * secondaries.
330 *
331 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
332 * as possible across the available VPEs.
333 */
334
335static void smtc_tc_setup(int vpe, int tc, int cpu)
336{
337 settc(tc);
338 write_tc_c0_tchalt(TCHALT_H);
339 mips_ihb();
340 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
341 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
342 | TCSTATUS_A);
343 write_tc_c0_tccontext(0);
344 /* Bind tc to vpe */
345 write_tc_c0_tcbind(vpe);
346 /* In general, all TCs should have the same cpu_data indications */
347 memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
348 /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
349 if (cpu_data[0].cputype == CPU_34K)
350 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
351 cpu_data[cpu].vpe_id = vpe;
352 cpu_data[cpu].tc_id = tc;
353}
354
355
356void mipsmt_prepare_cpus(void)
357{
358 int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
359 unsigned long flags;
360 unsigned long val;
361 int nipi;
362 struct smtc_ipi *pipi;
363
364 /* disable interrupts so we can disable MT */
365 local_irq_save(flags);
366 /* disable MT so we can configure */
367 dvpe();
368 dmt();
369
370 freeIPIq.lock = SPIN_LOCK_UNLOCKED;
371
372 /*
373 * We probably don't have as many VPEs as we do SMP "CPUs",
374 * but it's possible - and in any case we'll never use more!
375 */
376 for (i=0; i<NR_CPUS; i++) {
377 IPIQ[i].head = IPIQ[i].tail = NULL;
378 IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
379 IPIQ[i].depth = 0;
380 ipi_timer_latch[i] = 0;
381 }
382
383 /* cpu_data index starts at zero */
384 cpu = 0;
385 cpu_data[cpu].vpe_id = 0;
386 cpu_data[cpu].tc_id = 0;
387 cpu++;
388
389 /* Report on boot-time options */
390 mips_mt_set_cpuoptions ();
391 if (vpelimit > 0)
392 printk("Limit of %d VPEs set\n", vpelimit);
393 if (tclimit > 0)
394 printk("Limit of %d TCs set\n", tclimit);
395 if (nostlb) {
396 printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
397 }
398 if (asidmask)
399 printk("ASID mask value override to 0x%x\n", asidmask);
400
401 /* Temporary */
402#ifdef SMTC_IDLE_HOOK_DEBUG
403 if (hang_trig)
404 printk("Logic Analyser Trigger on suspected TC hang\n");
405#endif /* SMTC_IDLE_HOOK_DEBUG */
406
407 /* Put MVPE's into 'configuration state' */
408 write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
409
410 val = read_c0_mvpconf0();
411 nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
412 if (vpelimit > 0 && nvpe > vpelimit)
413 nvpe = vpelimit;
414 ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
415 if (ntc > NR_CPUS)
416 ntc = NR_CPUS;
417 if (tclimit > 0 && ntc > tclimit)
418 ntc = tclimit;
419 tcpervpe = ntc / nvpe;
420 slop = ntc % nvpe; /* Residual TCs, < NVPE */
421
422 /* Set up shared TLB */
423 smtc_configure_tlb();
424
425 for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
426 /*
427 * Set the MVP bits.
428 */
429 settc(tc);
430 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
431 if (vpe != 0)
432 printk(", ");
433 printk("VPE %d: TC", vpe);
434 for (i = 0; i < tcpervpe; i++) {
435 /*
436 * TC 0 is bound to VPE 0 at reset,
437 * and is presumably executing this
438 * code. Leave it alone!
439 */
440 if (tc != 0) {
441 smtc_tc_setup(vpe,tc, cpu);
442 cpu++;
443 }
444 printk(" %d", tc);
445 tc++;
446 }
447 if (slop) {
448 if (tc != 0) {
449 smtc_tc_setup(vpe,tc, cpu);
450 cpu++;
451 }
452 printk(" %d", tc);
453 tc++;
454 slop--;
455 }
456 if (vpe != 0) {
457 /*
458 * Clear any stale software interrupts from VPE's Cause
459 */
460 write_vpe_c0_cause(0);
461
462 /*
463 * Clear ERL/EXL of VPEs other than 0
464 * and set restricted interrupt enable/mask.
465 */
466 write_vpe_c0_status((read_vpe_c0_status()
467 & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
468 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
469 | ST0_IE));
470 /*
471 * set config to be the same as vpe0,
472 * particularly kseg0 coherency alg
473 */
474 write_vpe_c0_config(read_c0_config());
475 /* Clear any pending timer interrupt */
476 write_vpe_c0_compare(0);
477 /* Propagate Config7 */
478 write_vpe_c0_config7(read_c0_config7());
479 }
480 /* enable multi-threading within VPE */
481 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
482 /* enable the VPE */
483 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
484 }
485
486 /*
487 * Pull any physically present but unused TCs out of circulation.
488 */
489 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
490 cpu_clear(tc, phys_cpu_present_map);
491 cpu_clear(tc, cpu_present_map);
492 tc++;
493 }
494
495 /* release config state */
496 write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
497
498 printk("\n");
499
500 /* Set up coprocessor affinity CPU mask(s) */
501
502 for (tc = 0; tc < ntc; tc++) {
503 if(cpu_data[tc].options & MIPS_CPU_FPU)
504 cpu_set(tc, mt_fpu_cpumask);
505 }
506
507 /* set up ipi interrupts... */
508
509 /* If we have multiple VPEs running, set up the cross-VPE interrupt */
510
511 if (nvpe > 1)
512 setup_cross_vpe_interrupts();
513
514 /* Set up queue of free IPI "messages". */
515 nipi = NR_CPUS * IPIBUF_PER_CPU;
516 if (ipibuffers > 0)
517 nipi = ipibuffers;
518
519 pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
520 if (pipi == NULL)
521 panic("kmalloc of IPI message buffers failed\n");
522 else
523 printk("IPI buffer pool of %d buffers\n", nipi);
524 for (i = 0; i < nipi; i++) {
525 smtc_ipi_nq(&freeIPIq, pipi);
526 pipi++;
527 }
528
529 /* Arm multithreading and enable other VPEs - but all TCs are Halted */
530 emt(EMT_ENABLE);
531 evpe(EVPE_ENABLE);
532 local_irq_restore(flags);
533 /* Initialize SMTC /proc statistics/diagnostics */
534 init_smtc_stats();
535}
536
537
538/*
539 * Setup the PC, SP, and GP of a secondary processor and start it
540 * running!
541 * smp_bootstrap is the place to resume from
542 * __KSTK_TOS(idle) is apparently the stack pointer
543 * (unsigned long)idle->thread_info the gp
544 *
545 */
546void smtc_boot_secondary(int cpu, struct task_struct *idle)
547{
548 extern u32 kernelsp[NR_CPUS];
549 long flags;
550 int mtflags;
551
552 LOCK_MT_PRA();
553 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
554 dvpe();
555 }
556 settc(cpu_data[cpu].tc_id);
557
558 /* pc */
559 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
560
561 /* stack pointer */
562 kernelsp[cpu] = __KSTK_TOS(idle);
563 write_tc_gpr_sp(__KSTK_TOS(idle));
564
565 /* global pointer */
566 write_tc_gpr_gp((unsigned long)idle->thread_info);
567
568 smtc_status |= SMTC_MTC_ACTIVE;
569 write_tc_c0_tchalt(0);
570 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
571 evpe(EVPE_ENABLE);
572 }
573 UNLOCK_MT_PRA();
574}
575
576void smtc_init_secondary(void)
577{
578 /*
579 * Start timer on secondary VPEs if necessary.
580 * mips_timer_setup should already have been invoked by init/main
581 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
 582 * SMTC init code assigns TCs consecutively and in ascending order
 583 * across available VPEs.
584 */
585 if(((read_c0_tcbind() & TCBIND_CURTC) != 0)
586 && ((read_c0_tcbind() & TCBIND_CURVPE)
587 != cpu_data[smp_processor_id() - 1].vpe_id)){
588 write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
589 }
590
591 local_irq_enable();
592}
593
594void smtc_smp_finish(void)
595{
596 printk("TC %d going on-line as CPU %d\n",
597 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
598}
599
600void smtc_cpus_done(void)
601{
602}
603
604/*
605 * Support for SMTC-optimized driver IRQ registration
606 */
607
608/*
609 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
610 * in do_IRQ. These are passed in setup_irq_smtc() and stored
611 * in this table.
612 */
613
614int setup_irq_smtc(unsigned int irq, struct irqaction * new,
615 unsigned long hwmask)
616{
617 irq_hwmask[irq] = hwmask;
618
619 return setup_irq(irq, new);
620}
621
622/*
623 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
624 * Within a VPE one TC can interrupt another by different approaches.
625 * The easiest to get right would probably be to make all TCs except
626 * the target IXMT and set a software interrupt, but an IXMT-based
627 * scheme requires that a handler must run before a new IPI could
628 * be sent, which would break the "broadcast" loops in MIPS MT.
629 * A more gonzo approach within a VPE is to halt the TC, extract
630 * its Restart, Status, and a couple of GPRs, and program the Restart
631 * address to emulate an interrupt.
632 *
633 * Within a VPE, one can be confident that the target TC isn't in
634 * a critical EXL state when halted, since the write to the Halt
635 * register could not have issued on the writing thread if the
636 * halting thread had EXL set. So k0 and k1 of the target TC
637 * can be used by the injection code. Across VPEs, one can't
638 * be certain that the target TC isn't in a critical exception
639 * state. So we try a two-step process of sending a software
640 * interrupt to the target VPE, which either handles the event
641 * itself (if it was the target) or injects the event within
642 * the VPE.
643 */
644
645void smtc_ipi_qdump(void)
646{
647 int i;
648
649 for (i = 0; i < NR_CPUS ;i++) {
650 printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
651 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
652 IPIQ[i].depth);
653 }
654}
655
656/*
657 * The standard atomic.h primitives don't quite do what we want
658 * here: We need an atomic add-and-return-previous-value (which
659 * could be done with atomic_add_return and a decrement) and an
660 * atomic set/zero-and-return-previous-value (which can't really
661 * be done with the atomic.h primitives). And since this is
662 * MIPS MT, we can assume that we have LL/SC.
663 */
664static __inline__ int atomic_postincrement(unsigned int *pv)
665{
666 unsigned long result;
667
668 unsigned long temp;
669
670 __asm__ __volatile__(
671 "1: ll %0, %2 \n"
672 " addu %1, %0, 1 \n"
673 " sc %1, %2 \n"
674 " beqz %1, 1b \n"
675 " sync \n"
676 : "=&r" (result), "=&r" (temp), "=m" (*pv)
677 : "m" (*pv)
678 : "memory");
679
680 return result;
681}
682
683/* No longer used in IPI dispatch, but retained for future recycling */
684
685static __inline__ int atomic_postclear(unsigned int *pv)
686{
687 unsigned long result;
688
689 unsigned long temp;
690
691 __asm__ __volatile__(
692 "1: ll %0, %2 \n"
693 " or %1, $0, $0 \n"
694 " sc %1, %2 \n"
695 " beqz %1, 1b \n"
696 " sync \n"
697 : "=&r" (result), "=&r" (temp), "=m" (*pv)
698 : "m" (*pv)
699 : "memory");
700
701 return result;
702}
703
704
705void smtc_send_ipi(int cpu, int type, unsigned int action)
706{
707 int tcstatus;
708 struct smtc_ipi *pipi;
709 long flags;
710 int mtflags;
711
712 if (cpu == smp_processor_id()) {
713 printk("Cannot Send IPI to self!\n");
714 return;
715 }
716 /* Set up a descriptor, to be delivered either promptly or queued */
717 pipi = smtc_ipi_dq(&freeIPIq);
718 if (pipi == NULL) {
719 bust_spinlocks(1);
720 mips_mt_regdump(dvpe());
721 panic("IPI Msg. Buffers Depleted\n");
722 }
723 pipi->type = type;
724 pipi->arg = (void *)action;
725 pipi->dest = cpu;
726 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
727		/* If not on same VPE, enqueue and send cross-VPE interrupt */
728 smtc_ipi_nq(&IPIQ[cpu], pipi);
729 LOCK_CORE_PRA();
730 settc(cpu_data[cpu].tc_id);
731 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
732 UNLOCK_CORE_PRA();
733 } else {
734 /*
735 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
736 * since ASID shootdown on the other VPE may
737 * collide with this operation.
738 */
739 LOCK_CORE_PRA();
740 settc(cpu_data[cpu].tc_id);
741 /* Halt the targeted TC */
742 write_tc_c0_tchalt(TCHALT_H);
743 mips_ihb();
744
745 /*
746 * Inspect TCStatus - if IXMT is set, we have to queue
747 * a message. Otherwise, we set up the "interrupt"
748 * of the other TC
749 */
750 tcstatus = read_tc_c0_tcstatus();
751
752 if ((tcstatus & TCSTATUS_IXMT) != 0) {
753 /*
754 * Spin-waiting here can deadlock,
755 * so we queue the message for the target TC.
756 */
757 write_tc_c0_tchalt(0);
758 UNLOCK_CORE_PRA();
759 /* Try to reduce redundant timer interrupt messages */
760			if (type == SMTC_CLOCK_TICK) {
761				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
762 smtc_ipi_nq(&freeIPIq, pipi);
763 return;
764 }
765 }
766 smtc_ipi_nq(&IPIQ[cpu], pipi);
767 } else {
768 post_direct_ipi(cpu, pipi);
769 write_tc_c0_tchalt(0);
770 UNLOCK_CORE_PRA();
771 }
772 }
773}
774
775/*
776 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
777 */
778void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
779{
780 struct pt_regs *kstack;
781 unsigned long tcstatus;
782 unsigned long tcrestart;
783 extern u32 kernelsp[NR_CPUS];
784 extern void __smtc_ipi_vector(void);
785
786 /* Extract Status, EPC from halted TC */
787 tcstatus = read_tc_c0_tcstatus();
788 tcrestart = read_tc_c0_tcrestart();
789	/* If TCRestart points into kseg at a WAIT instruction, advance the PC past it */
790 if ((tcrestart & 0x80000000)
791 && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
792 tcrestart += 4;
793 }
794 /*
795 * Save on TC's future kernel stack
796 *
797 * CU bit of Status is indicator that TC was
798 * already running on a kernel stack...
799 */
800	if (tcstatus & ST0_CU0) {
801 /* Note that this "- 1" is pointer arithmetic */
802 kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
803 } else {
804 kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
805 }
806
807 kstack->cp0_epc = (long)tcrestart;
808 /* Save TCStatus */
809 kstack->cp0_tcstatus = tcstatus;
810	/* Pass token of operation to be performed in the kernel stack pad area */
811 kstack->pad0[4] = (unsigned long)pipi;
812 /* Pass address of function to be called likewise */
813 kstack->pad0[5] = (unsigned long)&ipi_decode;
814 /* Set interrupt exempt and kernel mode */
815 tcstatus |= TCSTATUS_IXMT;
816 tcstatus &= ~TCSTATUS_TKSU;
817 write_tc_c0_tcstatus(tcstatus);
818 ehb();
819 /* Set TC Restart address to be SMTC IPI vector */
820 write_tc_c0_tcrestart(__smtc_ipi_vector);
821}
822
823void ipi_resched_interrupt(struct pt_regs *regs)
824{
825 /* Return from interrupt should be enough to cause scheduler check */
826}
827
828
829void ipi_call_interrupt(struct pt_regs *regs)
830{
831 /* Invoke generic function invocation code in smp.c */
832 smp_call_function_interrupt();
833}
834
835void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
836{
837 void *arg_copy = pipi->arg;
838 int type_copy = pipi->type;
839 int dest_copy = pipi->dest;
840
841 smtc_ipi_nq(&freeIPIq, pipi);
842 switch (type_copy) {
843 case SMTC_CLOCK_TICK:
844 /* Invoke Clock "Interrupt" */
845 ipi_timer_latch[dest_copy] = 0;
846#ifdef SMTC_IDLE_HOOK_DEBUG
847 clock_hang_reported[dest_copy] = 0;
848#endif /* SMTC_IDLE_HOOK_DEBUG */
849 local_timer_interrupt(0, NULL, regs);
850 break;
851 case LINUX_SMP_IPI:
852 switch ((int)arg_copy) {
853 case SMP_RESCHEDULE_YOURSELF:
854 ipi_resched_interrupt(regs);
855 break;
856 case SMP_CALL_FUNCTION:
857 ipi_call_interrupt(regs);
858 break;
859 default:
860 printk("Impossible SMTC IPI Argument 0x%x\n",
861 (int)arg_copy);
862 break;
863 }
864 break;
865 default:
866 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
867 break;
868 }
869}
870
871void deferred_smtc_ipi(struct pt_regs *regs)
872{
873 struct smtc_ipi *pipi;
874 unsigned long flags;
875/* DEBUG */
876 int q = smp_processor_id();
877
878 /*
879 * Test is not atomic, but much faster than a dequeue,
880 * and the vast majority of invocations will have a null queue.
881 */
882	if (IPIQ[q].head != NULL) {
883		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
884 /* ipi_decode() should be called with interrupts off */
885 local_irq_save(flags);
886 ipi_decode(regs, pipi);
887 local_irq_restore(flags);
888 }
889 }
890}
891
892/*
893 * Send clock tick to all TCs except the one executing the function
894 */
895
896void smtc_timer_broadcast(int vpe)
897{
898 int cpu;
899 int myTC = cpu_data[smp_processor_id()].tc_id;
900 int myVPE = cpu_data[smp_processor_id()].vpe_id;
901
902 smtc_cpu_stats[smp_processor_id()].timerints++;
903
904 for_each_online_cpu(cpu) {
905 if (cpu_data[cpu].vpe_id == myVPE &&
906 cpu_data[cpu].tc_id != myTC)
907 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
908 }
909}
910
911/*
912 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
913 * set via cross-VPE MTTR manipulation of the Cause register. It would be
914 * in some regards preferable to have external logic for "doorbell" hardware
915 * interrupts.
916 */
917
918static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
919
920static irqreturn_t ipi_interrupt(int irq, void *dev_idm, struct pt_regs *regs)
921{
922 int my_vpe = cpu_data[smp_processor_id()].vpe_id;
923 int my_tc = cpu_data[smp_processor_id()].tc_id;
924 int cpu;
925 struct smtc_ipi *pipi;
926 unsigned long tcstatus;
927 int sent;
928 long flags;
929 unsigned int mtflags;
930 unsigned int vpflags;
931
932 /*
933 * So long as cross-VPE interrupts are done via
934 * MFTR/MTTR read-modify-writes of Cause, we need
935 * to stop other VPEs whenever the local VPE does
936 * anything similar.
937 */
938 local_irq_save(flags);
939 vpflags = dvpe();
940 clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
941 set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
942 irq_enable_hazard();
943 evpe(vpflags);
944 local_irq_restore(flags);
945
946 /*
947 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
948 * queued for TCs on this VPE other than the current one.
949 * Return-from-interrupt should cause us to drain the queue
950 * for the current TC, so we ought not to have to do it explicitly here.
951 */
952
953 for_each_online_cpu(cpu) {
954 if (cpu_data[cpu].vpe_id != my_vpe)
955 continue;
956
957 pipi = smtc_ipi_dq(&IPIQ[cpu]);
958 if (pipi != NULL) {
959 if (cpu_data[cpu].tc_id != my_tc) {
960 sent = 0;
961 LOCK_MT_PRA();
962 settc(cpu_data[cpu].tc_id);
963 write_tc_c0_tchalt(TCHALT_H);
964 mips_ihb();
965 tcstatus = read_tc_c0_tcstatus();
966 if ((tcstatus & TCSTATUS_IXMT) == 0) {
967 post_direct_ipi(cpu, pipi);
968 sent = 1;
969 }
970 write_tc_c0_tchalt(0);
971 UNLOCK_MT_PRA();
972 if (!sent) {
973 smtc_ipi_req(&IPIQ[cpu], pipi);
974 }
975 } else {
976 /*
977 * ipi_decode() should be called
978 * with interrupts off
979 */
980 local_irq_save(flags);
981 ipi_decode(regs, pipi);
982 local_irq_restore(flags);
983 }
984 }
985 }
986
987 return IRQ_HANDLED;
988}
989
990static void ipi_irq_dispatch(struct pt_regs *regs)
991{
992 do_IRQ(cpu_ipi_irq, regs);
993}
994
995static struct irqaction irq_ipi;
996
997void setup_cross_vpe_interrupts(void)
998{
999 if (!cpu_has_vint)
1000		panic("SMTC Kernel requires Vectored Interrupt support");
1001
1002 set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1003
1004 irq_ipi.handler = ipi_interrupt;
1005 irq_ipi.flags = SA_INTERRUPT;
1006 irq_ipi.name = "SMTC_IPI";
1007
1008 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1009
1010 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
1011}
1012
1013/*
1014 * SMTC-specific hacks invoked from elsewhere in the kernel.
1015 */
1016
1017void smtc_idle_loop_hook(void)
1018{
1019#ifdef SMTC_IDLE_HOOK_DEBUG
1020 int im;
1021 int flags;
1022 int mtflags;
1023 int bit;
1024 int vpe;
1025 int tc;
1026 int hook_ntcs;
1027 /*
1028 * printk within DMT-protected regions can deadlock,
1029 * so buffer diagnostic messages for later output.
1030 */
1031 char *pdb_msg;
1032 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1033
1034 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1035 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1036 int mvpconf0;
1037 /* Tedious stuff to just do once */
1038 mvpconf0 = read_c0_mvpconf0();
1039 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1040 if (hook_ntcs > NR_CPUS)
1041 hook_ntcs = NR_CPUS;
1042 for (tc = 0; tc < hook_ntcs; tc++) {
1043 tcnoprog[tc] = 0;
1044 clock_hang_reported[tc] = 0;
1045 }
1046 for (vpe = 0; vpe < 2; vpe++)
1047 for (im = 0; im < 8; im++)
1048 imstuckcount[vpe][im] = 0;
1049 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1050 atomic_set(&idle_hook_initialized, 1000);
1051 } else {
1052 /* Someone else is initializing in parallel - let 'em finish */
1053 while (atomic_read(&idle_hook_initialized) < 1000)
1054 ;
1055 }
1056 }
1057
1058 /* Have we stupidly left IXMT set somewhere? */
1059 if (read_c0_tcstatus() & 0x400) {
1060 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1061 ehb();
1062 printk("Dangling IXMT in cpu_idle()\n");
1063 }
1064
1065 /* Have we stupidly left an IM bit turned off? */
1066#define IM_LIMIT 2000
1067 local_irq_save(flags);
1068 mtflags = dmt();
1069 pdb_msg = &id_ho_db_msg[0];
1070 im = read_c0_status();
1071 vpe = cpu_data[smp_processor_id()].vpe_id;
1072 for (bit = 0; bit < 8; bit++) {
1073 /*
1074 * In current prototype, I/O interrupts
1075 * are masked for VPE > 0
1076 */
1077 if (vpemask[vpe][bit]) {
1078 if (!(im & (0x100 << bit)))
1079 imstuckcount[vpe][bit]++;
1080 else
1081 imstuckcount[vpe][bit] = 0;
1082 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1083 set_c0_status(0x100 << bit);
1084 ehb();
1085 imstuckcount[vpe][bit] = 0;
1086 pdb_msg += sprintf(pdb_msg,
1087 "Dangling IM %d fixed for VPE %d\n", bit,
1088 vpe);
1089 }
1090 }
1091 }
1092
1093 /*
1094 * Now that we limit outstanding timer IPIs, check for hung TC
1095 */
1096 for (tc = 0; tc < NR_CPUS; tc++) {
1097 /* Don't check ourself - we'll dequeue IPIs just below */
1098 if ((tc != smp_processor_id()) &&
1099 ipi_timer_latch[tc] > timerq_limit) {
1100 if (clock_hang_reported[tc] == 0) {
1101 pdb_msg += sprintf(pdb_msg,
1102 "TC %d looks hung with timer latch at %d\n",
1103 tc, ipi_timer_latch[tc]);
1104 clock_hang_reported[tc]++;
1105 }
1106 }
1107 }
1108 emt(mtflags);
1109 local_irq_restore(flags);
1110 if (pdb_msg != &id_ho_db_msg[0])
1111 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1112#endif /* SMTC_IDLE_HOOK_DEBUG */
1113 /*
1114 * To the extent that we've ever turned interrupts off,
1115 * we may have accumulated deferred IPIs. This is subtle.
1116 * If we use the smtc_ipi_qdepth() macro, we'll get an
1117 * exact number - but we'll also disable interrupts
1118 * and create a window of failure where a new IPI gets
1119 * queued after we test the depth but before we re-enable
1120 * interrupts. So long as IXMT never gets set, however,
1121 * we should be OK: If we pick up something and dispatch
1122 * it here, that's great. If we see nothing, but concurrent
1123 * with this operation, another TC sends us an IPI, IXMT
1124 * is clear, and we'll handle it as a real pseudo-interrupt
1125 * and not a pseudo-pseudo interrupt.
1126 */
1127 if (IPIQ[smp_processor_id()].depth > 0) {
1128 struct smtc_ipi *pipi;
1129 extern void self_ipi(struct smtc_ipi *);
1130
1131 if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
1132 self_ipi(pipi);
1133 smtc_cpu_stats[smp_processor_id()].selfipis++;
1134 }
1135 }
1136}
1137
1138void smtc_soft_dump(void)
1139{
1140 int i;
1141
1142 printk("Counter Interrupts taken per CPU (TC)\n");
1143 for (i=0; i < NR_CPUS; i++) {
1144 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1145 }
1146 printk("Self-IPI invocations:\n");
1147 for (i=0; i < NR_CPUS; i++) {
1148 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1149 }
1150 smtc_ipi_qdump();
1151 printk("Timer IPI Backlogs:\n");
1152 for (i=0; i < NR_CPUS; i++) {
1153 printk("%d: %d\n", i, ipi_timer_latch[i]);
1154 }
1155 printk("%d Recoveries of \"stolen\" FPU\n",
1156 atomic_read(&smtc_fpu_recoveries));
1157}
1158
1159
1160/*
1161 * TLB management routines special to SMTC
1162 */
1163
1164void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1165{
1166 unsigned long flags, mtflags, tcstat, prevhalt, asid;
1167 int tlb, i;
1168
1169 /*
1170 * It would be nice to be able to use a spinlock here,
1171 * but this is invoked from within TLB flush routines
1172 * that protect themselves with DVPE, so if a lock is
1173 * held by another TC, it'll never be freed.
1174 *
1175 * DVPE/DMT must not be done with interrupts enabled,
1176	 * so even though most callers will already have disabled
1177 * them, let's be really careful...
1178 */
1179
1180 local_irq_save(flags);
1181 if (smtc_status & SMTC_TLB_SHARED) {
1182 mtflags = dvpe();
1183 tlb = 0;
1184 } else {
1185 mtflags = dmt();
1186 tlb = cpu_data[cpu].vpe_id;
1187 }
1188 asid = asid_cache(cpu);
1189
1190 do {
1191		if (!((asid += ASID_INC) & ASID_MASK)) {
1192 if (cpu_has_vtag_icache)
1193 flush_icache_all();
1194			/* Traverse all online CPUs (hack requires contiguous range) */
1195 for (i = 0; i < num_online_cpus(); i++) {
1196 /*
1197				 * We don't need to worry about our own CPU, nor about
1198				 * CPUs which don't share our TLB.
1199 */
1200 if ((i != smp_processor_id()) &&
1201 ((smtc_status & SMTC_TLB_SHARED) ||
1202 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1203 settc(cpu_data[i].tc_id);
1204 prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1205 if (!prevhalt) {
1206 write_tc_c0_tchalt(TCHALT_H);
1207 mips_ihb();
1208 }
1209 tcstat = read_tc_c0_tcstatus();
1210 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1211 if (!prevhalt)
1212 write_tc_c0_tchalt(0);
1213 }
1214 }
1215 if (!asid) /* fix version if needed */
1216 asid = ASID_FIRST_VERSION;
1217 local_flush_tlb_all(); /* start new asid cycle */
1218 }
1219 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1220
1221 /*
1222 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1223 */
1224 for (i = 0; i < num_online_cpus(); i++) {
1225 if ((smtc_status & SMTC_TLB_SHARED) ||
1226 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1227 cpu_context(i, mm) = asid_cache(i) = asid;
1228 }
1229
1230 if (smtc_status & SMTC_TLB_SHARED)
1231 evpe(mtflags);
1232 else
1233 emt(mtflags);
1234 local_irq_restore(flags);
1235}
1236
1237/*
1238 * Invoked from macros defined in mmu_context.h
1239 * which must already have disabled interrupts
1240 * and done a DVPE or DMT as appropriate.
1241 */
1242
1243void smtc_flush_tlb_asid(unsigned long asid)
1244{
1245 int entry;
1246 unsigned long ehi;
1247
1248 entry = read_c0_wired();
1249
1250 /* Traverse all non-wired entries */
1251 while (entry < current_cpu_data.tlbsize) {
1252 write_c0_index(entry);
1253 ehb();
1254 tlb_read();
1255 ehb();
1256 ehi = read_c0_entryhi();
1257		if ((ehi & ASID_MASK) == asid) {
1258 /*
1259			 * Invalidate only entries with the specified ASID,
1260			 * making sure all entries differ.
1261 */
1262 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1263 write_c0_entrylo0(0);
1264 write_c0_entrylo1(0);
1265 mtc0_tlbw_hazard();
1266 tlb_write_indexed();
1267 }
1268 entry++;
1269 }
1270 write_c0_index(PARKED_INDEX);
1271 tlbw_use_hazard();
1272}
1273
1274/*
1275 * Support for single-threading cache flush operations.
1276 */
1277
1278int halt_state_save[NR_CPUS];
1279
1280/*
1281 * To really, really be sure that nothing is being done
1282 * by other TCs, halt them all. This code assumes that
1283 * a DVPE has already been done, so while their Halted
1284 * state is theoretically architecturally unstable, in
1285 * practice, it's not going to change while we're looking
1286 * at it.
1287 */
1288
1289void smtc_cflush_lockdown(void)
1290{
1291 int cpu;
1292
1293 for_each_online_cpu(cpu) {
1294 if (cpu != smp_processor_id()) {
1295 settc(cpu_data[cpu].tc_id);
1296 halt_state_save[cpu] = read_tc_c0_tchalt();
1297 write_tc_c0_tchalt(TCHALT_H);
1298 }
1299 }
1300 mips_ihb();
1301}
1302
1303/* It would be cheating to change the cpu_online states during a flush! */
1304
1305void smtc_cflush_release(void)
1306{
1307 int cpu;
1308
1309 /*
1310 * Start with a hazard barrier to ensure
1311 * that all CACHE ops have played through.
1312 */
1313 mips_ihb();
1314
1315 for_each_online_cpu(cpu) {
1316 if (cpu != smp_processor_id()) {
1317 settc(cpu_data[cpu].tc_id);
1318 write_tc_c0_tchalt(halt_state_save[cpu]);
1319 }
1320 }
1321 mips_ihb();
1322}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 5e51a2d8f3f0..13ff4da598cd 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -116,8 +116,7 @@ static void c0_timer_ack(void)
116 write_c0_compare(expirelo); 116 write_c0_compare(expirelo);
117 117
118 /* Check to see if we have missed any timer interrupts. */ 118 /* Check to see if we have missed any timer interrupts. */
119 count = read_c0_count(); 119 while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
120 if ((count - expirelo) < 0x7fffffff) {
121 /* missed_timer_count++; */ 120 /* missed_timer_count++; */
122 expirelo = count + cycles_per_jiffy; 121 expirelo = count + cycles_per_jiffy;
123 write_c0_compare(expirelo); 122 write_c0_compare(expirelo);
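
The hunk above turns the one-shot recovery into a loop: if Count has already passed the freshly programmed Compare value, Compare is re-armed until it is ahead again, so a late acknowledgement cannot silently lose timer interrupts. A self-contained sketch of that recovery loop (the file-scope expirelo and cycles_per_jiffy of time.c are replaced by illustrative stand-ins here):

	#include <asm/mipsregs.h>

	/* Illustrative stand-ins for the file-scope state used in time.c. */
	static unsigned int expirelo;
	static unsigned int cycles_per_jiffy;

	static void timer_ack_recovery_sketch(void)
	{
		unsigned int count;

		/*
		 * Re-arm Compare until it is ahead of Count again; each pass
		 * schedules the next tick one jiffy beyond "now".
		 */
		while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
			expirelo = count + cycles_per_jiffy;
			write_c0_compare(expirelo);
		}
	}
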
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bed0eb6cf55d..4901f0a37fca 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -42,6 +42,7 @@
42#include <asm/watch.h> 42#include <asm/watch.h>
43#include <asm/types.h> 43#include <asm/types.h>
44 44
45extern asmlinkage void handle_int(void);
45extern asmlinkage void handle_tlbm(void); 46extern asmlinkage void handle_tlbm(void);
46extern asmlinkage void handle_tlbl(void); 47extern asmlinkage void handle_tlbl(void);
47extern asmlinkage void handle_tlbs(void); 48extern asmlinkage void handle_tlbs(void);
@@ -279,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
279NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) 280NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
280{ 281{
281 static int die_counter; 282 static int die_counter;
283#ifdef CONFIG_MIPS_MT_SMTC
284 unsigned long dvpret = dvpe();
285#endif /* CONFIG_MIPS_MT_SMTC */
282 286
283 console_verbose(); 287 console_verbose();
284 spin_lock_irq(&die_lock); 288 spin_lock_irq(&die_lock);
289 bust_spinlocks(1);
290#ifdef CONFIG_MIPS_MT_SMTC
291 mips_mt_regdump(dvpret);
292#endif /* CONFIG_MIPS_MT_SMTC */
285 printk("%s[#%d]:\n", str, ++die_counter); 293 printk("%s[#%d]:\n", str, ++die_counter);
286 show_registers(regs); 294 show_registers(regs);
287 spin_unlock_irq(&die_lock); 295 spin_unlock_irq(&die_lock);
@@ -750,12 +758,43 @@ asmlinkage void do_cpu(struct pt_regs *regs)
750 &current->thread.fpu.soft); 758 &current->thread.fpu.soft);
751 if (sig) 759 if (sig)
752 force_sig(sig, current); 760 force_sig(sig, current);
761#ifdef CONFIG_MIPS_MT_FPAFF
762 else {
763 /*
764 * MIPS MT processors may have fewer FPU contexts
765 * than CPU threads. If we've emulated more than
766 * some threshold number of instructions, force
767 * migration to a "CPU" that has FP support.
768 */
769			if (mt_fpemul_threshold > 0
770 && ((current->thread.emulated_fp++
771 > mt_fpemul_threshold))) {
772 /*
773 * If there's no FPU present, or if the
774 * application has already restricted
775 * the allowed set to exclude any CPUs
776 * with FPUs, we'll skip the procedure.
777 */
778 if (cpus_intersects(current->cpus_allowed,
779 mt_fpu_cpumask)) {
780 cpumask_t tmask;
781
782 cpus_and(tmask,
783 current->thread.user_cpus_allowed,
784 mt_fpu_cpumask);
785 set_cpus_allowed(current, tmask);
786 current->thread.mflags |= MF_FPUBOUND;
787 }
788 }
789 }
790#endif /* CONFIG_MIPS_MT_FPAFF */
753 } 791 }
754 792
755 return; 793 return;
756 794
757 case 2: 795 case 2:
758 case 3: 796 case 3:
797 die_if_kernel("do_cpu invoked from kernel context!", regs);
759 break; 798 break;
760 } 799 }
761 800
@@ -793,6 +832,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
793 832
794asmlinkage void do_mt(struct pt_regs *regs) 833asmlinkage void do_mt(struct pt_regs *regs)
795{ 834{
835 int subcode;
836
837 die_if_kernel("MIPS MT Thread exception in kernel", regs);
838
839 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
840 >> VPECONTROL_EXCPT_SHIFT;
841 switch (subcode) {
842 case 0:
843 printk(KERN_ERR "Thread Underflow\n");
844 break;
845 case 1:
846 printk(KERN_ERR "Thread Overflow\n");
847 break;
848 case 2:
849 printk(KERN_ERR "Invalid YIELD Qualifier\n");
850 break;
851 case 3:
852 printk(KERN_ERR "Gating Storage Exception\n");
853 break;
854 case 4:
855 printk(KERN_ERR "YIELD Scheduler Exception\n");
856 break;
857 case 5:
858 printk(KERN_ERR "Gating Storage Schedulier Exception\n");
859 break;
860 default:
861 printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
862 subcode);
863 break;
864 }
796 die_if_kernel("MIPS MT Thread exception in kernel", regs); 865 die_if_kernel("MIPS MT Thread exception in kernel", regs);
797 866
798 force_sig(SIGILL, current); 867 force_sig(SIGILL, current);
@@ -928,7 +997,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
928 */ 997 */
929void nmi_exception_handler(struct pt_regs *regs) 998void nmi_exception_handler(struct pt_regs *regs)
930{ 999{
1000#ifdef CONFIG_MIPS_MT_SMTC
1001 unsigned long dvpret = dvpe();
1002 bust_spinlocks(1);
1003 printk("NMI taken!!!!\n");
1004 mips_mt_regdump(dvpret);
1005#else
1006 bust_spinlocks(1);
931 printk("NMI taken!!!!\n"); 1007 printk("NMI taken!!!!\n");
1008#endif /* CONFIG_MIPS_MT_SMTC */
932 die("NMI", regs); 1009 die("NMI", regs);
933 while(1) ; 1010 while(1) ;
934} 1011}
@@ -960,27 +1037,29 @@ void *set_except_vector(int n, void *addr)
960 1037
961#ifdef CONFIG_CPU_MIPSR2 1038#ifdef CONFIG_CPU_MIPSR2
962/* 1039/*
963 * Shadow register allocation 1040 * MIPSR2 shadow register set allocation
964 * FIXME: SMP... 1041 * FIXME: SMP...
965 */ 1042 */
966 1043
967/* MIPSR2 shadow register sets */ 1044static struct shadow_registers {
968struct shadow_registers { 1045 /*
969 spinlock_t sr_lock; /* */ 1046 * Number of shadow register sets supported
970 int sr_supported; /* Number of shadow register sets supported */ 1047 */
971 int sr_allocated; /* Bitmap of allocated shadow registers */ 1048 unsigned long sr_supported;
1049 /*
1050 * Bitmap of allocated shadow registers
1051 */
1052 unsigned long sr_allocated;
972} shadow_registers; 1053} shadow_registers;
973 1054
974void mips_srs_init(void) 1055static void mips_srs_init(void)
975{ 1056{
976#ifdef CONFIG_CPU_MIPSR2_SRS 1057#ifdef CONFIG_CPU_MIPSR2_SRS
977 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1058 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
978 printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported); 1059 printk(KERN_INFO "%d MIPSR2 register sets available\n",
979#else 1060 shadow_registers.sr_supported);
980 shadow_registers.sr_supported = 1;
981#endif 1061#endif
982 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ 1062 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
983 spin_lock_init(&shadow_registers.sr_lock);
984} 1063}
985 1064
986int mips_srs_max(void) 1065int mips_srs_max(void)
@@ -988,38 +1067,30 @@ int mips_srs_max(void)
988 return shadow_registers.sr_supported; 1067 return shadow_registers.sr_supported;
989} 1068}
990 1069
991int mips_srs_alloc (void) 1070int mips_srs_alloc(void)
992{ 1071{
993 struct shadow_registers *sr = &shadow_registers; 1072 struct shadow_registers *sr = &shadow_registers;
994 unsigned long flags;
995 int set; 1073 int set;
996 1074
997 spin_lock_irqsave(&sr->sr_lock, flags); 1075again:
1076 set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
1077 if (set >= sr->sr_supported)
1078 return -1;
998 1079
999 for (set = 0; set < sr->sr_supported; set++) { 1080 if (test_and_set_bit(set, &sr->sr_allocated))
1000 if ((sr->sr_allocated & (1 << set)) == 0) { 1081 goto again;
1001 sr->sr_allocated |= 1 << set;
1002 spin_unlock_irqrestore(&sr->sr_lock, flags);
1003 return set;
1004 }
1005 }
1006 1082
1007 /* None available */ 1083 return set;
1008 spin_unlock_irqrestore(&sr->sr_lock, flags);
1009 return -1;
1010} 1084}
1011 1085
1012void mips_srs_free (int set) 1086void mips_srs_free(int set)
1013{ 1087{
1014 struct shadow_registers *sr = &shadow_registers; 1088 struct shadow_registers *sr = &shadow_registers;
1015 unsigned long flags;
1016 1089
1017 spin_lock_irqsave(&sr->sr_lock, flags); 1090 clear_bit(set, &sr->sr_allocated);
1018 sr->sr_allocated &= ~(1 << set);
1019 spin_unlock_irqrestore(&sr->sr_lock, flags);
1020} 1091}
1021 1092
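
The mips_srs_alloc() rewrite above trades the spinlock for lock-free bitmap operations: find_first_zero_bit() proposes a free set and test_and_set_bit() claims it atomically, retrying if another CPU raced in between; freeing is a single clear_bit(). The same pattern in a self-contained sketch (hypothetical names, not the kernel's shadow_registers state):

	#include <linux/bitops.h>

	/* Lock-free allocation of one slot from a small bitmap. */
	static int alloc_slot(unsigned long *bitmap, unsigned int nslots)
	{
		unsigned int slot;

	again:
		slot = find_first_zero_bit(bitmap, nslots);
		if (slot >= nslots)
			return -1;		/* nothing free */

		/* Another CPU may have taken it since we looked; claim atomically. */
		if (test_and_set_bit(slot, bitmap))
			goto again;

		return slot;
	}

	static void free_slot(unsigned long *bitmap, unsigned int slot)
	{
		clear_bit(slot, bitmap);
	}
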
1022void *set_vi_srs_handler (int n, void *addr, int srs) 1093static void *set_vi_srs_handler(int n, void *addr, int srs)
1023{ 1094{
1024 unsigned long handler; 1095 unsigned long handler;
1025 unsigned long old_handler = vi_handlers[n]; 1096 unsigned long old_handler = vi_handlers[n];
@@ -1032,8 +1103,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
1032 if (addr == NULL) { 1103 if (addr == NULL) {
1033 handler = (unsigned long) do_default_vi; 1104 handler = (unsigned long) do_default_vi;
1034 srs = 0; 1105 srs = 0;
1035 } 1106 } else
1036 else
1037 handler = (unsigned long) addr; 1107 handler = (unsigned long) addr;
1038 vi_handlers[n] = (unsigned long) addr; 1108 vi_handlers[n] = (unsigned long) addr;
1039 1109
@@ -1045,8 +1115,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
1045 if (cpu_has_veic) { 1115 if (cpu_has_veic) {
1046 if (board_bind_eic_interrupt) 1116 if (board_bind_eic_interrupt)
1047 board_bind_eic_interrupt (n, srs); 1117 board_bind_eic_interrupt (n, srs);
1048 } 1118 } else if (cpu_has_vint) {
1049 else if (cpu_has_vint) {
1050 /* SRSMap is only defined if shadow sets are implemented */ 1119 /* SRSMap is only defined if shadow sets are implemented */
1051 if (mips_srs_max() > 1) 1120 if (mips_srs_max() > 1)
1052 change_c0_srsmap (0xf << n*4, srs << n*4); 1121 change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1060,6 +1129,15 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
1060 1129
1061 extern char except_vec_vi, except_vec_vi_lui; 1130 extern char except_vec_vi, except_vec_vi_lui;
1062 extern char except_vec_vi_ori, except_vec_vi_end; 1131 extern char except_vec_vi_ori, except_vec_vi_end;
1132#ifdef CONFIG_MIPS_MT_SMTC
1133 /*
1134 * We need to provide the SMTC vectored interrupt handler
1135 * not only with the address of the handler, but with the
1136 * Status.IM bit to be masked before going there.
1137 */
1138 extern char except_vec_vi_mori;
1139 const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
1140#endif /* CONFIG_MIPS_MT_SMTC */
1063 const int handler_len = &except_vec_vi_end - &except_vec_vi; 1141 const int handler_len = &except_vec_vi_end - &except_vec_vi;
1064 const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1142 const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
1065 const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1143 const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1073,6 +1151,12 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
1073 } 1151 }
1074 1152
1075 memcpy (b, &except_vec_vi, handler_len); 1153 memcpy (b, &except_vec_vi, handler_len);
1154#ifdef CONFIG_MIPS_MT_SMTC
1155 if (n > 7)
1156 printk("Vector index %d exceeds SMTC maximum\n", n);
1157 w = (u32 *)(b + mori_offset);
1158 *w = (*w & 0xffff0000) | (0x100 << n);
1159#endif /* CONFIG_MIPS_MT_SMTC */
1076 w = (u32 *)(b + lui_offset); 1160 w = (u32 *)(b + lui_offset);
1077 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1161 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1078 w = (u32 *)(b + ori_offset); 1162 w = (u32 *)(b + ori_offset);
@@ -1095,9 +1179,9 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
1095 return (void *)old_handler; 1179 return (void *)old_handler;
1096} 1180}
1097 1181
1098void *set_vi_handler (int n, void *addr) 1182void *set_vi_handler(int n, void *addr)
1099{ 1183{
1100 return set_vi_srs_handler (n, addr, 0); 1184 return set_vi_srs_handler(n, addr, 0);
1101} 1185}
1102#endif 1186#endif
1103 1187
@@ -1113,8 +1197,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
1113extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); 1197extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
1114extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); 1198extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
1115 1199
1200#ifdef CONFIG_SMP
1201static int smp_save_fp_context(struct sigcontext *sc)
1202{
1203 return cpu_has_fpu
1204 ? _save_fp_context(sc)
1205 : fpu_emulator_save_context(sc);
1206}
1207
1208static int smp_restore_fp_context(struct sigcontext *sc)
1209{
1210 return cpu_has_fpu
1211 ? _restore_fp_context(sc)
1212 : fpu_emulator_restore_context(sc);
1213}
1214#endif
1215
1116static inline void signal_init(void) 1216static inline void signal_init(void)
1117{ 1217{
1218#ifdef CONFIG_SMP
1219 /* For now just do the cpu_has_fpu check when the functions are invoked */
1220 save_fp_context = smp_save_fp_context;
1221 restore_fp_context = smp_restore_fp_context;
1222#else
1118 if (cpu_has_fpu) { 1223 if (cpu_has_fpu) {
1119 save_fp_context = _save_fp_context; 1224 save_fp_context = _save_fp_context;
1120 restore_fp_context = _restore_fp_context; 1225 restore_fp_context = _restore_fp_context;
@@ -1122,6 +1227,7 @@ static inline void signal_init(void)
1122 save_fp_context = fpu_emulator_save_context; 1227 save_fp_context = fpu_emulator_save_context;
1123 restore_fp_context = fpu_emulator_restore_context; 1228 restore_fp_context = fpu_emulator_restore_context;
1124 } 1229 }
1230#endif
1125} 1231}
1126 1232
1127#ifdef CONFIG_MIPS32_COMPAT 1233#ifdef CONFIG_MIPS32_COMPAT
@@ -1158,6 +1264,20 @@ void __init per_cpu_trap_init(void)
1158{ 1264{
1159 unsigned int cpu = smp_processor_id(); 1265 unsigned int cpu = smp_processor_id();
1160 unsigned int status_set = ST0_CU0; 1266 unsigned int status_set = ST0_CU0;
1267#ifdef CONFIG_MIPS_MT_SMTC
1268 int secondaryTC = 0;
1269 int bootTC = (cpu == 0);
1270
1271 /*
1272 * Only do per_cpu_trap_init() for first TC of Each VPE.
1273 * Note that this hack assumes that the SMTC init code
1274 * assigns TCs consecutively and in ascending order.
1275 */
1276
1277 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1278 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1279 secondaryTC = 1;
1280#endif /* CONFIG_MIPS_MT_SMTC */
1161 1281
1162 /* 1282 /*
1163 * Disable coprocessors and select 32-bit or 64-bit addressing 1283 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1180,6 +1300,10 @@ void __init per_cpu_trap_init(void)
1180 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ 1300 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
1181#endif 1301#endif
1182 1302
1303#ifdef CONFIG_MIPS_MT_SMTC
1304 if (!secondaryTC) {
1305#endif /* CONFIG_MIPS_MT_SMTC */
1306
1183 /* 1307 /*
1184 * Interrupt handling. 1308 * Interrupt handling.
1185 */ 1309 */
@@ -1196,6 +1320,9 @@ void __init per_cpu_trap_init(void)
1196 } else 1320 } else
1197 set_c0_cause(CAUSEF_IV); 1321 set_c0_cause(CAUSEF_IV);
1198 } 1322 }
1323#ifdef CONFIG_MIPS_MT_SMTC
1324 }
1325#endif /* CONFIG_MIPS_MT_SMTC */
1199 1326
1200 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1327 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1201 TLBMISS_HANDLER_SETUP(); 1328 TLBMISS_HANDLER_SETUP();
@@ -1205,8 +1332,14 @@ void __init per_cpu_trap_init(void)
1205 BUG_ON(current->mm); 1332 BUG_ON(current->mm);
1206 enter_lazy_tlb(&init_mm, current); 1333 enter_lazy_tlb(&init_mm, current);
1207 1334
1208 cpu_cache_init(); 1335#ifdef CONFIG_MIPS_MT_SMTC
1209 tlb_init(); 1336 if (bootTC) {
1337#endif /* CONFIG_MIPS_MT_SMTC */
1338 cpu_cache_init();
1339 tlb_init();
1340#ifdef CONFIG_MIPS_MT_SMTC
1341 }
1342#endif /* CONFIG_MIPS_MT_SMTC */
1210} 1343}
1211 1344
1212/* Install CPU exception handler */ 1345/* Install CPU exception handler */
@@ -1278,7 +1411,7 @@ void __init trap_init(void)
1278 if (cpu_has_veic || cpu_has_vint) { 1411 if (cpu_has_veic || cpu_has_vint) {
1279 int nvec = cpu_has_veic ? 64 : 8; 1412 int nvec = cpu_has_veic ? 64 : 8;
1280 for (i = 0; i < nvec; i++) 1413 for (i = 0; i < nvec; i++)
1281 set_vi_handler (i, NULL); 1414 set_vi_handler(i, NULL);
1282 } 1415 }
1283 else if (cpu_has_divec) 1416 else if (cpu_has_divec)
1284 set_handler(0x200, &except_vec4, 0x8); 1417 set_handler(0x200, &except_vec4, 0x8);
@@ -1297,6 +1430,7 @@ void __init trap_init(void)
1297 if (board_be_init) 1430 if (board_be_init)
1298 board_be_init(); 1431 board_be_init();
1299 1432
1433 set_except_vector(0, handle_int);
1300 set_except_vector(1, handle_tlbm); 1434 set_except_vector(1, handle_tlbm);
1301 set_except_vector(2, handle_tlbl); 1435 set_except_vector(2, handle_tlbl);
1302 set_except_vector(3, handle_tlbs); 1436 set_except_vector(3, handle_tlbs);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 2ad0cedf29fe..14fa00e3cdfa 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -2,7 +2,7 @@
2#include <asm/asm-offsets.h> 2#include <asm/asm-offsets.h>
3#include <asm-generic/vmlinux.lds.h> 3#include <asm-generic/vmlinux.lds.h>
4 4
5#undef mips /* CPP really sucks for this job */ 5#undef mips
6#define mips mips 6#define mips mips
7OUTPUT_ARCH(mips) 7OUTPUT_ARCH(mips)
8ENTRY(kernel_entry) 8ENTRY(kernel_entry)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index ae83b755cf4a..85d7df7b18e1 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -13,7 +13,6 @@
13 * You should have received a copy of the GNU General Public License along 13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc., 14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
17 */ 16 */
18 17
19/* 18/*
@@ -27,11 +26,8 @@
27 * 26 *
28 * To load and run, simply cat a SP 'program file' to /dev/vpe1. 27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
29 * i.e cat spapp >/dev/vpe1. 28 * i.e cat spapp >/dev/vpe1.
30 *
31 * You'll need to have the following device files.
32 * mknod /dev/vpe0 c 63 0
33 * mknod /dev/vpe1 c 63 1
34 */ 29 */
30
35#include <linux/config.h> 31#include <linux/config.h>
36#include <linux/kernel.h> 32#include <linux/kernel.h>
37#include <linux/module.h> 33#include <linux/module.h>
@@ -55,6 +51,8 @@
55#include <asm/cpu.h> 51#include <asm/cpu.h>
56#include <asm/processor.h> 52#include <asm/processor.h>
57#include <asm/system.h> 53#include <asm/system.h>
54#include <asm/vpe.h>
55#include <asm/kspd.h>
58 56
59typedef void *vpe_handle; 57typedef void *vpe_handle;
60 58
@@ -68,6 +66,11 @@ typedef void *vpe_handle;
68static char module_name[] = "vpe"; 66static char module_name[] = "vpe";
69static int major; 67static int major;
70 68
69#ifdef CONFIG_MIPS_APSP_KSPD
70 static struct kspd_notifications kspd_events;
71static int kspd_events_reqd = 0;
72#endif
73
71/* grab the likely amount of memory we will need. */ 74/* grab the likely amount of memory we will need. */
72#ifdef CONFIG_MIPS_VPE_LOADER_TOM 75#ifdef CONFIG_MIPS_VPE_LOADER_TOM
73#define P_SIZE (2 * 1024 * 1024) 76#define P_SIZE (2 * 1024 * 1024)
@@ -76,7 +79,10 @@ static int major;
76#define P_SIZE (256 * 1024) 79#define P_SIZE (256 * 1024)
77#endif 80#endif
78 81
82extern unsigned long physical_memsize;
83
79#define MAX_VPES 16 84#define MAX_VPES 16
85#define VPE_PATH_MAX 256
80 86
81enum vpe_state { 87enum vpe_state {
82 VPE_STATE_UNUSED = 0, 88 VPE_STATE_UNUSED = 0,
@@ -102,6 +108,8 @@ struct vpe {
102 unsigned long len; 108 unsigned long len;
103 char *pbuffer; 109 char *pbuffer;
104 unsigned long plen; 110 unsigned long plen;
111 unsigned int uid, gid;
112 char cwd[VPE_PATH_MAX];
105 113
106 unsigned long __start; 114 unsigned long __start;
107 115
@@ -113,6 +121,9 @@ struct vpe {
113 121
114 /* shared symbol address */ 122 /* shared symbol address */
115 void *shared_ptr; 123 void *shared_ptr;
124
125 /* the list of who wants to know when something major happens */
126 struct list_head notify;
116}; 127};
117 128
118struct tc { 129struct tc {
@@ -138,7 +149,7 @@ struct vpecontrol_ {
138} vpecontrol; 149} vpecontrol;
139 150
140static void release_progmem(void *ptr); 151static void release_progmem(void *ptr);
141static void dump_vpe(struct vpe * v); 152/* static __attribute_used__ void dump_vpe(struct vpe * v); */
142extern void save_gp_address(unsigned int secbase, unsigned int rel); 153extern void save_gp_address(unsigned int secbase, unsigned int rel);
143 154
144/* get the vpe associated with this minor */ 155/* get the vpe associated with this minor */
@@ -146,12 +157,14 @@ struct vpe *get_vpe(int minor)
146{ 157{
147 struct vpe *v; 158 struct vpe *v;
148 159
160 if (!cpu_has_mipsmt)
161 return NULL;
162
149 list_for_each_entry(v, &vpecontrol.vpe_list, list) { 163 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
150 if (v->minor == minor) 164 if (v->minor == minor)
151 return v; 165 return v;
152 } 166 }
153 167
154 printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor);
155 return NULL; 168 return NULL;
156} 169}
157 170
@@ -165,8 +178,6 @@ struct tc *get_tc(int index)
165 return t; 178 return t;
166 } 179 }
167 180
168 printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index);
169
170 return NULL; 181 return NULL;
171} 182}
172 183
@@ -179,8 +190,6 @@ struct tc *get_tc_unused(void)
179 return t; 190 return t;
180 } 191 }
181 192
182 printk(KERN_DEBUG "VPE: All TC's are in use\n");
183
184 return NULL; 193 return NULL;
185} 194}
186 195
@@ -190,13 +199,13 @@ struct vpe *alloc_vpe(int minor)
190 struct vpe *v; 199 struct vpe *v;
191 200
192 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) { 201 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
193 printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
194 return NULL; 202 return NULL;
195 } 203 }
196 204
197 INIT_LIST_HEAD(&v->tc); 205 INIT_LIST_HEAD(&v->tc);
198 list_add_tail(&v->list, &vpecontrol.vpe_list); 206 list_add_tail(&v->list, &vpecontrol.vpe_list);
199 207
208 INIT_LIST_HEAD(&v->notify);
200 v->minor = minor; 209 v->minor = minor;
201 return v; 210 return v;
202} 211}
@@ -207,7 +216,6 @@ struct tc *alloc_tc(int index)
207 struct tc *t; 216 struct tc *t;
208 217
209 if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { 218 if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
210 printk(KERN_WARNING "VPE: alloc_tc no mem\n");
211 return NULL; 219 return NULL;
212 } 220 }
213 221
@@ -236,20 +244,16 @@ void dump_mtregs(void)
236 printk("config3 0x%lx MT %ld\n", val, 244 printk("config3 0x%lx MT %ld\n", val,
237 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); 245 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
238 246
239 val = read_c0_mvpconf0();
240 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
241 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
242 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
243
244 val = read_c0_mvpcontrol(); 247 val = read_c0_mvpcontrol();
245 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, 248 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
246 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, 249 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
247 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, 250 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
248 (val & MVPCONTROL_EVP)); 251 (val & MVPCONTROL_EVP));
249 252
250 val = read_c0_vpeconf0(); 253 val = read_c0_mvpconf0();
251 printk("VPEConf0 0x%lx MVP %ld\n", val, 254 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
252 (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); 255 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
256 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
253} 257}
254 258
255/* Find some VPE program space */ 259/* Find some VPE program space */
@@ -354,9 +358,9 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
354 } 358 }
355 359
356 if( (rel > 32768) || (rel < -32768) ) { 360 if( (rel > 32768) || (rel < -32768) ) {
357 printk(KERN_ERR 361 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
358 "apply_r_mips_gprel16: relative address out of range 0x%x %d\n", 362 "relative address 0x%x out of range of gp register\n",
359 rel, rel); 363 rel);
360 return -ENOEXEC; 364 return -ENOEXEC;
361 } 365 }
362 366
@@ -374,8 +378,8 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
374 rel -= 1; // and one instruction less due to the branch delay slot. 378 rel -= 1; // and one instruction less due to the branch delay slot.
375 379
376 if( (rel > 32768) || (rel < -32768) ) { 380 if( (rel > 32768) || (rel < -32768) ) {
377 printk(KERN_ERR 381 printk(KERN_DEBUG "VPE loader: "
378 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); 382 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
379 return -ENOEXEC; 383 return -ENOEXEC;
380 } 384 }
381 385
@@ -396,7 +400,8 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
396 Elf32_Addr v) 400 Elf32_Addr v)
397{ 401{
398 if (v % 4) { 402 if (v % 4) {
399 printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name); 403 printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
404 " unaligned relocation\n");
400 return -ENOEXEC; 405 return -ENOEXEC;
401 } 406 }
402 407
@@ -459,12 +464,13 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
459 /* 464 /*
460 * The value for the HI16 had best be the same. 465 * The value for the HI16 had best be the same.
461 */ 466 */
462 if (v != l->value) { 467 if (v != l->value) {
463 printk("%d != %d\n", v, l->value); 468 printk(KERN_DEBUG "VPE loader: "
464 goto out_danger; 469 "apply_r_mips_lo16/hi16: "
470 "inconsistent value information\n");
471 return -ENOEXEC;
465 } 472 }
466 473
467
468 /* 474 /*
469 * Do the HI16 relocation. Note that we actually don't 475 * Do the HI16 relocation. Note that we actually don't
470 * need to know anything about the LO16 itself, except 476 * need to know anything about the LO16 itself, except
@@ -500,11 +506,6 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
500 *location = insnlo; 506 *location = insnlo;
501 507
502 return 0; 508 return 0;
503
504out_danger:
505 printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
506
507 return -ENOEXEC;
508} 509}
509 510
510static int (*reloc_handlers[]) (struct module *me, uint32_t *location, 511static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
@@ -518,6 +519,15 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
518 [R_MIPS_PC16] = apply_r_mips_pc16 519 [R_MIPS_PC16] = apply_r_mips_pc16
519}; 520};
520 521
522static char *rstrs[] = {
523 [R_MIPS_NONE] = "MIPS_NONE",
524 [R_MIPS_32] = "MIPS_32",
525 [R_MIPS_26] = "MIPS_26",
526 [R_MIPS_HI16] = "MIPS_HI16",
527 [R_MIPS_LO16] = "MIPS_LO16",
528 [R_MIPS_GPREL16] = "MIPS_GPREL16",
529 [R_MIPS_PC16] = "MIPS_PC16"
530};
521 531
522int apply_relocations(Elf32_Shdr *sechdrs, 532int apply_relocations(Elf32_Shdr *sechdrs,
523 const char *strtab, 533 const char *strtab,
@@ -552,15 +562,13 @@ int apply_relocations(Elf32_Shdr *sechdrs,
552 562
553 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); 563 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
554 if( res ) { 564 if( res ) {
555 printk(KERN_DEBUG 565 char *r = rstrs[ELF32_R_TYPE(r_info)];
556 "relocation error 0x%x sym refer <%s> value 0x%x " 566 printk(KERN_WARNING "VPE loader: .text+0x%x "
557 "type 0x%x r_info 0x%x\n", 567 "relocation type %s for symbol \"%s\" failed\n",
558 (unsigned int)location, strtab + sym->st_name, v, 568 rel[i].r_offset, r ? r : "UNKNOWN",
559 r_info, ELF32_R_TYPE(r_info)); 569 strtab + sym->st_name);
560 }
561
562 if (res)
563 return res; 570 return res;
571 }
564 } 572 }
565 573
566 return 0; 574 return 0;
@@ -576,7 +584,7 @@ void save_gp_address(unsigned int secbase, unsigned int rel)
576 584
577 585
578/* Change all symbols so that sh_value encodes the pointer directly. */ 586/* Change all symbols so that sh_value encodes the pointer directly. */
579static int simplify_symbols(Elf_Shdr * sechdrs, 587static void simplify_symbols(Elf_Shdr * sechdrs,
580 unsigned int symindex, 588 unsigned int symindex,
581 const char *strtab, 589 const char *strtab,
582 const char *secstrings, 590 const char *secstrings,
@@ -585,18 +593,21 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
585 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; 593 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
586 unsigned long secbase, bssbase = 0; 594 unsigned long secbase, bssbase = 0;
587 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); 595 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
588 int ret = 0, size; 596 int size;
589 597
590 /* find the .bss section for COMMON symbols */ 598 /* find the .bss section for COMMON symbols */
591 for (i = 0; i < nsecs; i++) { 599 for (i = 0; i < nsecs; i++) {
592 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) 600 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
593 bssbase = sechdrs[i].sh_addr; 601 bssbase = sechdrs[i].sh_addr;
602 break;
603 }
594 } 604 }
595 605
596 for (i = 1; i < n; i++) { 606 for (i = 1; i < n; i++) {
597 switch (sym[i].st_shndx) { 607 switch (sym[i].st_shndx) {
598 case SHN_COMMON: 608 case SHN_COMMON:
599 /* Allocate space for the symbol in the .bss section. st_value is currently size. 609 /* Allocate space for the symbol in the .bss section.
610 st_value is currently size.
600 We want it to have the address of the symbol. */ 611 We want it to have the address of the symbol. */
601 612
602 size = sym[i].st_value; 613 size = sym[i].st_value;
@@ -614,11 +625,9 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
614 break; 625 break;
615 626
616 case SHN_MIPS_SCOMMON: 627 case SHN_MIPS_SCOMMON:
617 628 printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON"
618 printk(KERN_DEBUG 629 "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
619 "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", 630 sym[i].st_shndx);
620 strtab + sym[i].st_name, sym[i].st_shndx);
621
622 // .sbss section 631 // .sbss section
623 break; 632 break;
624 633
@@ -632,10 +641,7 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
632 sym[i].st_value += secbase; 641 sym[i].st_value += secbase;
633 break; 642 break;
634 } 643 }
635
636 } 644 }
637
638 return ret;
639} 645}
640 646
641#ifdef DEBUG_ELFLOADER 647#ifdef DEBUG_ELFLOADER
@@ -655,9 +661,26 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
655 661
656static void dump_tc(struct tc *t) 662static void dump_tc(struct tc *t)
657{ 663{
658 printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n", 664 unsigned long val;
659 t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt()); 665
660 printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart()); 666 settc(t->index);
667 printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
668 "TCStatus 0x%lx halt 0x%lx\n",
669 t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
670 read_tc_c0_tcstatus(), read_tc_c0_tchalt());
671
672 printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
673 printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
674
675 val = read_c0_vpeconf0();
676 printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
677 (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
678
679 printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
680 printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
681
682 printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
683 printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
661} 684}
662 685
663static void dump_tclist(void) 686static void dump_tclist(void)
@@ -672,96 +695,108 @@ static void dump_tclist(void)
672/* We are prepared so configure and start the VPE... */ 695/* We are prepared so configure and start the VPE... */
673int vpe_run(struct vpe * v) 696int vpe_run(struct vpe * v)
674{ 697{
675 unsigned long val; 698 struct vpe_notifications *n;
699 unsigned long val, dmt_flag;
676 struct tc *t; 700 struct tc *t;
677 701
678 /* check we are the Master VPE */ 702 /* check we are the Master VPE */
679 val = read_c0_vpeconf0(); 703 val = read_c0_vpeconf0();
680 if (!(val & VPECONF0_MVP)) { 704 if (!(val & VPECONF0_MVP)) {
681 printk(KERN_WARNING 705 printk(KERN_WARNING
682 "VPE: only Master VPE's are allowed to configure MT\n"); 706 "VPE loader: only Master VPE's are allowed to configure MT\n");
683 return -1; 707 return -1;
684 } 708 }
685 709
686 /* disable MT (using dvpe) */ 710 /* disable MT (using dvpe) */
687 dvpe(); 711 dvpe();
688 712
713 if (!list_empty(&v->tc)) {
714 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
715 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
716 t->index);
717 return -ENOEXEC;
718 }
719 } else {
720 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
721 v->minor);
722 return -ENOEXEC;
723 }
724
689 /* Put MVPE's into 'configuration state' */ 725 /* Put MVPE's into 'configuration state' */
690 set_c0_mvpcontrol(MVPCONTROL_VPC); 726 set_c0_mvpcontrol(MVPCONTROL_VPC);
691 727
692 if (!list_empty(&v->tc)) {
693 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
694 printk(KERN_WARNING "VPE: TC %d is already in use.\n",
695 t->index);
696 return -ENOEXEC;
697 }
698 } else {
699 printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n",
700 v->minor);
701 return -ENOEXEC;
702 }
703
704 settc(t->index); 728 settc(t->index);
705 729
706 val = read_vpe_c0_vpeconf0();
707
708 /* should check it is halted, and not activated */ 730 /* should check it is halted, and not activated */
709 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { 731 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
710 printk(KERN_WARNING "VPE: TC %d is already doing something!\n", 732 printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
711 t->index); 733 t->index);
712
713 dump_tclist(); 734 dump_tclist();
714 return -ENOEXEC; 735 return -ENOEXEC;
715 } 736 }
716 737
738 /*
739 * Disable multi-threaded execution whilst we activate, clear the
740 * halt bit and bound the tc to the other VPE...
741 */
742 dmt_flag = dmt();
743
717 /* Write the address we want it to start running from in the TCPC register. */ 744 /* Write the address we want it to start running from in the TCPC register. */
718 write_tc_c0_tcrestart((unsigned long)v->__start); 745 write_tc_c0_tcrestart((unsigned long)v->__start);
719
720 /* write the sivc_info address to tccontext */
721 write_tc_c0_tccontext((unsigned long)0); 746 write_tc_c0_tccontext((unsigned long)0);
722 747 /*
723 /* Set up the XTC bit in vpeconf0 to point at our tc */ 748 * Mark the TC as activated, not interrupt exempt and not dynamically
724 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT)); 749 * allocatable
725 750 */
726 /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */
727 val = read_tc_c0_tcstatus(); 751 val = read_tc_c0_tcstatus();
728 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; 752 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
729 write_tc_c0_tcstatus(val); 753 write_tc_c0_tcstatus(val);
730 754
731 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); 755 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
732 756
733 /* set up VPE1 */
734 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's
735 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE
736
737 /* 757 /*
738 * The sde-kit passes 'memsize' to __start in $a3, so set something 758 * The sde-kit passes 'memsize' to __start in $a3, so set something
739 * here... 759 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
740 * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and
741 * DFLT_HEAP_SIZE when you compile your program 760 * DFLT_HEAP_SIZE when you compile your program
742 */ 761 */
762 mttgpr(7, physical_memsize);
763
764
765 /* set up VPE1 */
766 /*
767 * bind the TC to VPE 1 as late as possible so we only have the final
768 * VPE registers to set up, and so an EJTAG probe can trigger on it
769 */
770 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
743 771
744 mttgpr(7, 0); 772 /* Set up the XTC bit in vpeconf0 to point at our tc */
773 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
774 | (t->index << VPECONF0_XTC_SHIFT));
745 775
746 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 776 /* enable this VPE */
747 write_vpe_c0_config(read_c0_config()); 777 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
748 778
749 /* clear out any left overs from a previous program */ 779 /* clear out any left overs from a previous program */
780 write_vpe_c0_status(0);
750 write_vpe_c0_cause(0); 781 write_vpe_c0_cause(0);
751 782
752 /* take system out of configuration state */ 783 /* take system out of configuration state */
753 clear_c0_mvpcontrol(MVPCONTROL_VPC); 784 clear_c0_mvpcontrol(MVPCONTROL_VPC);
754 785
755 /* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */ 786 /* now safe to re-enable multi-threading */
756 write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL)); 787 emt(dmt_flag);
757 788
758 /* set it running */ 789 /* set it running */
759 evpe(EVPE_ENABLE); 790 evpe(EVPE_ENABLE);
760 791
792 list_for_each_entry(n, &v->notify, list) {
793 n->start(v->minor);
794 }
795
761 return 0; 796 return 0;
762} 797}
763 798
764static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, 799static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
765 unsigned int symindex, const char *strtab, 800 unsigned int symindex, const char *strtab,
766 struct module *mod) 801 struct module *mod)
767{ 802{
@@ -778,26 +813,28 @@ static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
778 } 813 }
779 } 814 }
780 815
816 if ( (v->__start == 0) || (v->shared_ptr == NULL))
817 return -1;
818
781 return 0; 819 return 0;
782} 820}
783 821
784/* 822/*
785 * Allocates a VPE with some program code space(the load address), copies 823 * Allocates a VPE with some program code space(the load address), copies the
786 * the contents of the program (p)buffer performing relocatations/etc, 824 * contents of the program (p)buffer performing relocatations/etc, free's it
787 * free's it when finished. 825 * when finished.
788*/ 826 */
789int vpe_elfload(struct vpe * v) 827int vpe_elfload(struct vpe * v)
790{ 828{
791 Elf_Ehdr *hdr; 829 Elf_Ehdr *hdr;
792 Elf_Shdr *sechdrs; 830 Elf_Shdr *sechdrs;
793 long err = 0; 831 long err = 0;
794 char *secstrings, *strtab = NULL; 832 char *secstrings, *strtab = NULL;
795 unsigned int len, i, symindex = 0, strindex = 0; 833 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
796
797 struct module mod; // so we can re-use the relocations code 834 struct module mod; // so we can re-use the relocations code
798 835
799 memset(&mod, 0, sizeof(struct module)); 836 memset(&mod, 0, sizeof(struct module));
800 strcpy(mod.name, "VPE dummy prog module"); 837 strcpy(mod.name, "VPE loader");
801 838
802 hdr = (Elf_Ehdr *) v->pbuffer; 839 hdr = (Elf_Ehdr *) v->pbuffer;
803 len = v->plen; 840 len = v->plen;
@@ -805,16 +842,22 @@ int vpe_elfload(struct vpe * v)
805 /* Sanity checks against insmoding binaries or wrong arch, 842 /* Sanity checks against insmoding binaries or wrong arch,
806 weird elf version */ 843 weird elf version */
807 if (memcmp(hdr->e_ident, ELFMAG, 4) != 0 844 if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
808 || hdr->e_type != ET_REL || !elf_check_arch(hdr) 845 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
846 || !elf_check_arch(hdr)
809 || hdr->e_shentsize != sizeof(*sechdrs)) { 847 || hdr->e_shentsize != sizeof(*sechdrs)) {
810 printk(KERN_WARNING 848 printk(KERN_WARNING
811 "VPE program, wrong arch or weird elf version\n"); 849 "VPE loader: program wrong arch or weird elf version\n");
812 850
813 return -ENOEXEC; 851 return -ENOEXEC;
814 } 852 }
815 853
854 if (hdr->e_type == ET_REL)
855 relocate = 1;
856
816 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { 857 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
817 printk(KERN_ERR "VPE program length %u truncated\n", len); 858 printk(KERN_ERR "VPE loader: program length %u truncated\n",
859 len);
860
818 return -ENOEXEC; 861 return -ENOEXEC;
819 } 862 }
820 863
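The relaxed sanity check now admits fully linked executables (ET_EXEC) as well as relocatable objects (ET_REL), while still rejecting wrong-architecture, malformed or truncated images. A minimal userspace sketch of the same gatekeeping, assuming a 32-bit MIPS target and using an e_machine test as a stand-in for the kernel's elf_check_arch():

#include <elf.h>
#include <stddef.h>
#include <string.h>

/* Return 0 if "buf" (of "len" bytes) looks like a loadable MIPS ELF image,
 * mirroring the checks in the hunk above; -1 otherwise. */
static int check_vpe_image(const unsigned char *buf, size_t len)
{
        const Elf32_Ehdr *hdr = (const Elf32_Ehdr *)buf;

        if (len < sizeof(*hdr) || memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
                return -1;      /* not an ELF file at all */
        if (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
                return -1;      /* neither relocatable object nor executable */
        if (hdr->e_machine != EM_MIPS)
                return -1;      /* stand-in for elf_check_arch() */
        if (hdr->e_shentsize != sizeof(Elf32_Shdr))
                return -1;      /* unexpected section header entry size */
        if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf32_Shdr))
                return -1;      /* section header table truncated */

        return 0;
}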
@@ -826,82 +869,126 @@ int vpe_elfload(struct vpe * v)
826 /* And these should exist, but gcc whinges if we don't init them */ 869 /* And these should exist, but gcc whinges if we don't init them */
827 symindex = strindex = 0; 870 symindex = strindex = 0;
828 871
829 for (i = 1; i < hdr->e_shnum; i++) { 872 if (relocate) {
830 873 for (i = 1; i < hdr->e_shnum; i++) {
831 if (sechdrs[i].sh_type != SHT_NOBITS 874 if (sechdrs[i].sh_type != SHT_NOBITS
832 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { 875 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
833 printk(KERN_ERR "VPE program length %u truncated\n", 876 printk(KERN_ERR "VPE program length %u truncated\n",
834 len); 877 len);
835 return -ENOEXEC; 878 return -ENOEXEC;
836 } 879 }
837 880
838 /* Mark all sections sh_addr with their address in the 881 /* Mark all sections sh_addr with their address in the
839 temporary image. */ 882 temporary image. */
840 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; 883 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
841 884
842 /* Internal symbols and strings. */ 885 /* Internal symbols and strings. */
843 if (sechdrs[i].sh_type == SHT_SYMTAB) { 886 if (sechdrs[i].sh_type == SHT_SYMTAB) {
844 symindex = i; 887 symindex = i;
845 strindex = sechdrs[i].sh_link; 888 strindex = sechdrs[i].sh_link;
846 strtab = (char *)hdr + sechdrs[strindex].sh_offset; 889 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
890 }
847 } 891 }
892 layout_sections(&mod, hdr, sechdrs, secstrings);
848 } 893 }
849 894
850 layout_sections(&mod, hdr, sechdrs, secstrings);
851
852 v->load_addr = alloc_progmem(mod.core_size); 895 v->load_addr = alloc_progmem(mod.core_size);
853 memset(v->load_addr, 0, mod.core_size); 896 memset(v->load_addr, 0, mod.core_size);
854 897
855 printk("VPE elf_loader: loading to %p\n", v->load_addr); 898 printk("VPE loader: loading to %p\n", v->load_addr);
856 899
857 for (i = 0; i < hdr->e_shnum; i++) { 900 if (relocate) {
858 void *dest; 901 for (i = 0; i < hdr->e_shnum; i++) {
902 void *dest;
859 903
860 if (!(sechdrs[i].sh_flags & SHF_ALLOC)) 904 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
861 continue; 905 continue;
862 906
863 dest = v->load_addr + sechdrs[i].sh_entsize; 907 dest = v->load_addr + sechdrs[i].sh_entsize;
864 908
865 if (sechdrs[i].sh_type != SHT_NOBITS) 909 if (sechdrs[i].sh_type != SHT_NOBITS)
866 memcpy(dest, (void *)sechdrs[i].sh_addr, 910 memcpy(dest, (void *)sechdrs[i].sh_addr,
867 sechdrs[i].sh_size); 911 sechdrs[i].sh_size);
868 /* Update sh_addr to point to copy in image. */ 912 /* Update sh_addr to point to copy in image. */
869 sechdrs[i].sh_addr = (unsigned long)dest; 913 sechdrs[i].sh_addr = (unsigned long)dest;
870 }
871 914
872 /* Fix up syms, so that st_value is a pointer to location. */ 915 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
873 err = 916 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
874 simplify_symbols(sechdrs, symindex, strtab, secstrings, 917 }
875 hdr->e_shnum, &mod);
876 if (err < 0) {
877 printk(KERN_WARNING "VPE: unable to simplify symbols\n");
878 goto cleanup;
879 }
880 918
881 /* Now do relocations. */ 919 /* Fix up syms, so that st_value is a pointer to location. */
882 for (i = 1; i < hdr->e_shnum; i++) { 920 simplify_symbols(sechdrs, symindex, strtab, secstrings,
883 const char *strtab = (char *)sechdrs[strindex].sh_addr; 921 hdr->e_shnum, &mod);
884 unsigned int info = sechdrs[i].sh_info; 922
885 923 /* Now do relocations. */
886 /* Not a valid relocation section? */ 924 for (i = 1; i < hdr->e_shnum; i++) {
887 if (info >= hdr->e_shnum) 925 const char *strtab = (char *)sechdrs[strindex].sh_addr;
888 continue; 926 unsigned int info = sechdrs[i].sh_info;
889 927
890 /* Don't bother with non-allocated sections */ 928 /* Not a valid relocation section? */
891 if (!(sechdrs[info].sh_flags & SHF_ALLOC)) 929 if (info >= hdr->e_shnum)
892 continue; 930 continue;
893 931
894 if (sechdrs[i].sh_type == SHT_REL) 932 /* Don't bother with non-allocated sections */
895 err = 933 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
896 apply_relocations(sechdrs, strtab, symindex, i, &mod); 934 continue;
897 else if (sechdrs[i].sh_type == SHT_RELA) 935
898 err = apply_relocate_add(sechdrs, strtab, symindex, i, 936 if (sechdrs[i].sh_type == SHT_REL)
899 &mod); 937 err = apply_relocations(sechdrs, strtab, symindex, i,
900 if (err < 0) { 938 &mod);
901 printk(KERN_WARNING 939 else if (sechdrs[i].sh_type == SHT_RELA)
902 "vpe_elfload: error in relocations err %ld\n", 940 err = apply_relocate_add(sechdrs, strtab, symindex, i,
903 err); 941 &mod);
904 goto cleanup; 942 if (err < 0)
943 return err;
944
945 }
946 } else {
947 for (i = 0; i < hdr->e_shnum; i++) {
948
949 /* Internal symbols and strings. */
950 if (sechdrs[i].sh_type == SHT_SYMTAB) {
951 symindex = i;
952 strindex = sechdrs[i].sh_link;
953 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
954
955 /* mark the symtab's address for when we try to find the
956 magic symbols */
957 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
958 }
959
960 /* filter sections we don't want in the final image */
961 if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
962 (sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
963 printk( KERN_DEBUG " ignoring section, "
964 "name %s type %x address 0x%x \n",
965 secstrings + sechdrs[i].sh_name,
966 sechdrs[i].sh_type, sechdrs[i].sh_addr);
967 continue;
968 }
969
970 if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
971 printk( KERN_WARNING "VPE loader: "
972 "fully linked image has invalid section, "
973 "name %s type %x address 0x%x, before load "
974 "address of 0x%x\n",
975 secstrings + sechdrs[i].sh_name,
976 sechdrs[i].sh_type, sechdrs[i].sh_addr,
977 (unsigned int)v->load_addr);
978 return -ENOEXEC;
979 }
980
981 printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
982 "size 0x%x0 from x%p\n",
983 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
984 sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
985
986 if (sechdrs[i].sh_type != SHT_NOBITS)
987 memcpy((void *)sechdrs[i].sh_addr,
988 (char *)hdr + sechdrs[i].sh_offset,
989 sechdrs[i].sh_size);
990 else
991 memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
905 } 992 }
906 } 993 }
907 994
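Taken together, the two branches above give vpe_elfload() two load paths. Relocatable objects are handled much like kernel modules: sections are laid out into freshly allocated program memory, symbols are simplified and REL/RELA relocations applied. Fully linked images are already bound to their final addresses, so every SHF_ALLOC section is copied (or zeroed, for SHT_NOBITS) straight to its sh_addr once it has been checked against the load address. A hedged userspace-flavoured sketch of that second path, assuming a 32-bit ELF image in memory and that sh_addr is directly writable:

#include <elf.h>
#include <stdint.h>
#include <string.h>

/* Copy the allocatable sections of a fully linked 32-bit ELF image, held in
 * memory at "hdr", to their link-time addresses (sketch only). */
static void copy_linked_sections(const Elf32_Ehdr *hdr)
{
        const Elf32_Shdr *sechdrs =
                (const Elf32_Shdr *)((const char *)hdr + hdr->e_shoff);
        unsigned int i;

        for (i = 0; i < hdr->e_shnum; i++) {
                const Elf32_Shdr *s = &sechdrs[i];
                void *dest = (void *)(uintptr_t)s->sh_addr;

                if (!(s->sh_flags & SHF_ALLOC))
                        continue;       /* not part of the runtime image */

                if (s->sh_type != SHT_NOBITS)
                        memcpy(dest, (const char *)hdr + s->sh_offset,
                               s->sh_size);
                else
                        memset(dest, 0, s->sh_size);    /* .bss-style section */
        }
}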
@@ -910,71 +997,104 @@ int vpe_elfload(struct vpe * v)
910 (unsigned long)v->load_addr + v->len); 997 (unsigned long)v->load_addr + v->len);
911 998
912 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { 999 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
1000 if (v->__start == 0) {
1001 printk(KERN_WARNING "VPE loader: program does not contain "
1002 "a __start symbol\n");
1003 return -ENOEXEC;
1004 }
913 1005
914 printk(KERN_WARNING 1006 if (v->shared_ptr == NULL)
915 "VPE: program doesn't contain __start or vpe_shared symbols\n"); 1007 printk(KERN_WARNING "VPE loader: "
916 err = -ENOEXEC; 1008 "program does not contain vpe_shared symbol.\n"
1009 " Unable to use AMVP (AP/SP) facilities.\n");
917 } 1010 }
918 1011
919 printk(" elf loaded\n"); 1012 printk(" elf loaded\n");
920 1013 return 0;
921cleanup:
922 return err;
923} 1014}
924 1015
925static void dump_vpe(struct vpe * v) 1016__attribute_used__ void dump_vpe(struct vpe * v)
926{ 1017{
927 struct tc *t; 1018 struct tc *t;
928 1019
1020 settc(v->minor);
1021
929 printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); 1022 printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
930 printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); 1023 printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
931 1024
932 list_for_each_entry(t, &vpecontrol.tc_list, list) { 1025 list_for_each_entry(t, &vpecontrol.tc_list, list)
933 dump_tc(t); 1026 dump_tc(t);
934 }
935} 1027}
936 1028
937/* checks for VPE is unused and gets ready to load program */ 1029static void cleanup_tc(struct tc *tc)
1030{
1031 int tmp;
1032
1033 /* Put MVPE's into 'configuration state' */
1034 set_c0_mvpcontrol(MVPCONTROL_VPC);
1035
1036 settc(tc->index);
1037 tmp = read_tc_c0_tcstatus();
1038
1039 /* mark not allocated and not dynamically allocatable */
1040 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1041 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1042 write_tc_c0_tcstatus(tmp);
1043
1044 write_tc_c0_tchalt(TCHALT_H);
1045
1046 /* bind it to anything other than VPE1 */
1047 write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1048
1049 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1050}
1051
1052static int getcwd(char *buff, int size)
1053{
1054 mm_segment_t old_fs;
1055 int ret;
1056
1057 old_fs = get_fs();
1058 set_fs(KERNEL_DS);
1059
1060 ret = sys_getcwd(buff,size);
1061
1062 set_fs(old_fs);
1063
1064 return ret;
1065}
1066
1067/* checks VPE is unused and gets ready to load program */
938static int vpe_open(struct inode *inode, struct file *filp) 1068static int vpe_open(struct inode *inode, struct file *filp)
939{ 1069{
940 int minor; 1070 int minor, ret;
941 struct vpe *v; 1071 struct vpe *v;
1072 struct vpe_notifications *not;
942 1073
943 /* assume only 1 device at the mo. */ 1074 /* assume only 1 device at the mo. */
944 if ((minor = MINOR(inode->i_rdev)) != 1) { 1075 if ((minor = iminor(inode)) != 1) {
945 printk(KERN_WARNING "VPE: only vpe1 is supported\n"); 1076 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
946 return -ENODEV; 1077 return -ENODEV;
947 } 1078 }
948 1079
949 if ((v = get_vpe(minor)) == NULL) { 1080 if ((v = get_vpe(minor)) == NULL) {
950 printk(KERN_WARNING "VPE: unable to get vpe\n"); 1081 printk(KERN_WARNING "VPE loader: unable to get vpe\n");
951 return -ENODEV; 1082 return -ENODEV;
952 } 1083 }
953 1084
954 if (v->state != VPE_STATE_UNUSED) { 1085 if (v->state != VPE_STATE_UNUSED) {
955 unsigned long tmp;
956 struct tc *t;
957
958 printk(KERN_WARNING "VPE: device %d already in use\n", minor);
959
960 dvpe(); 1086 dvpe();
961 dump_vpe(v);
962
963 printk(KERN_WARNING "VPE: re-initialising %d\n", minor);
964
965 release_progmem(v->load_addr);
966 1087
967 t = get_tc(minor); 1088 printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
968 settc(minor);
969 tmp = read_tc_c0_tcstatus();
970 1089
971 /* mark not allocated and not dynamically allocatable */ 1090 dump_tc(get_tc(minor));
972 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
973 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
974 write_tc_c0_tcstatus(tmp);
975 1091
976 write_tc_c0_tchalt(TCHALT_H); 1092 list_for_each_entry(not, &v->notify, list) {
1093 not->stop(minor);
1094 }
977 1095
1096 release_progmem(v->load_addr);
1097 cleanup_tc(get_tc(minor));
978 } 1098 }
979 1099
980 // allocate it so when we get write ops we know it's expected. 1100 // allocate it so when we get write ops we know it's expected.
@@ -986,6 +1106,24 @@ static int vpe_open(struct inode *inode, struct file *filp)
986 v->load_addr = NULL; 1106 v->load_addr = NULL;
987 v->len = 0; 1107 v->len = 0;
988 1108
1109 v->uid = filp->f_uid;
1110 v->gid = filp->f_gid;
1111
1112#ifdef CONFIG_MIPS_APSP_KSPD
1113 /* get kspd to tell us when a syscall_exit happens */
1114 if (!kspd_events_reqd) {
1115 kspd_notify(&kspd_events);
1116 kspd_events_reqd++;
1117 }
1118#endif
1119
1120 v->cwd[0] = 0;
1121 ret = getcwd(v->cwd, VPE_PATH_MAX);
1122 if (ret < 0)
1123 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1124
1125 v->shared_ptr = NULL;
1126 v->__start = 0;
989 return 0; 1127 return 0;
990} 1128}
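Userspace drives the loader purely through the character device: open() (re)claims the VPE, which now means notifying any registered users via their stop handlers and parking the TC with cleanup_tc() before recording the opener's uid/gid and current directory; write() appends to the program buffer; and, as the vpe_release() hunk below shows, close() is what triggers vpe_elfload() and vpe_run(). A minimal usage sketch, assuming a device node named /dev/vpe1 for the single supported minor (the node name is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Feed an SP program to the second VPE; load and start happen on close(). */
int main(int argc, char **argv)
{
        char buf[4096];
        ssize_t n;
        int elf, vpe;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <sp-program.elf>\n", argv[0]);
                return 1;
        }

        elf = open(argv[1], O_RDONLY);
        vpe = open("/dev/vpe1", O_WRONLY);      /* assumed node for minor 1 */
        if (elf < 0 || vpe < 0) {
                perror("open");
                return 1;
        }

        while ((n = read(elf, buf, sizeof(buf))) > 0)
                if (write(vpe, buf, n) != n) {
                        perror("write");
                        return 1;
                }

        close(elf);
        close(vpe);     /* vpe_release(): the ELF is loaded and run here */
        return 0;
}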
991 1129
@@ -995,7 +1133,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
995 struct vpe *v; 1133 struct vpe *v;
996 Elf_Ehdr *hdr; 1134 Elf_Ehdr *hdr;
997 1135
998 minor = MINOR(inode->i_rdev); 1136 minor = iminor(inode);
999 if ((v = get_vpe(minor)) == NULL) 1137 if ((v = get_vpe(minor)) == NULL)
1000 return -ENODEV; 1138 return -ENODEV;
1001 1139
@@ -1006,14 +1144,22 @@ static int vpe_release(struct inode *inode, struct file *filp)
1006 if (vpe_elfload(v) >= 0) 1144 if (vpe_elfload(v) >= 0)
1007 vpe_run(v); 1145 vpe_run(v);
1008 else { 1146 else {
1009 printk(KERN_WARNING "VPE: ELF load failed.\n"); 1147 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
1010 ret = -ENOEXEC; 1148 ret = -ENOEXEC;
1011 } 1149 }
1012 } else { 1150 } else {
1013 printk(KERN_WARNING "VPE: only elf files are supported\n"); 1151 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
1014 ret = -ENOEXEC; 1152 ret = -ENOEXEC;
1015 } 1153 }
1016 1154
1155 /* It's good to be able to run the SP and if it chokes have a look at
1156 the /dev/rt?. But if we reset the pointer to the shared struct we
1157 lose what has happened. So perhaps if garbage is sent to the vpe
1158 device, use it as a trigger for the reset. Hopefully a nice
1159 executable will be along shortly. */
1160 if (ret < 0)
1161 v->shared_ptr = NULL;
1162
1017 // cleanup any temp buffers 1163 // cleanup any temp buffers
1018 if (v->pbuffer) 1164 if (v->pbuffer)
1019 vfree(v->pbuffer); 1165 vfree(v->pbuffer);
@@ -1028,26 +1174,24 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
1028 size_t ret = count; 1174 size_t ret = count;
1029 struct vpe *v; 1175 struct vpe *v;
1030 1176
1031 minor = MINOR(file->f_dentry->d_inode->i_rdev); 1177 minor = iminor(file->f_dentry->d_inode);
1032 if ((v = get_vpe(minor)) == NULL) 1178 if ((v = get_vpe(minor)) == NULL)
1033 return -ENODEV; 1179 return -ENODEV;
1034 1180
1035 if (v->pbuffer == NULL) { 1181 if (v->pbuffer == NULL) {
1036 printk(KERN_ERR "vpe_write: no pbuffer\n"); 1182 printk(KERN_ERR "VPE loader: no buffer for program\n");
1037 return -ENOMEM; 1183 return -ENOMEM;
1038 } 1184 }
1039 1185
1040 if ((count + v->len) > v->plen) { 1186 if ((count + v->len) > v->plen) {
1041 printk(KERN_WARNING 1187 printk(KERN_WARNING
1042 "VPE Loader: elf size too big. Perhaps strip uneeded symbols\n"); 1188 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
1043 return -ENOMEM; 1189 return -ENOMEM;
1044 } 1190 }
1045 1191
1046 count -= copy_from_user(v->pbuffer + v->len, buffer, count); 1192 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
1047 if (!count) { 1193 if (!count)
1048 printk("vpe_write: copy_to_user failed\n");
1049 return -EFAULT; 1194 return -EFAULT;
1050 }
1051 1195
1052 v->len += count; 1196 v->len += count;
1053 return ret; 1197 return ret;
@@ -1149,16 +1293,70 @@ void *vpe_get_shared(int index)
1149{ 1293{
1150 struct vpe *v; 1294 struct vpe *v;
1151 1295
1152 if ((v = get_vpe(index)) == NULL) { 1296 if ((v = get_vpe(index)) == NULL)
1153 printk(KERN_WARNING "vpe: invalid vpe index %d\n", index);
1154 return NULL; 1297 return NULL;
1155 }
1156 1298
1157 return v->shared_ptr; 1299 return v->shared_ptr;
1158} 1300}
1159 1301
1160EXPORT_SYMBOL(vpe_get_shared); 1302EXPORT_SYMBOL(vpe_get_shared);
1161 1303
1304int vpe_getuid(int index)
1305{
1306 struct vpe *v;
1307
1308 if ((v = get_vpe(index)) == NULL)
1309 return -1;
1310
1311 return v->uid;
1312}
1313
1314EXPORT_SYMBOL(vpe_getuid);
1315
1316int vpe_getgid(int index)
1317{
1318 struct vpe *v;
1319
1320 if ((v = get_vpe(index)) == NULL)
1321 return -1;
1322
1323 return v->gid;
1324}
1325
1326EXPORT_SYMBOL(vpe_getgid);
1327
1328int vpe_notify(int index, struct vpe_notifications *notify)
1329{
1330 struct vpe *v;
1331
1332 if ((v = get_vpe(index)) == NULL)
1333 return -1;
1334
1335 list_add(&notify->list, &v->notify);
1336 return 0;
1337}
1338
1339EXPORT_SYMBOL(vpe_notify);
1340
1341char *vpe_getcwd(int index)
1342{
1343 struct vpe *v;
1344
1345 if ((v = get_vpe(index)) == NULL)
1346 return NULL;
1347
1348 return v->cwd;
1349}
1350
1351EXPORT_SYMBOL(vpe_getcwd);
1352
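The block of newly exported helpers above (vpe_get_shared(), vpe_getuid(), vpe_getgid(), vpe_notify(), vpe_getcwd()) forms the in-kernel API that the KSPD hooks and the AP/SP (RTLX) facilities referenced in this file build on: a consumer registers start/stop callbacks for a VPE and, once the SP program is running, retrieves its vpe_shared pointer. A hedged sketch of such a consumer module, assuming the vpe_notifications layout implied by the calls in this file; the header location is illustrative only:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/vpe.h>            /* assumed location of the loader's API */

static void my_sp_started(int vpe)
{
        if (vpe_get_shared(vpe) == NULL)
                printk(KERN_WARNING "no vpe_shared area on VPE%d\n", vpe);
}

static void my_sp_stopped(int vpe)
{
        printk(KERN_INFO "SP program on VPE%d stopped (uid %d, cwd %s)\n",
               vpe, vpe_getuid(vpe), vpe_getcwd(vpe));
}

static struct vpe_notifications my_notify = {
        .start = my_sp_started,
        .stop  = my_sp_stopped,
};

static int __init my_consumer_init(void)
{
        /* watch the single supported VPE, minor 1 */
        return vpe_notify(1, &my_notify) ? -ENODEV : 0;
}
module_init(my_consumer_init);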
1353#ifdef CONFIG_MIPS_APSP_KSPD
1354static void kspd_sp_exit( int sp_id)
1355{
1356 cleanup_tc(get_tc(sp_id));
1357}
1358#endif
1359
1162static int __init vpe_module_init(void) 1360static int __init vpe_module_init(void)
1163{ 1361{
1164 struct vpe *v = NULL; 1362 struct vpe *v = NULL;
@@ -1201,7 +1399,8 @@ static int __init vpe_module_init(void)
1201 return -ENODEV; 1399 return -ENODEV;
1202 } 1400 }
1203 1401
1204 list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */ 1402 /* add the tc to the list of this vpe's tc's. */
1403 list_add(&t->tc, &v->tc);
1205 1404
1206 /* deactivate all but vpe0 */ 1405 /* deactivate all but vpe0 */
1207 if (i != 0) { 1406 if (i != 0) {
@@ -1222,10 +1421,12 @@ static int __init vpe_module_init(void)
1222 ~(ST0_IM | ST0_IE | ST0_KSU)) 1421 ~(ST0_IM | ST0_IE | ST0_KSU))
1223 | ST0_CU0); 1422 | ST0_CU0);
1224 1423
1225 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 1424 /*
1425 * Set config to be the same as vpe0,
1426 * particularly kseg0 coherency alg
1427 */
1226 write_vpe_c0_config(read_c0_config()); 1428 write_vpe_c0_config(read_c0_config());
1227 } 1429 }
1228
1229 } 1430 }
1230 1431
1231 /* TC's */ 1432 /* TC's */
@@ -1234,23 +1435,28 @@ static int __init vpe_module_init(void)
1234 if (i != 0) { 1435 if (i != 0) {
1235 unsigned long tmp; 1436 unsigned long tmp;
1236 1437
1237 /* tc 0 will of course be running.... */
1238 if (i == 0)
1239 t->state = TC_STATE_RUNNING;
1240
1241 settc(i); 1438 settc(i);
1242 1439
1243 /* bind a TC to each VPE, May as well put all excess TC's 1440 /* Any TC that is bound to VPE0 gets left as is - in case
1244 on the last VPE */ 1441 we are running SMTC on VPE0. A TC that is bound to any
1245 if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1)) 1442 other VPE gets bound to VPE0, ideally I'd like to make
1246 write_tc_c0_tcbind(read_tc_c0_tcbind() | 1443 it homeless but it doesn't appear to let me bind a TC
1247 ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); 1444 to a non-existent VPE. Which is perfectly reasonable.
1248 else 1445
1249 write_tc_c0_tcbind(read_tc_c0_tcbind() | i); 1446 The (un)bound state is visible to an EJTAG probe so may
1447 notify GDB...
1448 */
1449
1450 if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
1451 /* tc is bound >vpe0 */
1452 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
1453
1454 t->pvpe = get_vpe(0); /* set the parent vpe */
1455 }
1250 1456
1251 tmp = read_tc_c0_tcstatus(); 1457 tmp = read_tc_c0_tcstatus();
1252 1458
1253 /* mark not allocated and not dynamically allocatable */ 1459 /* mark not activated and not dynamically allocatable */
1254 tmp &= ~(TCSTATUS_A | TCSTATUS_DA); 1460 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1255 tmp |= TCSTATUS_IXMT; /* interrupt exempt */ 1461 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1256 write_tc_c0_tcstatus(tmp); 1462 write_tc_c0_tcstatus(tmp);
@@ -1262,6 +1468,9 @@ static int __init vpe_module_init(void)
1262 /* release config state */ 1468 /* release config state */
1263 clear_c0_mvpcontrol(MVPCONTROL_VPC); 1469 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1264 1470
1471#ifdef CONFIG_MIPS_APSP_KSPD
1472 kspd_events.kspd_sp_exit = kspd_sp_exit;
1473#endif
1265 return 0; 1474 return 0;
1266} 1475}
1267 1476
@@ -1281,5 +1490,5 @@ static void __exit vpe_module_exit(void)
1281module_init(vpe_module_init); 1490module_init(vpe_module_init);
1282module_exit(vpe_module_exit); 1491module_exit(vpe_module_exit);
1283MODULE_DESCRIPTION("MIPS VPE Loader"); 1492MODULE_DESCRIPTION("MIPS VPE Loader");
1284MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); 1493MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
1285MODULE_LICENSE("GPL"); 1494MODULE_LICENSE("GPL");