Diffstat (limited to 'arch/mips/kernel')

 arch/mips/kernel/Makefile                                          |    4
 arch/mips/kernel/asm-offsets.c                                     |    3
 arch/mips/kernel/entry.S                                           |   34
 arch/mips/kernel/gdb-low.S                                         |   24
 arch/mips/kernel/gdb-stub.c                                        |   61
 arch/mips/kernel/genex.S                                           |   29
 arch/mips/kernel/head.S                                            |   57
 arch/mips/kernel/i8259.c                                           |    4
 arch/mips/kernel/irq-msc01.c                                       |    9
 arch/mips/kernel/irq.c                                             |   13
 arch/mips/kernel/mips-mt.c                                         |  449
 arch/mips/kernel/process.c                                         |   10
 arch/mips/kernel/ptrace.c                                          |   14
 arch/mips/kernel/ptrace32.c                                        |   14
 arch/mips/kernel/r4k_switch.S                                      |   34
 arch/mips/kernel/smp-mt.c (renamed from arch/mips/kernel/smp_mt.c) |   33
 arch/mips/kernel/smp.c                                             |   10
 arch/mips/kernel/smtc-asm.S                                        |  130
 arch/mips/kernel/smtc-proc.c                                       |   93
 arch/mips/kernel/smtc.c                                            | 1322
 arch/mips/kernel/time.c                                            |    3
 arch/mips/kernel/traps.c                                           |  124
 arch/mips/kernel/vmlinux.lds.S                                     |    2

 23 files changed, 2447 insertions, 29 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9ec01de81c04..34e8a256765c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
34 | 34 | ||
35 | obj-$(CONFIG_SMP) += smp.o | 35 | obj-$(CONFIG_SMP) += smp.o |
36 | 36 | ||
37 | obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o | 37 | obj-$(CONFIG_MIPS_MT) += mips-mt.o |
38 | obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o | ||
39 | obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o | ||
38 | 40 | ||
39 | obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o | 41 | obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o |
40 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o | 42 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o |
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6b03c773be..92b28b674d6f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
69 | offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); | 69 | offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); |
70 | offset("#define PT_STATUS ", struct pt_regs, cp0_status); | 70 | offset("#define PT_STATUS ", struct pt_regs, cp0_status); |
71 | offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); | 71 | offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); |
72 | #ifdef CONFIG_MIPS_MT_SMTC | ||
73 | offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus); | ||
74 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
72 | size("#define PT_SIZE ", struct pt_regs); | 75 | size("#define PT_SIZE ", struct pt_regs); |
73 | linefeed; | 76 | linefeed; |
74 | } | 77 | } |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index b1939a486d2c..d101d2fb24ca 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -17,6 +17,9 @@
17 | #include <asm/isadep.h> | 17 | #include <asm/isadep.h> |
18 | #include <asm/thread_info.h> | 18 | #include <asm/thread_info.h> |
19 | #include <asm/war.h> | 19 | #include <asm/war.h> |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #endif | ||
20 | 23 | ||
21 | #ifdef CONFIG_PREEMPT | 24 | #ifdef CONFIG_PREEMPT |
22 | .macro preempt_stop | 25 | .macro preempt_stop |
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
75 | bnez t0, syscall_exit_work | 78 | bnez t0, syscall_exit_work |
76 | 79 | ||
77 | FEXPORT(restore_all) # restore full frame | 80 | FEXPORT(restore_all) # restore full frame |
81 | #ifdef CONFIG_MIPS_MT_SMTC | ||
82 | /* Detect and execute deferred IPI "interrupts" */ | ||
83 | move a0,sp | ||
84 | jal deferred_smtc_ipi | ||
85 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | ||
86 | mfc0 v0, CP0_TCSTATUS | ||
87 | ori v1, v0, TCSTATUS_IXMT | ||
88 | mtc0 v1, CP0_TCSTATUS | ||
89 | andi v0, TCSTATUS_IXMT | ||
90 | ehb | ||
91 | mfc0 t0, CP0_TCCONTEXT | ||
92 | DMT 9 # dmt t1 | ||
93 | jal mips_ihb | ||
94 | mfc0 t2, CP0_STATUS | ||
95 | andi t3, t0, 0xff00 | ||
96 | or t2, t2, t3 | ||
97 | mtc0 t2, CP0_STATUS | ||
98 | ehb | ||
99 | andi t1, t1, VPECONTROL_TE | ||
100 | beqz t1, 1f | ||
101 | EMT | ||
102 | 1: | ||
103 | mfc0 v1, CP0_TCSTATUS | ||
104 | /* We set IXMT above, XOR should clear it here */ | ||
105 | xori v1, v1, TCSTATUS_IXMT | ||
106 | or v1, v0, v1 | ||
107 | mtc0 v1, CP0_TCSTATUS | ||
108 | ehb | ||
109 | xor t0, t0, t3 | ||
110 | mtc0 t0, CP0_TCCONTEXT | ||
111 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
78 | .set noat | 112 | .set noat |
79 | RESTORE_TEMP | 113 | RESTORE_TEMP |
80 | RESTORE_AT | 114 | RESTORE_AT |
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 235ad9f6bd35..10f28fb9f008 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -283,11 +283,33 @@
283 | */ | 283 | */ |
284 | 284 | ||
285 | 3: | 285 | 3: |
286 | #ifdef CONFIG_MIPS_MT_SMTC | ||
287 | /* Read-modify-write of Status must be atomic */ | ||
288 | mfc0 t2, CP0_TCSTATUS | ||
289 | ori t1, t2, TCSTATUS_IXMT | ||
290 | mtc0 t1, CP0_TCSTATUS | ||
291 | andi t2, t2, TCSTATUS_IXMT | ||
292 | ehb | ||
293 | DMT 9 # dmt t1 | ||
294 | jal mips_ihb | ||
295 | nop | ||
296 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
286 | mfc0 t0, CP0_STATUS | 297 | mfc0 t0, CP0_STATUS |
287 | ori t0, 0x1f | 298 | ori t0, 0x1f |
288 | xori t0, 0x1f | 299 | xori t0, 0x1f |
289 | mtc0 t0, CP0_STATUS | 300 | mtc0 t0, CP0_STATUS |
290 | 301 | #ifdef CONFIG_MIPS_MT_SMTC | |
302 | andi t1, t1, VPECONTROL_TE | ||
303 | beqz t1, 9f | ||
304 | nop | ||
305 | EMT # emt | ||
306 | 9: | ||
307 | mfc0 t1, CP0_TCSTATUS | ||
308 | xori t1, t1, TCSTATUS_IXMT | ||
309 | or t1, t1, t2 | ||
310 | mtc0 t1, CP0_TCSTATUS | ||
311 | ehb | ||
312 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
291 | LONG_L v0, GDB_FR_STATUS(sp) | 313 | LONG_L v0, GDB_FR_STATUS(sp) |
292 | LONG_L v1, GDB_FR_EPC(sp) | 314 | LONG_L v1, GDB_FR_EPC(sp) |
293 | mtc0 v0, CP0_STATUS | 315 | mtc0 v0, CP0_STATUS |
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d4f88e0af24c..6ecbdc1fefd1 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
140 | #include <asm/system.h> | 140 | #include <asm/system.h> |
141 | #include <asm/gdb-stub.h> | 141 | #include <asm/gdb-stub.h> |
142 | #include <asm/inst.h> | 142 | #include <asm/inst.h> |
143 | #include <asm/smp.h> | ||
143 | 144 | ||
144 | /* | 145 | /* |
145 | * external low-level support routines | 146 | * external low-level support routines |
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
669 | local_irq_restore(flags); | 670 | local_irq_restore(flags); |
670 | } | 671 | } |
671 | 672 | ||
673 | /* | ||
674 | * GDB stub needs to call kgdb_wait on all processors with interrupts | ||
675 | * disabled, so it uses its own special variant. | ||
676 | */ | ||
677 | static int kgdb_smp_call_kgdb_wait(void) | ||
678 | { | ||
679 | #ifdef CONFIG_SMP | ||
680 | struct call_data_struct data; | ||
681 | int i, cpus = num_online_cpus() - 1; | ||
682 | int cpu = smp_processor_id(); | ||
683 | |||
684 | /* | ||
685 | * Can die spectacularly if this CPU isn't yet marked online | ||
686 | */ | ||
687 | BUG_ON(!cpu_online(cpu)); | ||
688 | |||
689 | if (!cpus) | ||
690 | return 0; | ||
691 | |||
692 | if (spin_is_locked(&smp_call_lock)) { | ||
693 | /* | ||
694 | * Some other processor is trying to make us do something | ||
695 | * but we're not going to respond... give up | ||
696 | */ | ||
697 | return -1; | ||
698 | } | ||
699 | |||
700 | /* | ||
701 | * We will continue here, accepting the fact that | ||
702 | * the kernel may deadlock if another CPU attempts | ||
703 | * to call smp_call_function now... | ||
704 | */ | ||
705 | |||
706 | data.func = kgdb_wait; | ||
707 | data.info = NULL; | ||
708 | atomic_set(&data.started, 0); | ||
709 | data.wait = 0; | ||
710 | |||
711 | spin_lock(&smp_call_lock); | ||
712 | call_data = &data; | ||
713 | mb(); | ||
714 | |||
715 | /* Send a message to all other CPUs and wait for them to respond */ | ||
716 | for (i = 0; i < NR_CPUS; i++) | ||
717 | if (cpu_online(i) && i != cpu) | ||
718 | core_send_ipi(i, SMP_CALL_FUNCTION); | ||
719 | |||
720 | /* Wait for response */ | ||
721 | /* FIXME: lock-up detection, backtrace on lock-up */ | ||
722 | while (atomic_read(&data.started) != cpus) | ||
723 | barrier(); | ||
724 | |||
725 | call_data = NULL; | ||
726 | spin_unlock(&smp_call_lock); | ||
727 | #endif | ||
728 | |||
729 | return 0; | ||
730 | } | ||
672 | 731 | ||
673 | /* | 732 | /* |
674 | * This function does all command processing for interfacing to gdb. It | 733 | * This function does all command processing for interfacing to gdb. It |
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
718 | /* | 777 | /* |
719 | * force other cpus to enter kgdb | 778 | * force other cpus to enter kgdb |
720 | */ | 779 | */ |
721 | smp_call_function(kgdb_wait, NULL, 0, 0); | 780 | kgdb_smp_call_kgdb_wait(); |
722 | 781 | ||
723 | /* | 782 | /* |
724 | * If we're in breakpoint() increment the PC | 783 | * If we're in breakpoint() increment the PC |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 04418b6568b0..ff7af369f286 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -12,6 +12,7 @@
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | 13 | ||
14 | #include <asm/asm.h> | 14 | #include <asm/asm.h> |
15 | #include <asm/asmmacro.h> | ||
15 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
16 | #include <asm/regdef.h> | 17 | #include <asm/regdef.h> |
17 | #include <asm/fpregdef.h> | 18 | #include <asm/fpregdef.h> |
@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
171 | SAVE_AT | 172 | SAVE_AT |
172 | .set push | 173 | .set push |
173 | .set noreorder | 174 | .set noreorder |
175 | #ifdef CONFIG_MIPS_MT_SMTC | ||
176 | /* | ||
177 | * To keep from blindly blocking *all* interrupts | ||
178 | * during service by SMTC kernel, we also want to | ||
179 | * pass the IM value to be cleared. | ||
180 | */ | ||
181 | EXPORT(except_vec_vi_mori) | ||
182 | ori a0, $0, 0 | ||
183 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
174 | EXPORT(except_vec_vi_lui) | 184 | EXPORT(except_vec_vi_lui) |
175 | lui v0, 0 /* Patched */ | 185 | lui v0, 0 /* Patched */ |
176 | j except_vec_vi_handler | 186 | j except_vec_vi_handler |
@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
187 | NESTED(except_vec_vi_handler, 0, sp) | 197 | NESTED(except_vec_vi_handler, 0, sp) |
188 | SAVE_TEMP | 198 | SAVE_TEMP |
189 | SAVE_STATIC | 199 | SAVE_STATIC |
200 | #ifdef CONFIG_MIPS_MT_SMTC | ||
201 | /* | ||
202 | * SMTC has an interesting problem that interrupts are level-triggered, | ||
203 | * and the CLI macro will clear EXL, potentially causing a duplicate | ||
204 | * interrupt service invocation. So we need to clear the associated | ||
205 | * IM bit of Status prior to doing CLI, and restore it after the | ||
206 | * service routine has been invoked - we must assume that the | ||
207 | * service routine will have cleared the state, and any active | ||
208 | * level represents a new or otherwise unserviced event... | ||
209 | */ | ||
210 | mfc0 t1, CP0_STATUS | ||
211 | and t0, a0, t1 | ||
212 | mfc0 t2, CP0_TCCONTEXT | ||
213 | or t0, t0, t2 | ||
214 | mtc0 t0, CP0_TCCONTEXT | ||
215 | xor t1, t1, t0 | ||
216 | mtc0 t1, CP0_STATUS | ||
217 | ehb | ||
218 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
190 | CLI | 219 | CLI |
191 | move a0, sp | 220 | move a0, sp |
192 | jalr v0 | 221 | jalr v0 |
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2e9122a4213a..bdf6f6eff721 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -18,6 +18,7 @@
18 | #include <linux/threads.h> | 18 | #include <linux/threads.h> |
19 | 19 | ||
20 | #include <asm/asm.h> | 20 | #include <asm/asm.h> |
21 | #include <asm/asmmacro.h> | ||
21 | #include <asm/regdef.h> | 22 | #include <asm/regdef.h> |
22 | #include <asm/page.h> | 23 | #include <asm/page.h> |
23 | #include <asm/mipsregs.h> | 24 | #include <asm/mipsregs.h> |
@@ -82,12 +83,33 @@
82 | */ | 83 | */ |
83 | .macro setup_c0_status set clr | 84 | .macro setup_c0_status set clr |
84 | .set push | 85 | .set push |
86 | #ifdef CONFIG_MIPS_MT_SMTC | ||
87 | /* | ||
88 | * For SMTC, we need to set privilege and disable interrupts only for | ||
89 | * the current TC, using the TCStatus register. | ||
90 | */ | ||
91 | mfc0 t0, CP0_TCSTATUS | ||
92 | /* Fortunately CU 0 is in the same place in both registers */ | ||
93 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
94 | li t1, ST0_CU0 | 0x08001c00 | ||
95 | or t0, t1 | ||
96 | /* Clear TKSU, leave IXMT */ | ||
97 | xori t0, 0x00001800 | ||
98 | mtc0 t0, CP0_TCSTATUS | ||
99 | ehb | ||
100 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
101 | mfc0 t0, CP0_STATUS | ||
102 | or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr | ||
103 | xor t0, ST0_EXL | ST0_ERL | \clr | ||
104 | mtc0 t0, CP0_STATUS | ||
105 | #else | ||
85 | mfc0 t0, CP0_STATUS | 106 | mfc0 t0, CP0_STATUS |
86 | or t0, ST0_CU0|\set|0x1f|\clr | 107 | or t0, ST0_CU0|\set|0x1f|\clr |
87 | xor t0, 0x1f|\clr | 108 | xor t0, 0x1f|\clr |
88 | mtc0 t0, CP0_STATUS | 109 | mtc0 t0, CP0_STATUS |
89 | .set noreorder | 110 | .set noreorder |
90 | sll zero,3 # ehb | 111 | sll zero,3 # ehb |
112 | #endif | ||
91 | .set pop | 113 | .set pop |
92 | .endm | 114 | .endm |
93 | 115 | ||
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
134 | 156 | ||
135 | ARC64_TWIDDLE_PC | 157 | ARC64_TWIDDLE_PC |
136 | 158 | ||
159 | #ifdef CONFIG_MIPS_MT_SMTC | ||
160 | /* | ||
161 | * In SMTC kernel, "CLI" is thread-specific, in TCStatus. | ||
162 | * We still need to enable interrupts globally in Status, | ||
163 | * and clear EXL/ERL. | ||
164 | * | ||
165 | * TCContext is used to track interrupt levels under | ||
166 | * service in SMTC kernel. Clear for boot TC before | ||
167 | * allowing any interrupts. | ||
168 | */ | ||
169 | mtc0 zero, CP0_TCCONTEXT | ||
170 | |||
171 | mfc0 t0, CP0_STATUS | ||
172 | ori t0, t0, 0xff1f | ||
173 | xori t0, t0, 0x001e | ||
174 | mtc0 t0, CP0_STATUS | ||
175 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
176 | |||
137 | PTR_LA t0, __bss_start # clear .bss | 177 | PTR_LA t0, __bss_start # clear .bss |
138 | LONG_S zero, (t0) | 178 | LONG_S zero, (t0) |
139 | PTR_LA t1, __bss_stop - LONGSIZE | 179 | PTR_LA t1, __bss_stop - LONGSIZE |
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
166 | * function after setting up the stack and gp registers. | 206 | * function after setting up the stack and gp registers. |
167 | */ | 207 | */ |
168 | NESTED(smp_bootstrap, 16, sp) | 208 | NESTED(smp_bootstrap, 16, sp) |
209 | #ifdef CONFIG_MIPS_MT_SMTC | ||
210 | /* | ||
211 | * Read-modify-writes of Status must be atomic, and this | ||
212 | * is one case where CLI is invoked without EXL being | ||
213 | * necessarily set. The CLI and setup_c0_status will | ||
214 | * in fact be redundant for all but the first TC of | ||
215 | * each VPE being booted. | ||
216 | */ | ||
217 | DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ | ||
218 | jal mips_ihb | ||
219 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
169 | setup_c0_status_sec | 220 | setup_c0_status_sec |
170 | smp_slave_setup | 221 | smp_slave_setup |
222 | #ifdef CONFIG_MIPS_MT_SMTC | ||
223 | andi t2, t2, VPECONTROL_TE | ||
224 | beqz t2, 2f | ||
225 | EMT # emt | ||
226 | 2: | ||
227 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
171 | j start_secondary | 228 | j start_secondary |
172 | END(smp_bootstrap) | 229 | END(smp_bootstrap) |
173 | #endif /* CONFIG_SMP */ | 230 | #endif /* CONFIG_SMP */ |
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b974ac9057f6..2125ba5f1d9b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -187,6 +187,10 @@ handle_real_irq:
187 | outb(cached_21,0x21); | 187 | outb(cached_21,0x21); |
188 | outb(0x60+irq,0x20); /* 'Specific EOI' to master */ | 188 | outb(0x60+irq,0x20); /* 'Specific EOI' to master */ |
189 | } | 189 | } |
190 | #ifdef CONFIG_MIPS_MT_SMTC | ||
191 | if (irq_hwmask[irq] & ST0_IM) | ||
192 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
193 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
190 | spin_unlock_irqrestore(&i8259A_lock, flags); | 194 | spin_unlock_irqrestore(&i8259A_lock, flags); |
191 | return; | 195 | return; |
192 | 196 | ||
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 3f653c7cfbf3..97ebdc754b9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
76 | mask_msc_irq(irq); | 76 | mask_msc_irq(irq); |
77 | if (!cpu_has_veic) | 77 | if (!cpu_has_veic) |
78 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 78 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
79 | #ifdef CONFIG_MIPS_MT_SMTC | ||
80 | /* This actually needs to be a call into platform code */ | ||
81 | if (irq_hwmask[irq] & ST0_IM) | ||
82 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
83 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
79 | } | 84 | } |
80 | 85 | ||
81 | /* | 86 | /* |
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
92 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); | 97 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); |
93 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); | 98 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); |
94 | } | 99 | } |
100 | #ifdef CONFIG_MIPS_MT_SMTC | ||
101 | if (irq_hwmask[irq] & ST0_IM) | ||
102 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
103 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
95 | } | 104 | } |
96 | 105 | ||
97 | /* | 106 | /* |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index e0efc4f2f93e..3dce742e716f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
38 | 38 | ||
39 | atomic_t irq_err_count; | 39 | atomic_t irq_err_count; |
40 | 40 | ||
41 | #ifdef CONFIG_MIPS_MT_SMTC | ||
42 | /* | ||
43 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
44 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
45 | * in this table. | ||
46 | */ | ||
47 | unsigned long irq_hwmask[NR_IRQS]; | ||
48 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
49 | |||
41 | #undef do_IRQ | 50 | #undef do_IRQ |
42 | 51 | ||
43 | /* | 52 | /* |
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
49 | { | 58 | { |
50 | irq_enter(); | 59 | irq_enter(); |
51 | 60 | ||
61 | __DO_IRQ_SMTC_HOOK(); | ||
52 | __do_IRQ(irq, regs); | 62 | __do_IRQ(irq, regs); |
53 | 63 | ||
54 | irq_exit(); | 64 | irq_exit(); |
@@ -129,6 +139,9 @@ void __init init_IRQ(void)
129 | irq_desc[i].depth = 1; | 139 | irq_desc[i].depth = 1; |
130 | irq_desc[i].handler = &no_irq_type; | 140 | irq_desc[i].handler = &no_irq_type; |
131 | spin_lock_init(&irq_desc[i].lock); | 141 | spin_lock_init(&irq_desc[i].lock); |
142 | #ifdef CONFIG_MIPS_MT_SMTC | ||
143 | irq_hwmask[i] = 0; | ||
144 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
132 | } | 145 | } |
133 | 146 | ||
134 | arch_init_irq(); | 147 | arch_init_irq(); |
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000000..02237a685ec7
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,449 @@
1 | /* | ||
2 | * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | ||
3 | * Copyright (C) 2005 Mips Technologies, Inc | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/cpumask.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | |||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <asm/hardirq.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/smp.h> | ||
18 | #include <asm/mipsmtregs.h> | ||
19 | #include <asm/r4kcache.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | |||
22 | /* | ||
23 | * CPU mask used to set process affinity for MT VPEs/TCs with FPUs | ||
24 | */ | ||
25 | |||
26 | cpumask_t mt_fpu_cpumask; | ||
27 | |||
28 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
29 | |||
30 | #include <linux/cpu.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | |||
34 | unsigned long mt_fpemul_threshold = 0; | ||
35 | |||
36 | /* | ||
37 | * Replacement functions for the sys_sched_setaffinity() and | ||
38 | * sys_sched_getaffinity() system calls, so that we can integrate | ||
39 | * FPU affinity with the user's requested processor affinity. | ||
40 | * This code is 98% identical with the sys_sched_setaffinity() | ||
41 | * and sys_sched_getaffinity() system calls, and should be | ||
42 | * updated when kernel/sched.c changes. | ||
43 | */ | ||
44 | |||
45 | /* | ||
46 | * find_process_by_pid - find a process with a matching PID value. | ||
47 | * used in sys_sched_set/getaffinity() in kernel/sched.c, so | ||
48 | * cloned here. | ||
49 | */ | ||
50 | static inline task_t *find_process_by_pid(pid_t pid) | ||
51 | { | ||
52 | return pid ? find_task_by_pid(pid) : current; | ||
53 | } | ||
54 | |||
55 | |||
56 | /* | ||
57 | * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process | ||
58 | */ | ||
59 | asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, | ||
60 | unsigned long __user *user_mask_ptr) | ||
61 | { | ||
62 | cpumask_t new_mask; | ||
63 | cpumask_t effective_mask; | ||
64 | int retval; | ||
65 | task_t *p; | ||
66 | |||
67 | if (len < sizeof(new_mask)) | ||
68 | return -EINVAL; | ||
69 | |||
70 | if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) | ||
71 | return -EFAULT; | ||
72 | |||
73 | lock_cpu_hotplug(); | ||
74 | read_lock(&tasklist_lock); | ||
75 | |||
76 | p = find_process_by_pid(pid); | ||
77 | if (!p) { | ||
78 | read_unlock(&tasklist_lock); | ||
79 | unlock_cpu_hotplug(); | ||
80 | return -ESRCH; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * It is not safe to call set_cpus_allowed with the | ||
85 | * tasklist_lock held. We will bump the task_struct's | ||
86 | * usage count and drop tasklist_lock before invoking | ||
87 | * set_cpus_allowed. | ||
88 | */ | ||
89 | get_task_struct(p); | ||
90 | |||
91 | retval = -EPERM; | ||
92 | if ((current->euid != p->euid) && (current->euid != p->uid) && | ||
93 | !capable(CAP_SYS_NICE)) { | ||
94 | read_unlock(&tasklist_lock); | ||
95 | goto out_unlock; | ||
96 | } | ||
97 | |||
98 | /* Record new user-specified CPU set for future reference */ | ||
99 | p->thread.user_cpus_allowed = new_mask; | ||
100 | |||
101 | /* Unlock the task list */ | ||
102 | read_unlock(&tasklist_lock); | ||
103 | |||
104 | /* Compute new global allowed CPU set if necessary */ | ||
105 | if( (p->thread.mflags & MF_FPUBOUND) | ||
106 | && cpus_intersects(new_mask, mt_fpu_cpumask)) { | ||
107 | cpus_and(effective_mask, new_mask, mt_fpu_cpumask); | ||
108 | retval = set_cpus_allowed(p, effective_mask); | ||
109 | } else { | ||
110 | p->thread.mflags &= ~MF_FPUBOUND; | ||
111 | retval = set_cpus_allowed(p, new_mask); | ||
112 | } | ||
113 | |||
114 | |||
115 | out_unlock: | ||
116 | put_task_struct(p); | ||
117 | unlock_cpu_hotplug(); | ||
118 | return retval; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process | ||
123 | */ | ||
124 | asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, | ||
125 | unsigned long __user *user_mask_ptr) | ||
126 | { | ||
127 | unsigned int real_len; | ||
128 | cpumask_t mask; | ||
129 | int retval; | ||
130 | task_t *p; | ||
131 | |||
132 | real_len = sizeof(mask); | ||
133 | if (len < real_len) | ||
134 | return -EINVAL; | ||
135 | |||
136 | lock_cpu_hotplug(); | ||
137 | read_lock(&tasklist_lock); | ||
138 | |||
139 | retval = -ESRCH; | ||
140 | p = find_process_by_pid(pid); | ||
141 | if (!p) | ||
142 | goto out_unlock; | ||
143 | |||
144 | retval = 0; | ||
145 | |||
146 | cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); | ||
147 | |||
148 | out_unlock: | ||
149 | read_unlock(&tasklist_lock); | ||
150 | unlock_cpu_hotplug(); | ||
151 | if (retval) | ||
152 | return retval; | ||
153 | if (copy_to_user(user_mask_ptr, &mask, real_len)) | ||
154 | return -EFAULT; | ||
155 | return real_len; | ||
156 | } | ||
157 | |||
158 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
159 | |||
160 | /* | ||
161 | * Dump new MIPS MT state for the core. Does not leave TCs halted. | ||
162 | * Takes an argument which is taken to be a pre-call MVPControl value. | ||
163 | */ | ||
164 | |||
165 | void mips_mt_regdump(unsigned long mvpctl) | ||
166 | { | ||
167 | unsigned long flags; | ||
168 | unsigned long vpflags; | ||
169 | unsigned long mvpconf0; | ||
170 | int nvpe; | ||
171 | int ntc; | ||
172 | int i; | ||
173 | int tc; | ||
174 | unsigned long haltval; | ||
175 | unsigned long tcstatval; | ||
176 | #ifdef CONFIG_MIPS_MT_SMTC | ||
177 | void smtc_soft_dump(void); | ||
178 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
179 | |||
180 | local_irq_save(flags); | ||
181 | vpflags = dvpe(); | ||
182 | printk("=== MIPS MT State Dump ===\n"); | ||
183 | printk("-- Global State --\n"); | ||
184 | printk(" MVPControl Passed: %08lx\n", mvpctl); | ||
185 | printk(" MVPControl Read: %08lx\n", vpflags); | ||
186 | printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); | ||
187 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
188 | ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
189 | printk("-- per-VPE State --\n"); | ||
190 | for(i = 0; i < nvpe; i++) { | ||
191 | for(tc = 0; tc < ntc; tc++) { | ||
192 | settc(tc); | ||
193 | if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { | ||
194 | printk(" VPE %d\n", i); | ||
195 | printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol()); | ||
196 | printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0()); | ||
197 | printk(" VPE%d.Status : %08lx\n", | ||
198 | i, read_vpe_c0_status()); | ||
199 | printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc()); | ||
200 | printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause()); | ||
201 | printk(" VPE%d.Config7 : %08lx\n", | ||
202 | i, read_vpe_c0_config7()); | ||
203 | break; /* Next VPE */ | ||
204 | } | ||
205 | } | ||
206 | } | ||
207 | printk("-- per-TC State --\n"); | ||
208 | for(tc = 0; tc < ntc; tc++) { | ||
209 | settc(tc); | ||
210 | if(read_tc_c0_tcbind() == read_c0_tcbind()) { | ||
211 | /* Are we dumping ourself? */ | ||
212 | haltval = 0; /* Then we're not halted, and mustn't be */ | ||
213 | tcstatval = flags; /* And pre-dump TCStatus is flags */ | ||
214 | printk(" TC %d (current TC with VPE EPC above)\n", tc); | ||
215 | } else { | ||
216 | haltval = read_tc_c0_tchalt(); | ||
217 | write_tc_c0_tchalt(1); | ||
218 | tcstatval = read_tc_c0_tcstatus(); | ||
219 | printk(" TC %d\n", tc); | ||
220 | } | ||
221 | printk(" TCStatus : %08lx\n", tcstatval); | ||
222 | printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); | ||
223 | printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart()); | ||
224 | printk(" TCHalt : %08lx\n", haltval); | ||
225 | printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); | ||
226 | if (!haltval) | ||
227 | write_tc_c0_tchalt(0); | ||
228 | } | ||
229 | #ifdef CONFIG_MIPS_MT_SMTC | ||
230 | smtc_soft_dump(); | ||
231 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
232 | printk("===========================\n"); | ||
233 | evpe(vpflags); | ||
234 | local_irq_restore(flags); | ||
235 | } | ||
236 | |||
237 | static int mt_opt_norps = 0; | ||
238 | static int mt_opt_rpsctl = -1; | ||
239 | static int mt_opt_nblsu = -1; | ||
240 | static int mt_opt_forceconfig7 = 0; | ||
241 | static int mt_opt_config7 = -1; | ||
242 | |||
243 | static int __init rps_disable(char *s) | ||
244 | { | ||
245 | mt_opt_norps = 1; | ||
246 | return 1; | ||
247 | } | ||
248 | __setup("norps", rps_disable); | ||
249 | |||
250 | static int __init rpsctl_set(char *str) | ||
251 | { | ||
252 | get_option(&str, &mt_opt_rpsctl); | ||
253 | return 1; | ||
254 | } | ||
255 | __setup("rpsctl=", rpsctl_set); | ||
256 | |||
257 | static int __init nblsu_set(char *str) | ||
258 | { | ||
259 | get_option(&str, &mt_opt_nblsu); | ||
260 | return 1; | ||
261 | } | ||
262 | __setup("nblsu=", nblsu_set); | ||
263 | |||
264 | static int __init config7_set(char *str) | ||
265 | { | ||
266 | get_option(&str, &mt_opt_config7); | ||
267 | mt_opt_forceconfig7 = 1; | ||
268 | return 1; | ||
269 | } | ||
270 | __setup("config7=", config7_set); | ||
271 | |||
272 | /* Experimental cache flush control parameters that should go away some day */ | ||
273 | int mt_protiflush = 0; | ||
274 | int mt_protdflush = 0; | ||
275 | int mt_n_iflushes = 1; | ||
276 | int mt_n_dflushes = 1; | ||
277 | |||
278 | static int __init set_protiflush(char *s) | ||
279 | { | ||
280 | mt_protiflush = 1; | ||
281 | return 1; | ||
282 | } | ||
283 | __setup("protiflush", set_protiflush); | ||
284 | |||
285 | static int __init set_protdflush(char *s) | ||
286 | { | ||
287 | mt_protdflush = 1; | ||
288 | return 1; | ||
289 | } | ||
290 | __setup("protdflush", set_protdflush); | ||
291 | |||
292 | static int __init niflush(char *s) | ||
293 | { | ||
294 | get_option(&s, &mt_n_iflushes); | ||
295 | return 1; | ||
296 | } | ||
297 | __setup("niflush=", niflush); | ||
298 | |||
299 | static int __init ndflush(char *s) | ||
300 | { | ||
301 | get_option(&s, &mt_n_dflushes); | ||
302 | return 1; | ||
303 | } | ||
304 | __setup("ndflush=", ndflush); | ||
305 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
306 | static int fpaff_threshold = -1; | ||
307 | |||
308 | static int __init fpaff_thresh(char *str) | ||
309 | { | ||
310 | get_option(&str, &fpaff_threshold); | ||
311 | return 1; | ||
312 | } | ||
313 | |||
314 | __setup("fpaff=", fpaff_thresh); | ||
315 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
316 | |||
317 | static unsigned int itc_base = 0; | ||
318 | |||
319 | static int __init set_itc_base(char *str) | ||
320 | { | ||
321 | get_option(&str, &itc_base); | ||
322 | return 1; | ||
323 | } | ||
324 | |||
325 | __setup("itcbase=", set_itc_base); | ||
326 | |||
327 | void mips_mt_set_cpuoptions(void) | ||
328 | { | ||
329 | unsigned int oconfig7 = read_c0_config7(); | ||
330 | unsigned int nconfig7 = oconfig7; | ||
331 | |||
332 | if (mt_opt_norps) { | ||
333 | printk("\"norps\" option deprectated: use \"rpsctl=\"\n"); | ||
334 | } | ||
335 | if (mt_opt_rpsctl >= 0) { | ||
336 | printk("34K return prediction stack override set to %d.\n", | ||
337 | mt_opt_rpsctl); | ||
338 | if (mt_opt_rpsctl) | ||
339 | nconfig7 |= (1 << 2); | ||
340 | else | ||
341 | nconfig7 &= ~(1 << 2); | ||
342 | } | ||
343 | if (mt_opt_nblsu >= 0) { | ||
344 | printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu); | ||
345 | if (mt_opt_nblsu) | ||
346 | nconfig7 |= (1 << 5); | ||
347 | else | ||
348 | nconfig7 &= ~(1 << 5); | ||
349 | } | ||
350 | if (mt_opt_forceconfig7) { | ||
351 | printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7); | ||
352 | nconfig7 = mt_opt_config7; | ||
353 | } | ||
354 | if (oconfig7 != nconfig7) { | ||
355 | __asm__ __volatile("sync"); | ||
356 | write_c0_config7(nconfig7); | ||
357 | ehb (); | ||
358 | printk("Config7: 0x%08x\n", read_c0_config7()); | ||
359 | } | ||
360 | |||
361 | /* Report Cache management debug options */ | ||
362 | if (mt_protiflush) | ||
363 | printk("I-cache flushes single-threaded\n"); | ||
364 | if (mt_protdflush) | ||
365 | printk("D-cache flushes single-threaded\n"); | ||
366 | if (mt_n_iflushes != 1) | ||
367 | printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes); | ||
368 | if (mt_n_dflushes != 1) | ||
369 | printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes); | ||
370 | |||
371 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
372 | /* FPU Use Factor empirically derived from experiments on 34K */ | ||
373 | #define FPUSEFACTOR 333 | ||
374 | |||
375 | if (fpaff_threshold >= 0) { | ||
376 | mt_fpemul_threshold = fpaff_threshold; | ||
377 | } else { | ||
378 | mt_fpemul_threshold = | ||
379 | (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ; | ||
380 | } | ||
381 | printk("FPU Affinity set after %ld emulations\n", | ||
382 | mt_fpemul_threshold); | ||
383 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
384 | |||
385 | if (itc_base != 0) { | ||
386 | /* | ||
387 | * Configure ITC mapping. This code is very | ||
388 | * specific to the 34K core family, which uses | ||
389 | * a special mode bit ("ITC") in the ErrCtl | ||
390 | * register to enable access to ITC control | ||
391 | * registers via cache "tag" operations. | ||
392 | */ | ||
393 | unsigned long ectlval; | ||
394 | unsigned long itcblkgrn; | ||
395 | |||
396 | /* ErrCtl register is known as "ecc" to Linux */ | ||
397 | ectlval = read_c0_ecc(); | ||
398 | write_c0_ecc(ectlval | (0x1 << 26)); | ||
399 | ehb(); | ||
400 | #define INDEX_0 (0x80000000) | ||
401 | #define INDEX_8 (0x80000008) | ||
402 | /* Read "cache tag" for Dcache pseudo-index 8 */ | ||
403 | cache_op(Index_Load_Tag_D, INDEX_8); | ||
404 | ehb(); | ||
405 | itcblkgrn = read_c0_dtaglo(); | ||
406 | itcblkgrn &= 0xfffe0000; | ||
407 | /* Set for 128 byte pitch of ITC cells */ | ||
408 | itcblkgrn |= 0x00000c00; | ||
409 | /* Stage in Tag register */ | ||
410 | write_c0_dtaglo(itcblkgrn); | ||
411 | ehb(); | ||
412 | /* Write out to ITU with CACHE op */ | ||
413 | cache_op(Index_Store_Tag_D, INDEX_8); | ||
414 | /* Now set base address, and turn ITC on with 0x1 bit */ | ||
415 | write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 ); | ||
416 | ehb(); | ||
417 | /* Write out to ITU with CACHE op */ | ||
418 | cache_op(Index_Store_Tag_D, INDEX_0); | ||
419 | write_c0_ecc(ectlval); | ||
420 | ehb(); | ||
421 | printk("Mapped %ld ITC cells starting at 0x%08x\n", | ||
422 | ((itcblkgrn & 0x7fe00000) >> 20), itc_base); | ||
423 | } | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * Function to protect cache flushes from concurrent execution | ||
428 | * depends on MP software model chosen. | ||
429 | */ | ||
430 | |||
431 | void mt_cflush_lockdown(void) | ||
432 | { | ||
433 | #ifdef CONFIG_MIPS_MT_SMTC | ||
434 | void smtc_cflush_lockdown(void); | ||
435 | |||
436 | smtc_cflush_lockdown(); | ||
437 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
438 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ | ||
439 | } | ||
440 | |||
441 | void mt_cflush_release(void) | ||
442 | { | ||
443 | #ifdef CONFIG_MIPS_MT_SMTC | ||
444 | void smtc_cflush_release(void); | ||
445 | |||
446 | smtc_cflush_release(); | ||
447 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
448 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ | ||
449 | } | ||
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c66db5e5ab62..8b393df460a2 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,6 +41,10 @@
41 | #include <asm/elf.h> | 41 | #include <asm/elf.h> |
42 | #include <asm/isadep.h> | 42 | #include <asm/isadep.h> |
43 | #include <asm/inst.h> | 43 | #include <asm/inst.h> |
44 | #ifdef CONFIG_MIPS_MT_SMTC | ||
45 | #include <asm/mipsmtregs.h> | ||
46 | extern void smtc_idle_loop_hook(void); | ||
47 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
44 | 48 | ||
45 | /* | 49 | /* |
46 | * The idle thread. There's no useful work to be done, so just try to conserve | 50 | * The idle thread. There's no useful work to be done, so just try to conserve |
@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
51 | { | 55 | { |
52 | /* endless idle loop with no priority at all */ | 56 | /* endless idle loop with no priority at all */ |
53 | while (1) { | 57 | while (1) { |
54 | while (!need_resched()) | 58 | while (!need_resched()) { |
59 | #ifdef CONFIG_MIPS_MT_SMTC | ||
60 | smtc_idle_loop_hook(); | ||
61 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
55 | if (cpu_wait) | 62 | if (cpu_wait) |
56 | (*cpu_wait)(); | 63 | (*cpu_wait)(); |
64 | } | ||
57 | preempt_enable_no_resched(); | 65 | preempt_enable_no_resched(); |
58 | schedule(); | 66 | schedule(); |
59 | preempt_disable(); | 67 | preempt_disable(); |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f838b36cc765..f3106d0771b0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
248 | break; | 248 | break; |
249 | case FPC_EIR: { /* implementation / version register */ | 249 | case FPC_EIR: { /* implementation / version register */ |
250 | unsigned int flags; | 250 | unsigned int flags; |
251 | #ifdef CONFIG_MIPS_MT_SMTC | ||
252 | unsigned int irqflags; | ||
253 | unsigned int mtflags; | ||
254 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
251 | 255 | ||
252 | if (!cpu_has_fpu) | 256 | if (!cpu_has_fpu) |
253 | break; | 257 | break; |
254 | 258 | ||
259 | #ifdef CONFIG_MIPS_MT_SMTC | ||
260 | /* Read-modify-write of Status must be atomic */ | ||
261 | local_irq_save(irqflags); | ||
262 | mtflags = dmt(); | ||
263 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
264 | |||
255 | preempt_disable(); | 265 | preempt_disable(); |
256 | if (cpu_has_mipsmt) { | 266 | if (cpu_has_mipsmt) { |
257 | unsigned int vpflags = dvpe(); | 267 | unsigned int vpflags = dvpe(); |
@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
266 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | 276 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); |
267 | write_c0_status(flags); | 277 | write_c0_status(flags); |
268 | } | 278 | } |
279 | #ifdef CONFIG_MIPS_MT_SMTC | ||
280 | emt(mtflags); | ||
281 | local_irq_restore(irqflags); | ||
282 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
269 | preempt_enable(); | 283 | preempt_enable(); |
270 | break; | 284 | break; |
271 | } | 285 | } |
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 0d5cf97af727..8704dc0496ea 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
173 | break; | 173 | break; |
174 | case FPC_EIR: { /* implementation / version register */ | 174 | case FPC_EIR: { /* implementation / version register */ |
175 | unsigned int flags; | 175 | unsigned int flags; |
176 | #ifdef CONFIG_MIPS_MT_SMTC | ||
177 | unsigned int irqflags; | ||
178 | unsigned int mtflags; | ||
179 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
176 | 180 | ||
177 | if (!cpu_has_fpu) { | 181 | if (!cpu_has_fpu) { |
178 | tmp = 0; | 182 | tmp = 0; |
179 | break; | 183 | break; |
180 | } | 184 | } |
181 | 185 | ||
186 | #ifdef CONFIG_MIPS_MT_SMTC | ||
187 | /* Read-modify-write of Status must be atomic */ | ||
188 | local_irq_save(irqflags); | ||
189 | mtflags = dmt(); | ||
190 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
191 | |||
182 | preempt_disable(); | 192 | preempt_disable(); |
183 | if (cpu_has_mipsmt) { | 193 | if (cpu_has_mipsmt) { |
184 | unsigned int vpflags = dvpe(); | 194 | unsigned int vpflags = dvpe(); |
@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
193 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | 203 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); |
194 | write_c0_status(flags); | 204 | write_c0_status(flags); |
195 | } | 205 | } |
206 | #ifdef CONFIG_MIPS_MT_SMTC | ||
207 | emt(mtflags); | ||
208 | local_irq_restore(irqflags); | ||
209 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
196 | preempt_enable(); | 210 | preempt_enable(); |
197 | break; | 211 | break; |
198 | } | 212 | } |
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd19a9c8..0b1b54acee9f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -88,7 +88,18 @@
88 | 88 | ||
89 | PTR_ADDIU t0, $28, _THREAD_SIZE - 32 | 89 | PTR_ADDIU t0, $28, _THREAD_SIZE - 32 |
90 | set_saved_sp t0, t1, t2 | 90 | set_saved_sp t0, t1, t2 |
91 | 91 | #ifdef CONFIG_MIPS_MT_SMTC | |
92 | /* Read-modify-writes of Status must be atomic on a VPE */ | ||
93 | mfc0 t2, CP0_TCSTATUS | ||
94 | ori t1, t2, TCSTATUS_IXMT | ||
95 | mtc0 t1, CP0_TCSTATUS | ||
96 | andi t2, t2, TCSTATUS_IXMT | ||
97 | ehb | ||
98 | DMT 8 # dmt t0 | ||
99 | move t1,ra | ||
100 | jal mips_ihb | ||
101 | move ra,t1 | ||
102 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
92 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | 103 | mfc0 t1, CP0_STATUS /* Do we really need this? */ |
93 | li a3, 0xff01 | 104 | li a3, 0xff01 |
94 | and t1, a3 | 105 | and t1, a3 |
@@ -97,6 +108,18 @@
97 | and a2, a3 | 108 | and a2, a3 |
98 | or a2, t1 | 109 | or a2, t1 |
99 | mtc0 a2, CP0_STATUS | 110 | mtc0 a2, CP0_STATUS |
111 | #ifdef CONFIG_MIPS_MT_SMTC | ||
112 | ehb | ||
113 | andi t0, t0, VPECONTROL_TE | ||
114 | beqz t0, 1f | ||
115 | emt | ||
116 | 1: | ||
117 | mfc0 t1, CP0_TCSTATUS | ||
118 | xori t1, t1, TCSTATUS_IXMT | ||
119 | or t1, t1, t2 | ||
120 | mtc0 t1, CP0_TCSTATUS | ||
121 | ehb | ||
122 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
100 | move v0, a0 | 123 | move v0, a0 |
101 | jr ra | 124 | jr ra |
102 | END(resume) | 125 | END(resume) |
@@ -131,10 +154,19 @@ LEAF(_restore_fp)
131 | #define FPU_DEFAULT 0x00000000 | 154 | #define FPU_DEFAULT 0x00000000 |
132 | 155 | ||
133 | LEAF(_init_fpu) | 156 | LEAF(_init_fpu) |
157 | #ifdef CONFIG_MIPS_MT_SMTC | ||
158 | /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ | ||
159 | mfc0 t0, CP0_TCSTATUS | ||
160 | /* Bit position is the same for Status, TCStatus */ | ||
161 | li t1, ST0_CU1 | ||
162 | or t0, t1 | ||
163 | mtc0 t0, CP0_TCSTATUS | ||
164 | #else /* Normal MIPS CU1 enable */ | ||
134 | mfc0 t0, CP0_STATUS | 165 | mfc0 t0, CP0_STATUS |
135 | li t1, ST0_CU1 | 166 | li t1, ST0_CU1 |
136 | or t0, t1 | 167 | or t0, t1 |
137 | mtc0 t0, CP0_STATUS | 168 | mtc0 t0, CP0_STATUS |
169 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
138 | fpu_enable_hazard | 170 | fpu_enable_hazard |
139 | 171 | ||
140 | li t1, FPU_DEFAULT | 172 | li t1, FPU_DEFAULT |
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp-mt.c
index 993b8bf56aaf..19b8e4b31b79 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -1,8 +1,4 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * Elizabeth Clarke (beth@mips.com) | ||
5 | * | ||
6 | * This program is free software; you can distribute it and/or modify it | 2 | * This program is free software; you can distribute it and/or modify it |
7 | * under the terms of the GNU General Public License (Version 2) as | 3 | * under the terms of the GNU General Public License (Version 2) as |
8 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
@@ -16,6 +12,10 @@
16 | * with this program; if not, write to the Free Software Foundation, Inc., | 12 | * with this program; if not, write to the Free Software Foundation, Inc., |
17 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 13 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
18 | * | 14 | * |
15 | * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc. | ||
16 | * Elizabeth Clarke (beth@mips.com) | ||
17 | * Ralf Baechle (ralf@linux-mips.org) | ||
18 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) | ||
19 | */ | 19 | */ |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
@@ -24,6 +24,7 @@
24 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
25 | 25 | ||
26 | #include <asm/atomic.h> | 26 | #include <asm/atomic.h> |
27 | #include <asm/cacheflush.h> | ||
27 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
@@ -33,8 +34,8 @@
33 | #include <asm/time.h> | 34 | #include <asm/time.h> |
34 | #include <asm/mipsregs.h> | 35 | #include <asm/mipsregs.h> |
35 | #include <asm/mipsmtregs.h> | 36 | #include <asm/mipsmtregs.h> |
36 | #include <asm/cacheflush.h> | 37 | #include <asm/mips_mt.h> |
37 | #include <asm/mips-boards/maltaint.h> | 38 | #include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */ |
38 | 39 | ||
39 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 | 40 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 |
40 | #define MIPS_CPU_IPI_CALL_IRQ 1 | 41 | #define MIPS_CPU_IPI_CALL_IRQ 1 |
@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
66 | if (!cpu_has_mipsmt) | 67 | if (!cpu_has_mipsmt) |
67 | return; | 68 | return; |
68 | 69 | ||
70 | /* Enable VPC */ | ||
69 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 71 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
70 | 72 | ||
71 | back_to_back_c0_hazard(); | 73 | back_to_back_c0_hazard(); |
@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
106 | 108 | ||
107 | static void ipi_resched_dispatch (struct pt_regs *regs) | 109 | static void ipi_resched_dispatch (struct pt_regs *regs) |
108 | { | 110 | { |
109 | do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); | 111 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs); |
110 | } | 112 | } |
111 | 113 | ||
112 | static void ipi_call_dispatch (struct pt_regs *regs) | 114 | static void ipi_call_dispatch (struct pt_regs *regs) |
113 | { | 115 | { |
114 | do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs); | 116 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs); |
115 | } | 117 | } |
116 | 118 | ||
117 | irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs) | 119 | irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs) |
@@ -155,6 +157,8 @@ void plat_smp_setup(void)
155 | dvpe(); | 157 | dvpe(); |
156 | dmt(); | 158 | dmt(); |
157 | 159 | ||
160 | mips_mt_set_cpuoptions(); | ||
161 | |||
158 | /* Put MVPE's into 'configuration state' */ | 162 | /* Put MVPE's into 'configuration state' */ |
159 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 163 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
160 | 164 | ||
@@ -189,11 +193,13 @@
189 | 193 | ||
190 | if (i != 0) { | 194 | if (i != 0) { |
191 | write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); | 195 | write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); |
192 | write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP); | ||
193 | 196 | ||
194 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | 197 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ |
195 | write_vpe_c0_config( read_c0_config()); | 198 | write_vpe_c0_config( read_c0_config()); |
196 | 199 | ||
200 | /* make sure there are no software interrupts pending */ | ||
201 | write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0)); | ||
202 | |||
197 | /* Propagate Config7 */ | 203 | /* Propagate Config7 */ |
198 | write_vpe_c0_config7(read_c0_config7()); | 204 | write_vpe_c0_config7(read_c0_config7()); |
199 | } | 205 | } |
@@ -233,16 +239,16 @@
233 | /* We'll wait until starting the secondaries before starting MVPE */ | 239 | /* We'll wait until starting the secondaries before starting MVPE */ |
234 | 240 | ||
235 | printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); | 241 | printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); |
242 | } | ||
236 | 243 | ||
244 | void __init plat_prepare_cpus(unsigned int max_cpus) | ||
245 | { | ||
237 | /* set up ipi interrupts */ | 246 | /* set up ipi interrupts */ |
238 | if (cpu_has_vint) { | 247 | if (cpu_has_vint) { |
239 | set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); | 248 | set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); |
240 | set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); | 249 | set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); |
241 | } | 250 | } |
242 | } | ||
243 | 251 | ||
244 | void __init plat_prepare_cpus(unsigned int max_cpus) | ||
245 | { | ||
246 | cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; | 252 | cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; |
247 | cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; | 253 | cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; |
248 | 254 | ||
@@ -287,7 +293,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
287 | /* global pointer */ | 293 | /* global pointer */ |
288 | write_tc_gpr_gp((unsigned long)gp); | 294 | write_tc_gpr_gp((unsigned long)gp); |
289 | 295 | ||
290 | flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1)); | 296 | flush_icache_range((unsigned long)gp, |
297 | (unsigned long)(gp + sizeof(struct thread_info))); | ||
291 | 298 | ||
292 | /* finally out of configuration and into chaos */ | 299 | /* finally out of configuration and into chaos */ |
293 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 300 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 72a287aa937e..d42f358754ad 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,10 @@
38 | #include <asm/mmu_context.h> | 38 | #include <asm/mmu_context.h> |
39 | #include <asm/smp.h> | 39 | #include <asm/smp.h> |
40 | 40 | ||
41 | #ifdef CONFIG_MIPS_MT_SMTC | ||
42 | #include <asm/mipsmtregs.h> | ||
43 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
44 | |||
41 | cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ | 45 | cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ |
42 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ | 46 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ |
43 | cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ | 47 | cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ |
@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
85 | { | 89 | { |
86 | unsigned int cpu; | 90 | unsigned int cpu; |
87 | 91 | ||
92 | #ifdef CONFIG_MIPS_MT_SMTC | ||
93 | /* Only do cpu_probe for first TC of CPU */ | ||
94 | if ((read_c0_tcbind() & TCBIND_CURTC) == 0) | ||
95 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
88 | cpu_probe(); | 96 | cpu_probe(); |
89 | cpu_report(); | 97 | cpu_report(); |
90 | per_cpu_trap_init(); | 98 | per_cpu_trap_init(); |
@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
179 | if (wait) | 187 | if (wait) |
180 | while (atomic_read(&data.finished) != cpus) | 188 | while (atomic_read(&data.finished) != cpus) |
181 | barrier(); | 189 | barrier(); |
190 | call_data = NULL; | ||
182 | spin_unlock(&smp_call_lock); | 191 | spin_unlock(&smp_call_lock); |
183 | 192 | ||
184 | return 0; | 193 | return 0; |
185 | } | 194 | } |
186 | 195 | ||
196 | |||
187 | void smp_call_function_interrupt(void) | 197 | void smp_call_function_interrupt(void) |
188 | { | 198 | { |
189 | void (*func) (void *info) = call_data->func; | 199 | void (*func) (void *info) = call_data->func; |
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644
index 000000000000..c9d65196d917
--- /dev/null
+++ b/arch/mips/kernel/smtc-asm.S
@@ -0,0 +1,130 @@
1 | /* | ||
2 | * Assembly Language Functions for MIPS MT SMTC support | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ | ||
7 | |||
8 | #include <asm/regdef.h> | ||
9 | #include <asm/asmmacro.h> | ||
10 | #include <asm/stackframe.h> | ||
11 | #include <asm/stackframe.h> | ||
12 | |||
13 | /* | ||
14 | * "Software Interrupt" linkage. | ||
15 | * | ||
16 | * This is invoked when an "Interrupt" is sent from one TC to another, | ||
17 | * where the TC to be interrupted is halted, has its Restart address | ||
18 | * and Status values saved by the "remote control" thread, then modified | ||
19 | * to cause execution to begin here, in kernel mode. This code then | ||
20 | * disguises the TC state as that of an exception and transfers | ||
21 | * control to the general exception or vectored interrupt handler. | ||
22 | */ | ||
23 | .set noreorder | ||
24 | |||
25 | /* | ||
26 | The __smtc_ipi_vector would use k0 and k1 as temporaries and | ||
27 | 1) Set EXL (this is per-VPE, so this can't be done by proxy!) | ||
28 | 2) Restore the K/CU and IXMT bits to the pre "exception" state | ||
29 | (EXL means no interrupts and access to the kernel map). | ||
30 | 3) Set EPC to be the saved value of TCRestart. | ||
31 | 4) Jump to the exception handler entry point passed by the sender. | ||
32 | |||
33 | CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? | ||
34 | */ | ||
35 | |||
36 | /* | ||
37 | * Reviled and slandered vision: Set EXL and restore K/CU/IXMT | ||
38 | * state of pre-halt thread, then save everything and call | ||
39 | * through some function pointer to imaginary_exception, which | ||
40 | * will parse a register value or memory message queue to | ||
41 | * deliver things like interprocessor interrupts. On return | ||
42 | * from that function, jump to the global ret_from_irq code | ||
43 | * to invoke the scheduler and return as appropriate. | ||
44 | */ | ||
45 | |||
46 | #define PT_PADSLOT4 (PT_R0-8) | ||
47 | #define PT_PADSLOT5 (PT_R0-4) | ||
48 | |||
49 | .text | ||
50 | .align 5 | ||
51 | FEXPORT(__smtc_ipi_vector) | ||
52 | .set noat | ||
53 | /* Disable thread scheduling to make Status update atomic */ | ||
54 | DMT 27 # dmt k1 | ||
55 | ehb | ||
56 | /* Set EXL */ | ||
57 | mfc0 k0,CP0_STATUS | ||
58 | ori k0,k0,ST0_EXL | ||
59 | mtc0 k0,CP0_STATUS | ||
60 | ehb | ||
61 | /* Thread scheduling now inhibited by EXL. Restore TE state. */ | ||
62 | andi k1,k1,VPECONTROL_TE | ||
63 | beqz k1,1f | ||
64 | emt | ||
65 | 1: | ||
66 | /* | ||
67 | * The IPI sender has put some information on the anticipated | ||
68 | * kernel stack frame. If we were in user mode, this will be | ||
69 | * built above the saved kernel SP. If we were already in the | ||
70 | * kernel, it will be built above the current CPU SP. | ||
71 | * | ||
72 | * Were we in kernel mode, as indicated by CU0? | ||
73 | */ | ||
74 | sll k1,k0,3 | ||
75 | .set noreorder | ||
76 | bltz k1,2f | ||
77 | move k1,sp | ||
78 | .set reorder | ||
79 | /* | ||
80 | * If previously in user mode, set CU0 and use kernel stack. | ||
81 | */ | ||
82 | li k1,ST0_CU0 | ||
83 | or k1,k1,k0 | ||
84 | mtc0 k1,CP0_STATUS | ||
85 | ehb | ||
86 | get_saved_sp | ||
87 | /* Interrupting TC will have pre-set values in slots in the new frame */ | ||
88 | 2: subu k1,k1,PT_SIZE | ||
89 | /* Load TCStatus Value */ | ||
90 | lw k0,PT_TCSTATUS(k1) | ||
91 | /* Write it to TCStatus to restore CU/KSU/IXMT state */ | ||
92 | mtc0 k0,$2,1 | ||
93 | ehb | ||
94 | lw k0,PT_EPC(k1) | ||
95 | mtc0 k0,CP0_EPC | ||
96 | /* Save all will redundantly recompute the SP, but use it for now */ | ||
97 | SAVE_ALL | ||
98 | CLI | ||
99 | move a0,sp | ||
100 | /* Function to be invoked passed stack pad slot 5 */ | ||
101 | lw t0,PT_PADSLOT5(sp) | ||
102 | /* Argument from sender passed in stack pad slot 4 */ | ||
103 | lw a1,PT_PADSLOT4(sp) | ||
104 | jalr t0 | ||
105 | nop | ||
106 | j ret_from_irq | ||
107 | nop | ||
108 | |||
109 | /* | ||
110 | * Called from idle loop to provoke processing of queued IPIs | ||
111 | * First IPI message in queue passed as argument. | ||
112 | */ | ||
113 | |||
114 | LEAF(self_ipi) | ||
115 | /* Before anything else, block interrupts */ | ||
116 | mfc0 t0,CP0_TCSTATUS | ||
117 | ori t1,t0,TCSTATUS_IXMT | ||
118 | mtc0 t1,CP0_TCSTATUS | ||
119 | ehb | ||
120 | /* We know we're in kernel mode, so prepare stack frame */ | ||
121 | subu t1,sp,PT_SIZE | ||
122 | sw ra,PT_EPC(t1) | ||
123 | sw a0,PT_PADSLOT4(t1) | ||
124 | la t2,ipi_decode | ||
125 | sw t2,PT_PADSLOT5(t1) | ||
126 | /* Save pre-disable value of TCStatus */ | ||
127 | sw t0,PT_TCSTATUS(t1) | ||
128 | j __smtc_ipi_vector | ||
129 | nop | ||
130 | END(self_ipi) | ||
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
new file mode 100644
index 000000000000..6f3709996172
--- /dev/null
+++ b/arch/mips/kernel/smtc-proc.c
@@ -0,0 +1,93 @@
1 | /* | ||
2 | * /proc hooks for SMTC kernel | ||
3 | * Copyright (C) 2005 Mips Technologies, Inc | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/cpumask.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | |||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <asm/hardirq.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/smp.h> | ||
18 | #include <asm/mipsregs.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | #include <linux/proc_fs.h> | ||
21 | |||
22 | #include <asm/smtc_proc.h> | ||
23 | |||
24 | /* | ||
25 | * /proc diagnostic and statistics hooks | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Statistics gathered | ||
30 | */ | ||
31 | unsigned long selfipis[NR_CPUS]; | ||
32 | |||
33 | struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
34 | |||
35 | static struct proc_dir_entry *smtc_stats; | ||
36 | |||
37 | atomic_t smtc_fpu_recoveries; | ||
38 | |||
39 | static int proc_read_smtc(char *page, char **start, off_t off, | ||
40 | int count, int *eof, void *data) | ||
41 | { | ||
42 | int totalen = 0; | ||
43 | int len; | ||
44 | int i; | ||
45 | extern unsigned long ebase; | ||
46 | |||
47 | len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status); | ||
48 | totalen += len; | ||
49 | page += len; | ||
50 | len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7()); | ||
51 | totalen += len; | ||
52 | page += len; | ||
53 | len = sprintf(page, "EBASE: 0x%08lx\n", ebase); | ||
54 | totalen += len; | ||
55 | page += len; | ||
56 | len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n"); | ||
57 | totalen += len; | ||
58 | page += len; | ||
59 | for (i=0; i < NR_CPUS; i++) { | ||
60 | len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
61 | totalen += len; | ||
62 | page += len; | ||
63 | } | ||
64 | len = sprintf(page, "Self-IPIs by CPU:\n"); | ||
65 | totalen += len; | ||
66 | page += len; | ||
67 | for(i = 0; i < NR_CPUS; i++) { | ||
68 | len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
69 | totalen += len; | ||
70 | page += len; | ||
71 | } | ||
72 | len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n", | ||
73 | atomic_read(&smtc_fpu_recoveries)); | ||
74 | totalen += len; | ||
75 | page += len; | ||
76 | |||
77 | return totalen; | ||
78 | } | ||
79 | |||
80 | void init_smtc_stats(void) | ||
81 | { | ||
82 | int i; | ||
83 | |||
84 | for (i=0; i<NR_CPUS; i++) { | ||
85 | smtc_cpu_stats[i].timerints = 0; | ||
86 | smtc_cpu_stats[i].selfipis = 0; | ||
87 | } | ||
88 | |||
89 | atomic_set(&smtc_fpu_recoveries, 0); | ||
90 | |||
91 | smtc_stats = create_proc_read_entry("smtc", 0444, NULL, | ||
92 | proc_read_smtc, NULL); | ||
93 | } | ||
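
The counters exported through /proc here are maintained by the SMTC core code rather than in this file. As a quick orientation (a sketch, not new code: the first two statements appear verbatim in smtc.c below; the FPU-recovery increment is assumed to live elsewhere in this patch):

	smtc_cpu_stats[smp_processor_id()].timerints++;	/* smtc_timer_broadcast() */
	smtc_cpu_stats[smp_processor_id()].selfipis++;	/* smtc_idle_loop_hook() */
	atomic_inc(&smtc_fpu_recoveries);		/* on recovery of a "stolen" FPU */
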
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c new file mode 100644 index 000000000000..2e8e52c135e6 --- /dev/null +++ b/arch/mips/kernel/smtc.c | |||
@@ -0,0 +1,1322 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | ||
2 | |||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | |||
8 | #include <asm/cpu.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/atomic.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/hardirq.h> | ||
13 | #include <asm/hazards.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/time.h> | ||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/smtc.h> | ||
21 | #include <asm/smtc_ipi.h> | ||
22 | #include <asm/smtc_proc.h> | ||
23 | |||
24 | /* | ||
25 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * MIPSCPU_INT_BASE is identically defined in both | ||
30 | * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h, | ||
31 | * but as yet there's no properly organized include structure that | ||
32 | * will ensure that the right *int.h file will be included for a | ||
33 | * given platform build. | ||
34 | */ | ||
35 | |||
36 | #define MIPSCPU_INT_BASE 16 | ||
37 | |||
38 | #define MIPS_CPU_IPI_IRQ 1 | ||
39 | |||
40 | #define LOCK_MT_PRA() \ | ||
41 | local_irq_save(flags); \ | ||
42 | mtflags = dmt() | ||
43 | |||
44 | #define UNLOCK_MT_PRA() \ | ||
45 | emt(mtflags); \ | ||
46 | local_irq_restore(flags) | ||
47 | |||
48 | #define LOCK_CORE_PRA() \ | ||
49 | local_irq_save(flags); \ | ||
50 | mtflags = dvpe() | ||
51 | |||
52 | #define UNLOCK_CORE_PRA() \ | ||
53 | evpe(mtflags); \ | ||
54 | local_irq_restore(flags) | ||
55 | |||
56 | /* | ||
57 | * Data structures purely associated with SMTC parallelism | ||
58 | */ | ||
59 | |||
60 | |||
61 | /* | ||
62 | * Table for tracking ASIDs whose lifetime is prolonged. | ||
63 | */ | ||
64 | |||
65 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
66 | |||
67 | /* | ||
68 | * Clock interrupt "latch" buffers, per "CPU" | ||
69 | */ | ||
70 | |||
71 | unsigned int ipi_timer_latch[NR_CPUS]; | ||
72 | |||
73 | /* | ||
74 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | ||
75 | */ | ||
76 | |||
77 | #define IPIBUF_PER_CPU 4 | ||
78 | |||
79 | struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
80 | struct smtc_ipi_q freeIPIq; | ||
81 | |||
82 | |||
83 | /* Forward declarations */ | ||
84 | |||
85 | void ipi_decode(struct pt_regs *, struct smtc_ipi *); | ||
86 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi); | ||
87 | void setup_cross_vpe_interrupts(void); | ||
88 | void init_smtc_stats(void); | ||
89 | |||
90 | /* Global SMTC Status */ | ||
91 | |||
92 | unsigned int smtc_status = 0; | ||
93 | |||
94 | /* Boot command line configuration overrides */ | ||
95 | |||
96 | static int vpelimit = 0; | ||
97 | static int tclimit = 0; | ||
98 | static int ipibuffers = 0; | ||
99 | static int nostlb = 0; | ||
100 | static int asidmask = 0; | ||
101 | unsigned long smtc_asid_mask = 0xff; | ||
102 | |||
103 | static int __init maxvpes(char *str) | ||
104 | { | ||
105 | get_option(&str, &vpelimit); | ||
106 | return 1; | ||
107 | } | ||
108 | |||
109 | static int __init maxtcs(char *str) | ||
110 | { | ||
111 | get_option(&str, &tclimit); | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | static int __init ipibufs(char *str) | ||
116 | { | ||
117 | get_option(&str, &ipibuffers); | ||
118 | return 1; | ||
119 | } | ||
120 | |||
121 | static int __init stlb_disable(char *s) | ||
122 | { | ||
123 | nostlb = 1; | ||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | static int __init asidmask_set(char *str) | ||
128 | { | ||
129 | get_option(&str, &asidmask); | ||
130 | switch(asidmask) { | ||
131 | case 0x1: | ||
132 | case 0x3: | ||
133 | case 0x7: | ||
134 | case 0xf: | ||
135 | case 0x1f: | ||
136 | case 0x3f: | ||
137 | case 0x7f: | ||
138 | case 0xff: | ||
139 | smtc_asid_mask = (unsigned long)asidmask; | ||
140 | break; | ||
141 | default: | ||
142 | printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); | ||
143 | } | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | __setup("maxvpes=", maxvpes); | ||
148 | __setup("maxtcs=", maxtcs); | ||
149 | __setup("ipibufs=", ipibufs); | ||
150 | __setup("nostlb", stlb_disable); | ||
151 | __setup("asidmask=", asidmask_set); | ||
152 | |||
153 | /* Enable additional debug checks before going into CPU idle loop */ | ||
154 | #define SMTC_IDLE_HOOK_DEBUG | ||
155 | |||
156 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
157 | |||
158 | static int hang_trig = 0; | ||
159 | |||
160 | static int __init hangtrig_enable(char *s) | ||
161 | { | ||
162 | hang_trig = 1; | ||
163 | return 1; | ||
164 | } | ||
165 | |||
166 | |||
167 | __setup("hangtrig", hangtrig_enable); | ||
168 | |||
169 | #define DEFAULT_BLOCKED_IPI_LIMIT 32 | ||
170 | |||
171 | static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; | ||
172 | |||
173 | static int __init tintq(char *str) | ||
174 | { | ||
175 | get_option(&str, &timerq_limit); | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | __setup("tintq=", tintq); | ||
180 | |||
181 | int imstuckcount[2][8]; | ||
182 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ | ||
183 | int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}}; | ||
184 | int tcnoprog[NR_CPUS]; | ||
185 | static atomic_t idle_hook_initialized = {0}; | ||
186 | static int clock_hang_reported[NR_CPUS]; | ||
187 | |||
188 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
189 | |||
190 | /* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */ | ||
191 | |||
192 | void __init sanitize_tlb_entries(void) | ||
193 | { | ||
194 | printk("Deprecated sanitize_tlb_entries() invoked\n"); | ||
195 | } | ||
196 | |||
197 | |||
198 | /* | ||
199 | * Configure shared TLB - VPC configuration bit must be set by caller | ||
200 | */ | ||
201 | |||
202 | void smtc_configure_tlb(void) | ||
203 | { | ||
204 | int i,tlbsiz,vpes; | ||
205 | unsigned long mvpconf0; | ||
206 | unsigned long config1val; | ||
207 | |||
208 | /* Set up ASID preservation table */ | ||
209 | for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { | ||
210 | for(i = 0; i < MAX_SMTC_ASIDS; i++) { | ||
211 | smtc_live_asid[vpes][i] = 0; | ||
212 | } | ||
213 | } | ||
214 | mvpconf0 = read_c0_mvpconf0(); | ||
215 | |||
216 | if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) | ||
217 | >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { | ||
218 | /* If we have multiple VPEs, try to share the TLB */ | ||
219 | if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { | ||
220 | /* | ||
221 | * If TLB sizing is programmable, shared TLB | ||
222 | * size is the total available complement. | ||
223 | * Otherwise, we have to take the sum of all | ||
224 | * static VPE TLB entries. | ||
225 | */ | ||
226 | if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) | ||
227 | >> MVPCONF0_PTLBE_SHIFT)) == 0) { | ||
228 | /* | ||
229 | * If there's more than one VPE, there had better | ||
230 | * be more than one TC, because we need one to bind | ||
231 | * to each VPE in turn to be able to read | ||
232 | * its configuration state! | ||
233 | */ | ||
234 | settc(1); | ||
235 | /* Stop the TC from doing anything foolish */ | ||
236 | write_tc_c0_tchalt(TCHALT_H); | ||
237 | mips_ihb(); | ||
238 | /* No need to un-Halt - that happens later anyway */ | ||
239 | for (i=0; i < vpes; i++) { | ||
240 | write_tc_c0_tcbind(i); | ||
241 | /* | ||
242 | * To be 100% sure we're really getting the right | ||
243 | * information, we exit the configuration state | ||
244 | * and do an IHB after each rebinding. | ||
245 | */ | ||
246 | write_c0_mvpcontrol( | ||
247 | read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
248 | mips_ihb(); | ||
249 | /* | ||
250 | * Only count if the MMU Type indicated is TLB | ||
251 | */ | ||
252 | if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { | ||
253 | config1val = read_vpe_c0_config1(); | ||
254 | tlbsiz += ((config1val >> 25) & 0x3f) + 1; | ||
255 | } | ||
256 | |||
257 | /* Put core back in configuration state */ | ||
258 | write_c0_mvpcontrol( | ||
259 | read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
260 | mips_ihb(); | ||
261 | } | ||
262 | } | ||
263 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); | ||
264 | |||
265 | /* | ||
266 | * Set up kernel data structures to use the software total, | ||
267 | * rather than read the per-VPE Config1 value. The values | ||
268 | * for "CPU 0" get copied to all the other CPUs as part | ||
269 | * of their initialization in smtc_cpu_setup(). | ||
270 | */ | ||
271 | |||
272 | tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */ | ||
273 | cpu_data[0].tlbsize = tlbsiz; | ||
274 | smtc_status |= SMTC_TLB_SHARED; | ||
275 | |||
276 | printk("TLB of %d entry pairs shared by %d VPEs\n", | ||
277 | tlbsiz, vpes); | ||
278 | } else { | ||
279 | printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); | ||
280 | } | ||
281 | } | ||
282 | } | ||
283 | |||
284 | |||
285 | /* | ||
286 | * Incrementally build the CPU map out of constituent MIPS MT cores, | ||
287 | * using the specified available VPEs and TCs. Platform code needs | ||
288 | * to ensure that each MIPS MT core invokes this routine on reset, | ||
289 | * one at a time(!). | ||
290 | * | ||
291 | * This version of the build_cpu_map and prepare_cpus routines assumes | ||
292 | * that *all* TCs of a MIPS MT core will be used for Linux, and that | ||
293 | * they will be spread across *all* available VPEs (to minimise the | ||
294 | * loss of efficiency due to exception service serialization). | ||
295 | * An improved version would pick up configuration information and | ||
296 | * possibly leave some TCs/VPEs as "slave" processors. | ||
297 | * | ||
298 | * Use c0_MVPConf0 to find out how many TCs are available, setting up | ||
299 | * phys_cpu_present_map and the logical/physical mappings. | ||
300 | */ | ||
301 | |||
302 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | ||
303 | { | ||
304 | int i, ntcs; | ||
305 | |||
306 | /* | ||
307 | * The CPU map isn't actually used for anything at this point, | ||
308 | * so it's not clear what else we should do apart from set | ||
309 | * everything up so that "logical" = "physical". | ||
310 | */ | ||
311 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
312 | for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { | ||
313 | cpu_set(i, phys_cpu_present_map); | ||
314 | __cpu_number_map[i] = i; | ||
315 | __cpu_logical_map[i] = i; | ||
316 | } | ||
317 | /* Initialize map of CPUs with FPUs */ | ||
318 | cpus_clear(mt_fpu_cpumask); | ||
319 | |||
320 | /* One of those TCs is the one booting, and not a secondary... */ | ||
321 | printk("%i available secondary CPU TC(s)\n", i - 1); | ||
322 | |||
323 | return i; | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Common setup before any secondaries are started | ||
329 | * Make sure all CPUs are in a sensible state before we boot any of the | ||
329 | * secondaries. | ||
330 | * | ||
331 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly | ||
332 | * as possible across the available VPEs. | ||
333 | */ | ||
334 | |||
335 | static void smtc_tc_setup(int vpe, int tc, int cpu) | ||
336 | { | ||
337 | settc(tc); | ||
338 | write_tc_c0_tchalt(TCHALT_H); | ||
339 | mips_ihb(); | ||
340 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | ||
341 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | ||
342 | | TCSTATUS_A); | ||
343 | write_tc_c0_tccontext(0); | ||
344 | /* Bind tc to vpe */ | ||
345 | write_tc_c0_tcbind(vpe); | ||
346 | /* In general, all TCs should have the same cpu_data indications */ | ||
347 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); | ||
348 | /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ | ||
349 | if (cpu_data[0].cputype == CPU_34K) | ||
350 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | ||
351 | cpu_data[cpu].vpe_id = vpe; | ||
352 | cpu_data[cpu].tc_id = tc; | ||
353 | } | ||
354 | |||
355 | |||
356 | void mipsmt_prepare_cpus(void) | ||
357 | { | ||
358 | int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu; | ||
359 | unsigned long flags; | ||
360 | unsigned long val; | ||
361 | int nipi; | ||
362 | struct smtc_ipi *pipi; | ||
363 | |||
364 | /* disable interrupts so we can disable MT */ | ||
365 | local_irq_save(flags); | ||
366 | /* disable MT so we can configure */ | ||
367 | dvpe(); | ||
368 | dmt(); | ||
369 | |||
370 | freeIPIq.lock = SPIN_LOCK_UNLOCKED; | ||
371 | |||
372 | /* | ||
373 | * We probably don't have as many VPEs as we do SMP "CPUs", | ||
374 | * but it's possible - and in any case we'll never use more! | ||
375 | */ | ||
376 | for (i=0; i<NR_CPUS; i++) { | ||
377 | IPIQ[i].head = IPIQ[i].tail = NULL; | ||
378 | IPIQ[i].lock = SPIN_LOCK_UNLOCKED; | ||
379 | IPIQ[i].depth = 0; | ||
380 | ipi_timer_latch[i] = 0; | ||
381 | } | ||
382 | |||
383 | /* cpu_data index starts at zero */ | ||
384 | cpu = 0; | ||
385 | cpu_data[cpu].vpe_id = 0; | ||
386 | cpu_data[cpu].tc_id = 0; | ||
387 | cpu++; | ||
388 | |||
389 | /* Report on boot-time options */ | ||
390 | mips_mt_set_cpuoptions (); | ||
391 | if (vpelimit > 0) | ||
392 | printk("Limit of %d VPEs set\n", vpelimit); | ||
393 | if (tclimit > 0) | ||
394 | printk("Limit of %d TCs set\n", tclimit); | ||
395 | if (nostlb) { | ||
396 | printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); | ||
397 | } | ||
398 | if (asidmask) | ||
399 | printk("ASID mask value override to 0x%x\n", asidmask); | ||
400 | |||
401 | /* Temporary */ | ||
402 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
403 | if (hang_trig) | ||
404 | printk("Logic Analyser Trigger on suspected TC hang\n"); | ||
405 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
406 | |||
407 | /* Put MVPEs into 'configuration state' */ | ||
408 | write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
409 | |||
410 | val = read_c0_mvpconf0(); | ||
411 | nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
412 | if (vpelimit > 0 && nvpe > vpelimit) | ||
413 | nvpe = vpelimit; | ||
414 | ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
415 | if (ntc > NR_CPUS) | ||
416 | ntc = NR_CPUS; | ||
417 | if (tclimit > 0 && ntc > tclimit) | ||
418 | ntc = tclimit; | ||
419 | tcpervpe = ntc / nvpe; | ||
420 | slop = ntc % nvpe; /* Residual TCs, < NVPE */ | ||
421 | |||
422 | /* Set up shared TLB */ | ||
423 | smtc_configure_tlb(); | ||
424 | |||
425 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { | ||
426 | /* | ||
427 | * Set the MVP bits. | ||
428 | */ | ||
429 | settc(tc); | ||
430 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP); | ||
431 | if (vpe != 0) | ||
432 | printk(", "); | ||
433 | printk("VPE %d: TC", vpe); | ||
434 | for (i = 0; i < tcpervpe; i++) { | ||
435 | /* | ||
436 | * TC 0 is bound to VPE 0 at reset, | ||
437 | * and is presumably executing this | ||
438 | * code. Leave it alone! | ||
439 | */ | ||
440 | if (tc != 0) { | ||
441 | smtc_tc_setup(vpe,tc, cpu); | ||
442 | cpu++; | ||
443 | } | ||
444 | printk(" %d", tc); | ||
445 | tc++; | ||
446 | } | ||
447 | if (slop) { | ||
448 | if (tc != 0) { | ||
449 | smtc_tc_setup(vpe,tc, cpu); | ||
450 | cpu++; | ||
451 | } | ||
452 | printk(" %d", tc); | ||
453 | tc++; | ||
454 | slop--; | ||
455 | } | ||
456 | if (vpe != 0) { | ||
457 | /* | ||
458 | * Clear any stale software interrupts from VPE's Cause | ||
459 | */ | ||
460 | write_vpe_c0_cause(0); | ||
461 | |||
462 | /* | ||
463 | * Clear ERL/EXL of VPEs other than 0 | ||
464 | * and set restricted interrupt enable/mask. | ||
465 | */ | ||
466 | write_vpe_c0_status((read_vpe_c0_status() | ||
467 | & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) | ||
468 | | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 | ||
469 | | ST0_IE)); | ||
470 | /* | ||
471 | * set config to be the same as vpe0, | ||
472 | * particularly kseg0 coherency alg | ||
473 | */ | ||
474 | write_vpe_c0_config(read_c0_config()); | ||
475 | /* Clear any pending timer interrupt */ | ||
476 | write_vpe_c0_compare(0); | ||
477 | /* Propagate Config7 */ | ||
478 | write_vpe_c0_config7(read_c0_config7()); | ||
479 | } | ||
480 | /* enable multi-threading within VPE */ | ||
481 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | ||
482 | /* enable the VPE */ | ||
483 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Pull any physically present but unused TCs out of circulation. | ||
488 | */ | ||
489 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { | ||
490 | cpu_clear(tc, phys_cpu_present_map); | ||
491 | cpu_clear(tc, cpu_present_map); | ||
492 | tc++; | ||
493 | } | ||
494 | |||
495 | /* release config state */ | ||
496 | write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
497 | |||
498 | printk("\n"); | ||
499 | |||
500 | /* Set up coprocessor affinity CPU mask(s) */ | ||
501 | |||
502 | for (tc = 0; tc < ntc; tc++) { | ||
503 | if(cpu_data[tc].options & MIPS_CPU_FPU) | ||
504 | cpu_set(tc, mt_fpu_cpumask); | ||
505 | } | ||
506 | |||
507 | /* set up ipi interrupts... */ | ||
508 | |||
509 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ | ||
510 | |||
511 | if (nvpe > 1) | ||
512 | setup_cross_vpe_interrupts(); | ||
513 | |||
514 | /* Set up queue of free IPI "messages". */ | ||
515 | nipi = NR_CPUS * IPIBUF_PER_CPU; | ||
516 | if (ipibuffers > 0) | ||
517 | nipi = ipibuffers; | ||
518 | |||
519 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); | ||
520 | if (pipi == NULL) | ||
521 | panic("kmalloc of IPI message buffers failed\n"); | ||
522 | else | ||
523 | printk("IPI buffer pool of %d buffers\n", nipi); | ||
524 | for (i = 0; i < nipi; i++) { | ||
525 | smtc_ipi_nq(&freeIPIq, pipi); | ||
526 | pipi++; | ||
527 | } | ||
528 | |||
529 | /* Arm multithreading and enable other VPEs - but all TCs are Halted */ | ||
530 | emt(EMT_ENABLE); | ||
531 | evpe(EVPE_ENABLE); | ||
532 | local_irq_restore(flags); | ||
533 | /* Initialize SMTC /proc statistics/diagnostics */ | ||
534 | init_smtc_stats(); | ||
535 | } | ||
536 | |||
537 | |||
538 | /* | ||
539 | * Set up the PC, SP, and GP of a secondary processor and start it | ||
540 | * running! | ||
541 | * smp_bootstrap is the place to resume from | ||
542 | * __KSTK_TOS(idle) is apparently the stack pointer | ||
543 | * (unsigned long)idle->thread_info is the gp | ||
544 | * | ||
545 | */ | ||
546 | void smtc_boot_secondary(int cpu, struct task_struct *idle) | ||
547 | { | ||
548 | extern u32 kernelsp[NR_CPUS]; | ||
549 | long flags; | ||
550 | int mtflags; | ||
551 | |||
552 | LOCK_MT_PRA(); | ||
553 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
554 | dvpe(); | ||
555 | } | ||
556 | settc(cpu_data[cpu].tc_id); | ||
557 | |||
558 | /* pc */ | ||
559 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
560 | |||
561 | /* stack pointer */ | ||
562 | kernelsp[cpu] = __KSTK_TOS(idle); | ||
563 | write_tc_gpr_sp(__KSTK_TOS(idle)); | ||
564 | |||
565 | /* global pointer */ | ||
566 | write_tc_gpr_gp((unsigned long)idle->thread_info); | ||
567 | |||
568 | smtc_status |= SMTC_MTC_ACTIVE; | ||
569 | write_tc_c0_tchalt(0); | ||
570 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
571 | evpe(EVPE_ENABLE); | ||
572 | } | ||
573 | UNLOCK_MT_PRA(); | ||
574 | } | ||
575 | |||
576 | void smtc_init_secondary(void) | ||
577 | { | ||
578 | /* | ||
579 | * Start timer on secondary VPEs if necessary. | ||
580 | * mips_timer_setup should already have been invoked by init/main | ||
581 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
582 | * SMTC init code assigns TCs consecutively and in ascending order | ||
583 | * across available VPEs. | ||
584 | */ | ||
585 | if(((read_c0_tcbind() & TCBIND_CURTC) != 0) | ||
586 | && ((read_c0_tcbind() & TCBIND_CURVPE) | ||
587 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
588 | write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ); | ||
589 | } | ||
590 | |||
591 | local_irq_enable(); | ||
592 | } | ||
593 | |||
594 | void smtc_smp_finish(void) | ||
595 | { | ||
596 | printk("TC %d going on-line as CPU %d\n", | ||
597 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | ||
598 | } | ||
599 | |||
600 | void smtc_cpus_done(void) | ||
601 | { | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Support for SMTC-optimized driver IRQ registration | ||
606 | */ | ||
607 | |||
608 | /* | ||
609 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
610 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
611 | * in this table. | ||
612 | */ | ||
613 | |||
614 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
615 | unsigned long hwmask) | ||
616 | { | ||
617 | irq_hwmask[irq] = hwmask; | ||
618 | |||
619 | return setup_irq(irq, new); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * IPI model for SMTC is tricky, because interrupts aren't TC-specific. | ||
624 | * Within a VPE one TC can interrupt another by different approaches. | ||
625 | * The easiest to get right would probably be to make all TCs except | ||
626 | * the target IXMT and set a software interrupt, but an IXMT-based | ||
627 | * scheme requires that a handler must run before a new IPI could | ||
628 | * be sent, which would break the "broadcast" loops in MIPS MT. | ||
629 | * A more gonzo approach within a VPE is to halt the TC, extract | ||
630 | * its Restart, Status, and a couple of GPRs, and program the Restart | ||
631 | * address to emulate an interrupt. | ||
632 | * | ||
633 | * Within a VPE, one can be confident that the target TC isn't in | ||
634 | * a critical EXL state when halted, since the write to the Halt | ||
635 | * register could not have issued on the writing thread if the | ||
636 | * halting thread had EXL set. So k0 and k1 of the target TC | ||
637 | * can be used by the injection code. Across VPEs, one can't | ||
638 | * be certain that the target TC isn't in a critical exception | ||
639 | * state. So we try a two-step process of sending a software | ||
640 | * interrupt to the target VPE, which either handles the event | ||
641 | * itself (if it was the target) or injects the event within | ||
642 | * the VPE. | ||
643 | */ | ||
644 | |||
645 | void smtc_ipi_qdump(void) | ||
646 | { | ||
647 | int i; | ||
648 | |||
649 | for (i = 0; i < NR_CPUS ;i++) { | ||
650 | printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", | ||
651 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, | ||
652 | IPIQ[i].depth); | ||
653 | } | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * The standard atomic.h primitives don't quite do what we want | ||
658 | * here: We need an atomic add-and-return-previous-value (which | ||
659 | * could be done with atomic_add_return and a decrement) and an | ||
660 | * atomic set/zero-and-return-previous-value (which can't really | ||
661 | * be done with the atomic.h primitives). And since this is | ||
662 | * MIPS MT, we can assume that we have LL/SC. | ||
663 | */ | ||
664 | static __inline__ int atomic_postincrement(unsigned int *pv) | ||
665 | { | ||
666 | unsigned long result; | ||
667 | |||
668 | unsigned long temp; | ||
669 | |||
670 | __asm__ __volatile__( | ||
671 | "1: ll %0, %2 \n" | ||
672 | " addu %1, %0, 1 \n" | ||
673 | " sc %1, %2 \n" | ||
674 | " beqz %1, 1b \n" | ||
675 | " sync \n" | ||
676 | : "=&r" (result), "=&r" (temp), "=m" (*pv) | ||
677 | : "m" (*pv) | ||
678 | : "memory"); | ||
679 | |||
680 | return result; | ||
681 | } | ||
682 | |||
683 | /* No longer used in IPI dispatch, but retained for future recycling */ | ||
684 | |||
685 | static __inline__ int atomic_postclear(unsigned int *pv) | ||
686 | { | ||
687 | unsigned long result; | ||
688 | |||
689 | unsigned long temp; | ||
690 | |||
691 | __asm__ __volatile__( | ||
692 | "1: ll %0, %2 \n" | ||
693 | " or %1, $0, $0 \n" | ||
694 | " sc %1, %2 \n" | ||
695 | " beqz %1, 1b \n" | ||
696 | " sync \n" | ||
697 | : "=&r" (result), "=&r" (temp), "=m" (*pv) | ||
698 | : "m" (*pv) | ||
699 | : "memory"); | ||
700 | |||
701 | return result; | ||
702 | } | ||
703 | |||
704 | |||
705 | void smtc_send_ipi(int cpu, int type, unsigned int action) | ||
706 | { | ||
707 | int tcstatus; | ||
708 | struct smtc_ipi *pipi; | ||
709 | long flags; | ||
710 | int mtflags; | ||
711 | |||
712 | if (cpu == smp_processor_id()) { | ||
713 | printk("Cannot Send IPI to self!\n"); | ||
714 | return; | ||
715 | } | ||
716 | /* Set up a descriptor, to be delivered either promptly or queued */ | ||
717 | pipi = smtc_ipi_dq(&freeIPIq); | ||
718 | if (pipi == NULL) { | ||
719 | bust_spinlocks(1); | ||
720 | mips_mt_regdump(dvpe()); | ||
721 | panic("IPI Msg. Buffers Depleted\n"); | ||
722 | } | ||
723 | pipi->type = type; | ||
724 | pipi->arg = (void *)action; | ||
725 | pipi->dest = cpu; | ||
726 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
727 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | ||
728 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
729 | LOCK_CORE_PRA(); | ||
730 | settc(cpu_data[cpu].tc_id); | ||
731 | write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); | ||
732 | UNLOCK_CORE_PRA(); | ||
733 | } else { | ||
734 | /* | ||
735 | * Not sufficient to do a LOCK_MT_PRA (dmt) here, | ||
736 | * since ASID shootdown on the other VPE may | ||
737 | * collide with this operation. | ||
738 | */ | ||
739 | LOCK_CORE_PRA(); | ||
740 | settc(cpu_data[cpu].tc_id); | ||
741 | /* Halt the targeted TC */ | ||
742 | write_tc_c0_tchalt(TCHALT_H); | ||
743 | mips_ihb(); | ||
744 | |||
745 | /* | ||
746 | * Inspect TCStatus - if IXMT is set, we have to queue | ||
747 | * a message. Otherwise, we set up the "interrupt" | ||
748 | * of the other TC | ||
749 | */ | ||
750 | tcstatus = read_tc_c0_tcstatus(); | ||
751 | |||
752 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | ||
753 | /* | ||
754 | * Spin-waiting here can deadlock, | ||
755 | * so we queue the message for the target TC. | ||
756 | */ | ||
757 | write_tc_c0_tchalt(0); | ||
758 | UNLOCK_CORE_PRA(); | ||
759 | /* Try to reduce redundant timer interrupt messages */ | ||
760 | if(type == SMTC_CLOCK_TICK) { | ||
761 | if(atomic_postincrement(&ipi_timer_latch[cpu])!=0) { | ||
762 | smtc_ipi_nq(&freeIPIq, pipi); | ||
763 | return; | ||
764 | } | ||
765 | } | ||
766 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
767 | } else { | ||
768 | post_direct_ipi(cpu, pipi); | ||
769 | write_tc_c0_tchalt(0); | ||
770 | UNLOCK_CORE_PRA(); | ||
771 | } | ||
772 | } | ||
773 | } | ||
774 | |||
775 | /* | ||
776 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set | ||
777 | */ | ||
778 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi) | ||
779 | { | ||
780 | struct pt_regs *kstack; | ||
781 | unsigned long tcstatus; | ||
782 | unsigned long tcrestart; | ||
783 | extern u32 kernelsp[NR_CPUS]; | ||
784 | extern void __smtc_ipi_vector(void); | ||
785 | |||
786 | /* Extract Status, EPC from halted TC */ | ||
787 | tcstatus = read_tc_c0_tcstatus(); | ||
788 | tcrestart = read_tc_c0_tcrestart(); | ||
789 | /* If TCRestart indicates a WAIT instruction, advance the PC */ | ||
790 | if ((tcrestart & 0x80000000) | ||
791 | && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { | ||
792 | tcrestart += 4; | ||
793 | } | ||
794 | /* | ||
795 | * Save on TC's future kernel stack | ||
796 | * | ||
797 | * CU bit of Status is indicator that TC was | ||
798 | * already running on a kernel stack... | ||
799 | */ | ||
800 | if(tcstatus & ST0_CU0) { | ||
801 | /* Note that this "- 1" is pointer arithmetic */ | ||
802 | kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; | ||
803 | } else { | ||
804 | kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; | ||
805 | } | ||
806 | |||
807 | kstack->cp0_epc = (long)tcrestart; | ||
808 | /* Save TCStatus */ | ||
809 | kstack->cp0_tcstatus = tcstatus; | ||
810 | /* Pass token of operation to be performed in kernel stack pad area */ | ||
811 | kstack->pad0[4] = (unsigned long)pipi; | ||
812 | /* Pass address of function to be called likewise */ | ||
813 | kstack->pad0[5] = (unsigned long)&ipi_decode; | ||
814 | /* Set interrupt exempt and kernel mode */ | ||
815 | tcstatus |= TCSTATUS_IXMT; | ||
816 | tcstatus &= ~TCSTATUS_TKSU; | ||
817 | write_tc_c0_tcstatus(tcstatus); | ||
818 | ehb(); | ||
819 | /* Set TC Restart address to be SMTC IPI vector */ | ||
820 | write_tc_c0_tcrestart(__smtc_ipi_vector); | ||
821 | } | ||
822 | |||
823 | void ipi_resched_interrupt(struct pt_regs *regs) | ||
824 | { | ||
825 | /* Return from interrupt should be enough to cause scheduler check */ | ||
826 | } | ||
827 | |||
828 | |||
829 | void ipi_call_interrupt(struct pt_regs *regs) | ||
830 | { | ||
831 | /* Invoke generic function invocation code in smp.c */ | ||
832 | smp_call_function_interrupt(); | ||
833 | } | ||
834 | |||
835 | void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi) | ||
836 | { | ||
837 | void *arg_copy = pipi->arg; | ||
838 | int type_copy = pipi->type; | ||
839 | int dest_copy = pipi->dest; | ||
840 | |||
841 | smtc_ipi_nq(&freeIPIq, pipi); | ||
842 | switch (type_copy) { | ||
843 | case SMTC_CLOCK_TICK: | ||
844 | /* Invoke Clock "Interrupt" */ | ||
845 | ipi_timer_latch[dest_copy] = 0; | ||
846 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
847 | clock_hang_reported[dest_copy] = 0; | ||
848 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
849 | local_timer_interrupt(0, NULL, regs); | ||
850 | break; | ||
851 | case LINUX_SMP_IPI: | ||
852 | switch ((int)arg_copy) { | ||
853 | case SMP_RESCHEDULE_YOURSELF: | ||
854 | ipi_resched_interrupt(regs); | ||
855 | break; | ||
856 | case SMP_CALL_FUNCTION: | ||
857 | ipi_call_interrupt(regs); | ||
858 | break; | ||
859 | default: | ||
860 | printk("Impossible SMTC IPI Argument 0x%x\n", | ||
861 | (int)arg_copy); | ||
862 | break; | ||
863 | } | ||
864 | break; | ||
865 | default: | ||
866 | printk("Impossible SMTC IPI Type 0x%x\n", type_copy); | ||
867 | break; | ||
868 | } | ||
869 | } | ||
870 | |||
871 | void deferred_smtc_ipi(struct pt_regs *regs) | ||
872 | { | ||
873 | struct smtc_ipi *pipi; | ||
874 | unsigned long flags; | ||
875 | /* DEBUG */ | ||
876 | int q = smp_processor_id(); | ||
877 | |||
878 | /* | ||
879 | * Test is not atomic, but much faster than a dequeue, | ||
880 | * and the vast majority of invocations will have a null queue. | ||
881 | */ | ||
882 | if(IPIQ[q].head != NULL) { | ||
883 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | ||
884 | /* ipi_decode() should be called with interrupts off */ | ||
885 | local_irq_save(flags); | ||
886 | ipi_decode(regs, pipi); | ||
887 | local_irq_restore(flags); | ||
888 | } | ||
889 | } | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * Send clock tick to all TCs except the one executing the function | ||
894 | */ | ||
895 | |||
896 | void smtc_timer_broadcast(int vpe) | ||
897 | { | ||
898 | int cpu; | ||
899 | int myTC = cpu_data[smp_processor_id()].tc_id; | ||
900 | int myVPE = cpu_data[smp_processor_id()].vpe_id; | ||
901 | |||
902 | smtc_cpu_stats[smp_processor_id()].timerints++; | ||
903 | |||
904 | for_each_online_cpu(cpu) { | ||
905 | if (cpu_data[cpu].vpe_id == myVPE && | ||
906 | cpu_data[cpu].tc_id != myTC) | ||
907 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | /* | ||
912 | * Cross-VPE interrupts in the SMTC prototype use "software interrupts" | ||
913 | * set via cross-VPE MTTR manipulation of the Cause register. It would be | ||
914 | * in some regards preferable to have external logic for "doorbell" hardware | ||
915 | * interrupts. | ||
916 | */ | ||
917 | |||
918 | static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; | ||
919 | |||
920 | static irqreturn_t ipi_interrupt(int irq, void *dev_idm, struct pt_regs *regs) | ||
921 | { | ||
922 | int my_vpe = cpu_data[smp_processor_id()].vpe_id; | ||
923 | int my_tc = cpu_data[smp_processor_id()].tc_id; | ||
924 | int cpu; | ||
925 | struct smtc_ipi *pipi; | ||
926 | unsigned long tcstatus; | ||
927 | int sent; | ||
928 | long flags; | ||
929 | unsigned int mtflags; | ||
930 | unsigned int vpflags; | ||
931 | |||
932 | /* | ||
933 | * So long as cross-VPE interrupts are done via | ||
934 | * MFTR/MTTR read-modify-writes of Cause, we need | ||
935 | * to stop other VPEs whenever the local VPE does | ||
936 | * anything similar. | ||
937 | */ | ||
938 | local_irq_save(flags); | ||
939 | vpflags = dvpe(); | ||
940 | clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); | ||
941 | set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); | ||
942 | irq_enable_hazard(); | ||
943 | evpe(vpflags); | ||
944 | local_irq_restore(flags); | ||
945 | |||
946 | /* | ||
947 | * Cross-VPE Interrupt handler: Try to directly deliver IPIs | ||
948 | * queued for TCs on this VPE other than the current one. | ||
949 | * Return-from-interrupt should cause us to drain the queue | ||
950 | * for the current TC, so we ought not to have to do it explicitly here. | ||
951 | */ | ||
952 | |||
953 | for_each_online_cpu(cpu) { | ||
954 | if (cpu_data[cpu].vpe_id != my_vpe) | ||
955 | continue; | ||
956 | |||
957 | pipi = smtc_ipi_dq(&IPIQ[cpu]); | ||
958 | if (pipi != NULL) { | ||
959 | if (cpu_data[cpu].tc_id != my_tc) { | ||
960 | sent = 0; | ||
961 | LOCK_MT_PRA(); | ||
962 | settc(cpu_data[cpu].tc_id); | ||
963 | write_tc_c0_tchalt(TCHALT_H); | ||
964 | mips_ihb(); | ||
965 | tcstatus = read_tc_c0_tcstatus(); | ||
966 | if ((tcstatus & TCSTATUS_IXMT) == 0) { | ||
967 | post_direct_ipi(cpu, pipi); | ||
968 | sent = 1; | ||
969 | } | ||
970 | write_tc_c0_tchalt(0); | ||
971 | UNLOCK_MT_PRA(); | ||
972 | if (!sent) { | ||
973 | smtc_ipi_req(&IPIQ[cpu], pipi); | ||
974 | } | ||
975 | } else { | ||
976 | /* | ||
977 | * ipi_decode() should be called | ||
978 | * with interrupts off | ||
979 | */ | ||
980 | local_irq_save(flags); | ||
981 | ipi_decode(regs, pipi); | ||
982 | local_irq_restore(flags); | ||
983 | } | ||
984 | } | ||
985 | } | ||
986 | |||
987 | return IRQ_HANDLED; | ||
988 | } | ||
989 | |||
990 | static void ipi_irq_dispatch(struct pt_regs *regs) | ||
991 | { | ||
992 | do_IRQ(cpu_ipi_irq, regs); | ||
993 | } | ||
994 | |||
995 | static struct irqaction irq_ipi; | ||
996 | |||
997 | void setup_cross_vpe_interrupts(void) | ||
998 | { | ||
999 | if (!cpu_has_vint) | ||
1000 | panic("SMTC Kernel requires Vectored Interrupt support"); | ||
1001 | |||
1002 | set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); | ||
1003 | |||
1004 | irq_ipi.handler = ipi_interrupt; | ||
1005 | irq_ipi.flags = SA_INTERRUPT; | ||
1006 | irq_ipi.name = "SMTC_IPI"; | ||
1007 | |||
1008 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); | ||
1009 | |||
1010 | irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU; | ||
1011 | } | ||
1012 | |||
1013 | /* | ||
1014 | * SMTC-specific hacks invoked from elsewhere in the kernel. | ||
1015 | */ | ||
1016 | |||
1017 | void smtc_idle_loop_hook(void) | ||
1018 | { | ||
1019 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
1020 | int im; | ||
1021 | int flags; | ||
1022 | int mtflags; | ||
1023 | int bit; | ||
1024 | int vpe; | ||
1025 | int tc; | ||
1026 | int hook_ntcs; | ||
1027 | /* | ||
1028 | * printk within DMT-protected regions can deadlock, | ||
1029 | * so buffer diagnostic messages for later output. | ||
1030 | */ | ||
1031 | char *pdb_msg; | ||
1032 | char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ | ||
1033 | |||
1034 | if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ | ||
1035 | if (atomic_add_return(1, &idle_hook_initialized) == 1) { | ||
1036 | int mvpconf0; | ||
1037 | /* Tedious stuff to just do once */ | ||
1038 | mvpconf0 = read_c0_mvpconf0(); | ||
1039 | hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
1040 | if (hook_ntcs > NR_CPUS) | ||
1041 | hook_ntcs = NR_CPUS; | ||
1042 | for (tc = 0; tc < hook_ntcs; tc++) { | ||
1043 | tcnoprog[tc] = 0; | ||
1044 | clock_hang_reported[tc] = 0; | ||
1045 | } | ||
1046 | for (vpe = 0; vpe < 2; vpe++) | ||
1047 | for (im = 0; im < 8; im++) | ||
1048 | imstuckcount[vpe][im] = 0; | ||
1049 | printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); | ||
1050 | atomic_set(&idle_hook_initialized, 1000); | ||
1051 | } else { | ||
1052 | /* Someone else is initializing in parallel - let 'em finish */ | ||
1053 | while (atomic_read(&idle_hook_initialized) < 1000) | ||
1054 | ; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | /* Have we stupidly left IXMT set somewhere? */ | ||
1059 | if (read_c0_tcstatus() & 0x400) { | ||
1060 | write_c0_tcstatus(read_c0_tcstatus() & ~0x400); | ||
1061 | ehb(); | ||
1062 | printk("Dangling IXMT in cpu_idle()\n"); | ||
1063 | } | ||
1064 | |||
1065 | /* Have we stupidly left an IM bit turned off? */ | ||
1066 | #define IM_LIMIT 2000 | ||
1067 | local_irq_save(flags); | ||
1068 | mtflags = dmt(); | ||
1069 | pdb_msg = &id_ho_db_msg[0]; | ||
1070 | im = read_c0_status(); | ||
1071 | vpe = cpu_data[smp_processor_id()].vpe_id; | ||
1072 | for (bit = 0; bit < 8; bit++) { | ||
1073 | /* | ||
1074 | * In current prototype, I/O interrupts | ||
1075 | * are masked for VPE > 0 | ||
1076 | */ | ||
1077 | if (vpemask[vpe][bit]) { | ||
1078 | if (!(im & (0x100 << bit))) | ||
1079 | imstuckcount[vpe][bit]++; | ||
1080 | else | ||
1081 | imstuckcount[vpe][bit] = 0; | ||
1082 | if (imstuckcount[vpe][bit] > IM_LIMIT) { | ||
1083 | set_c0_status(0x100 << bit); | ||
1084 | ehb(); | ||
1085 | imstuckcount[vpe][bit] = 0; | ||
1086 | pdb_msg += sprintf(pdb_msg, | ||
1087 | "Dangling IM %d fixed for VPE %d\n", bit, | ||
1088 | vpe); | ||
1089 | } | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1095 | */ | ||
1096 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1097 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1098 | if ((tc != smp_processor_id()) && | ||
1099 | ipi_timer_latch[tc] > timerq_limit) { | ||
1100 | if (clock_hang_reported[tc] == 0) { | ||
1101 | pdb_msg += sprintf(pdb_msg, | ||
1102 | "TC %d looks hung with timer latch at %d\n", | ||
1103 | tc, ipi_timer_latch[tc]); | ||
1104 | clock_hang_reported[tc]++; | ||
1105 | } | ||
1106 | } | ||
1107 | } | ||
1108 | emt(mtflags); | ||
1109 | local_irq_restore(flags); | ||
1110 | if (pdb_msg != &id_ho_db_msg[0]) | ||
1111 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | ||
1112 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
1113 | /* | ||
1114 | * To the extent that we've ever turned interrupts off, | ||
1115 | * we may have accumulated deferred IPIs. This is subtle. | ||
1116 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1117 | * exact number - but we'll also disable interrupts | ||
1118 | * and create a window of failure where a new IPI gets | ||
1119 | * queued after we test the depth but before we re-enable | ||
1120 | * interrupts. So long as IXMT never gets set, however, | ||
1121 | * we should be OK: If we pick up something and dispatch | ||
1122 | * it here, that's great. If we see nothing, but concurrent | ||
1123 | * with this operation, another TC sends us an IPI, IXMT | ||
1124 | * is clear, and we'll handle it as a real pseudo-interrupt | ||
1125 | * and not a pseudo-pseudo interrupt. | ||
1126 | */ | ||
1127 | if (IPIQ[smp_processor_id()].depth > 0) { | ||
1128 | struct smtc_ipi *pipi; | ||
1129 | extern void self_ipi(struct smtc_ipi *); | ||
1130 | |||
1131 | if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) { | ||
1132 | self_ipi(pipi); | ||
1133 | smtc_cpu_stats[smp_processor_id()].selfipis++; | ||
1134 | } | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | void smtc_soft_dump(void) | ||
1139 | { | ||
1140 | int i; | ||
1141 | |||
1142 | printk("Counter Interrupts taken per CPU (TC)\n"); | ||
1143 | for (i=0; i < NR_CPUS; i++) { | ||
1144 | printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
1145 | } | ||
1146 | printk("Self-IPI invocations:\n"); | ||
1147 | for (i=0; i < NR_CPUS; i++) { | ||
1148 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
1149 | } | ||
1150 | smtc_ipi_qdump(); | ||
1151 | printk("Timer IPI Backlogs:\n"); | ||
1152 | for (i=0; i < NR_CPUS; i++) { | ||
1153 | printk("%d: %d\n", i, ipi_timer_latch[i]); | ||
1154 | } | ||
1155 | printk("%d Recoveries of \"stolen\" FPU\n", | ||
1156 | atomic_read(&smtc_fpu_recoveries)); | ||
1157 | } | ||
1158 | |||
1159 | |||
1160 | /* | ||
1161 | * TLB management routines special to SMTC | ||
1162 | */ | ||
1163 | |||
1164 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | ||
1165 | { | ||
1166 | unsigned long flags, mtflags, tcstat, prevhalt, asid; | ||
1167 | int tlb, i; | ||
1168 | |||
1169 | /* | ||
1170 | * It would be nice to be able to use a spinlock here, | ||
1171 | * but this is invoked from within TLB flush routines | ||
1172 | * that protect themselves with DVPE, so if a lock is | ||
1173 | * held by another TC, it'll never be freed. | ||
1174 | * | ||
1175 | * DVPE/DMT must not be done with interrupts enabled, | ||
1176 | * so even so most callers will already have disabled | ||
1177 | * them, let's be really careful... | ||
1178 | */ | ||
1179 | |||
1180 | local_irq_save(flags); | ||
1181 | if (smtc_status & SMTC_TLB_SHARED) { | ||
1182 | mtflags = dvpe(); | ||
1183 | tlb = 0; | ||
1184 | } else { | ||
1185 | mtflags = dmt(); | ||
1186 | tlb = cpu_data[cpu].vpe_id; | ||
1187 | } | ||
1188 | asid = asid_cache(cpu); | ||
1189 | |||
1190 | do { | ||
1191 | if (!((asid += ASID_INC) & ASID_MASK) ) { | ||
1192 | if (cpu_has_vtag_icache) | ||
1193 | flush_icache_all(); | ||
1194 | /* Traverse all online CPUs (hack requires contiguous range) */ | ||
1195 | for (i = 0; i < num_online_cpus(); i++) { | ||
1196 | /* | ||
1197 | * We don't need to worry about our own CPU, nor those of | ||
1198 | * CPUs who don't share our TLB. | ||
1199 | */ | ||
1200 | if ((i != smp_processor_id()) && | ||
1201 | ((smtc_status & SMTC_TLB_SHARED) || | ||
1202 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { | ||
1203 | settc(cpu_data[i].tc_id); | ||
1204 | prevhalt = read_tc_c0_tchalt() & TCHALT_H; | ||
1205 | if (!prevhalt) { | ||
1206 | write_tc_c0_tchalt(TCHALT_H); | ||
1207 | mips_ihb(); | ||
1208 | } | ||
1209 | tcstat = read_tc_c0_tcstatus(); | ||
1210 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); | ||
1211 | if (!prevhalt) | ||
1212 | write_tc_c0_tchalt(0); | ||
1213 | } | ||
1214 | } | ||
1215 | if (!asid) /* fix version if needed */ | ||
1216 | asid = ASID_FIRST_VERSION; | ||
1217 | local_flush_tlb_all(); /* start new asid cycle */ | ||
1218 | } | ||
1219 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); | ||
1220 | |||
1221 | /* | ||
1222 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | ||
1223 | */ | ||
1224 | for (i = 0; i < num_online_cpus(); i++) { | ||
1225 | if ((smtc_status & SMTC_TLB_SHARED) || | ||
1226 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
1227 | cpu_context(i, mm) = asid_cache(i) = asid; | ||
1228 | } | ||
1229 | |||
1230 | if (smtc_status & SMTC_TLB_SHARED) | ||
1231 | evpe(mtflags); | ||
1232 | else | ||
1233 | emt(mtflags); | ||
1234 | local_irq_restore(flags); | ||
1235 | } | ||
1236 | |||
1237 | /* | ||
1238 | * Invoked from macros defined in mmu_context.h | ||
1239 | * which must already have disabled interrupts | ||
1240 | * and done a DVPE or DMT as appropriate. | ||
1241 | */ | ||
1242 | |||
1243 | void smtc_flush_tlb_asid(unsigned long asid) | ||
1244 | { | ||
1245 | int entry; | ||
1246 | unsigned long ehi; | ||
1247 | |||
1248 | entry = read_c0_wired(); | ||
1249 | |||
1250 | /* Traverse all non-wired entries */ | ||
1251 | while (entry < current_cpu_data.tlbsize) { | ||
1252 | write_c0_index(entry); | ||
1253 | ehb(); | ||
1254 | tlb_read(); | ||
1255 | ehb(); | ||
1256 | ehi = read_c0_entryhi(); | ||
1257 | if((ehi & ASID_MASK) == asid) { | ||
1258 | /* | ||
1259 | * Invalidate only entries with specified ASID, | ||
1260 | * making sure all entries differ. | ||
1261 | */ | ||
1262 | write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); | ||
1263 | write_c0_entrylo0(0); | ||
1264 | write_c0_entrylo1(0); | ||
1265 | mtc0_tlbw_hazard(); | ||
1266 | tlb_write_indexed(); | ||
1267 | } | ||
1268 | entry++; | ||
1269 | } | ||
1270 | write_c0_index(PARKED_INDEX); | ||
1271 | tlbw_use_hazard(); | ||
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * Support for single-threading cache flush operations. | ||
1276 | */ | ||
1277 | |||
1278 | int halt_state_save[NR_CPUS]; | ||
1279 | |||
1280 | /* | ||
1281 | * To really, really be sure that nothing is being done | ||
1282 | * by other TCs, halt them all. This code assumes that | ||
1283 | * a DVPE has already been done, so while their Halted | ||
1284 | * state is theoretically architecturally unstable, in | ||
1285 | * practice, it's not going to change while we're looking | ||
1286 | * at it. | ||
1287 | */ | ||
1288 | |||
1289 | void smtc_cflush_lockdown(void) | ||
1290 | { | ||
1291 | int cpu; | ||
1292 | |||
1293 | for_each_online_cpu(cpu) { | ||
1294 | if (cpu != smp_processor_id()) { | ||
1295 | settc(cpu_data[cpu].tc_id); | ||
1296 | halt_state_save[cpu] = read_tc_c0_tchalt(); | ||
1297 | write_tc_c0_tchalt(TCHALT_H); | ||
1298 | } | ||
1299 | } | ||
1300 | mips_ihb(); | ||
1301 | } | ||
1302 | |||
1303 | /* It would be cheating to change the cpu_online states during a flush! */ | ||
1304 | |||
1305 | void smtc_cflush_release(void) | ||
1306 | { | ||
1307 | int cpu; | ||
1308 | |||
1309 | /* | ||
1310 | * Start with a hazard barrier to ensure | ||
1311 | * that all CACHE ops have played through. | ||
1312 | */ | ||
1313 | mips_ihb(); | ||
1314 | |||
1315 | for_each_online_cpu(cpu) { | ||
1316 | if (cpu != smp_processor_id()) { | ||
1317 | settc(cpu_data[cpu].tc_id); | ||
1318 | write_tc_c0_tchalt(halt_state_save[cpu]); | ||
1319 | } | ||
1320 | } | ||
1321 | mips_ihb(); | ||
1322 | } | ||
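
One usage detail of smtc.c worth noting: the LOCK_MT_PRA()/LOCK_CORE_PRA() macros near the top of the file expand to statements that assign into the caller's own flags and mtflags locals, so any caller must declare both, as smtc_boot_secondary() and ipi_interrupt() do. A minimal sketch of the expected pattern (hypothetical caller, not part of the patch):

	static void example_cross_vpe_poke(void)
	{
		long flags;	/* used by local_irq_save()/local_irq_restore() */
		int mtflags;	/* holds the dmt()/dvpe() return value */

		LOCK_CORE_PRA();	/* irqs off on this CPU, other VPEs stopped */
		/* ... MFTR/MTTR manipulation of another TC/VPE ... */
		UNLOCK_CORE_PRA();	/* evpe() + local_irq_restore() */
	}
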
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 5e51a2d8f3f0..13ff4da598cd 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -116,8 +116,7 @@ static void c0_timer_ack(void) | |||
116 | write_c0_compare(expirelo); | 116 | write_c0_compare(expirelo); |
117 | 117 | ||
118 | /* Check to see if we have missed any timer interrupts. */ | 118 | /* Check to see if we have missed any timer interrupts. */ |
119 | count = read_c0_count(); | 119 | while (((count = read_c0_count()) - expirelo) < 0x7fffffff) { |
120 | if ((count - expirelo) < 0x7fffffff) { | ||
121 | /* missed_timer_count++; */ | 120 | /* missed_timer_count++; */ |
122 | expirelo = count + cycles_per_jiffy; | 121 | expirelo = count + cycles_per_jiffy; |
123 | write_c0_compare(expirelo); | 122 | write_c0_compare(expirelo); |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 081e6ed5bb62..6336fe8008ec 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock); | |||
280 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) | 280 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) |
281 | { | 281 | { |
282 | static int die_counter; | 282 | static int die_counter; |
283 | #ifdef CONFIG_MIPS_MT_SMTC | ||
284 | unsigned long dvpret = dvpe(); | ||
285 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
283 | 286 | ||
284 | console_verbose(); | 287 | console_verbose(); |
285 | spin_lock_irq(&die_lock); | 288 | spin_lock_irq(&die_lock); |
289 | bust_spinlocks(1); | ||
290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
291 | mips_mt_regdump(dvpret); | ||
292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
286 | printk("%s[#%d]:\n", str, ++die_counter); | 293 | printk("%s[#%d]:\n", str, ++die_counter); |
287 | show_registers(regs); | 294 | show_registers(regs); |
288 | spin_unlock_irq(&die_lock); | 295 | spin_unlock_irq(&die_lock); |
@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
757 | 764 | ||
758 | case 2: | 765 | case 2: |
759 | case 3: | 766 | case 3: |
767 | die_if_kernel("do_cpu invoked from kernel context!", regs); | ||
760 | break; | 768 | break; |
761 | } | 769 | } |
762 | 770 | ||
@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs) | |||
794 | 802 | ||
795 | asmlinkage void do_mt(struct pt_regs *regs) | 803 | asmlinkage void do_mt(struct pt_regs *regs) |
796 | { | 804 | { |
805 | int subcode; | ||
806 | |||
807 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
808 | |||
809 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) | ||
810 | >> VPECONTROL_EXCPT_SHIFT; | ||
811 | switch (subcode) { | ||
812 | case 0: | ||
813 | printk(KERN_ERR "Thread Underflow\n"); | ||
814 | break; | ||
815 | case 1: | ||
816 | printk(KERN_ERR "Thread Overflow\n"); | ||
817 | break; | ||
818 | case 2: | ||
819 | printk(KERN_ERR "Invalid YIELD Qualifier\n"); | ||
820 | break; | ||
821 | case 3: | ||
822 | printk(KERN_ERR "Gating Storage Exception\n"); | ||
823 | break; | ||
824 | case 4: | ||
825 | printk(KERN_ERR "YIELD Scheduler Exception\n"); | ||
826 | break; | ||
827 | case 5: | ||
828 | printk(KERN_ERR "Gating Storage Scheduler Exception\n"); | ||
829 | break; | ||
830 | default: | ||
831 | printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n", | ||
832 | subcode); | ||
833 | break; | ||
834 | } | ||
797 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | 835 | die_if_kernel("MIPS MT Thread exception in kernel", regs); |
798 | 836 | ||
799 | force_sig(SIGILL, current); | 837 | force_sig(SIGILL, current); |
@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
929 | */ | 967 | */ |
930 | void nmi_exception_handler(struct pt_regs *regs) | 968 | void nmi_exception_handler(struct pt_regs *regs) |
931 | { | 969 | { |
970 | #ifdef CONFIG_MIPS_MT_SMTC | ||
971 | unsigned long dvpret = dvpe(); | ||
972 | bust_spinlocks(1); | ||
973 | printk("NMI taken!!!!\n"); | ||
974 | mips_mt_regdump(dvpret); | ||
975 | #else | ||
976 | bust_spinlocks(1); | ||
932 | printk("NMI taken!!!!\n"); | 977 | printk("NMI taken!!!!\n"); |
978 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
933 | die("NMI", regs); | 979 | die("NMI", regs); |
934 | while(1) ; | 980 | while(1) ; |
935 | } | 981 | } |
@@ -1007,7 +1053,7 @@ again: | |||
1007 | return set; | 1053 | return set; |
1008 | } | 1054 | } |
1009 | 1055 | ||
1010 | void mips_srs_free (int set) | 1056 | void mips_srs_free(int set) |
1011 | { | 1057 | { |
1012 | struct shadow_registers *sr = &shadow_registers; | 1058 | struct shadow_registers *sr = &shadow_registers; |
1013 | 1059 | ||
@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1027 | if (addr == NULL) { | 1073 | if (addr == NULL) { |
1028 | handler = (unsigned long) do_default_vi; | 1074 | handler = (unsigned long) do_default_vi; |
1029 | srs = 0; | 1075 | srs = 0; |
1030 | } | 1076 | } else |
1031 | else | ||
1032 | handler = (unsigned long) addr; | 1077 | handler = (unsigned long) addr; |
1033 | vi_handlers[n] = (unsigned long) addr; | 1078 | vi_handlers[n] = (unsigned long) addr; |
1034 | 1079 | ||
@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1040 | if (cpu_has_veic) { | 1085 | if (cpu_has_veic) { |
1041 | if (board_bind_eic_interrupt) | 1086 | if (board_bind_eic_interrupt) |
1042 | board_bind_eic_interrupt (n, srs); | 1087 | board_bind_eic_interrupt (n, srs); |
1043 | } | 1088 | } else if (cpu_has_vint) { |
1044 | else if (cpu_has_vint) { | ||
1045 | /* SRSMap is only defined if shadow sets are implemented */ | 1089 | /* SRSMap is only defined if shadow sets are implemented */ |
1046 | if (mips_srs_max() > 1) | 1090 | if (mips_srs_max() > 1) |
1047 | change_c0_srsmap (0xf << n*4, srs << n*4); | 1091 | change_c0_srsmap (0xf << n*4, srs << n*4); |
@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1055 | 1099 | ||
1056 | extern char except_vec_vi, except_vec_vi_lui; | 1100 | extern char except_vec_vi, except_vec_vi_lui; |
1057 | extern char except_vec_vi_ori, except_vec_vi_end; | 1101 | extern char except_vec_vi_ori, except_vec_vi_end; |
1102 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1103 | /* | ||
1104 | * We need to provide the SMTC vectored interrupt handler | ||
1105 | * not only with the address of the handler, but with the | ||
1106 | * Status.IM bit to be masked before going there. | ||
1107 | */ | ||
1108 | extern char except_vec_vi_mori; | ||
1109 | const int mori_offset = &except_vec_vi_mori - &except_vec_vi; | ||
1110 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1058 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | 1111 | const int handler_len = &except_vec_vi_end - &except_vec_vi; |
1059 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | 1112 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; |
1060 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | 1113 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; |
@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1068 | } | 1121 | } |
1069 | 1122 | ||
1070 | memcpy (b, &except_vec_vi, handler_len); | 1123 | memcpy (b, &except_vec_vi, handler_len); |
1124 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1125 | if (n > 7) | ||
1126 | printk("Vector index %d exceeds SMTC maximum\n", n); | ||
1127 | w = (u32 *)(b + mori_offset); | ||
1128 | *w = (*w & 0xffff0000) | (0x100 << n); | ||
1129 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1071 | w = (u32 *)(b + lui_offset); | 1130 | w = (u32 *)(b + lui_offset); |
1072 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | 1131 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); |
1073 | w = (u32 *)(b + ori_offset); | 1132 | w = (u32 *)(b + ori_offset); |
@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1090 | return (void *)old_handler; | 1149 | return (void *)old_handler; |
1091 | } | 1150 | } |
1092 | 1151 | ||
1093 | void *set_vi_handler (int n, void *addr) | 1152 | void *set_vi_handler(int n, void *addr) |
1094 | { | 1153 | { |
1095 | return set_vi_srs_handler(n, addr, 0); | 1154 | return set_vi_srs_handler(n, addr, 0); |
1096 | } | 1155 | } |
@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc); | |||
1108 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); | 1167 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); |
1109 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); | 1168 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); |
1110 | 1169 | ||
1170 | #ifdef CONFIG_SMP | ||
1171 | static int smp_save_fp_context(struct sigcontext *sc) | ||
1172 | { | ||
1173 | return cpu_has_fpu | ||
1174 | ? _save_fp_context(sc) | ||
1175 | : fpu_emulator_save_context(sc); | ||
1176 | } | ||
1177 | |||
1178 | static int smp_restore_fp_context(struct sigcontext *sc) | ||
1179 | { | ||
1180 | return cpu_has_fpu | ||
1181 | ? _restore_fp_context(sc) | ||
1182 | : fpu_emulator_restore_context(sc); | ||
1183 | } | ||
1184 | #endif | ||
1185 | |||
1111 | static inline void signal_init(void) | 1186 | static inline void signal_init(void) |
1112 | { | 1187 | { |
1188 | #ifdef CONFIG_SMP | ||
1189 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
1190 | save_fp_context = smp_save_fp_context; | ||
1191 | restore_fp_context = smp_restore_fp_context; | ||
1192 | #else | ||
1113 | if (cpu_has_fpu) { | 1193 | if (cpu_has_fpu) { |
1114 | save_fp_context = _save_fp_context; | 1194 | save_fp_context = _save_fp_context; |
1115 | restore_fp_context = _restore_fp_context; | 1195 | restore_fp_context = _restore_fp_context; |
@@ -1117,6 +1197,7 @@ static inline void signal_init(void) | |||
1117 | save_fp_context = fpu_emulator_save_context; | 1197 | save_fp_context = fpu_emulator_save_context; |
1118 | restore_fp_context = fpu_emulator_restore_context; | 1198 | restore_fp_context = fpu_emulator_restore_context; |
1119 | } | 1199 | } |
1200 | #endif | ||
1120 | } | 1201 | } |
1121 | 1202 | ||
1122 | #ifdef CONFIG_MIPS32_COMPAT | 1203 | #ifdef CONFIG_MIPS32_COMPAT |
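On SMP the pointers are bound to thin wrappers so the cpu_has_fpu test is repeated on every save or restore instead of being decided once at boot; callers are unaffected because they always dispatch through the pointer. A sketch of a call site under that scheme (not part of the commit; sketch_save_fp() is a hypothetical caller):

	struct sigcontext;

	/* Signal-handling code always goes through the pointer, so it picks
	 * up _save_fp_context, fpu_emulator_save_context, or the SMP wrapper
	 * installed by signal_init() above without caring which. */
	extern int (*save_fp_context)(struct sigcontext *sc);

	static int sketch_save_fp(struct sigcontext *sc)
	{
		return save_fp_context(sc);
	}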
@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void) | |||
1153 | { | 1234 | { |
1154 | unsigned int cpu = smp_processor_id(); | 1235 | unsigned int cpu = smp_processor_id(); |
1155 | unsigned int status_set = ST0_CU0; | 1236 | unsigned int status_set = ST0_CU0; |
1237 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1238 | int secondaryTC = 0; | ||
1239 | int bootTC = (cpu == 0); | ||
1240 | |||
1241 | /* | ||
1242 | * Only do per_cpu_trap_init() for first TC of Each VPE. | ||
1243 | * Note that this hack assumes that the SMTC init code | ||
1244 | * assigns TCs consecutively and in ascending order. | ||
1245 | */ | ||
1246 | |||
1247 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
1248 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | ||
1249 | secondaryTC = 1; | ||
1250 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1156 | 1251 | ||
1157 | /* | 1252 | /* |
1158 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1253 | * Disable coprocessors and select 32-bit or 64-bit addressing |
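The TCBind read above distinguishes the first TC of a VPE from later ones: CurTC must be non-zero and CurVPE must match the VPE recorded for the previously initialised CPU, which relies on SMTC assigning TCs consecutively and in ascending order. A compact sketch of that test (not part of the commit; is_secondary_tc() is a hypothetical helper, the TCBIND_* masks come from asm/mipsmtregs.h):

	/* A TC is "secondary" when it is not TC0 and is bound to the same
	 * VPE as the CPU that was initialised just before it. */
	static int is_secondary_tc(unsigned int tcbind, unsigned int prev_vpe)
	{
		return ((tcbind & TCBIND_CURTC) != 0) &&
		       ((tcbind & TCBIND_CURVPE) == prev_vpe);
	}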
@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void) | |||
1175 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | 1270 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ |
1176 | #endif | 1271 | #endif |
1177 | 1272 | ||
1273 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1274 | if (!secondaryTC) { | ||
1275 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1276 | |||
1178 | /* | 1277 | /* |
1179 | * Interrupt handling. | 1278 | * Interrupt handling. |
1180 | */ | 1279 | */ |
@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void) | |||
1191 | } else | 1290 | } else |
1192 | set_c0_cause(CAUSEF_IV); | 1291 | set_c0_cause(CAUSEF_IV); |
1193 | } | 1292 | } |
1293 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1294 | } | ||
1295 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1194 | 1296 | ||
1195 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1297 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
1196 | TLBMISS_HANDLER_SETUP(); | 1298 | TLBMISS_HANDLER_SETUP(); |
@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void) | |||
1200 | BUG_ON(current->mm); | 1302 | BUG_ON(current->mm); |
1201 | enter_lazy_tlb(&init_mm, current); | 1303 | enter_lazy_tlb(&init_mm, current); |
1202 | 1304 | ||
1203 | cpu_cache_init(); | 1305 | #ifdef CONFIG_MIPS_MT_SMTC |
1204 | tlb_init(); | 1306 | if (bootTC) { |
1307 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1308 | cpu_cache_init(); | ||
1309 | tlb_init(); | ||
1310 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1311 | } | ||
1312 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1205 | } | 1313 | } |
1206 | 1314 | ||
1207 | /* Install CPU exception handler */ | 1315 | /* Install CPU exception handler */ |
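Under SMTC the caches and the shared TLB are core-wide resources rather than per-TC state, so only the boot TC performs cpu_cache_init() and tlb_init(); the other TCs still run per_cpu_trap_init() but skip that shared hardware setup. A sketch of the one-time guard (not part of the commit; sketch_shared_init() is a hypothetical helper and 'bootTC' mirrors the flag computed earlier in the function):

	extern void cpu_cache_init(void);
	extern void tlb_init(void);

	/* Shared per-core resources: initialise once, from the boot TC only. */
	static void sketch_shared_init(int bootTC)
	{
		if (bootTC) {
			cpu_cache_init();
			tlb_init();
		}
	}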
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 2ad0cedf29fe..14fa00e3cdfa 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <asm/asm-offsets.h> | 2 | #include <asm/asm-offsets.h> |
3 | #include <asm-generic/vmlinux.lds.h> | 3 | #include <asm-generic/vmlinux.lds.h> |
4 | 4 | ||
5 | #undef mips /* CPP really sucks for this job */ | 5 | #undef mips |
6 | #define mips mips | 6 | #define mips mips |
7 | OUTPUT_ARCH(mips) | 7 | OUTPUT_ARCH(mips) |
8 | ENTRY(kernel_entry) | 8 | ENTRY(kernel_entry) |