author		Ralf Baechle <ralf@linux-mips.org>	2006-04-05 04:45:45 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2006-04-18 22:14:28 -0400
commit		41c594ab65fc89573af296d192aa5235d09717ab (patch)
tree		562462512a320f386bdf49eabfbb26bb3ee761fa /arch
parent		2600990e640e3bef29ed89d565864cf16ee83833 (diff)
[MIPS] MT: Improved multithreading support.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch')
35 files changed, 2796 insertions, 155 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a7bac0459f99..f9be549645ea 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1447,6 +1447,10 @@ choice
 	prompt "MIPS MT options"
 	depends on MIPS_MT
 
+config MIPS_MT_SMTC
+	bool "SMTC: Use all TCs on all VPEs for SMP"
+	select SMP
+
 config MIPS_MT_SMP
 	bool "Use 1 TC on each available VPE for SMP"
 	select SMP
@@ -1613,7 +1617,7 @@ source "mm/Kconfig"
 
 config SMP
 	bool "Multi-Processing support"
-	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
+	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
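
A minimal sketch of the resulting configuration, assuming a MIPS MT target: the new SMTC choice selects SMP automatically, so a .config built from the Kconfig above would contain something like the following (illustrative fragment only, other required options omitted):

CONFIG_MIPS_MT=y
CONFIG_MIPS_MT_SMTC=y
# CONFIG_MIPS_MT_SMP is not set
CONFIG_SMP=y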
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9ec01de81c04..34e8a256765c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000)	+= r6000_fpu.o r4k_switch.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 
-obj-$(CONFIG_MIPS_MT_SMP)	+= smp_mt.o
+obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6b03c773be..92b28b674d6f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
 	offset("#define PT_BVADDR  ", struct pt_regs, cp0_badvaddr);
 	offset("#define PT_STATUS  ", struct pt_regs, cp0_status);
 	offset("#define PT_CAUSE   ", struct pt_regs, cp0_cause);
+#ifdef CONFIG_MIPS_MT_SMTC
+	offset("#define PT_TCSTATUS  ", struct pt_regs, cp0_tcstatus);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	size("#define PT_SIZE   ", struct pt_regs);
 	linefeed;
 }
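
The offset() helper writes C preprocessor defines into the generated asm-offsets header, so the hunk above makes the TCStatus save slot visible to assembly (it is consumed by smtc-asm.S later in this patch). A sketch of the generated line, with a purely illustrative value since the real offset depends on the struct pt_regs layout:

/* generated into the asm-offsets header; numeric value illustrative only */
#define PT_TCSTATUS 168	/* offsetof(struct pt_regs, cp0_tcstatus) */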
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index b1939a486d2c..d101d2fb24ca 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -17,6 +17,9 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif
 
 #ifdef CONFIG_PREEMPT
 	.macro	preempt_stop
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
 	bnez	t0, syscall_exit_work
 
 FEXPORT(restore_all)			# restore full frame
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Detect and execute deferred IPI "interrupts" */
+	move	a0,sp
+	jal	deferred_smtc_ipi
+/* Re-arm any temporarily masked interrupts not explicitly "acked" */
+	mfc0	v0, CP0_TCSTATUS
+	ori	v1, v0, TCSTATUS_IXMT
+	mtc0	v1, CP0_TCSTATUS
+	andi	v0, TCSTATUS_IXMT
+	ehb
+	mfc0	t0, CP0_TCCONTEXT
+	DMT	9				# dmt t1
+	jal	mips_ihb
+	mfc0	t2, CP0_STATUS
+	andi	t3, t0, 0xff00
+	or	t2, t2, t3
+	mtc0	t2, CP0_STATUS
+	ehb
+	andi	t1, t1, VPECONTROL_TE
+	beqz	t1, 1f
+	EMT
+1:
+	mfc0	v1, CP0_TCSTATUS
+/* We set IXMT above, XOR should clear it here */
+	xori	v1, v1, TCSTATUS_IXMT
+	or	v1, v0, v1
+	mtc0	v1, CP0_TCSTATUS
+	ehb
+	xor	t0, t0, t3
+	mtc0	t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
 	RESTORE_AT
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 235ad9f6bd35..10f28fb9f008 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -283,11 +283,33 @@
 	 */
 
 3:
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Read-modify-write of Status must be atomic */
+	mfc0	t2, CP0_TCSTATUS
+	ori	t1, t2, TCSTATUS_IXMT
+	mtc0	t1, CP0_TCSTATUS
+	andi	t2, t2, TCSTATUS_IXMT
+	ehb
+	DMT	9				# dmt t1
+	jal	mips_ihb
+	nop
+#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t0, CP0_STATUS
 	ori	t0, 0x1f
 	xori	t0, 0x1f
 	mtc0	t0, CP0_STATUS
-
+#ifdef CONFIG_MIPS_MT_SMTC
+	andi	t1, t1, VPECONTROL_TE
+	beqz	t1, 9f
+	nop
+	EMT					# emt
+9:
+	mfc0	t1, CP0_TCSTATUS
+	xori	t1, t1, TCSTATUS_IXMT
+	or	t1, t1, t2
+	mtc0	t1, CP0_TCSTATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_L	v0, GDB_FR_STATUS(sp)
 	LONG_L	v1, GDB_FR_EPC(sp)
 	mtc0	v0, CP0_STATUS
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d4f88e0af24c..6ecbdc1fefd1 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
 #include <asm/system.h>
 #include <asm/gdb-stub.h>
 #include <asm/inst.h>
+#include <asm/smp.h>
 
 /*
  * external low-level support routines
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
 	local_irq_restore(flags);
 }
 
+/*
+ * GDB stub needs to call kgdb_wait on all processors with interrupts
+ * disabled, so it uses its own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+	struct call_data_struct data;
+	int i, cpus = num_online_cpus() - 1;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
+	if (!cpus)
+		return 0;
+
+	if (spin_is_locked(&smp_call_lock)) {
+		/*
+		 * Some other processor is trying to make us do something
+		 * but we're not going to respond... give up
+		 */
+		return -1;
+	}
+
+	/*
+	 * We will continue here, accepting the fact that
+	 * the kernel may deadlock if another CPU attempts
+	 * to call smp_call_function now...
+	 */
+
+	data.func = kgdb_wait;
+	data.info = NULL;
+	atomic_set(&data.started, 0);
+	data.wait = 0;
+
+	spin_lock(&smp_call_lock);
+	call_data = &data;
+	mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i) && i != cpu)
+			core_send_ipi(i, SMP_CALL_FUNCTION);
+
+	/* Wait for response */
+	/* FIXME: lock-up detection, backtrace on lock-up */
+	while (atomic_read(&data.started) != cpus)
+		barrier();
+
+	call_data = NULL;
+	spin_unlock(&smp_call_lock);
+#endif
+
+	return 0;
+}
 
 /*
  * This function does all command processing for interfacing to gdb. It
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
 	/*
 	 * force other cpus to enter kgdb
 	 */
-	smp_call_function(kgdb_wait, NULL, 0, 0);
+	kgdb_smp_call_kgdb_wait();
 
 	/*
 	 * If we're in breakpoint() increment the PC
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 04418b6568b0..ff7af369f286 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/fpregdef.h>
@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
 	SAVE_AT
 	.set	push
 	.set	noreorder
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * To keep from blindly blocking *all* interrupts
+	 * during service by SMTC kernel, we also want to
+	 * pass the IM value to be cleared.
+	 */
+EXPORT(except_vec_vi_mori)
+	ori	a0, $0, 0
+#endif /* CONFIG_MIPS_MT_SMTC */
 EXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
 	j	except_vec_vi_handler
@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
 NESTED(except_vec_vi_handler, 0, sp)
 	SAVE_TEMP
 	SAVE_STATIC
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC has an interesting problem that interrupts are level-triggered,
+	 * and the CLI macro will clear EXL, potentially causing a duplicate
+	 * interrupt service invocation. So we need to clear the associated
+	 * IM bit of Status prior to doing CLI, and restore it after the
+	 * service routine has been invoked - we must assume that the
+	 * service routine will have cleared the state, and any active
+	 * level represents a new or otherwise unserviced event...
+	 */
+	mfc0	t1, CP0_STATUS
+	and	t0, a0, t1
+	mfc0	t2, CP0_TCCONTEXT
+	or	t0, t0, t2
+	mtc0	t0, CP0_TCCONTEXT
+	xor	t1, t1, t0
+	mtc0	t1, CP0_STATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	CLI
 	move	a0, sp
 	jalr	v0
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2e9122a4213a..bdf6f6eff721 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -18,6 +18,7 @@
 #include <linux/threads.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
 #include <asm/mipsregs.h>
@@ -82,12 +83,33 @@
 	 */
 	.macro	setup_c0_status set clr
 	.set	push
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * For SMTC, we need to set privilege and disable interrupts only for
+	 * the current TC, using the TCStatus register.
+	 */
+	mfc0	t0, CP0_TCSTATUS
+	/* Fortunately CU 0 is in the same place in both registers */
+	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+	li	t1, ST0_CU0 | 0x08001c00
+	or	t0, t1
+	/* Clear TKSU, leave IXMT */
+	xori	t0, 0x00001800
+	mtc0	t0, CP0_TCSTATUS
+	ehb
+	/* We need to leave the global IE bit set, but clear EXL... */
+	mfc0	t0, CP0_STATUS
+	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
+	xor	t0, ST0_EXL | ST0_ERL | \clr
+	mtc0	t0, CP0_STATUS
+#else
 	mfc0	t0, CP0_STATUS
 	or	t0, ST0_CU0|\set|0x1f|\clr
 	xor	t0, 0x1f|\clr
 	mtc0	t0, CP0_STATUS
 	.set	noreorder
 	sll	zero,3				# ehb
+#endif
 	.set	pop
 	.endm
 
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 
 	ARC64_TWIDDLE_PC
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
+	 * We still need to enable interrupts globally in Status,
+	 * and clear EXL/ERL.
+	 *
+	 * TCContext is used to track interrupt levels under
+	 * service in SMTC kernel. Clear for boot TC before
+	 * allowing any interrupts.
+	 */
+	mtc0	zero, CP0_TCCONTEXT
+
+	mfc0	t0, CP0_STATUS
+	ori	t0, t0, 0xff1f
+	xori	t0, t0, 0x001e
+	mtc0	t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	PTR_LA		t0, __bss_start		# clear .bss
 	LONG_S		zero, (t0)
 	PTR_LA		t1, __bss_stop - LONGSIZE
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 	 * function after setting up the stack and gp registers.
 	 */
 NESTED(smp_bootstrap, 16, sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * Read-modify-writes of Status must be atomic, and this
+	 * is one case where CLI is invoked without EXL being
+	 * necessarily set. The CLI and setup_c0_status will
+	 * in fact be redundant for all but the first TC of
+	 * each VPE being booted.
+	 */
+	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
+	jal	mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	setup_c0_status_sec
 	smp_slave_setup
+#ifdef CONFIG_MIPS_MT_SMTC
+	andi	t2, t2, VPECONTROL_TE
+	beqz	t2, 2f
+	EMT		# emt
+2:
+#endif /* CONFIG_MIPS_MT_SMTC */
 	j	start_secondary
	END(smp_bootstrap)
 #endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b974ac9057f6..2125ba5f1d9b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -187,6 +187,10 @@ handle_real_irq:
 		outb(cached_21,0x21);
 		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 3f653c7cfbf3..97ebdc754b9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* This actually needs to be a call into platform code */
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index e0efc4f2f93e..3dce742e716f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
 
 atomic_t irq_err_count;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #undef do_IRQ
 
 /*
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	irq_enter();
 
+	__DO_IRQ_SMTC_HOOK();
 	__do_IRQ(irq, regs);
 
 	irq_exit();
@@ -129,6 +139,9 @@ void __init init_IRQ(void)
 		irq_desc[i].depth    = 1;
 		irq_desc[i].handler  = &no_irq_type;
 		spin_lock_init(&irq_desc[i].lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+		irq_hwmask[i] = 0;
+#endif /* CONFIG_MIPS_MT_SMTC */
 	}
 
 	arch_init_irq();
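
setup_irq_smtc(), mentioned in the comment above, is not part of this excerpt; a hedged sketch of what the registration side presumably does, assuming a setup_irq()-like signature with an extra hardware-mask argument:

/*
 * Sketch only: records which Status.IM bit a handler owns so the
 * mask-and-ack paths (see the i8259/msc01 hunks above) can re-arm it.
 * The real setup_irq_smtc() lives outside this diff excerpt.
 */
int setup_irq_smtc_sketch(unsigned int irq, struct irqaction *new,
			  unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;	/* e.g. a single ST0_IM bit */
	return setup_irq(irq, new);
}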
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000000..02237a685ec7
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,449 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsmtregs.h>
+#include <asm/r4kcache.h>
+#include <asm/cacheflush.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+
+cpumask_t mt_fpu_cpumask;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical with the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline task_t *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	cpumask_t effective_mask;
+	int retval;
+	task_t *p;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		unlock_cpu_hotplug();
+		return -ESRCH;
+	}
+
+	/*
+	 * It is not safe to call set_cpus_allowed with the
+	 * tasklist_lock held.  We will bump the task_struct's
+	 * usage count and drop tasklist_lock before invoking
+	 * set_cpus_allowed.
+	 */
+	get_task_struct(p);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE)) {
+		read_unlock(&tasklist_lock);
+		goto out_unlock;
+	}
+
+	/* Record new user-specified CPU set for future reference */
+	p->thread.user_cpus_allowed = new_mask;
+
+	/* Unlock the task list */
+	read_unlock(&tasklist_lock);
+
+	/* Compute new global allowed CPU set if necessary */
+	if( (p->thread.mflags & MF_FPUBOUND)
+	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
+		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed(p, effective_mask);
+	} else {
+		p->thread.mflags &= ~MF_FPUBOUND;
+		retval = set_cpus_allowed(p, new_mask);
+	}
+
+
+out_unlock:
+	put_task_struct(p);
+	unlock_cpu_hotplug();
+	return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	unsigned int real_len;
+	cpumask_t mask;
+	int retval;
+	task_t *p;
+
+	real_len = sizeof(mask);
+	if (len < real_len)
+		return -EINVAL;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = 0;
+
+	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	unlock_cpu_hotplug();
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which is taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump(unsigned long mvpctl)
+{
+	unsigned long flags;
+	unsigned long vpflags;
+	unsigned long mvpconf0;
+	int nvpe;
+	int ntc;
+	int i;
+	int tc;
+	unsigned long haltval;
+	unsigned long tcstatval;
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_soft_dump(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+	local_irq_save(flags);
+	vpflags = dvpe();
+	printk("=== MIPS MT State Dump ===\n");
+	printk("-- Global State --\n");
+	printk("   MVPControl Passed: %08lx\n", mvpctl);
+	printk("   MVPControl Read: %08lx\n", vpflags);
+	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+	printk("-- per-VPE State --\n");
+	for(i = 0; i < nvpe; i++) {
+		for(tc = 0; tc < ntc; tc++) {
+			settc(tc);
+			if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+				printk("  VPE %d\n", i);
+				printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
+				printk("   VPE%d.Status : %08lx\n",
+					i, read_vpe_c0_status());
+				printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
+				printk("   VPE%d.Config7 : %08lx\n",
+					i, read_vpe_c0_config7());
+				break; /* Next VPE */
+			}
+		}
+	}
+	printk("-- per-TC State --\n");
+	for(tc = 0; tc < ntc; tc++) {
+		settc(tc);
+		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+			/* Are we dumping ourself? */
+			haltval = 0; /* Then we're not halted, and mustn't be */
+			tcstatval = flags; /* And pre-dump TCStatus is flags */
+			printk("  TC %d (current TC with VPE EPC above)\n", tc);
+		} else {
+			haltval = read_tc_c0_tchalt();
+			write_tc_c0_tchalt(1);
+			tcstatval = read_tc_c0_tcstatus();
+			printk("  TC %d\n", tc);
+		}
+		printk("   TCStatus : %08lx\n", tcstatval);
+		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
+		printk("   TCRestart : %08lx\n", read_tc_c0_tcrestart());
+		printk("   TCHalt : %08lx\n", haltval);
+		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
+		if (!haltval)
+			write_tc_c0_tchalt(0);
+	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	smtc_soft_dump();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	printk("===========================\n");
+	evpe(vpflags);
+	local_irq_restore(flags);
+}
+
+static int mt_opt_norps = 0;
+static int mt_opt_rpsctl = -1;
+static int mt_opt_nblsu = -1;
+static int mt_opt_forceconfig7 = 0;
+static int mt_opt_config7 = -1;
+
+static int __init rps_disable(char *s)
+{
+	mt_opt_norps = 1;
+	return 1;
+}
+__setup("norps", rps_disable);
+
+static int __init rpsctl_set(char *str)
+{
+	get_option(&str, &mt_opt_rpsctl);
+	return 1;
+}
+__setup("rpsctl=", rpsctl_set);
+
+static int __init nblsu_set(char *str)
+{
+	get_option(&str, &mt_opt_nblsu);
+	return 1;
+}
+__setup("nblsu=", nblsu_set);
+
+static int __init config7_set(char *str)
+{
+	get_option(&str, &mt_opt_config7);
+	mt_opt_forceconfig7 = 1;
+	return 1;
+}
+__setup("config7=", config7_set);
+
+/* Experimental cache flush control parameters that should go away some day */
+int mt_protiflush = 0;
+int mt_protdflush = 0;
+int mt_n_iflushes = 1;
+int mt_n_dflushes = 1;
+
+static int __init set_protiflush(char *s)
+{
+	mt_protiflush = 1;
+	return 1;
+}
+__setup("protiflush", set_protiflush);
+
+static int __init set_protdflush(char *s)
+{
+	mt_protdflush = 1;
+	return 1;
+}
+__setup("protdflush", set_protdflush);
+
+static int __init niflush(char *s)
+{
+	get_option(&s, &mt_n_iflushes);
+	return 1;
+}
+__setup("niflush=", niflush);
+
+static int __init ndflush(char *s)
+{
+	get_option(&s, &mt_n_dflushes);
+	return 1;
+}
+__setup("ndflush=", ndflush);
+#ifdef CONFIG_MIPS_MT_FPAFF
+static int fpaff_threshold = -1;
+
+static int __init fpaff_thresh(char *str)
+{
+	get_option(&str, &fpaff_threshold);
+	return 1;
+}
+
+__setup("fpaff=", fpaff_thresh);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+static unsigned int itc_base = 0;
+
+static int __init set_itc_base(char *str)
+{
+	get_option(&str, &itc_base);
+	return 1;
+}
+
+__setup("itcbase=", set_itc_base);
+
+void mips_mt_set_cpuoptions(void)
+{
+	unsigned int oconfig7 = read_c0_config7();
+	unsigned int nconfig7 = oconfig7;
+
+	if (mt_opt_norps) {
+		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
+	}
+	if (mt_opt_rpsctl >= 0) {
+		printk("34K return prediction stack override set to %d.\n",
+			mt_opt_rpsctl);
+		if (mt_opt_rpsctl)
+			nconfig7 |= (1 << 2);
+		else
+			nconfig7 &= ~(1 << 2);
+	}
+	if (mt_opt_nblsu >= 0) {
+		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
+		if (mt_opt_nblsu)
+			nconfig7 |= (1 << 5);
+		else
+			nconfig7 &= ~(1 << 5);
+	}
+	if (mt_opt_forceconfig7) {
+		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
+		nconfig7 = mt_opt_config7;
+	}
+	if (oconfig7 != nconfig7) {
+		__asm__ __volatile("sync");
+		write_c0_config7(nconfig7);
+		ehb ();
+		printk("Config7: 0x%08x\n", read_c0_config7());
+	}
+
+	/* Report Cache management debug options */
+	if (mt_protiflush)
+		printk("I-cache flushes single-threaded\n");
+	if (mt_protdflush)
+		printk("D-cache flushes single-threaded\n");
+	if (mt_n_iflushes != 1)
+		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
+	if (mt_n_dflushes != 1)
+		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* FPU Use Factor empirically derived from experiments on 34K */
+#define FPUSEFACTOR 333
+
+	if (fpaff_threshold >= 0) {
+		mt_fpemul_threshold = fpaff_threshold;
+	} else {
+		mt_fpemul_threshold =
+			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+	}
+	printk("FPU Affinity set after %ld emulations\n",
+			mt_fpemul_threshold);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	if (itc_base != 0) {
+		/*
+		 * Configure ITC mapping.  This code is very
+		 * specific to the 34K core family, which uses
+		 * a special mode bit ("ITC") in the ErrCtl
+		 * register to enable access to ITC control
+		 * registers via cache "tag" operations.
+		 */
+		unsigned long ectlval;
+		unsigned long itcblkgrn;
+
+		/* ErrCtl register is known as "ecc" to Linux */
+		ectlval = read_c0_ecc();
+		write_c0_ecc(ectlval | (0x1 << 26));
+		ehb();
+#define INDEX_0 (0x80000000)
+#define INDEX_8 (0x80000008)
+		/* Read "cache tag" for Dcache pseudo-index 8 */
+		cache_op(Index_Load_Tag_D, INDEX_8);
+		ehb();
+		itcblkgrn = read_c0_dtaglo();
+		itcblkgrn &= 0xfffe0000;
+		/* Set for 128 byte pitch of ITC cells */
+		itcblkgrn |= 0x00000c00;
+		/* Stage in Tag register */
+		write_c0_dtaglo(itcblkgrn);
+		ehb();
+		/* Write out to ITU with CACHE op */
+		cache_op(Index_Store_Tag_D, INDEX_8);
+		/* Now set base address, and turn ITC on with 0x1 bit */
+		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
+		ehb();
+		/* Write out to ITU with CACHE op */
+		cache_op(Index_Store_Tag_D, INDEX_0);
+		write_c0_ecc(ectlval);
+		ehb();
+		printk("Mapped %ld ITC cells starting at 0x%08x\n",
+			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+	}
+}
+
+/*
+ * Function to protect cache flushes from concurrent execution
+ * depends on MP software model chosen.
+ */
+
+void mt_cflush_lockdown(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_cflush_lockdown(void);
+
+	smtc_cflush_lockdown();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	/* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
+
+void mt_cflush_release(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_cflush_release(void);
+
+	smtc_cflush_release();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	/* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
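
For illustration, a kernel command line exercising the __setup() handlers defined in mips-mt.c above; every value here is an arbitrary example, not a recommendation:

rpsctl=1 nblsu=0 config7=0x80000000 protiflush protdflush niflush=2 ndflush=2 fpaff=400 itcbase=0x1fc80000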
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c66db5e5ab62..8b393df460a2 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,6 +41,10 @@
 #include <asm/elf.h>
 #include <asm/isadep.h>
 #include <asm/inst.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+extern void smtc_idle_loop_hook(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
  * The idle thread. There's no useful work to be done, so just try to conserve
@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		while (!need_resched())
+		while (!need_resched()) {
+#ifdef CONFIG_MIPS_MT_SMTC
+			smtc_idle_loop_hook();
+#endif /* CONFIG_MIPS_MT_SMTC */
 			if (cpu_wait)
 				(*cpu_wait)();
+		}
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f838b36cc765..f3106d0771b0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+		unsigned int irqflags;
+		unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 		if (!cpu_has_fpu)
 			break;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+		/* Read-modify-write of Status must be atomic */
+		local_irq_save(irqflags);
+		mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 		preempt_disable();
 		if (cpu_has_mipsmt) {
 			unsigned int vpflags = dvpe();
@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 		}
+#ifdef CONFIG_MIPS_MT_SMTC
+		emt(mtflags);
+		local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 		preempt_enable();
 		break;
 	}
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 0d5cf97af727..8704dc0496ea 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
 		break;
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+		unsigned int irqflags;
+		unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 		if (!cpu_has_fpu) {
 			tmp = 0;
 			break;
 		}
 
+#ifdef CONFIG_MIPS_MT_SMTC
+		/* Read-modify-write of Status must be atomic */
+		local_irq_save(irqflags);
+		mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 		preempt_disable();
 		if (cpu_has_mipsmt) {
 			unsigned int vpflags = dvpe();
@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 		}
+#ifdef CONFIG_MIPS_MT_SMTC
+		emt(mtflags);
+		local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 		preempt_enable();
 		break;
 	}
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd19a9c8..0b1b54acee9f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -88,7 +88,18 @@
 
 	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
 	set_saved_sp	t0, t1, t2
-
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Read-modify-writes of Status must be atomic on a VPE */
+	mfc0	t2, CP0_TCSTATUS
+	ori	t1, t2, TCSTATUS_IXMT
+	mtc0	t1, CP0_TCSTATUS
+	andi	t2, t2, TCSTATUS_IXMT
+	ehb
+	DMT	8				# dmt	t0
+	move	t1,ra
+	jal	mips_ihb
+	move	ra,t1
+#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 	li	a3, 0xff01
 	and	t1, a3
@@ -97,6 +108,18 @@
 	and	a2, a3
 	or	a2, t1
 	mtc0	a2, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+	ehb
+	andi	t0, t0, VPECONTROL_TE
+	beqz	t0, 1f
+	emt
+1:
+	mfc0	t1, CP0_TCSTATUS
+	xori	t1, t1, TCSTATUS_IXMT
+	or	t1, t1, t2
+	mtc0	t1, CP0_TCSTATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	move	v0, a0
 	jr	ra
	END(resume)
@@ -131,10 +154,19 @@ LEAF(_restore_fp)
 #define FPU_DEFAULT  0x00000000
 
 LEAF(_init_fpu)
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
+	mfc0	t0, CP0_TCSTATUS
+	/* Bit position is the same for Status, TCStatus */
+	li	t1, ST0_CU1
+	or	t0, t1
+	mtc0	t0, CP0_TCSTATUS
+#else /* Normal MIPS CU1 enable */
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU1
 	or	t0, t1
 	mtc0	t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
 	fpu_enable_hazard
 
 	li	t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp-mt.c
index 993b8bf56aaf..19b8e4b31b79 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -1,8 +1,4 @@
 /*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
- *
- * Elizabeth Clarke (beth@mips.com)
- *
  * This program is free software; you can distribute it and/or modify it
  * under the terms of the GNU General Public License (Version 2) as
  * published by the Free Software Foundation.
@@ -16,6 +12,10 @@
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
+ * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
+ *    Elizabeth Clarke (beth@mips.com)
+ *    Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 */
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>
 
 #include <asm/atomic.h>
+#include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,8 +34,8 @@
 #include <asm/time.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
-#include <asm/cacheflush.h>
-#include <asm/mips-boards/maltaint.h>
+#include <asm/mips_mt.h>
+#include <asm/mips-boards/maltaint.h>	/* This is f*cking wrong */
 
 #define MIPS_CPU_IPI_RESCHED_IRQ 0
 #define MIPS_CPU_IPI_CALL_IRQ 1
@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
 	if (!cpu_has_mipsmt)
 		return;
 
+	/* Enable VPC */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 	back_to_back_c0_hazard();
@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
 
 static void ipi_resched_dispatch (struct pt_regs *regs)
 {
-	do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
+	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
 }
 
 static void ipi_call_dispatch (struct pt_regs *regs)
 {
-	do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
+	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
 }
 
 irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -155,6 +157,8 @@ void plat_smp_setup(void)
 	dvpe();
 	dmt();
 
+	mips_mt_set_cpuoptions();
+
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
@@ -189,11 +193,13 @@ void plat_smp_setup(void)
 
 		if (i != 0) {
 			write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
-			write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
 
 			/* set config to be the same as vpe0, particularly kseg0 coherency alg */
 			write_vpe_c0_config( read_c0_config());
 
+			/* make sure there are no software interrupts pending */
+			write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
+
 			/* Propagate Config7 */
 			write_vpe_c0_config7(read_c0_config7());
 		}
@@ -233,16 +239,16 @@ void plat_smp_setup(void)
 	/* We'll wait until starting the secondaries before starting MVPE */
 
 	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	/* set up ipi interrupts */
 	if (cpu_has_vint) {
 		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
 		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
 	}
-}
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
 	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
 	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
 
@@ -287,7 +293,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 	/* global pointer */
 	write_tc_gpr_gp((unsigned long)gp);
 
-	flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
+	flush_icache_range((unsigned long)gp,
+	                   (unsigned long)(gp + sizeof(struct thread_info)));
 
 	/* finally out of configuration and into chaos */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 72a287aa937e..d42f358754ad 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,10 @@
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Only do cpu_probe for first TC of CPU */
+	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			barrier();
+	call_data = NULL;
 	spin_unlock(&smp_call_lock);
 
 	return 0;
 }
 
+
 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644
index 000000000000..c9d65196d917
--- /dev/null
+++ b/arch/mips/kernel/smtc-asm.S
@@ -0,0 +1,130 @@
+/*
+ * Assembly Language Functions for MIPS MT SMTC support
+ */
+
+/*
+ * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
+
+#include <asm/regdef.h>
+#include <asm/asmmacro.h>
+#include <asm/stackframe.h>
+#include <asm/stackframe.h>
+
+/*
+ * "Software Interrupt" linkage.
+ *
+ * This is invoked when an "Interrupt" is sent from one TC to another,
+ * where the TC to be interrupted is halted, has its Restart address
+ * and Status values saved by the "remote control" thread, then modified
+ * to cause execution to begin here, in kernel mode. This code then
+ * disguises the TC state as that of an exception and transfers
+ * control to the general exception or vectored interrupt handler.
+ */
+	.set noreorder
+
+/*
+The __smtc_ipi_vector would use k0 and k1 as temporaries and
+1) Set EXL (this is per-VPE, so this can't be done by proxy!)
+2) Restore the K/CU and IXMT bits to the pre "exception" state
+   (EXL means no interrupts and access to the kernel map).
+3) Set EPC to be the saved value of TCRestart.
+4) Jump to the exception handler entry point passed by the sender.
+
+CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
+*/
+
+/*
+ * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
+ * state of pre-halt thread, then save everything and call
+ * through some function pointer to imaginary_exception, which
+ * will parse a register value or memory message queue to
+ * deliver things like interprocessor interrupts. On return
+ * from that function, jump to the global ret_from_irq code
+ * to invoke the scheduler and return as appropriate.
+ */
+
+#define PT_PADSLOT4 (PT_R0-8)
+#define PT_PADSLOT5 (PT_R0-4)
+
+	.text
+	.align 5
+FEXPORT(__smtc_ipi_vector)
+	.set	noat
+	/* Disable thread scheduling to make Status update atomic */
+	DMT	27					# dmt	k1
+	ehb
+	/* Set EXL */
+	mfc0	k0,CP0_STATUS
+	ori	k0,k0,ST0_EXL
+	mtc0	k0,CP0_STATUS
+	ehb
+	/* Thread scheduling now inhibited by EXL. Restore TE state. */
+	andi	k1,k1,VPECONTROL_TE
+	beqz	k1,1f
+	emt
+1:
+	/*
+	 * The IPI sender has put some information on the anticipated
+	 * kernel stack frame.  If we were in user mode, this will be
+	 * built above the saved kernel SP.  If we were already in the
+	 * kernel, it will be built above the current CPU SP.
+	 *
+	 * Were we in kernel mode, as indicated by CU0?
+	 */
+	sll	k1,k0,3
+	.set noreorder
+	bltz	k1,2f
+	move	k1,sp
+	.set reorder
+	/*
+	 * If previously in user mode, set CU0 and use kernel stack.
+	 */
+	li	k1,ST0_CU0
+	or	k1,k1,k0
+	mtc0	k1,CP0_STATUS
+	ehb
+	get_saved_sp
+	/* Interrupting TC will have pre-set values in slots in the new frame */
+2:	subu	k1,k1,PT_SIZE
+	/* Load TCStatus Value */
+	lw	k0,PT_TCSTATUS(k1)
+	/* Write it to TCStatus to restore CU/KSU/IXMT state */
+	mtc0	k0,$2,1
+	ehb
+	lw	k0,PT_EPC(k1)
+	mtc0	k0,CP0_EPC
+	/* Save all will redundantly recompute the SP, but use it for now */
+	SAVE_ALL
+	CLI
+	move	a0,sp
+	/* Function to be invoked passed stack pad slot 5 */
+	lw	t0,PT_PADSLOT5(sp)
+	/* Argument from sender passed in stack pad slot 4 */
+	lw	a1,PT_PADSLOT4(sp)
+	jalr	t0
+	nop
+	j	ret_from_irq
+	nop
+
+/*
+ * Called from idle loop to provoke processing of queued IPIs
+ * First IPI message in queue passed as argument.
+ */
+
+LEAF(self_ipi)
+	/* Before anything else, block interrupts */
+	mfc0	t0,CP0_TCSTATUS
+	ori	t1,t0,TCSTATUS_IXMT
+	mtc0	t1,CP0_TCSTATUS
+	ehb
+	/* We know we're in kernel mode, so prepare stack frame */
+	subu	t1,sp,PT_SIZE
+	sw	ra,PT_EPC(t1)
+	sw	a0,PT_PADSLOT4(t1)
+	la	t2,ipi_decode
+	sw	t2,PT_PADSLOT5(t1)
+	/* Save pre-disable value of TCStatus */
+	sw	t0,PT_TCSTATUS(t1)
+	j	__smtc_ipi_vector
+	nop
+END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c new file mode 100644 index 000000000000..6f3709996172 --- /dev/null +++ b/arch/mips/kernel/smtc-proc.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * /proc hooks for SMTC kernel | ||
3 | * Copyright (C) 2005 Mips Technologies, Inc | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/cpumask.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | |||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <asm/hardirq.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/smp.h> | ||
18 | #include <asm/mipsregs.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | #include <linux/proc_fs.h> | ||
21 | |||
22 | #include <asm/smtc_proc.h> | ||
23 | |||
24 | /* | ||
25 | * /proc diagnostic and statistics hooks | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Statistics gathered | ||
30 | */ | ||
31 | unsigned long selfipis[NR_CPUS]; | ||
32 | |||
33 | struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
34 | |||
35 | static struct proc_dir_entry *smtc_stats; | ||
36 | |||
37 | atomic_t smtc_fpu_recoveries; | ||
38 | |||
39 | static int proc_read_smtc(char *page, char **start, off_t off, | ||
40 | int count, int *eof, void *data) | ||
41 | { | ||
42 | int totalen = 0; | ||
43 | int len; | ||
44 | int i; | ||
45 | extern unsigned long ebase; | ||
46 | |||
47 | len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status); | ||
48 | totalen += len; | ||
49 | page += len; | ||
50 | len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7()); | ||
51 | totalen += len; | ||
52 | page += len; | ||
53 | len = sprintf(page, "EBASE: 0x%08lx\n", ebase); | ||
54 | totalen += len; | ||
55 | page += len; | ||
56 | len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n"); | ||
57 | totalen += len; | ||
58 | page += len; | ||
59 | for (i=0; i < NR_CPUS; i++) { | ||
60 | len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
61 | totalen += len; | ||
62 | page += len; | ||
63 | } | ||
64 | len = sprintf(page, "Self-IPIs by CPU:\n"); | ||
65 | totalen += len; | ||
66 | page += len; | ||
67 | for (i = 0; i < NR_CPUS; i++) { | ||
68 | len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
69 | totalen += len; | ||
70 | page += len; | ||
71 | } | ||
72 | len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n", | ||
73 | atomic_read(&smtc_fpu_recoveries)); | ||
74 | totalen += len; | ||
75 | page += len; | ||
76 | |||
77 | return totalen; | ||
78 | } | ||
79 | |||
80 | void init_smtc_stats(void) | ||
81 | { | ||
82 | int i; | ||
83 | |||
84 | for (i=0; i<NR_CPUS; i++) { | ||
85 | smtc_cpu_stats[i].timerints = 0; | ||
86 | smtc_cpu_stats[i].selfipis = 0; | ||
87 | } | ||
88 | |||
89 | atomic_set(&smtc_fpu_recoveries, 0); | ||
90 | |||
91 | smtc_stats = create_proc_read_entry("smtc", 0444, NULL, | ||
92 | proc_read_smtc, NULL); | ||
93 | } | ||
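(Editorial illustration, not part of the patch: given the sprintf calls above, a read of /proc/smtc produces output shaped like the following. The counts are invented, for a hypothetical two-TC configuration.)

SMTC Status Word: 0x00000003
Config7: 0x00000000
EBASE: 0x80000000
Counter Interrupts taken per CPU (TC)
0: 1042
1: 1038
Self-IPIs by CPU:
0: 3
1: 7
2 Recoveries of "stolen" FPU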
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c new file mode 100644 index 000000000000..2e8e52c135e6 --- /dev/null +++ b/arch/mips/kernel/smtc.c | |||
@@ -0,0 +1,1322 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | ||
2 | |||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | |||
8 | #include <asm/cpu.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/atomic.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/hardirq.h> | ||
13 | #include <asm/hazards.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/time.h> | ||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/smtc.h> | ||
21 | #include <asm/smtc_ipi.h> | ||
22 | #include <asm/smtc_proc.h> | ||
23 | |||
24 | /* | ||
25 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * MIPSCPU_INT_BASE is identically defined in both | ||
30 | * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h, | ||
31 | * but as yet there's no properly organized include structure that | ||
32 | * will ensure that the right *int.h file will be included for a | ||
33 | * given platform build. | ||
34 | */ | ||
35 | |||
36 | #define MIPSCPU_INT_BASE 16 | ||
37 | |||
38 | #define MIPS_CPU_IPI_IRQ 1 | ||
39 | |||
40 | #define LOCK_MT_PRA() \ | ||
41 | local_irq_save(flags); \ | ||
42 | mtflags = dmt() | ||
43 | |||
44 | #define UNLOCK_MT_PRA() \ | ||
45 | emt(mtflags); \ | ||
46 | local_irq_restore(flags) | ||
47 | |||
48 | #define LOCK_CORE_PRA() \ | ||
49 | local_irq_save(flags); \ | ||
50 | mtflags = dvpe() | ||
51 | |||
52 | #define UNLOCK_CORE_PRA() \ | ||
53 | evpe(mtflags); \ | ||
54 | local_irq_restore(flags) | ||
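(Editorial note, not part of the patch: these macros expand in place and assign to `flags` and `mtflags` from the enclosing scope, so every caller must declare both, as smtc_boot_secondary() below does. A minimal sketch, with a hypothetical function name:)

static void pra_critical_sketch(void)
{
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();		/* IRQs off, TC scheduling inhibited on this VPE */
	/* ... manipulate per-VPE privileged resource architecture state ... */
	UNLOCK_MT_PRA();	/* re-enable MT, then restore interrupt state */
}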
55 | |||
56 | /* | ||
57 | * Data structures purely associated with SMTC parallelism | ||
58 | */ | ||
59 | |||
60 | |||
61 | /* | ||
62 | * Table for tracking ASIDs whose lifetime is prolonged. | ||
63 | */ | ||
64 | |||
65 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
66 | |||
67 | /* | ||
68 | * Clock interrupt "latch" buffers, per "CPU" | ||
69 | */ | ||
70 | |||
71 | unsigned int ipi_timer_latch[NR_CPUS]; | ||
72 | |||
73 | /* | ||
74 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | ||
75 | */ | ||
76 | |||
77 | #define IPIBUF_PER_CPU 4 | ||
78 | |||
79 | struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
80 | struct smtc_ipi_q freeIPIq; | ||
81 | |||
82 | |||
83 | /* Forward declarations */ | ||
84 | |||
85 | void ipi_decode(struct pt_regs *, struct smtc_ipi *); | ||
86 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi); | ||
87 | void setup_cross_vpe_interrupts(void); | ||
88 | void init_smtc_stats(void); | ||
89 | |||
90 | /* Global SMTC Status */ | ||
91 | |||
92 | unsigned int smtc_status = 0; | ||
93 | |||
94 | /* Boot command line configuration overrides */ | ||
95 | |||
96 | static int vpelimit = 0; | ||
97 | static int tclimit = 0; | ||
98 | static int ipibuffers = 0; | ||
99 | static int nostlb = 0; | ||
100 | static int asidmask = 0; | ||
101 | unsigned long smtc_asid_mask = 0xff; | ||
102 | |||
103 | static int __init maxvpes(char *str) | ||
104 | { | ||
105 | get_option(&str, &vpelimit); | ||
106 | return 1; | ||
107 | } | ||
108 | |||
109 | static int __init maxtcs(char *str) | ||
110 | { | ||
111 | get_option(&str, &tclimit); | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | static int __init ipibufs(char *str) | ||
116 | { | ||
117 | get_option(&str, &ipibuffers); | ||
118 | return 1; | ||
119 | } | ||
120 | |||
121 | static int __init stlb_disable(char *s) | ||
122 | { | ||
123 | nostlb = 1; | ||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | static int __init asidmask_set(char *str) | ||
128 | { | ||
129 | get_option(&str, &asidmask); | ||
130 | switch (asidmask) { | ||
131 | case 0x1: | ||
132 | case 0x3: | ||
133 | case 0x7: | ||
134 | case 0xf: | ||
135 | case 0x1f: | ||
136 | case 0x3f: | ||
137 | case 0x7f: | ||
138 | case 0xff: | ||
139 | smtc_asid_mask = (unsigned long)asidmask; | ||
140 | break; | ||
141 | default: | ||
142 | printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); | ||
143 | } | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | __setup("maxvpes=", maxvpes); | ||
148 | __setup("maxtcs=", maxtcs); | ||
149 | __setup("ipibufs=", ipibufs); | ||
150 | __setup("nostlb", stlb_disable); | ||
151 | __setup("asidmask=", asidmask_set); | ||
152 | |||
153 | /* Enable additional debug checks before going into CPU idle loop */ | ||
154 | #define SMTC_IDLE_HOOK_DEBUG | ||
155 | |||
156 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
157 | |||
158 | static int hang_trig = 0; | ||
159 | |||
160 | static int __init hangtrig_enable(char *s) | ||
161 | { | ||
162 | hang_trig = 1; | ||
163 | return 1; | ||
164 | } | ||
165 | |||
166 | |||
167 | __setup("hangtrig", hangtrig_enable); | ||
168 | |||
169 | #define DEFAULT_BLOCKED_IPI_LIMIT 32 | ||
170 | |||
171 | static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; | ||
172 | |||
173 | static int __init tintq(char *str) | ||
174 | { | ||
175 | get_option(&str, &timerq_limit); | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | __setup("tintq=", tintq); | ||
180 | |||
181 | int imstuckcount[2][8]; | ||
182 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ | ||
183 | int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}}; | ||
184 | int tcnoprog[NR_CPUS]; | ||
185 | static atomic_t idle_hook_initialized = {0}; | ||
186 | static int clock_hang_reported[NR_CPUS]; | ||
187 | |||
188 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
189 | |||
190 | /* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */ | ||
191 | |||
192 | void __init sanitize_tlb_entries(void) | ||
193 | { | ||
194 | printk("Deprecated sanitize_tlb_entries() invoked\n"); | ||
195 | } | ||
196 | |||
197 | |||
198 | /* | ||
199 | * Configure shared TLB - VPC configuration bit must be set by caller | ||
200 | */ | ||
201 | |||
202 | void smtc_configure_tlb(void) | ||
203 | { | ||
204 | int i, tlbsiz, vpes; | ||
205 | unsigned long mvpconf0; | ||
206 | unsigned long config1val; | ||
207 | |||
208 | /* Set up ASID preservation table */ | ||
209 | for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) { | ||
210 | for (i = 0; i < MAX_SMTC_ASIDS; i++) { | ||
211 | smtc_live_asid[vpes][i] = 0; | ||
212 | } | ||
213 | } | ||
214 | mvpconf0 = read_c0_mvpconf0(); | ||
215 | |||
216 | if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) | ||
217 | >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { | ||
218 | /* If we have multiple VPEs, try to share the TLB */ | ||
219 | if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { | ||
220 | /* | ||
221 | * If TLB sizing is programmable, shared TLB | ||
222 | * size is the total available complement. | ||
223 | * Otherwise, we have to take the sum of all | ||
224 | * static VPE TLB entries. | ||
225 | */ | ||
226 | if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) | ||
227 | >> MVPCONF0_PTLBE_SHIFT)) == 0) { | ||
228 | /* | ||
229 | * If there's more than one VPE, there had better | ||
230 | * be more than one TC, because we need one to bind | ||
231 | * to each VPE in turn to be able to read | ||
232 | * its configuration state! | ||
233 | */ | ||
234 | settc(1); | ||
235 | /* Stop the TC from doing anything foolish */ | ||
236 | write_tc_c0_tchalt(TCHALT_H); | ||
237 | mips_ihb(); | ||
238 | /* No need to un-Halt - that happens later anyway */ | ||
239 | for (i=0; i < vpes; i++) { | ||
240 | write_tc_c0_tcbind(i); | ||
241 | /* | ||
242 | * To be 100% sure we're really getting the right | ||
243 | * information, we exit the configuration state | ||
244 | * and do an IHB after each rebinding. | ||
245 | */ | ||
246 | write_c0_mvpcontrol( | ||
247 | read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
248 | mips_ihb(); | ||
249 | /* | ||
250 | * Only count if the MMU Type indicated is TLB | ||
251 | */ | ||
252 | if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { | ||
253 | config1val = read_vpe_c0_config1(); | ||
254 | tlbsiz += ((config1val >> 25) & 0x3f) + 1; | ||
255 | } | ||
256 | |||
257 | /* Put core back in configuration state */ | ||
258 | write_c0_mvpcontrol( | ||
259 | read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
260 | mips_ihb(); | ||
261 | } | ||
262 | } | ||
263 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); | ||
264 | |||
265 | /* | ||
266 | * Set up kernel data structures to use software total, | ||
267 | * rather than read the per-VPE Config1 value. The values | ||
268 | * for "CPU 0" get copied to all the other CPUs as part | ||
269 | * of their initialization in smtc_cpu_setup(). | ||
270 | */ | ||
271 | |||
272 | tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */ | ||
273 | cpu_data[0].tlbsize = tlbsiz; | ||
274 | smtc_status |= SMTC_TLB_SHARED; | ||
275 | |||
276 | printk("TLB of %d entry pairs shared by %d VPEs\n", | ||
277 | tlbsiz, vpes); | ||
278 | } else { | ||
279 | printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); | ||
280 | } | ||
281 | } | ||
282 | } | ||
283 | |||
284 | |||
285 | /* | ||
286 | * Incrementally build the CPU map out of constituent MIPS MT cores, | ||
287 | * using the specified available VPEs and TCs. Platform code needs | ||
288 | * to ensure that each MIPS MT core invokes this routine on reset, | ||
289 | * one at a time(!). | ||
290 | * | ||
291 | * This version of the build_cpu_map and prepare_cpus routines assumes | ||
292 | * that *all* TCs of a MIPS MT core will be used for Linux, and that | ||
293 | * they will be spread across *all* available VPEs (to minimise the | ||
294 | * loss of efficiency due to exception service serialization). | ||
295 | * An improved version would pick up configuration information and | ||
296 | * possibly leave some TCs/VPEs as "slave" processors. | ||
297 | * | ||
298 | * Use c0_MVPConf0 to find out how many TCs are available, setting up | ||
299 | * phys_cpu_present_map and the logical/physical mappings. | ||
300 | */ | ||
301 | |||
302 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | ||
303 | { | ||
304 | int i, ntcs; | ||
305 | |||
306 | /* | ||
307 | * The CPU map isn't actually used for anything at this point, | ||
308 | * so it's not clear what else we should do apart from set | ||
309 | * everything up so that "logical" = "physical". | ||
310 | */ | ||
311 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
312 | for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { | ||
313 | cpu_set(i, phys_cpu_present_map); | ||
314 | __cpu_number_map[i] = i; | ||
315 | __cpu_logical_map[i] = i; | ||
316 | } | ||
317 | /* Initialize map of CPUs with FPUs */ | ||
318 | cpus_clear(mt_fpu_cpumask); | ||
319 | |||
320 | /* One of those TCs is the one booting, and not a secondary... */ | ||
321 | printk("%i available secondary CPU TC(s)\n", i - 1); | ||
322 | |||
323 | return i; | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Common setup before any secondaries are started | ||
328 | * Make sure all CPUs are in a sensible state before we boot any of the | ||
329 | * secondaries. | ||
330 | * | ||
331 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly | ||
332 | * as possible across the available VPEs. | ||
333 | */ | ||
334 | |||
335 | static void smtc_tc_setup(int vpe, int tc, int cpu) | ||
336 | { | ||
337 | settc(tc); | ||
338 | write_tc_c0_tchalt(TCHALT_H); | ||
339 | mips_ihb(); | ||
340 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | ||
341 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | ||
342 | | TCSTATUS_A); | ||
343 | write_tc_c0_tccontext(0); | ||
344 | /* Bind tc to vpe */ | ||
345 | write_tc_c0_tcbind(vpe); | ||
346 | /* In general, all TCs should have the same cpu_data indications */ | ||
347 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); | ||
348 | /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ | ||
349 | if (cpu_data[0].cputype == CPU_34K) | ||
350 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | ||
351 | cpu_data[cpu].vpe_id = vpe; | ||
352 | cpu_data[cpu].tc_id = tc; | ||
353 | } | ||
354 | |||
355 | |||
356 | void mipsmt_prepare_cpus(void) | ||
357 | { | ||
358 | int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu; | ||
359 | unsigned long flags; | ||
360 | unsigned long val; | ||
361 | int nipi; | ||
362 | struct smtc_ipi *pipi; | ||
363 | |||
364 | /* disable interrupts so we can disable MT */ | ||
365 | local_irq_save(flags); | ||
366 | /* disable MT so we can configure */ | ||
367 | dvpe(); | ||
368 | dmt(); | ||
369 | |||
370 | freeIPIq.lock = SPIN_LOCK_UNLOCKED; | ||
371 | |||
372 | /* | ||
373 | * We probably don't have as many VPEs as we do SMP "CPUs", | ||
374 | * but it's possible - and in any case we'll never use more! | ||
375 | */ | ||
376 | for (i=0; i<NR_CPUS; i++) { | ||
377 | IPIQ[i].head = IPIQ[i].tail = NULL; | ||
378 | IPIQ[i].lock = SPIN_LOCK_UNLOCKED; | ||
379 | IPIQ[i].depth = 0; | ||
380 | ipi_timer_latch[i] = 0; | ||
381 | } | ||
382 | |||
383 | /* cpu_data index starts at zero */ | ||
384 | cpu = 0; | ||
385 | cpu_data[cpu].vpe_id = 0; | ||
386 | cpu_data[cpu].tc_id = 0; | ||
387 | cpu++; | ||
388 | |||
389 | /* Report on boot-time options */ | ||
390 | mips_mt_set_cpuoptions(); | ||
391 | if (vpelimit > 0) | ||
392 | printk("Limit of %d VPEs set\n", vpelimit); | ||
393 | if (tclimit > 0) | ||
394 | printk("Limit of %d TCs set\n", tclimit); | ||
395 | if (nostlb) { | ||
396 | printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); | ||
397 | } | ||
398 | if (asidmask) | ||
399 | printk("ASID mask value override to 0x%x\n", asidmask); | ||
400 | |||
401 | /* Temporary */ | ||
402 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
403 | if (hang_trig) | ||
404 | printk("Logic Analyser Trigger on suspected TC hang\n"); | ||
405 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
406 | |||
407 | /* Put MVPE's into 'configuration state' */ | ||
408 | write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
409 | |||
410 | val = read_c0_mvpconf0(); | ||
411 | nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
412 | if (vpelimit > 0 && nvpe > vpelimit) | ||
413 | nvpe = vpelimit; | ||
414 | ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
415 | if (ntc > NR_CPUS) | ||
416 | ntc = NR_CPUS; | ||
417 | if (tclimit > 0 && ntc > tclimit) | ||
418 | ntc = tclimit; | ||
419 | tcpervpe = ntc / nvpe; | ||
420 | slop = ntc % nvpe; /* Residual TCs, < NVPE */ | ||
421 | |||
422 | /* Set up shared TLB */ | ||
423 | smtc_configure_tlb(); | ||
424 | |||
425 | for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) { | ||
426 | /* | ||
427 | * Set the MVP bits. | ||
428 | */ | ||
429 | settc(tc); | ||
430 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP); | ||
431 | if (vpe != 0) | ||
432 | printk(", "); | ||
433 | printk("VPE %d: TC", vpe); | ||
434 | for (i = 0; i < tcpervpe; i++) { | ||
435 | /* | ||
436 | * TC 0 is bound to VPE 0 at reset, | ||
437 | * and is presumably executing this | ||
438 | * code. Leave it alone! | ||
439 | */ | ||
440 | if (tc != 0) { | ||
441 | smtc_tc_setup(vpe, tc, cpu); | ||
442 | cpu++; | ||
443 | } | ||
444 | printk(" %d", tc); | ||
445 | tc++; | ||
446 | } | ||
447 | if (slop) { | ||
448 | if (tc != 0) { | ||
449 | smtc_tc_setup(vpe, tc, cpu); | ||
450 | cpu++; | ||
451 | } | ||
452 | printk(" %d", tc); | ||
453 | tc++; | ||
454 | slop--; | ||
455 | } | ||
456 | if (vpe != 0) { | ||
457 | /* | ||
458 | * Clear any stale software interrupts from VPE's Cause | ||
459 | */ | ||
460 | write_vpe_c0_cause(0); | ||
461 | |||
462 | /* | ||
463 | * Clear ERL/EXL of VPEs other than 0 | ||
464 | * and set restricted interrupt enable/mask. | ||
465 | */ | ||
466 | write_vpe_c0_status((read_vpe_c0_status() | ||
467 | & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) | ||
468 | | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 | ||
469 | | ST0_IE)); | ||
470 | /* | ||
471 | * set config to be the same as vpe0, | ||
472 | * particularly kseg0 coherency alg | ||
473 | */ | ||
474 | write_vpe_c0_config(read_c0_config()); | ||
475 | /* Clear any pending timer interrupt */ | ||
476 | write_vpe_c0_compare(0); | ||
477 | /* Propagate Config7 */ | ||
478 | write_vpe_c0_config7(read_c0_config7()); | ||
479 | } | ||
480 | /* enable multi-threading within VPE */ | ||
481 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | ||
482 | /* enable the VPE */ | ||
483 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Pull any physically present but unused TCs out of circulation. | ||
488 | */ | ||
489 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { | ||
490 | cpu_clear(tc, phys_cpu_present_map); | ||
491 | cpu_clear(tc, cpu_present_map); | ||
492 | tc++; | ||
493 | } | ||
494 | |||
495 | /* release config state */ | ||
496 | write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
497 | |||
498 | printk("\n"); | ||
499 | |||
500 | /* Set up coprocessor affinity CPU mask(s) */ | ||
501 | |||
502 | for (tc = 0; tc < ntc; tc++) { | ||
503 | if (cpu_data[tc].options & MIPS_CPU_FPU) | ||
504 | cpu_set(tc, mt_fpu_cpumask); | ||
505 | } | ||
506 | |||
507 | /* set up ipi interrupts... */ | ||
508 | |||
509 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ | ||
510 | |||
511 | if (nvpe > 1) | ||
512 | setup_cross_vpe_interrupts(); | ||
513 | |||
514 | /* Set up queue of free IPI "messages". */ | ||
515 | nipi = NR_CPUS * IPIBUF_PER_CPU; | ||
516 | if (ipibuffers > 0) | ||
517 | nipi = ipibuffers; | ||
518 | |||
519 | pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL); | ||
520 | if (pipi == NULL) | ||
521 | panic("kmalloc of IPI message buffers failed\n"); | ||
522 | else | ||
523 | printk("IPI buffer pool of %d buffers\n", nipi); | ||
524 | for (i = 0; i < nipi; i++) { | ||
525 | smtc_ipi_nq(&freeIPIq, pipi); | ||
526 | pipi++; | ||
527 | } | ||
528 | |||
529 | /* Arm multithreading and enable other VPEs - but all TCs are Halted */ | ||
530 | emt(EMT_ENABLE); | ||
531 | evpe(EVPE_ENABLE); | ||
532 | local_irq_restore(flags); | ||
533 | /* Initialize SMTC /proc statistics/diagnostics */ | ||
534 | init_smtc_stats(); | ||
535 | } | ||
536 | |||
537 | |||
538 | /* | ||
539 | * Set up the PC, SP, and GP of a secondary processor and start it | ||
540 | * running! | ||
541 | * smp_bootstrap is the place to resume from, | ||
542 | * __KSTK_TOS(idle) is apparently the stack pointer, | ||
543 | * and (unsigned long)idle->thread_info is the gp. | ||
544 | * | ||
545 | */ | ||
546 | void smtc_boot_secondary(int cpu, struct task_struct *idle) | ||
547 | { | ||
548 | extern u32 kernelsp[NR_CPUS]; | ||
549 | unsigned long flags; | ||
550 | int mtflags; | ||
551 | |||
552 | LOCK_MT_PRA(); | ||
553 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
554 | dvpe(); | ||
555 | } | ||
556 | settc(cpu_data[cpu].tc_id); | ||
557 | |||
558 | /* pc */ | ||
559 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
560 | |||
561 | /* stack pointer */ | ||
562 | kernelsp[cpu] = __KSTK_TOS(idle); | ||
563 | write_tc_gpr_sp(__KSTK_TOS(idle)); | ||
564 | |||
565 | /* global pointer */ | ||
566 | write_tc_gpr_gp((unsigned long)idle->thread_info); | ||
567 | |||
568 | smtc_status |= SMTC_MTC_ACTIVE; | ||
569 | write_tc_c0_tchalt(0); | ||
570 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
571 | evpe(EVPE_ENABLE); | ||
572 | } | ||
573 | UNLOCK_MT_PRA(); | ||
574 | } | ||
575 | |||
576 | void smtc_init_secondary(void) | ||
577 | { | ||
578 | /* | ||
579 | * Start timer on secondary VPEs if necessary. | ||
580 | * mips_timer_setup should already have been invoked by init/main | ||
581 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
582 | * SMTC init code assigns TCs consdecutively and in ascending order | ||
583 | * to across available VPEs. | ||
584 | */ | ||
585 | if(((read_c0_tcbind() & TCBIND_CURTC) != 0) | ||
586 | && ((read_c0_tcbind() & TCBIND_CURVPE) | ||
587 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
588 | write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ); | ||
589 | } | ||
590 | |||
591 | local_irq_enable(); | ||
592 | } | ||
593 | |||
594 | void smtc_smp_finish(void) | ||
595 | { | ||
596 | printk("TC %d going on-line as CPU %d\n", | ||
597 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | ||
598 | } | ||
599 | |||
600 | void smtc_cpus_done(void) | ||
601 | { | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Support for SMTC-optimized driver IRQ registration | ||
606 | */ | ||
607 | |||
608 | /* | ||
609 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
610 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
611 | * in this table. | ||
612 | */ | ||
613 | |||
614 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
615 | unsigned long hwmask) | ||
616 | { | ||
617 | irq_hwmask[irq] = hwmask; | ||
618 | |||
619 | return setup_irq(irq, new); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * IPI model for SMTC is tricky, because interrupts aren't TC-specific. | ||
624 | * Within a VPE one TC can interrupt another by different approaches. | ||
625 | * The easiest to get right would probably be to make all TCs except | ||
626 | * the target IXMT and set a software interrupt, but an IXMT-based | ||
627 | * scheme requires that a handler must run before a new IPI could | ||
628 | * be sent, which would break the "broadcast" loops in MIPS MT. | ||
629 | * A more gonzo approach within a VPE is to halt the TC, extract | ||
630 | * its Restart, Status, and a couple of GPRs, and program the Restart | ||
631 | * address to emulate an interrupt. | ||
632 | * | ||
633 | * Within a VPE, one can be confident that the target TC isn't in | ||
634 | * a critical EXL state when halted, since the write to the Halt | ||
635 | * register could not have issued on the writing thread if the | ||
636 | * halting thread had EXL set. So k0 and k1 of the target TC | ||
637 | * can be used by the injection code. Across VPEs, one can't | ||
638 | * be certain that the target TC isn't in a critical exception | ||
639 | * state. So we try a two-step process of sending a software | ||
640 | * interrupt to the target VPE, which either handles the event | ||
641 | * itself (if it was the target) or injects the event within | ||
642 | * the VPE. | ||
643 | */ | ||
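(Editorial summary of the delivery policy described above; the authoritative logic is smtc_send_ipi() below:
- same VPE, target not IXMT: halt the target TC, inject an "interrupt" frame directly, unhalt it;
- same VPE, target IXMT: queue the message for the target to drain later;
- different VPE: queue the message and raise software interrupt 1 on the target VPE by MTTR manipulation of its Cause register.)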
644 | |||
645 | void smtc_ipi_qdump(void) | ||
646 | { | ||
647 | int i; | ||
648 | |||
649 | for (i = 0; i < NR_CPUS; i++) { | ||
650 | printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", | ||
651 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, | ||
652 | IPIQ[i].depth); | ||
653 | } | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * The standard atomic.h primitives don't quite do what we want | ||
658 | * here: We need an atomic add-and-return-previous-value (which | ||
659 | * could be done with atomic_add_return and a decrement) and an | ||
660 | * atomic set/zero-and-return-previous-value (which can't really | ||
661 | * be done with the atomic.h primitives). And since this is | ||
662 | * MIPS MT, we can assume that we have LL/SC. | ||
663 | */ | ||
664 | static __inline__ int atomic_postincrement(unsigned int *pv) | ||
665 | { | ||
666 | unsigned long result; | ||
667 | |||
668 | unsigned long temp; | ||
669 | |||
670 | __asm__ __volatile__( | ||
671 | "1: ll %0, %2 \n" | ||
672 | " addu %1, %0, 1 \n" | ||
673 | " sc %1, %2 \n" | ||
674 | " beqz %1, 1b \n" | ||
675 | " sync \n" | ||
676 | : "=&r" (result), "=&r" (temp), "=m" (*pv) | ||
677 | : "m" (*pv) | ||
678 | : "memory"); | ||
679 | |||
680 | return result; | ||
681 | } | ||
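(Editorial aside, not part of the patch: the composition hinted at in the comment above — add-and-return-previous built from atomic_add_return() plus a decrement — would look like the sketch below for an atomic_t counter; the function name is hypothetical. No analogous composition exists for zero-and-return-previous, hence the hand-rolled LL/SC routines.)

static inline int atomic_postincrement_via_add_return(atomic_t *v)
{
	/* atomic_add_return() yields the post-increment value */
	return atomic_add_return(1, v) - 1;
}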
682 | |||
683 | /* No longer used in IPI dispatch, but retained for future recycling */ | ||
684 | |||
685 | static __inline__ int atomic_postclear(unsigned int *pv) | ||
686 | { | ||
687 | unsigned long result; | ||
688 | |||
689 | unsigned long temp; | ||
690 | |||
691 | __asm__ __volatile__( | ||
692 | "1: ll %0, %2 \n" | ||
693 | " or %1, $0, $0 \n" | ||
694 | " sc %1, %2 \n" | ||
695 | " beqz %1, 1b \n" | ||
696 | " sync \n" | ||
697 | : "=&r" (result), "=&r" (temp), "=m" (*pv) | ||
698 | : "m" (*pv) | ||
699 | : "memory"); | ||
700 | |||
701 | return result; | ||
702 | } | ||
703 | |||
704 | |||
705 | void smtc_send_ipi(int cpu, int type, unsigned int action) | ||
706 | { | ||
707 | int tcstatus; | ||
708 | struct smtc_ipi *pipi; | ||
709 | unsigned long flags; | ||
710 | int mtflags; | ||
711 | |||
712 | if (cpu == smp_processor_id()) { | ||
713 | printk("Cannot Send IPI to self!\n"); | ||
714 | return; | ||
715 | } | ||
716 | /* Set up a descriptor, to be delivered either promptly or queued */ | ||
717 | pipi = smtc_ipi_dq(&freeIPIq); | ||
718 | if (pipi == NULL) { | ||
719 | bust_spinlocks(1); | ||
720 | mips_mt_regdump(dvpe()); | ||
721 | panic("IPI Msg. Buffers Depleted\n"); | ||
722 | } | ||
723 | pipi->type = type; | ||
724 | pipi->arg = (void *)action; | ||
725 | pipi->dest = cpu; | ||
726 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
727 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | ||
728 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
729 | LOCK_CORE_PRA(); | ||
730 | settc(cpu_data[cpu].tc_id); | ||
731 | write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); | ||
732 | UNLOCK_CORE_PRA(); | ||
733 | } else { | ||
734 | /* | ||
735 | * Not sufficient to do a LOCK_MT_PRA (dmt) here, | ||
736 | * since ASID shootdown on the other VPE may | ||
737 | * collide with this operation. | ||
738 | */ | ||
739 | LOCK_CORE_PRA(); | ||
740 | settc(cpu_data[cpu].tc_id); | ||
741 | /* Halt the targeted TC */ | ||
742 | write_tc_c0_tchalt(TCHALT_H); | ||
743 | mips_ihb(); | ||
744 | |||
745 | /* | ||
746 | * Inspect TCStatus - if IXMT is set, we have to queue | ||
747 | * a message. Otherwise, we set up the "interrupt" | ||
748 | * of the other TC | ||
749 | */ | ||
750 | tcstatus = read_tc_c0_tcstatus(); | ||
751 | |||
752 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | ||
753 | /* | ||
754 | * Spin-waiting here can deadlock, | ||
755 | * so we queue the message for the target TC. | ||
756 | */ | ||
757 | write_tc_c0_tchalt(0); | ||
758 | UNLOCK_CORE_PRA(); | ||
759 | /* Try to reduce redundant timer interrupt messages */ | ||
760 | if (type == SMTC_CLOCK_TICK) { | ||
761 | if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) { | ||
762 | smtc_ipi_nq(&freeIPIq, pipi); | ||
763 | return; | ||
764 | } | ||
765 | } | ||
766 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
767 | } else { | ||
768 | post_direct_ipi(cpu, pipi); | ||
769 | write_tc_c0_tchalt(0); | ||
770 | UNLOCK_CORE_PRA(); | ||
771 | } | ||
772 | } | ||
773 | } | ||
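(Editorial illustration, not part of the patch: a caller such as the SMP reschedule path would invoke this primitive with the type/action pairs that ipi_decode() below understands; the wrapper name is hypothetical.)

static void sketch_resched_cpu(int cpu)
{
	/* Ask another CPU to reschedule itself */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, SMP_RESCHEDULE_YOURSELF);
}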
774 | |||
775 | /* | ||
776 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set | ||
777 | */ | ||
778 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi) | ||
779 | { | ||
780 | struct pt_regs *kstack; | ||
781 | unsigned long tcstatus; | ||
782 | unsigned long tcrestart; | ||
783 | extern u32 kernelsp[NR_CPUS]; | ||
784 | extern void __smtc_ipi_vector(void); | ||
785 | |||
786 | /* Extract Status, EPC from halted TC */ | ||
787 | tcstatus = read_tc_c0_tcstatus(); | ||
788 | tcrestart = read_tc_c0_tcrestart(); | ||
789 | /* If TCRestart indicates a WAIT instruction, advance the PC */ | ||
790 | if ((tcrestart & 0x80000000) | ||
791 | && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { | ||
792 | tcrestart += 4; | ||
793 | } | ||
794 | /* | ||
795 | * Save on TC's future kernel stack | ||
796 | * | ||
797 | * CU bit of Status is indicator that TC was | ||
798 | * already running on a kernel stack... | ||
799 | */ | ||
800 | if (tcstatus & ST0_CU0) { | ||
801 | /* Note that this "- 1" is pointer arithmetic */ | ||
802 | kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; | ||
803 | } else { | ||
804 | kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; | ||
805 | } | ||
806 | |||
807 | kstack->cp0_epc = (long)tcrestart; | ||
808 | /* Save TCStatus */ | ||
809 | kstack->cp0_tcstatus = tcstatus; | ||
810 | /* Pass token of operation to be performed in kernel stack pad area */ | ||
811 | kstack->pad0[4] = (unsigned long)pipi; | ||
812 | /* Pass address of function to be called likewise */ | ||
813 | kstack->pad0[5] = (unsigned long)&ipi_decode; | ||
814 | /* Set interrupt exempt and kernel mode */ | ||
815 | tcstatus |= TCSTATUS_IXMT; | ||
816 | tcstatus &= ~TCSTATUS_TKSU; | ||
817 | write_tc_c0_tcstatus(tcstatus); | ||
818 | ehb(); | ||
819 | /* Set TC Restart address to be SMTC IPI vector */ | ||
820 | write_tc_c0_tcrestart(__smtc_ipi_vector); | ||
821 | } | ||
822 | |||
823 | void ipi_resched_interrupt(struct pt_regs *regs) | ||
824 | { | ||
825 | /* Return from interrupt should be enough to cause scheduler check */ | ||
826 | } | ||
827 | |||
828 | |||
829 | void ipi_call_interrupt(struct pt_regs *regs) | ||
830 | { | ||
831 | /* Invoke generic function invocation code in smp.c */ | ||
832 | smp_call_function_interrupt(); | ||
833 | } | ||
834 | |||
835 | void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi) | ||
836 | { | ||
837 | void *arg_copy = pipi->arg; | ||
838 | int type_copy = pipi->type; | ||
839 | int dest_copy = pipi->dest; | ||
840 | |||
841 | smtc_ipi_nq(&freeIPIq, pipi); | ||
842 | switch (type_copy) { | ||
843 | case SMTC_CLOCK_TICK: | ||
844 | /* Invoke Clock "Interrupt" */ | ||
845 | ipi_timer_latch[dest_copy] = 0; | ||
846 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
847 | clock_hang_reported[dest_copy] = 0; | ||
848 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
849 | local_timer_interrupt(0, NULL, regs); | ||
850 | break; | ||
851 | case LINUX_SMP_IPI: | ||
852 | switch ((int)arg_copy) { | ||
853 | case SMP_RESCHEDULE_YOURSELF: | ||
854 | ipi_resched_interrupt(regs); | ||
855 | break; | ||
856 | case SMP_CALL_FUNCTION: | ||
857 | ipi_call_interrupt(regs); | ||
858 | break; | ||
859 | default: | ||
860 | printk("Impossible SMTC IPI Argument 0x%x\n", | ||
861 | (int)arg_copy); | ||
862 | break; | ||
863 | } | ||
864 | break; | ||
865 | default: | ||
866 | printk("Impossible SMTC IPI Type 0x%x\n", type_copy); | ||
867 | break; | ||
868 | } | ||
869 | } | ||
870 | |||
871 | void deferred_smtc_ipi(struct pt_regs *regs) | ||
872 | { | ||
873 | struct smtc_ipi *pipi; | ||
874 | unsigned long flags; | ||
875 | /* DEBUG */ | ||
876 | int q = smp_processor_id(); | ||
877 | |||
878 | /* | ||
879 | * Test is not atomic, but much faster than a dequeue, | ||
880 | * and the vast majority of invocations will have a null queue. | ||
881 | */ | ||
882 | if (IPIQ[q].head != NULL) { | ||
883 | while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | ||
884 | /* ipi_decode() should be called with interrupts off */ | ||
885 | local_irq_save(flags); | ||
886 | ipi_decode(regs, pipi); | ||
887 | local_irq_restore(flags); | ||
888 | } | ||
889 | } | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * Send clock tick to all TCs except the one executing the function | ||
894 | */ | ||
895 | |||
896 | void smtc_timer_broadcast(int vpe) | ||
897 | { | ||
898 | int cpu; | ||
899 | int myTC = cpu_data[smp_processor_id()].tc_id; | ||
900 | int myVPE = cpu_data[smp_processor_id()].vpe_id; | ||
901 | |||
902 | smtc_cpu_stats[smp_processor_id()].timerints++; | ||
903 | |||
904 | for_each_online_cpu(cpu) { | ||
905 | if (cpu_data[cpu].vpe_id == myVPE && | ||
906 | cpu_data[cpu].tc_id != myTC) | ||
907 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | /* | ||
912 | * Cross-VPE interrupts in the SMTC prototype use "software interrupts" | ||
913 | * set via cross-VPE MTTR manipulation of the Cause register. It would be | ||
914 | * in some regards preferable to have external logic for "doorbell" hardware | ||
915 | * interrupts. | ||
916 | */ | ||
917 | |||
918 | static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; | ||
919 | |||
920 | static irqreturn_t ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
921 | { | ||
922 | int my_vpe = cpu_data[smp_processor_id()].vpe_id; | ||
923 | int my_tc = cpu_data[smp_processor_id()].tc_id; | ||
924 | int cpu; | ||
925 | struct smtc_ipi *pipi; | ||
926 | unsigned long tcstatus; | ||
927 | int sent; | ||
928 | unsigned long flags; | ||
929 | unsigned int mtflags; | ||
930 | unsigned int vpflags; | ||
931 | |||
932 | /* | ||
933 | * So long as cross-VPE interrupts are done via | ||
934 | * MFTR/MTTR read-modify-writes of Cause, we need | ||
935 | * to stop other VPEs whenever the local VPE does | ||
936 | * anything similar. | ||
937 | */ | ||
938 | local_irq_save(flags); | ||
939 | vpflags = dvpe(); | ||
940 | clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); | ||
941 | set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); | ||
942 | irq_enable_hazard(); | ||
943 | evpe(vpflags); | ||
944 | local_irq_restore(flags); | ||
945 | |||
946 | /* | ||
947 | * Cross-VPE Interrupt handler: Try to directly deliver IPIs | ||
948 | * queued for TCs on this VPE other than the current one. | ||
949 | * Return-from-interrupt should cause us to drain the queue | ||
950 | * for the current TC, so we ought not to have to do it explicitly here. | ||
951 | */ | ||
952 | |||
953 | for_each_online_cpu(cpu) { | ||
954 | if (cpu_data[cpu].vpe_id != my_vpe) | ||
955 | continue; | ||
956 | |||
957 | pipi = smtc_ipi_dq(&IPIQ[cpu]); | ||
958 | if (pipi != NULL) { | ||
959 | if (cpu_data[cpu].tc_id != my_tc) { | ||
960 | sent = 0; | ||
961 | LOCK_MT_PRA(); | ||
962 | settc(cpu_data[cpu].tc_id); | ||
963 | write_tc_c0_tchalt(TCHALT_H); | ||
964 | mips_ihb(); | ||
965 | tcstatus = read_tc_c0_tcstatus(); | ||
966 | if ((tcstatus & TCSTATUS_IXMT) == 0) { | ||
967 | post_direct_ipi(cpu, pipi); | ||
968 | sent = 1; | ||
969 | } | ||
970 | write_tc_c0_tchalt(0); | ||
971 | UNLOCK_MT_PRA(); | ||
972 | if (!sent) { | ||
973 | smtc_ipi_req(&IPIQ[cpu], pipi); | ||
974 | } | ||
975 | } else { | ||
976 | /* | ||
977 | * ipi_decode() should be called | ||
978 | * with interrupts off | ||
979 | */ | ||
980 | local_irq_save(flags); | ||
981 | ipi_decode(regs, pipi); | ||
982 | local_irq_restore(flags); | ||
983 | } | ||
984 | } | ||
985 | } | ||
986 | |||
987 | return IRQ_HANDLED; | ||
988 | } | ||
989 | |||
990 | static void ipi_irq_dispatch(struct pt_regs *regs) | ||
991 | { | ||
992 | do_IRQ(cpu_ipi_irq, regs); | ||
993 | } | ||
994 | |||
995 | static struct irqaction irq_ipi; | ||
996 | |||
997 | void setup_cross_vpe_interrupts(void) | ||
998 | { | ||
999 | if (!cpu_has_vint) | ||
1000 | panic("SMTC Kernel requires Vectored Interupt support"); | ||
1001 | |||
1002 | set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); | ||
1003 | |||
1004 | irq_ipi.handler = ipi_interrupt; | ||
1005 | irq_ipi.flags = SA_INTERRUPT; | ||
1006 | irq_ipi.name = "SMTC_IPI"; | ||
1007 | |||
1008 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); | ||
1009 | |||
1010 | irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU; | ||
1011 | } | ||
1012 | |||
1013 | /* | ||
1014 | * SMTC-specific hacks invoked from elsewhere in the kernel. | ||
1015 | */ | ||
1016 | |||
1017 | void smtc_idle_loop_hook(void) | ||
1018 | { | ||
1019 | #ifdef SMTC_IDLE_HOOK_DEBUG | ||
1020 | int im; | ||
1021 | unsigned long flags; | ||
1022 | int mtflags; | ||
1023 | int bit; | ||
1024 | int vpe; | ||
1025 | int tc; | ||
1026 | int hook_ntcs; | ||
1027 | /* | ||
1028 | * printk within DMT-protected regions can deadlock, | ||
1029 | * so buffer diagnostic messages for later output. | ||
1030 | */ | ||
1031 | char *pdb_msg; | ||
1032 | char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ | ||
1033 | |||
1034 | if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ | ||
1035 | if (atomic_add_return(1, &idle_hook_initialized) == 1) { | ||
1036 | int mvpconf0; | ||
1037 | /* Tedious stuff to just do once */ | ||
1038 | mvpconf0 = read_c0_mvpconf0(); | ||
1039 | hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
1040 | if (hook_ntcs > NR_CPUS) | ||
1041 | hook_ntcs = NR_CPUS; | ||
1042 | for (tc = 0; tc < hook_ntcs; tc++) { | ||
1043 | tcnoprog[tc] = 0; | ||
1044 | clock_hang_reported[tc] = 0; | ||
1045 | } | ||
1046 | for (vpe = 0; vpe < 2; vpe++) | ||
1047 | for (im = 0; im < 8; im++) | ||
1048 | imstuckcount[vpe][im] = 0; | ||
1049 | printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); | ||
1050 | atomic_set(&idle_hook_initialized, 1000); | ||
1051 | } else { | ||
1052 | /* Someone else is initializing in parallel - let 'em finish */ | ||
1053 | while (atomic_read(&idle_hook_initialized) < 1000) | ||
1054 | ; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | /* Have we stupidly left IXMT set somewhere? */ | ||
1059 | if (read_c0_tcstatus() & 0x400) { | ||
1060 | write_c0_tcstatus(read_c0_tcstatus() & ~0x400); | ||
1061 | ehb(); | ||
1062 | printk("Dangling IXMT in cpu_idle()\n"); | ||
1063 | } | ||
1064 | |||
1065 | /* Have we stupidly left an IM bit turned off? */ | ||
1066 | #define IM_LIMIT 2000 | ||
1067 | local_irq_save(flags); | ||
1068 | mtflags = dmt(); | ||
1069 | pdb_msg = &id_ho_db_msg[0]; | ||
1070 | im = read_c0_status(); | ||
1071 | vpe = cpu_data[smp_processor_id()].vpe_id; | ||
1072 | for (bit = 0; bit < 8; bit++) { | ||
1073 | /* | ||
1074 | * In current prototype, I/O interrupts | ||
1075 | * are masked for VPE > 0 | ||
1076 | */ | ||
1077 | if (vpemask[vpe][bit]) { | ||
1078 | if (!(im & (0x100 << bit))) | ||
1079 | imstuckcount[vpe][bit]++; | ||
1080 | else | ||
1081 | imstuckcount[vpe][bit] = 0; | ||
1082 | if (imstuckcount[vpe][bit] > IM_LIMIT) { | ||
1083 | set_c0_status(0x100 << bit); | ||
1084 | ehb(); | ||
1085 | imstuckcount[vpe][bit] = 0; | ||
1086 | pdb_msg += sprintf(pdb_msg, | ||
1087 | "Dangling IM %d fixed for VPE %d\n", bit, | ||
1088 | vpe); | ||
1089 | } | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1095 | */ | ||
1096 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1097 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1098 | if ((tc != smp_processor_id()) && | ||
1099 | ipi_timer_latch[tc] > timerq_limit) { | ||
1100 | if (clock_hang_reported[tc] == 0) { | ||
1101 | pdb_msg += sprintf(pdb_msg, | ||
1102 | "TC %d looks hung with timer latch at %d\n", | ||
1103 | tc, ipi_timer_latch[tc]); | ||
1104 | clock_hang_reported[tc]++; | ||
1105 | } | ||
1106 | } | ||
1107 | } | ||
1108 | emt(mtflags); | ||
1109 | local_irq_restore(flags); | ||
1110 | if (pdb_msg != &id_ho_db_msg[0]) | ||
1111 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | ||
1112 | #endif /* SMTC_IDLE_HOOK_DEBUG */ | ||
1113 | /* | ||
1114 | * To the extent that we've ever turned interrupts off, | ||
1115 | * we may have accumulated deferred IPIs. This is subtle. | ||
1116 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1117 | * exact number - but we'll also disable interrupts | ||
1118 | * and create a window of failure where a new IPI gets | ||
1119 | * queued after we test the depth but before we re-enable | ||
1120 | * interrupts. So long as IXMT never gets set, however, | ||
1121 | * we should be OK: If we pick up something and dispatch | ||
1122 | * it here, that's great. If we see nothing, but concurrent | ||
1123 | * with this operation, another TC sends us an IPI, IXMT | ||
1124 | * is clear, and we'll handle it as a real pseudo-interrupt | ||
1125 | * and not a pseudo-pseudo interrupt. | ||
1126 | */ | ||
1127 | if (IPIQ[smp_processor_id()].depth > 0) { | ||
1128 | struct smtc_ipi *pipi; | ||
1129 | extern void self_ipi(struct smtc_ipi *); | ||
1130 | |||
1131 | if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) { | ||
1132 | self_ipi(pipi); | ||
1133 | smtc_cpu_stats[smp_processor_id()].selfipis++; | ||
1134 | } | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | void smtc_soft_dump(void) | ||
1139 | { | ||
1140 | int i; | ||
1141 | |||
1142 | printk("Counter Interrupts taken per CPU (TC)\n"); | ||
1143 | for (i=0; i < NR_CPUS; i++) { | ||
1144 | printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
1145 | } | ||
1146 | printk("Self-IPI invocations:\n"); | ||
1147 | for (i=0; i < NR_CPUS; i++) { | ||
1148 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
1149 | } | ||
1150 | smtc_ipi_qdump(); | ||
1151 | printk("Timer IPI Backlogs:\n"); | ||
1152 | for (i=0; i < NR_CPUS; i++) { | ||
1153 | printk("%d: %d\n", i, ipi_timer_latch[i]); | ||
1154 | } | ||
1155 | printk("%d Recoveries of \"stolen\" FPU\n", | ||
1156 | atomic_read(&smtc_fpu_recoveries)); | ||
1157 | } | ||
1158 | |||
1159 | |||
1160 | /* | ||
1161 | * TLB management routines special to SMTC | ||
1162 | */ | ||
1163 | |||
1164 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | ||
1165 | { | ||
1166 | unsigned long flags, mtflags, tcstat, prevhalt, asid; | ||
1167 | int tlb, i; | ||
1168 | |||
1169 | /* | ||
1170 | * It would be nice to be able to use a spinlock here, | ||
1171 | * but this is invoked from within TLB flush routines | ||
1172 | * that protect themselves with DVPE, so if a lock is | ||
1173 | * held by another TC, it'll never be freed. | ||
1174 | * | ||
1175 | * DVPE/DMT must not be done with interrupts enabled, | ||
1176 | * so even so most callers will already have disabled | ||
1177 | * them, let's be really careful... | ||
1178 | */ | ||
1179 | |||
1180 | local_irq_save(flags); | ||
1181 | if (smtc_status & SMTC_TLB_SHARED) { | ||
1182 | mtflags = dvpe(); | ||
1183 | tlb = 0; | ||
1184 | } else { | ||
1185 | mtflags = dmt(); | ||
1186 | tlb = cpu_data[cpu].vpe_id; | ||
1187 | } | ||
1188 | asid = asid_cache(cpu); | ||
1189 | |||
1190 | do { | ||
1191 | if (!((asid += ASID_INC) & ASID_MASK)) { | ||
1192 | if (cpu_has_vtag_icache) | ||
1193 | flush_icache_all(); | ||
1194 | /* Traverse all online CPUs (hack requires contiguous range) */ | ||
1195 | for (i = 0; i < num_online_cpus(); i++) { | ||
1196 | /* | ||
1197 | * We don't need to worry about our own CPU, nor about | ||
1198 | * CPUs that don't share our TLB. | ||
1199 | */ | ||
1200 | if ((i != smp_processor_id()) && | ||
1201 | ((smtc_status & SMTC_TLB_SHARED) || | ||
1202 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { | ||
1203 | settc(cpu_data[i].tc_id); | ||
1204 | prevhalt = read_tc_c0_tchalt() & TCHALT_H; | ||
1205 | if (!prevhalt) { | ||
1206 | write_tc_c0_tchalt(TCHALT_H); | ||
1207 | mips_ihb(); | ||
1208 | } | ||
1209 | tcstat = read_tc_c0_tcstatus(); | ||
1210 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); | ||
1211 | if (!prevhalt) | ||
1212 | write_tc_c0_tchalt(0); | ||
1213 | } | ||
1214 | } | ||
1215 | if (!asid) /* fix version if needed */ | ||
1216 | asid = ASID_FIRST_VERSION; | ||
1217 | local_flush_tlb_all(); /* start new asid cycle */ | ||
1218 | } | ||
1219 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); | ||
1220 | |||
1221 | /* | ||
1222 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | ||
1223 | */ | ||
1224 | for (i = 0; i < num_online_cpus(); i++) { | ||
1225 | if ((smtc_status & SMTC_TLB_SHARED) || | ||
1226 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
1227 | cpu_context(i, mm) = asid_cache(i) = asid; | ||
1228 | } | ||
1229 | |||
1230 | if (smtc_status & SMTC_TLB_SHARED) | ||
1231 | evpe(mtflags); | ||
1232 | else | ||
1233 | emt(mtflags); | ||
1234 | local_irq_restore(flags); | ||
1235 | } | ||
1236 | |||
1237 | /* | ||
1238 | * Invoked from macros defined in mmu_context.h | ||
1239 | * which must already have disabled interrupts | ||
1240 | * and done a DVPE or DMT as appropriate. | ||
1241 | */ | ||
1242 | |||
1243 | void smtc_flush_tlb_asid(unsigned long asid) | ||
1244 | { | ||
1245 | int entry; | ||
1246 | unsigned long ehi; | ||
1247 | |||
1248 | entry = read_c0_wired(); | ||
1249 | |||
1250 | /* Traverse all non-wired entries */ | ||
1251 | while (entry < current_cpu_data.tlbsize) { | ||
1252 | write_c0_index(entry); | ||
1253 | ehb(); | ||
1254 | tlb_read(); | ||
1255 | ehb(); | ||
1256 | ehi = read_c0_entryhi(); | ||
1257 | if ((ehi & ASID_MASK) == asid) { | ||
1258 | /* | ||
1259 | * Invalidate only entries with specified ASID, | ||
1260 | * making sure all entries differ. | ||
1261 | */ | ||
1262 | write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); | ||
1263 | write_c0_entrylo0(0); | ||
1264 | write_c0_entrylo1(0); | ||
1265 | mtc0_tlbw_hazard(); | ||
1266 | tlb_write_indexed(); | ||
1267 | } | ||
1268 | entry++; | ||
1269 | } | ||
1270 | write_c0_index(PARKED_INDEX); | ||
1271 | tlbw_use_hazard(); | ||
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * Support for single-threading cache flush operations. | ||
1276 | */ | ||
1277 | |||
1278 | int halt_state_save[NR_CPUS]; | ||
1279 | |||
1280 | /* | ||
1281 | * To really, really be sure that nothing is being done | ||
1282 | * by other TCs, halt them all. This code assumes that | ||
1283 | * a DVPE has already been done, so while their Halted | ||
1284 | * state is theoretically architecturally unstable, in | ||
1285 | * practice, it's not going to change while we're looking | ||
1286 | * at it. | ||
1287 | */ | ||
1288 | |||
1289 | void smtc_cflush_lockdown(void) | ||
1290 | { | ||
1291 | int cpu; | ||
1292 | |||
1293 | for_each_online_cpu(cpu) { | ||
1294 | if (cpu != smp_processor_id()) { | ||
1295 | settc(cpu_data[cpu].tc_id); | ||
1296 | halt_state_save[cpu] = read_tc_c0_tchalt(); | ||
1297 | write_tc_c0_tchalt(TCHALT_H); | ||
1298 | } | ||
1299 | } | ||
1300 | mips_ihb(); | ||
1301 | } | ||
1302 | |||
1303 | /* It would be cheating to change the cpu_online states during a flush! */ | ||
1304 | |||
1305 | void smtc_cflush_release(void) | ||
1306 | { | ||
1307 | int cpu; | ||
1308 | |||
1309 | /* | ||
1310 | * Start with a hazard barrier to ensure | ||
1311 | * that all CACHE ops have played through. | ||
1312 | */ | ||
1313 | mips_ihb(); | ||
1314 | |||
1315 | for_each_online_cpu(cpu) { | ||
1316 | if (cpu != smp_processor_id()) { | ||
1317 | settc(cpu_data[cpu].tc_id); | ||
1318 | write_tc_c0_tchalt(halt_state_save[cpu]); | ||
1319 | } | ||
1320 | } | ||
1321 | mips_ihb(); | ||
1322 | } | ||
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 5e51a2d8f3f0..13ff4da598cd 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -116,8 +116,7 @@ static void c0_timer_ack(void) | |||
116 | write_c0_compare(expirelo); | 116 | write_c0_compare(expirelo); |
117 | 117 | ||
118 | /* Check to see if we have missed any timer interrupts. */ | 118 | /* Check to see if we have missed any timer interrupts. */ |
119 | count = read_c0_count(); | 119 | while (((count = read_c0_count()) - expirelo) < 0x7fffffff) { |
120 | if ((count - expirelo) < 0x7fffffff) { | ||
121 | /* missed_timer_count++; */ | 120 | /* missed_timer_count++; */ |
122 | expirelo = count + cycles_per_jiffy; | 121 | expirelo = count + cycles_per_jiffy; |
123 | write_c0_compare(expirelo); | 122 | write_c0_compare(expirelo); |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 081e6ed5bb62..6336fe8008ec 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock); | |||
280 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) | 280 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) |
281 | { | 281 | { |
282 | static int die_counter; | 282 | static int die_counter; |
283 | #ifdef CONFIG_MIPS_MT_SMTC | ||
284 | unsigned long dvpret = dvpe(); | ||
285 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
283 | 286 | ||
284 | console_verbose(); | 287 | console_verbose(); |
285 | spin_lock_irq(&die_lock); | 288 | spin_lock_irq(&die_lock); |
289 | bust_spinlocks(1); | ||
290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
291 | mips_mt_regdump(dvpret); | ||
292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
286 | printk("%s[#%d]:\n", str, ++die_counter); | 293 | printk("%s[#%d]:\n", str, ++die_counter); |
287 | show_registers(regs); | 294 | show_registers(regs); |
288 | spin_unlock_irq(&die_lock); | 295 | spin_unlock_irq(&die_lock); |
@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
757 | 764 | ||
758 | case 2: | 765 | case 2: |
759 | case 3: | 766 | case 3: |
767 | die_if_kernel("do_cpu invoked from kernel context!", regs); | ||
760 | break; | 768 | break; |
761 | } | 769 | } |
762 | 770 | ||
@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs) | |||
794 | 802 | ||
795 | asmlinkage void do_mt(struct pt_regs *regs) | 803 | asmlinkage void do_mt(struct pt_regs *regs) |
796 | { | 804 | { |
805 | int subcode; | ||
806 | |||
807 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
808 | |||
809 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) | ||
810 | >> VPECONTROL_EXCPT_SHIFT; | ||
811 | switch (subcode) { | ||
812 | case 0: | ||
813 | printk(KERN_ERR "Thread Underflow\n"); | ||
814 | break; | ||
815 | case 1: | ||
816 | printk(KERN_ERR "Thread Overflow\n"); | ||
817 | break; | ||
818 | case 2: | ||
819 | printk(KERN_ERR "Invalid YIELD Qualifier\n"); | ||
820 | break; | ||
821 | case 3: | ||
822 | printk(KERN_ERR "Gating Storage Exception\n"); | ||
823 | break; | ||
824 | case 4: | ||
825 | printk(KERN_ERR "YIELD Scheduler Exception\n"); | ||
826 | break; | ||
827 | case 5: | ||
828 | printk(KERN_ERR "Gating Storage Schedulier Exception\n"); | ||
829 | break; | ||
830 | default: | ||
831 | printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n", | ||
832 | subcode); | ||
833 | break; | ||
834 | } | ||
797 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | 835 | die_if_kernel("MIPS MT Thread exception in kernel", regs); |
798 | 836 | ||
799 | force_sig(SIGILL, current); | 837 | force_sig(SIGILL, current); |
@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
929 | */ | 967 | */ |
930 | void nmi_exception_handler(struct pt_regs *regs) | 968 | void nmi_exception_handler(struct pt_regs *regs) |
931 | { | 969 | { |
970 | #ifdef CONFIG_MIPS_MT_SMTC | ||
971 | unsigned long dvpret = dvpe(); | ||
972 | bust_spinlocks(1); | ||
973 | printk("NMI taken!!!!\n"); | ||
974 | mips_mt_regdump(dvpret); | ||
975 | #else | ||
976 | bust_spinlocks(1); | ||
932 | printk("NMI taken!!!!\n"); | 977 | printk("NMI taken!!!!\n"); |
978 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
933 | die("NMI", regs); | 979 | die("NMI", regs); |
934 | while(1) ; | 980 | while(1) ; |
935 | } | 981 | } |
@@ -1007,7 +1053,7 @@ again: | |||
1007 | return set; | 1053 | return set; |
1008 | } | 1054 | } |
1009 | 1055 | ||
1010 | void mips_srs_free (int set) | 1056 | void mips_srs_free(int set) |
1011 | { | 1057 | { |
1012 | struct shadow_registers *sr = &shadow_registers; | 1058 | struct shadow_registers *sr = &shadow_registers; |
1013 | 1059 | ||
@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1027 | if (addr == NULL) { | 1073 | if (addr == NULL) { |
1028 | handler = (unsigned long) do_default_vi; | 1074 | handler = (unsigned long) do_default_vi; |
1029 | srs = 0; | 1075 | srs = 0; |
1030 | } | 1076 | } else |
1031 | else | ||
1032 | handler = (unsigned long) addr; | 1077 | handler = (unsigned long) addr; |
1033 | vi_handlers[n] = (unsigned long) addr; | 1078 | vi_handlers[n] = (unsigned long) addr; |
1034 | 1079 | ||
@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1040 | if (cpu_has_veic) { | 1085 | if (cpu_has_veic) { |
1041 | if (board_bind_eic_interrupt) | 1086 | if (board_bind_eic_interrupt) |
1042 | board_bind_eic_interrupt (n, srs); | 1087 | board_bind_eic_interrupt (n, srs); |
1043 | } | 1088 | } else if (cpu_has_vint) { |
1044 | else if (cpu_has_vint) { | ||
1045 | /* SRSMap is only defined if shadow sets are implemented */ | 1089 | /* SRSMap is only defined if shadow sets are implemented */ |
1046 | if (mips_srs_max() > 1) | 1090 | if (mips_srs_max() > 1) |
1047 | change_c0_srsmap (0xf << n*4, srs << n*4); | 1091 | change_c0_srsmap (0xf << n*4, srs << n*4); |
@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1055 | 1099 | ||
1056 | extern char except_vec_vi, except_vec_vi_lui; | 1100 | extern char except_vec_vi, except_vec_vi_lui; |
1057 | extern char except_vec_vi_ori, except_vec_vi_end; | 1101 | extern char except_vec_vi_ori, except_vec_vi_end; |
1102 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1103 | /* | ||
1104 | * We need to provide the SMTC vectored interrupt handler | ||
1105 | * not only with the address of the handler, but with the | ||
1106 | * Status.IM bit to be masked before going there. | ||
1107 | */ | ||
1108 | extern char except_vec_vi_mori; | ||
1109 | const int mori_offset = &except_vec_vi_mori - &except_vec_vi; | ||
1110 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1058 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | 1111 | const int handler_len = &except_vec_vi_end - &except_vec_vi; |
1059 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | 1112 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; |
1060 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | 1113 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; |
@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1068 | } | 1121 | } |
1069 | 1122 | ||
1070 | memcpy (b, &except_vec_vi, handler_len); | 1123 | memcpy (b, &except_vec_vi, handler_len); |
1124 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1125 | if (n > 7) | ||
1126 | printk("Vector index %d exceeds SMTC maximum\n", n); | ||
1127 | w = (u32 *)(b + mori_offset); | ||
1128 | *w = (*w & 0xffff0000) | (0x100 << n); | ||
1129 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1071 | w = (u32 *)(b + lui_offset); | 1130 | w = (u32 *)(b + lui_offset); |
1072 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | 1131 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); |
1073 | w = (u32 *)(b + ori_offset); | 1132 | w = (u32 *)(b + ori_offset); |
@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs) | |||
1090 | return (void *)old_handler; | 1149 | return (void *)old_handler; |
1091 | } | 1150 | } |
1092 | 1151 | ||
1093 | void *set_vi_handler (int n, void *addr) | 1152 | void *set_vi_handler(int n, void *addr) |
1094 | { | 1153 | { |
1095 | return set_vi_srs_handler(n, addr, 0); | 1154 | return set_vi_srs_handler(n, addr, 0); |
1096 | } | 1155 | } |
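
The stub setup above follows a simple self-patching pattern: copy a template delimited by two assembler labels, then rewrite the 16-bit immediate fields of the lui/ori instructions (and, for SMTC, the extra mask word at mori_offset) in place. A distilled sketch, with hypothetical names for everything not shown in the hunks:

        #include <stdint.h>
        #include <string.h>

        /* Rewrite the low 16 bits of one instruction word. */
        static void patch_imm16(uint32_t *insn, uint32_t imm)
        {
                *insn = (*insn & 0xffff0000) | (imm & 0xffff);
        }

        static void install_stub(uint8_t *vec, const uint8_t *tmpl, size_t len,
                                 size_t lui_off, size_t ori_off, uint32_t handler)
        {
                memcpy(vec, tmpl, len);
                patch_imm16((uint32_t *)(vec + lui_off), handler >> 16); /* lui */
                patch_imm16((uint32_t *)(vec + ori_off), handler);       /* ori */
                /* SMTC additionally patches 0x100 << n into the mori slot,
                 * the Status.IM bit for vector n (hence the n > 7 check). */
        }
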
@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc); | |||
1108 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); | 1167 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); |
1109 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); | 1168 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); |
1110 | 1169 | ||
1170 | #ifdef CONFIG_SMP | ||
1171 | static int smp_save_fp_context(struct sigcontext *sc) | ||
1172 | { | ||
1173 | return cpu_has_fpu | ||
1174 | ? _save_fp_context(sc) | ||
1175 | : fpu_emulator_save_context(sc); | ||
1176 | } | ||
1177 | |||
1178 | static int smp_restore_fp_context(struct sigcontext *sc) | ||
1179 | { | ||
1180 | return cpu_has_fpu | ||
1181 | ? _restore_fp_context(sc) | ||
1182 | : fpu_emulator_restore_context(sc); | ||
1183 | } | ||
1184 | #endif | ||
1185 | |||
1111 | static inline void signal_init(void) | 1186 | static inline void signal_init(void) |
1112 | { | 1187 | { |
1188 | #ifdef CONFIG_SMP | ||
1189 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
1190 | save_fp_context = smp_save_fp_context; | ||
1191 | restore_fp_context = smp_restore_fp_context; | ||
1192 | #else | ||
1113 | if (cpu_has_fpu) { | 1193 | if (cpu_has_fpu) { |
1114 | save_fp_context = _save_fp_context; | 1194 | save_fp_context = _save_fp_context; |
1115 | restore_fp_context = _restore_fp_context; | 1195 | restore_fp_context = _restore_fp_context; |
@@ -1117,6 +1197,7 @@ static inline void signal_init(void) | |||
1117 | save_fp_context = fpu_emulator_save_context; | 1197 | save_fp_context = fpu_emulator_save_context; |
1118 | restore_fp_context = fpu_emulator_restore_context; | 1198 | restore_fp_context = fpu_emulator_restore_context; |
1119 | } | 1199 | } |
1200 | #endif | ||
1120 | } | 1201 | } |
1121 | 1202 | ||
1122 | #ifdef CONFIG_MIPS32_COMPAT | 1203 | #ifdef CONFIG_MIPS32_COMPAT |
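
The SMP wrappers above exist because a boot-time choice, as made in the UP branch of signal_init(), is no longer assumed safe once several "CPUs" are in play; the comment in the hunk says as much, deferring the cpu_has_fpu test to each invocation. The shape of the pattern, as a sketch (save_ctx/save_ctx_smp are illustrative names, the callees are from the hunk):

        /* One indirection, two strategies: bind early when the answer
         * cannot change (UP), re-test per call when it might (SMP/SMTC). */
        static int (*save_ctx)(struct sigcontext *sc);

        static int save_ctx_smp(struct sigcontext *sc)
        {
                return cpu_has_fpu ? _save_fp_context(sc)
                                   : fpu_emulator_save_context(sc);
        }
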
@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void) | |||
1153 | { | 1234 | { |
1154 | unsigned int cpu = smp_processor_id(); | 1235 | unsigned int cpu = smp_processor_id(); |
1155 | unsigned int status_set = ST0_CU0; | 1236 | unsigned int status_set = ST0_CU0; |
1237 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1238 | int secondaryTC = 0; | ||
1239 | int bootTC = (cpu == 0); | ||
1240 | |||
1241 | /* | ||
1242 | * Only do per_cpu_trap_init() for the first TC of each VPE. | ||
1243 | * Note that this hack assumes that the SMTC init code | ||
1244 | * assigns TCs consecutively and in ascending order. | ||
1245 | */ | ||
1246 | |||
1247 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
1248 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | ||
1249 | secondaryTC = 1; | ||
1250 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1156 | 1251 | ||
1157 | /* | 1252 | /* |
1158 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1253 | * Disable coprocessors and select 32-bit or 64-bit addressing |
@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void) | |||
1175 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | 1270 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ |
1176 | #endif | 1271 | #endif |
1177 | 1272 | ||
1273 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1274 | if (!secondaryTC) { | ||
1275 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1276 | |||
1178 | /* | 1277 | /* |
1179 | * Interrupt handling. | 1278 | * Interrupt handling. |
1180 | */ | 1279 | */ |
@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void) | |||
1191 | } else | 1290 | } else |
1192 | set_c0_cause(CAUSEF_IV); | 1291 | set_c0_cause(CAUSEF_IV); |
1193 | } | 1292 | } |
1293 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1294 | } | ||
1295 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1194 | 1296 | ||
1195 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1297 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
1196 | TLBMISS_HANDLER_SETUP(); | 1298 | TLBMISS_HANDLER_SETUP(); |
@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void) | |||
1200 | BUG_ON(current->mm); | 1302 | BUG_ON(current->mm); |
1201 | enter_lazy_tlb(&init_mm, current); | 1303 | enter_lazy_tlb(&init_mm, current); |
1202 | 1304 | ||
1203 | cpu_cache_init(); | 1305 | #ifdef CONFIG_MIPS_MT_SMTC |
1204 | tlb_init(); | 1306 | if (bootTC) { |
1307 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1308 | cpu_cache_init(); | ||
1309 | tlb_init(); | ||
1310 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1311 | } | ||
1312 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1205 | } | 1313 | } |
1206 | 1314 | ||
1207 | /* Install CPU exception handler */ | 1315 | /* Install CPU exception handler */ |
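
The secondaryTC test added to per_cpu_trap_init() can be read as follows, using only the masks that appear in the hunk:

        unsigned long tcbind = read_c0_tcbind();

        /* A TC with a nonzero TC number that is bound to the same VPE as
         * the previously numbered logical CPU is not the first TC on its
         * VPE; it skips the per-VPE interrupt setup, and cache/TLB init
         * is further restricted to the boot TC. */
        int secondary = (tcbind & TCBIND_CURTC) != 0 &&
                        (tcbind & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id;
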
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 2ad0cedf29fe..14fa00e3cdfa 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <asm/asm-offsets.h> | 2 | #include <asm/asm-offsets.h> |
3 | #include <asm-generic/vmlinux.lds.h> | 3 | #include <asm-generic/vmlinux.lds.h> |
4 | 4 | ||
5 | #undef mips /* CPP really sucks for this job */ | 5 | #undef mips |
6 | #define mips mips | 6 | #define mips mips |
7 | OUTPUT_ARCH(mips) | 7 | OUTPUT_ARCH(mips) |
8 | ENTRY(kernel_entry) | 8 | ENTRY(kernel_entry) |
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c index eab5a705e989..17dfe6a8cab9 100644 --- a/arch/mips/mips-boards/generic/init.c +++ b/arch/mips/mips-boards/generic/init.c | |||
@@ -220,7 +220,6 @@ void __init kgdb_config (void) | |||
220 | generic_putDebugChar (*s++); | 220 | generic_putDebugChar (*s++); |
221 | } | 221 | } |
222 | 222 | ||
223 | kgdb_enabled = 1; | ||
224 | /* Breakpoint is invoked after interrupts are initialised */ | 223 | /* Breakpoint is invoked after interrupts are initialised */ |
225 | } | 224 | } |
226 | } | 225 | } |
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c index 93f3bf2c2b22..a9f6124b3a22 100644 --- a/arch/mips/mips-boards/generic/time.c +++ b/arch/mips/mips-boards/generic/time.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/mc146818rtc.h> | 30 | #include <linux/mc146818rtc.h> |
31 | 31 | ||
32 | #include <asm/mipsregs.h> | 32 | #include <asm/mipsregs.h> |
33 | #include <asm/mipsmtregs.h> | ||
33 | #include <asm/ptrace.h> | 34 | #include <asm/ptrace.h> |
34 | #include <asm/hardirq.h> | 35 | #include <asm/hardirq.h> |
35 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
@@ -50,16 +51,23 @@ unsigned long cpu_khz; | |||
50 | static char display_string[] = " LINUX ON ATLAS "; | 51 | static char display_string[] = " LINUX ON ATLAS "; |
51 | #endif | 52 | #endif |
52 | #if defined(CONFIG_MIPS_MALTA) | 53 | #if defined(CONFIG_MIPS_MALTA) |
54 | #if defined(CONFIG_MIPS_MT_SMTC) | ||
55 | static char display_string[] = " SMTC LINUX ON MALTA "; | ||
56 | #else | ||
53 | static char display_string[] = " LINUX ON MALTA "; | 57 | static char display_string[] = " LINUX ON MALTA "; |
58 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
54 | #endif | 59 | #endif |
55 | #if defined(CONFIG_MIPS_SEAD) | 60 | #if defined(CONFIG_MIPS_SEAD) |
56 | static char display_string[] = " LINUX ON SEAD "; | 61 | static char display_string[] = " LINUX ON SEAD "; |
57 | #endif | 62 | #endif |
58 | static unsigned int display_count = 0; | 63 | static unsigned int display_count; |
59 | #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8) | 64 | #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8) |
60 | 65 | ||
61 | static unsigned int timer_tick_count=0; | 66 | #define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR) |
67 | |||
68 | static unsigned int timer_tick_count; | ||
62 | static int mips_cpu_timer_irq; | 69 | static int mips_cpu_timer_irq; |
70 | extern void smtc_timer_broadcast(int); | ||
63 | 71 | ||
64 | static inline void scroll_display_message(void) | 72 | static inline void scroll_display_message(void) |
65 | { | 73 | { |
@@ -75,15 +83,55 @@ static void mips_timer_dispatch (struct pt_regs *regs) | |||
75 | do_IRQ (mips_cpu_timer_irq, regs); | 83 | do_IRQ (mips_cpu_timer_irq, regs); |
76 | } | 84 | } |
77 | 85 | ||
86 | /* | ||
87 | * Redeclare until I get around to mopping up the timer code insanity on MIPS. | ||
88 | */ | ||
78 | extern int null_perf_irq(struct pt_regs *regs); | 89 | extern int null_perf_irq(struct pt_regs *regs); |
79 | 90 | ||
80 | extern int (*perf_irq)(struct pt_regs *regs); | 91 | extern int (*perf_irq)(struct pt_regs *regs); |
81 | 92 | ||
82 | irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | 93 | irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) |
83 | { | 94 | { |
84 | int r2 = cpu_has_mips_r2; | ||
85 | int cpu = smp_processor_id(); | 95 | int cpu = smp_processor_id(); |
96 | int r2 = cpu_has_mips_r2; | ||
97 | |||
98 | #ifdef CONFIG_MIPS_MT_SMTC | ||
99 | /* | ||
100 | * In an SMTC system, one Count/Compare set exists per VPE. | ||
101 | * Which TC within a VPE gets the interrupt is essentially | ||
102 | * random - we only know that it shouldn't be one with | ||
103 | * IXMT set. Whichever TC gets the interrupt needs to | ||
104 | * send special interprocessor interrupts to the other | ||
105 | * TCs to make sure that they schedule, etc. | ||
106 | * | ||
107 | * That code is specific to the SMTC kernel, not to | ||
108 | * any particular platform, so it's invoked from | ||
109 | * the general MIPS timer_interrupt routine. | ||
110 | */ | ||
111 | |||
112 | /* | ||
113 | * DVPE is necessary so long as cross-VPE interrupts | ||
114 | * are done via read-modify-write of Cause register. | ||
115 | */ | ||
116 | int vpflags = dvpe(); | ||
117 | write_c0_compare (read_c0_count() - 1); | ||
118 | clear_c0_cause(CPUCTR_IMASKBIT); | ||
119 | evpe(vpflags); | ||
120 | |||
121 | if (cpu_data[cpu].vpe_id == 0) { | ||
122 | timer_interrupt(irq, dev_id, regs); | ||
123 | scroll_display_message(); | ||
124 | } else | ||
125 | write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ)); | ||
126 | smtc_timer_broadcast(cpu_data[cpu].vpe_id); | ||
86 | 127 | ||
128 | if (cpu != 0) | ||
129 | /* | ||
130 | * Other CPUs should do profiling and process accounting | ||
131 | */ | ||
132 | local_timer_interrupt(irq, dev_id, regs); | ||
133 | |||
134 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
87 | if (cpu == 0) { | 135 | if (cpu == 0) { |
88 | /* | 136 | /* |
89 | * CPU 0 handles the global timer interrupt job and process | 137 | * CPU 0 handles the global timer interrupt job and process |
@@ -107,12 +155,14 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
107 | * More support needs to be added to kernel/time for | 155 | * More support needs to be added to kernel/time for |
108 | * counter/timer interrupts on multiple CPU's | 156 | * counter/timer interrupts on multiple CPU's |
109 | */ | 157 | */ |
110 | write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ)); | 158 | write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ)); |
159 | |||
111 | /* | 160 | /* |
112 | * other CPUs should do profiling and process accounting | 161 | * Other CPUs should do profiling and process accounting |
113 | */ | 162 | */ |
114 | local_timer_interrupt (irq, dev_id, regs); | 163 | local_timer_interrupt(irq, dev_id, regs); |
115 | } | 164 | } |
165 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
116 | 166 | ||
117 | out: | 167 | out: |
118 | return IRQ_HANDLED; | 168 | return IRQ_HANDLED; |
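
Condensed, the SMTC branch of mips_timer_interrupt() above does the following (the comments are interpretation, not taken from the patch):

        int vpflags = dvpe();                  /* Cause is read-modify-write,
                                                  so fence off other VPEs    */
        write_c0_compare(read_c0_count() - 1); /* park Compare just behind
                                                  Count so it won't refire   */
        clear_c0_cause(CPUCTR_IMASKBIT);       /* ack this timer interrupt   */
        evpe(vpflags);

        if (cpu_data[cpu].vpe_id == 0)
                timer_interrupt(irq, dev_id, regs);  /* global tick on VPE 0 */
        else
                write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
        smtc_timer_broadcast(cpu_data[cpu].vpe_id);  /* IPI the sibling TCs  */

Whichever TC happens to take the interrupt performs this; the broadcast is what guarantees the other TCs still receive their scheduling tick.
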
@@ -126,7 +176,7 @@ static unsigned int __init estimate_cpu_frequency(void) | |||
126 | unsigned int prid = read_c0_prid() & 0xffff00; | 176 | unsigned int prid = read_c0_prid() & 0xffff00; |
127 | unsigned int count; | 177 | unsigned int count; |
128 | 178 | ||
129 | #ifdef CONFIG_MIPS_SEAD | 179 | #if defined(CONFIG_MIPS_SEAD) || defined(CONFIG_MIPS_SIM) |
130 | /* | 180 | /* |
131 | * The SEAD board doesn't have a real time clock, so we can't | 181 | * The SEAD board doesn't have a real time clock, so we can't |
132 | * really calculate the timer frequency | 182 | * really calculate the timer frequency |
@@ -211,7 +261,11 @@ void __init mips_timer_setup(struct irqaction *irq) | |||
211 | 261 | ||
212 | /* we are using the cpu counter for timer interrupts */ | 262 | /* we are using the cpu counter for timer interrupts */ |
213 | irq->handler = mips_timer_interrupt; /* we use our own handler */ | 263 | irq->handler = mips_timer_interrupt; /* we use our own handler */ |
264 | #ifdef CONFIG_MIPS_MT_SMTC | ||
265 | setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT); | ||
266 | #else | ||
214 | setup_irq(mips_cpu_timer_irq, irq); | 267 | setup_irq(mips_cpu_timer_irq, irq); |
268 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
215 | 269 | ||
216 | #ifdef CONFIG_SMP | 270 | #ifdef CONFIG_SMP |
217 | /* irq_desc(riptor) is a global resource, when the interrupt overlaps | 271 | /* irq_desc(riptor) is a global resource, when the interrupt overlaps |
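
setup_irq_smtc() differs from setup_irq() by one extra argument: the Status.IM/Cause.IP bit for the interrupt source, so SMTC can mask exactly that line per-TC. The recurring 0x100 << n idiom in this patch is that mapping, since the IM/IP bits occupy bits 8..15 of Status/Cause (IMASKBIT below is an illustrative macro, not from the patch):

        /* CPU interrupt line n  ->  bit (8 + n) of Status/Cause. */
        #define IMASKBIT(n)     (0x100 << (n))

        /* e.g. the count/compare timer:
         * CPUCTR_IMASKBIT == IMASKBIT(MIPSCPU_INT_CPUCTR) */
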
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile index fd4c143c0e2f..77ee5c6d33c1 100644 --- a/arch/mips/mips-boards/malta/Makefile +++ b/arch/mips/mips-boards/malta/Makefile | |||
@@ -20,3 +20,4 @@ | |||
20 | # | 20 | # |
21 | 21 | ||
22 | obj-y := malta_int.o malta_setup.o | 22 | obj-y := malta_int.o malta_setup.o |
23 | obj-$(CONFIG_SMP) += malta_smp.o | ||
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c index 1da8c18b9c8e..64db07d4dbe5 100644 --- a/arch/mips/mips-boards/malta/malta_int.c +++ b/arch/mips/mips-boards/malta/malta_int.c | |||
@@ -118,8 +118,9 @@ static void malta_hw0_irqdispatch(struct pt_regs *regs) | |||
118 | int irq; | 118 | int irq; |
119 | 119 | ||
120 | irq = get_int(); | 120 | irq = get_int(); |
121 | if (irq < 0) | 121 | if (irq < 0) { |
122 | return; /* interrupt has already been cleared */ | 122 | return; /* interrupt has already been cleared */ |
123 | } | ||
123 | 124 | ||
124 | do_IRQ(MALTA_INT_BASE+irq, regs); | 125 | do_IRQ(MALTA_INT_BASE+irq, regs); |
125 | } | 126 | } |
@@ -324,9 +325,15 @@ void __init arch_init_irq(void) | |||
324 | else if (cpu_has_vint) { | 325 | else if (cpu_has_vint) { |
325 | set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); | 326 | set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); |
326 | set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch); | 327 | set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch); |
327 | 328 | #ifdef CONFIG_MIPS_MT_SMTC | |
329 | setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq, | ||
330 | (0x100 << MIPSCPU_INT_I8259A)); | ||
331 | setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, | ||
332 | &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); | ||
333 | #else /* Not SMTC */ | ||
328 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); | 334 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); |
329 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); | 335 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); |
336 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
330 | } | 337 | } |
331 | else { | 338 | else { |
332 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); | 339 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); |
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smp.c new file mode 100644 index 000000000000..6c6c8eeedbce --- /dev/null +++ b/arch/mips/mips-boards/malta/malta_smp.c | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Malta Platform-specific hooks for SMP operation | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/cpumask.h> | ||
8 | #include <linux/interrupt.h> | ||
9 | |||
10 | #include <asm/atomic.h> | ||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/system.h> | ||
14 | #include <asm/hardirq.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/smp.h> | ||
17 | #ifdef CONFIG_MIPS_MT_SMTC | ||
18 | #include <asm/smtc_ipi.h> | ||
19 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
20 | |||
21 | /* VPE/SMP Prototype implements platform interfaces directly */ | ||
22 | #if !defined(CONFIG_MIPS_MT_SMP) | ||
23 | |||
24 | /* | ||
25 | * Cause the specified action to be performed on a targeted "CPU" | ||
26 | */ | ||
27 | |||
28 | void core_send_ipi(int cpu, unsigned int action) | ||
29 | { | ||
30 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | ||
31 | #ifdef CONFIG_MIPS_MT_SMTC | ||
32 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | ||
33 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * Detect available CPUs/VPEs/TCs and populate phys_cpu_present_map | ||
38 | */ | ||
39 | |||
40 | void __init prom_build_cpu_map(void) | ||
41 | { | ||
42 | int nextslot; | ||
43 | |||
44 | /* | ||
45 | * As of November, 2004, MIPSsim only simulates one core | ||
46 | * at a time. However, that core may be a MIPS MT core | ||
47 | * with multiple virtual processors and thread contexts. | ||
48 | */ | ||
49 | |||
50 | if (read_c0_config3() & (1<<2)) { | ||
51 | nextslot = mipsmt_build_cpu_map(1); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Platform "CPU" startup hook | ||
57 | */ | ||
58 | |||
59 | void prom_boot_secondary(int cpu, struct task_struct *idle) | ||
60 | { | ||
61 | #ifdef CONFIG_MIPS_MT_SMTC | ||
62 | smtc_boot_secondary(cpu, idle); | ||
63 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Post-config but pre-boot cleanup entry point | ||
68 | */ | ||
69 | |||
70 | void prom_init_secondary(void) | ||
71 | { | ||
72 | #ifdef CONFIG_MIPS_MT_SMTC | ||
73 | void smtc_init_secondary(void); | ||
74 | int myvpe; | ||
75 | |||
76 | /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ | ||
77 | myvpe = read_c0_tcbind() & TCBIND_CURVPE; | ||
78 | if (myvpe != 0) { | ||
79 | /* Ideally, this should be done only once per VPE, but... */ | ||
80 | clear_c0_status(STATUSF_IP2); | ||
81 | set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3 | ||
82 | | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | ||
83 | | STATUSF_IP7); | ||
84 | } | ||
85 | |||
86 | smtc_init_secondary(); | ||
87 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Platform SMP pre-initialization | ||
92 | * | ||
93 | * As noted above, we can assume a single CPU for now | ||
94 | * but it may be multithreaded. | ||
95 | */ | ||
96 | |||
97 | void plat_smp_setup(void) | ||
98 | { | ||
99 | if (read_c0_config3() & (1<<2)) | ||
100 | mipsmt_build_cpu_map(0); | ||
101 | } | ||
102 | |||
103 | void __init plat_prepare_cpus(unsigned int max_cpus) | ||
104 | { | ||
105 | if (read_c0_config3() & (1<<2)) | ||
106 | mipsmt_prepare_cpus(); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * SMP initialization finalization entry point | ||
111 | */ | ||
112 | |||
113 | void prom_smp_finish(void) | ||
114 | { | ||
115 | #ifdef CONFIG_MIPS_MT_SMTC | ||
116 | smtc_smp_finish(); | ||
117 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Hook for after all CPUs are online | ||
122 | */ | ||
123 | |||
124 | void prom_cpus_done(void) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | #endif /* !CONFIG_MIPS_MT_SMP */ | ||
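
For orientation, the hooks in this new file plug into the generic MIPS SMP bring-up roughly in the following order (a sketch of the call sequence as I read the 2.6-era code, not literal kernel source):

        /* boot TC */
        prom_build_cpu_map();           /* or plat_smp_setup(): enumerate TCs */
        plat_prepare_cpus(max_cpus);    /* mipsmt_prepare_cpus(): set up VPEs */
        prom_boot_secondary(cpu, idle); /* smtc_boot_secondary(): start a TC  */

        /* each secondary TC, once it is running */
        prom_init_secondary();          /* mask IP2 on non-zero VPEs          */
        prom_smp_finish();              /* smtc_smp_finish()                  */

        /* boot TC again, after all CPUs are online */
        prom_cpus_done();               /* empty on Malta                     */
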
diff --git a/arch/mips/mips-boards/sim/cmdline.c b/arch/mips/mips-boards/sim/cmdline.c deleted file mode 100644 index fef9fbd8e710..000000000000 --- a/arch/mips/mips-boards/sim/cmdline.c +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* | ||
2 | * Carsten Langgaard, carstenl@mips.com | ||
3 | * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can distribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License (Version 2) as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | * for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
17 | * | ||
18 | * Kernel command line creation using the prom monitor (YAMON) argc/argv. | ||
19 | */ | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/string.h> | ||
22 | |||
23 | #include <asm/bootinfo.h> | ||
24 | |||
25 | extern int prom_argc; | ||
26 | extern int *_prom_argv; | ||
27 | |||
28 | /* | ||
29 | * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. | ||
30 | * This macro take care of sign extension. | ||
31 | */ | ||
32 | #define prom_argv(index) ((char *)(((int *)(int)_prom_argv)[(index)])) | ||
33 | |||
34 | char arcs_cmdline[CL_SIZE]; | ||
35 | |||
36 | char * __init prom_getcmdline(void) | ||
37 | { | ||
38 | return &(arcs_cmdline[0]); | ||
39 | } | ||
40 | |||
41 | |||
42 | void __init prom_init_cmdline(void) | ||
43 | { | ||
44 | char *cp; | ||
45 | int actr; | ||
46 | |||
47 | actr = 1; /* Always ignore argv[0] */ | ||
48 | |||
49 | cp = &(arcs_cmdline[0]); | ||
50 | while(actr < prom_argc) { | ||
51 | strcpy(cp, prom_argv(actr)); | ||
52 | cp += strlen(prom_argv(actr)); | ||
53 | *cp++ = ' '; | ||
54 | actr++; | ||
55 | } | ||
56 | if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */ | ||
57 | --cp; | ||
58 | *cp = '\0'; | ||
59 | } | ||
diff --git a/arch/mips/mips-boards/sim/sim_cmdline.c b/arch/mips/mips-boards/sim/sim_cmdline.c index 9df37c6fca36..c63021a5dc6c 100644 --- a/arch/mips/mips-boards/sim/sim_cmdline.c +++ b/arch/mips/mips-boards/sim/sim_cmdline.c | |||
@@ -26,8 +26,10 @@ char * __init prom_getcmdline(void) | |||
26 | return arcs_cmdline; | 26 | return arcs_cmdline; |
27 | } | 27 | } |
28 | 28 | ||
29 | |||
30 | void __init prom_init_cmdline(void) | 29 | void __init prom_init_cmdline(void) |
31 | { | 30 | { |
32 | /* nothing to do */ | 31 | char *cp; |
32 | cp = arcs_cmdline; | ||
33 | /* Get boot line from environment? */ | ||
34 | *cp = '\0'; | ||
33 | } | 35 | } |
diff --git a/arch/mips/mips-boards/sim/sim_smp.c b/arch/mips/mips-boards/sim/sim_smp.c index a9f0c2bfe4ad..b7084e7c4bf9 100644 --- a/arch/mips/mips-boards/sim/sim_smp.c +++ b/arch/mips/mips-boards/sim/sim_smp.c | |||
@@ -44,8 +44,6 @@ | |||
44 | void core_send_ipi(int cpu, unsigned int action) | 44 | void core_send_ipi(int cpu, unsigned int action) |
45 | { | 45 | { |
46 | #ifdef CONFIG_MIPS_MT_SMTC | 46 | #ifdef CONFIG_MIPS_MT_SMTC |
47 | void smtc_send_ipi(int, int, unsigned int); | ||
48 | |||
49 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | 47 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); |
50 | #endif /* CONFIG_MIPS_MT_SMTC */ | 48 | #endif /* CONFIG_MIPS_MT_SMTC */ |
51 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | 49 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ |
@@ -59,15 +57,8 @@ void core_send_ipi(int cpu, unsigned int action) | |||
59 | void __init prom_build_cpu_map(void) | 57 | void __init prom_build_cpu_map(void) |
60 | { | 58 | { |
61 | #ifdef CONFIG_MIPS_MT_SMTC | 59 | #ifdef CONFIG_MIPS_MT_SMTC |
62 | extern int mipsmt_build_cpu_map(int startslot); | ||
63 | int nextslot; | 60 | int nextslot; |
64 | 61 | ||
65 | cpus_clear(phys_cpu_present_map); | ||
66 | |||
67 | /* Register the boot CPU */ | ||
68 | |||
69 | smp_prepare_boot_cpu(); | ||
70 | |||
71 | /* | 62 | /* |
72 | * As of November, 2004, MIPSsim only simulates one core | 63 | * As of November, 2004, MIPSsim only simulates one core |
73 | * at a time. However, that core may be a MIPS MT core | 64 | * at a time. However, that core may be a MIPS MT core |
@@ -87,8 +78,6 @@ void __init prom_build_cpu_map(void) | |||
87 | void prom_boot_secondary(int cpu, struct task_struct *idle) | 78 | void prom_boot_secondary(int cpu, struct task_struct *idle) |
88 | { | 79 | { |
89 | #ifdef CONFIG_MIPS_MT_SMTC | 80 | #ifdef CONFIG_MIPS_MT_SMTC |
90 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | ||
91 | |||
92 | smtc_boot_secondary(cpu, idle); | 81 | smtc_boot_secondary(cpu, idle); |
93 | #endif /* CONFIG_MIPS_MT_SMTC */ | 82 | #endif /* CONFIG_MIPS_MT_SMTC */ |
94 | } | 83 | } |
@@ -113,7 +102,6 @@ void prom_init_secondary(void) | |||
113 | void prom_prepare_cpus(unsigned int max_cpus) | 102 | void prom_prepare_cpus(unsigned int max_cpus) |
114 | { | 103 | { |
115 | #ifdef CONFIG_MIPS_MT_SMTC | 104 | #ifdef CONFIG_MIPS_MT_SMTC |
116 | void mipsmt_prepare_cpus(int c); | ||
117 | /* | 105 | /* |
118 | * As noted above, we can assume a single CPU for now | 106 | * As noted above, we can assume a single CPU for now |
119 | * but it may be multithreaded. | 107 | * but it may be multithreaded. |
@@ -132,8 +120,6 @@ void prom_prepare_cpus(unsigned int max_cpus) | |||
132 | void prom_smp_finish(void) | 120 | void prom_smp_finish(void) |
133 | { | 121 | { |
134 | #ifdef CONFIG_MIPS_MT_SMTC | 122 | #ifdef CONFIG_MIPS_MT_SMTC |
135 | void smtc_smp_finish(void); | ||
136 | |||
137 | smtc_smp_finish(); | 123 | smtc_smp_finish(); |
138 | #endif /* CONFIG_MIPS_MT_SMTC */ | 124 | #endif /* CONFIG_MIPS_MT_SMTC */ |
139 | } | 125 | } |
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 2d9624fd10ec..e3a617224868 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -157,7 +157,6 @@ no_context: | |||
157 | * Oops. The kernel tried to access some bad page. We'll have to | 157 | * Oops. The kernel tried to access some bad page. We'll have to |
158 | * terminate things with extreme prejudice. | 158 | * terminate things with extreme prejudice. |
159 | */ | 159 | */ |
160 | |||
161 | bust_spinlocks(1); | 160 | bust_spinlocks(1); |
162 | 161 | ||
163 | printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " | 162 | printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " |
@@ -188,11 +187,20 @@ do_sigbus: | |||
188 | /* Kernel mode? Handle exceptions or die */ | 187 | /* Kernel mode? Handle exceptions or die */ |
189 | if (!user_mode(regs)) | 188 | if (!user_mode(regs)) |
190 | goto no_context; | 189 | goto no_context; |
191 | 190 | else | |
192 | /* | 191 | /* |
193 | * Send a sigbus, regardless of whether we were in kernel | 192 | * Send a sigbus, regardless of whether we were in kernel |
194 | * or user mode. | 193 | * or user mode. |
195 | */ | 194 | */ |
195 | #if 0 | ||
196 | printk("do_page_fault() #3: sending SIGBUS to %s for " | ||
197 | "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", | ||
198 | tsk->comm, | ||
199 | write ? "write access to" : "read access from", | ||
200 | field, address, | ||
201 | field, (unsigned long) regs->cp0_epc, | ||
202 | field, (unsigned long) regs->regs[31]); | ||
203 | #endif | ||
196 | tsk->thread.cp0_badvaddr = address; | 204 | tsk->thread.cp0_badvaddr = address; |
197 | info.si_signo = SIGBUS; | 205 | info.si_signo = SIGBUS; |
198 | info.si_errno = 0; | 206 | info.si_errno = 0; |
@@ -201,7 +209,6 @@ do_sigbus: | |||
201 | force_sig_info(SIGBUS, &info, tsk); | 209 | force_sig_info(SIGBUS, &info, tsk); |
202 | 210 | ||
203 | return; | 211 | return; |
204 | |||
205 | vmalloc_fault: | 212 | vmalloc_fault: |
206 | { | 213 | { |
207 | /* | 214 | /* |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index a865f2394cb0..9dca099ba16b 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void); | |||
32 | "nop; nop; nop; nop; nop; nop;\n\t" \ | 32 | "nop; nop; nop; nop; nop; nop;\n\t" \ |
33 | ".set reorder\n\t") | 33 | ".set reorder\n\t") |
34 | 34 | ||
35 | /* Atomicity and interruptibility */ | ||
36 | #ifdef CONFIG_MIPS_MT_SMTC | ||
37 | |||
38 | #include <asm/smtc.h> | ||
39 | #include <asm/mipsmtregs.h> | ||
40 | |||
41 | #define ENTER_CRITICAL(flags) \ | ||
42 | { \ | ||
43 | unsigned int mvpflags; \ | ||
44 | local_irq_save(flags);\ | ||
45 | mvpflags = dvpe() | ||
46 | #define EXIT_CRITICAL(flags) \ | ||
47 | evpe(mvpflags); \ | ||
48 | local_irq_restore(flags); \ | ||
49 | } | ||
50 | #else | ||
51 | |||
52 | #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||
53 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||
54 | |||
55 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
56 | |||
35 | void local_flush_tlb_all(void) | 57 | void local_flush_tlb_all(void) |
36 | { | 58 | { |
37 | unsigned long flags; | 59 | unsigned long flags; |
38 | unsigned long old_ctx; | 60 | unsigned long old_ctx; |
39 | int entry; | 61 | int entry; |
40 | 62 | ||
41 | local_irq_save(flags); | 63 | ENTER_CRITICAL(flags); |
42 | /* Save old context and create impossible VPN2 value */ | 64 | /* Save old context and create impossible VPN2 value */ |
43 | old_ctx = read_c0_entryhi(); | 65 | old_ctx = read_c0_entryhi(); |
44 | write_c0_entrylo0(0); | 66 | write_c0_entrylo0(0); |
@@ -57,7 +79,7 @@ void local_flush_tlb_all(void) | |||
57 | } | 79 | } |
58 | tlbw_use_hazard(); | 80 | tlbw_use_hazard(); |
59 | write_c0_entryhi(old_ctx); | 81 | write_c0_entryhi(old_ctx); |
60 | local_irq_restore(flags); | 82 | EXIT_CRITICAL(flags); |
61 | } | 83 | } |
62 | 84 | ||
63 | /* All entries common to a mm share an asid. To effectively flush | 85 | /* All entries common to a mm share an asid. To effectively flush |
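
One sharp edge in the macro pair introduced above: the SMTC variants open a compound block and declare mvpflags in ENTER_CRITICAL(), and close that block in EXIT_CRITICAL(), so the two must appear exactly once each, as statements in the same function and in that order, or the code will not compile. A caller such as local_flush_tlb_all() effectively expands to:

        {
                unsigned int mvpflags;
                local_irq_save(flags);
                mvpflags = dvpe();      /* also stop the other VPEs; under
                                           SMTC the TLB may be shared     */
                /* ... critical section ... */
                evpe(mvpflags);
                local_irq_restore(flags);
        }
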
@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
87 | unsigned long flags; | 109 | unsigned long flags; |
88 | int size; | 110 | int size; |
89 | 111 | ||
112 | ENTER_CRITICAL(flags); | ||
90 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 113 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
91 | size = (size + 1) >> 1; | 114 | size = (size + 1) >> 1; |
92 | local_irq_save(flags); | 115 | local_irq_save(flags); |
@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
120 | } else { | 143 | } else { |
121 | drop_mmu_context(mm, cpu); | 144 | drop_mmu_context(mm, cpu); |
122 | } | 145 | } |
123 | local_irq_restore(flags); | 146 | EXIT_CRITICAL(flags); |
124 | } | 147 | } |
125 | } | 148 | } |
126 | 149 | ||
@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
129 | unsigned long flags; | 152 | unsigned long flags; |
130 | int size; | 153 | int size; |
131 | 154 | ||
155 | ENTER_CRITICAL(flags); | ||
132 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 156 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
133 | size = (size + 1) >> 1; | 157 | size = (size + 1) >> 1; |
134 | local_irq_save(flags); | ||
135 | if (size <= current_cpu_data.tlbsize / 2) { | 158 | if (size <= current_cpu_data.tlbsize / 2) { |
136 | int pid = read_c0_entryhi(); | 159 | int pid = read_c0_entryhi(); |
137 | 160 | ||
@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
162 | } else { | 185 | } else { |
163 | local_flush_tlb_all(); | 186 | local_flush_tlb_all(); |
164 | } | 187 | } |
165 | local_irq_restore(flags); | 188 | EXIT_CRITICAL(flags); |
166 | } | 189 | } |
167 | 190 | ||
168 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 191 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
175 | 198 | ||
176 | newpid = cpu_asid(cpu, vma->vm_mm); | 199 | newpid = cpu_asid(cpu, vma->vm_mm); |
177 | page &= (PAGE_MASK << 1); | 200 | page &= (PAGE_MASK << 1); |
178 | local_irq_save(flags); | 201 | ENTER_CRITICAL(flags); |
179 | oldpid = read_c0_entryhi(); | 202 | oldpid = read_c0_entryhi(); |
180 | write_c0_entryhi(page | newpid); | 203 | write_c0_entryhi(page | newpid); |
181 | mtc0_tlbw_hazard(); | 204 | mtc0_tlbw_hazard(); |
@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
194 | 217 | ||
195 | finish: | 218 | finish: |
196 | write_c0_entryhi(oldpid); | 219 | write_c0_entryhi(oldpid); |
197 | local_irq_restore(flags); | 220 | EXIT_CRITICAL(flags); |
198 | } | 221 | } |
199 | } | 222 | } |
200 | 223 | ||
@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page) | |||
207 | unsigned long flags; | 230 | unsigned long flags; |
208 | int oldpid, idx; | 231 | int oldpid, idx; |
209 | 232 | ||
210 | local_irq_save(flags); | 233 | ENTER_CRITICAL(flags); |
211 | oldpid = read_c0_entryhi(); | 234 | oldpid = read_c0_entryhi(); |
212 | page &= (PAGE_MASK << 1); | 235 | page &= (PAGE_MASK << 1); |
213 | write_c0_entryhi(page); | 236 | write_c0_entryhi(page); |
@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page) | |||
226 | } | 249 | } |
227 | write_c0_entryhi(oldpid); | 250 | write_c0_entryhi(oldpid); |
228 | 251 | ||
229 | local_irq_restore(flags); | 252 | EXIT_CRITICAL(flags); |
230 | } | 253 | } |
231 | 254 | ||
232 | /* | 255 | /* |
@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
249 | if (current->active_mm != vma->vm_mm) | 272 | if (current->active_mm != vma->vm_mm) |
250 | return; | 273 | return; |
251 | 274 | ||
252 | local_irq_save(flags); | 275 | ENTER_CRITICAL(flags); |
253 | 276 | ||
254 | pid = read_c0_entryhi() & ASID_MASK; | 277 | pid = read_c0_entryhi() & ASID_MASK; |
255 | address &= (PAGE_MASK << 1); | 278 | address &= (PAGE_MASK << 1); |
@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
277 | else | 300 | else |
278 | tlb_write_indexed(); | 301 | tlb_write_indexed(); |
279 | tlbw_use_hazard(); | 302 | tlbw_use_hazard(); |
280 | local_irq_restore(flags); | 303 | EXIT_CRITICAL(flags); |
281 | } | 304 | } |
282 | 305 | ||
283 | #if 0 | 306 | #if 0 |
@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma, | |||
291 | pte_t *ptep; | 314 | pte_t *ptep; |
292 | int idx; | 315 | int idx; |
293 | 316 | ||
294 | local_irq_save(flags); | 317 | ENTER_CRITICAL(flags); |
295 | address &= (PAGE_MASK << 1); | 318 | address &= (PAGE_MASK << 1); |
296 | asid = read_c0_entryhi() & ASID_MASK; | 319 | asid = read_c0_entryhi() & ASID_MASK; |
297 | write_c0_entryhi(address | asid); | 320 | write_c0_entryhi(address | asid); |
@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma, | |||
310 | else | 333 | else |
311 | tlb_write_indexed(); | 334 | tlb_write_indexed(); |
312 | tlbw_use_hazard(); | 335 | tlbw_use_hazard(); |
313 | local_irq_restore(flags); | 336 | EXIT_CRITICAL(flags); |
314 | } | 337 | } |
315 | #endif | 338 | #endif |
316 | 339 | ||
@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
322 | unsigned long old_pagemask; | 345 | unsigned long old_pagemask; |
323 | unsigned long old_ctx; | 346 | unsigned long old_ctx; |
324 | 347 | ||
325 | local_irq_save(flags); | 348 | ENTER_CRITICAL(flags); |
326 | /* Save old context and create impossible VPN2 value */ | 349 | /* Save old context and create impossible VPN2 value */ |
327 | old_ctx = read_c0_entryhi(); | 350 | old_ctx = read_c0_entryhi(); |
328 | old_pagemask = read_c0_pagemask(); | 351 | old_pagemask = read_c0_pagemask(); |
@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
342 | BARRIER; | 365 | BARRIER; |
343 | write_c0_pagemask(old_pagemask); | 366 | write_c0_pagemask(old_pagemask); |
344 | local_flush_tlb_all(); | 367 | local_flush_tlb_all(); |
345 | local_irq_restore(flags); | 368 | EXIT_CRITICAL(flags); |
346 | } | 369 | } |
347 | 370 | ||
348 | /* | 371 | /* |
@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
362 | unsigned long old_pagemask; | 385 | unsigned long old_pagemask; |
363 | unsigned long old_ctx; | 386 | unsigned long old_ctx; |
364 | 387 | ||
365 | local_irq_save(flags); | 388 | ENTER_CRITICAL(flags); |
366 | /* Save old context and create impossible VPN2 value */ | 389 | /* Save old context and create impossible VPN2 value */ |
367 | old_ctx = read_c0_entryhi(); | 390 | old_ctx = read_c0_entryhi(); |
368 | old_pagemask = read_c0_pagemask(); | 391 | old_pagemask = read_c0_pagemask(); |
@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
386 | write_c0_entryhi(old_ctx); | 409 | write_c0_entryhi(old_ctx); |
387 | write_c0_pagemask(old_pagemask); | 410 | write_c0_pagemask(old_pagemask); |
388 | out: | 411 | out: |
389 | local_irq_restore(flags); | 412 | EXIT_CRITICAL(flags); |
390 | return ret; | 413 | return ret; |
391 | } | 414 | } |
392 | 415 | ||
416 | extern void __init sanitize_tlb_entries(void); | ||
393 | static void __init probe_tlb(unsigned long config) | 417 | static void __init probe_tlb(unsigned long config) |
394 | { | 418 | { |
395 | struct cpuinfo_mips *c = ¤t_cpu_data; | 419 | struct cpuinfo_mips *c = ¤t_cpu_data; |
@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config) | |||
402 | */ | 426 | */ |
403 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) | 427 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) |
404 | return; | 428 | return; |
429 | #ifdef CONFIG_MIPS_MT_SMTC | ||
430 | /* | ||
431 | * If TLB is shared in SMTC system, total size already | ||
432 | * has been calculated and written into cpu_data tlbsize | ||
433 | */ | ||
434 | if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED) | ||
435 | return; | ||
436 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
405 | 437 | ||
406 | reg = read_c0_config1(); | 438 | reg = read_c0_config1(); |
407 | if (!((config >> 7) & 3)) | 439 | if (!((config >> 7) & 3)) |
@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config) | |||
410 | c->tlbsize = ((reg >> 25) & 0x3f) + 1; | 442 | c->tlbsize = ((reg >> 25) & 0x3f) + 1; |
411 | } | 443 | } |
412 | 444 | ||
445 | static int __initdata ntlb; | ||
446 | static int __init set_ntlb(char *str) | ||
447 | { | ||
448 | get_option(&str, &ntlb); | ||
449 | return 1; | ||
450 | } | ||
451 | |||
452 | __setup("ntlb=", set_ntlb); | ||
453 | |||
413 | void __init tlb_init(void) | 454 | void __init tlb_init(void) |
414 | { | 455 | { |
415 | unsigned int config = read_c0_config(); | 456 | unsigned int config = read_c0_config(); |
@@ -432,5 +473,15 @@ void __init tlb_init(void) | |||
432 | 473 | ||
433 | /* Did I tell you that ARC SUCKS? */ | 474 | /* Did I tell you that ARC SUCKS? */ |
434 | 475 | ||
476 | if (ntlb) { | ||
477 | if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { | ||
478 | int wired = current_cpu_data.tlbsize - ntlb; | ||
479 | write_c0_wired(wired); | ||
480 | write_c0_index(wired-1); | ||
481 | printk ("Restricting TLB to %d entries\n", ntlb); | ||
482 | } else | ||
483 | printk("Ignoring invalid argument ntlb=%d\n", ntlb); | ||
484 | } | ||
485 | |||
435 | build_tlb_refill_handler(); | 486 | build_tlb_refill_handler(); |
436 | } | 487 | } |
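
The new ntlb= early parameter is a debugging knob: booting with, say, "ntlb=16" on the kernel command line wires down all but 16 entries, so the random-replacement refill handler only ever cycles through the small remainder. In effect (a sketch of what tlb_init() does for that value):

        /* ntlb=16 on a 64-entry TLB */
        int wired = current_cpu_data.tlbsize - 16;  /* 48 entries wired down */
        write_c0_wired(wired);                      /* tlbwr skips [0,wired) */
        write_c0_index(wired - 1);                  /* mirrors the code above */

A value of 1, or anything larger than the real TLB size, is rejected with the "Ignoring invalid argument" message; ntlb=0 (the default) leaves the TLB alone.
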
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index c5eea6ae12ca..053dbacac56b 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -7,6 +7,16 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 2004,2005 by Thiemo Seufer | 8 | * Copyright (C) 2004,2005 by Thiemo Seufer |
9 | * Copyright (C) 2005 Maciej W. Rozycki | 9 | * Copyright (C) 2005 Maciej W. Rozycki |
10 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) | ||
11 | * | ||
12 | * ... and the days got worse and worse and now you see | ||
13 | * I've gone completely out of my mind. | ||
14 | * | ||
15 | * They're coming to take me away haha | ||
16 | * they're coming to take me away hoho hihi haha | ||
17 | * to the funny farm where code is beautiful all the time ... | ||
18 | * | ||
19 | * (Condolences to Napoleon XIV) | ||
10 | */ | 20 | */ |
11 | 21 | ||
12 | #include <stdarg.h> | 22 | #include <stdarg.h> |
@@ -68,6 +78,7 @@ enum fields | |||
68 | BIMM = 0x040, | 78 | BIMM = 0x040, |
69 | JIMM = 0x080, | 79 | JIMM = 0x080, |
70 | FUNC = 0x100, | 80 | FUNC = 0x100, |
81 | SET = 0x200 | ||
71 | }; | 82 | }; |
72 | 83 | ||
73 | #define OP_MASK 0x2f | 84 | #define OP_MASK 0x2f |
@@ -86,6 +97,8 @@ enum fields | |||
86 | #define JIMM_SH 0 | 97 | #define JIMM_SH 0 |
87 | #define FUNC_MASK 0x2f | 98 | #define FUNC_MASK 0x2f |
88 | #define FUNC_SH 0 | 99 | #define FUNC_SH 0 |
100 | #define SET_MASK 0x7 | ||
101 | #define SET_SH 0 | ||
89 | 102 | ||
90 | enum opcode { | 103 | enum opcode { |
91 | insn_invalid, | 104 | insn_invalid, |
@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = { | |||
129 | { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM }, | 142 | { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM }, |
130 | { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM }, | 143 | { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM }, |
131 | { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD }, | 144 | { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD }, |
132 | { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD }, | 145 | { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET}, |
133 | { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD }, | 146 | { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET}, |
134 | { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE }, | 147 | { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE }, |
135 | { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, | 148 | { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, |
136 | { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, | 149 | { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, |
@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = { | |||
145 | { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM }, | 158 | { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM }, |
146 | { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM }, | 159 | { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM }, |
147 | { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM }, | 160 | { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM }, |
148 | { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD }, | 161 | { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET}, |
149 | { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD }, | 162 | { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET}, |
150 | { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM }, | 163 | { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM }, |
151 | { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 }, | 164 | { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 }, |
152 | { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM }, | 165 | { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM }, |
@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg) | |||
242 | return arg & FUNC_MASK; | 255 | return arg & FUNC_MASK; |
243 | } | 256 | } |
244 | 257 | ||
258 | static __init u32 build_set(u32 arg) | ||
259 | { | ||
260 | if (arg & ~SET_MASK) | ||
261 | printk(KERN_WARNING "TLB synthesizer field overflow\n"); | ||
262 | |||
263 | return arg & SET_MASK; | ||
264 | } | ||
265 | |||
245 | /* | 266 | /* |
246 | * The order of opcode arguments is implicitly left to right, | 267 | * The order of opcode arguments is implicitly left to right, |
247 | * starting with RS and ending with FUNC or IMM. | 268 | * starting with RS and ending with FUNC or IMM. |
@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...) | |||
273 | if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); | 294 | if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); |
274 | if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); | 295 | if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); |
275 | if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); | 296 | if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); |
297 | if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); | ||
276 | va_end(ap); | 298 | va_end(ap); |
277 | 299 | ||
278 | **buf = op; | 300 | **buf = op; |
@@ -358,8 +380,8 @@ I_u1s2(_bgezl); | |||
358 | I_u1s2(_bltz); | 380 | I_u1s2(_bltz); |
359 | I_u1s2(_bltzl); | 381 | I_u1s2(_bltzl); |
360 | I_u1u2s3(_bne); | 382 | I_u1u2s3(_bne); |
361 | I_u1u2(_dmfc0); | 383 | I_u1u2u3(_dmfc0); |
362 | I_u1u2(_dmtc0); | 384 | I_u1u2u3(_dmtc0); |
363 | I_u2u1s3(_daddiu); | 385 | I_u2u1s3(_daddiu); |
364 | I_u3u1u2(_daddu); | 386 | I_u3u1u2(_daddu); |
365 | I_u2u1u3(_dsll); | 387 | I_u2u1u3(_dsll); |
@@ -376,8 +398,8 @@ I_u2s3u1(_ll); | |||
376 | I_u2s3u1(_lld); | 398 | I_u2s3u1(_lld); |
377 | I_u1s2(_lui); | 399 | I_u1s2(_lui); |
378 | I_u2s3u1(_lw); | 400 | I_u2s3u1(_lw); |
379 | I_u1u2(_mfc0); | 401 | I_u1u2u3(_mfc0); |
380 | I_u1u2(_mtc0); | 402 | I_u1u2u3(_mtc0); |
381 | I_u2u1u3(_ori); | 403 | I_u2u1u3(_ori); |
382 | I_0(_rfe); | 404 | I_0(_rfe); |
383 | I_u2s3u1(_sc); | 405 | I_u2s3u1(_sc); |
@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail) | |||
451 | # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh) | 473 | # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh) |
452 | # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh) | 474 | # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh) |
453 | # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh) | 475 | # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh) |
454 | # define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd) | 476 | # define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd) |
455 | # define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd) | 477 | # define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd) |
456 | # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val) | 478 | # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val) |
457 | # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd) | 479 | # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd) |
458 | # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd) | 480 | # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd) |
@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail) | |||
464 | # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh) | 486 | # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh) |
465 | # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh) | 487 | # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh) |
466 | # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh) | 488 | # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh) |
467 | # define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd) | 489 | # define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd) |
468 | # define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd) | 490 | # define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd) |
469 | # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val) | 491 | # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val) |
470 | # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd) | 492 | # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd) |
471 | # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd) | 493 | # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd) |
@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg, | |||
670 | #define K1 27 | 692 | #define K1 27 |
671 | 693 | ||
672 | /* Some CP0 registers */ | 694 | /* Some CP0 registers */ |
673 | #define C0_INDEX 0 | 695 | #define C0_INDEX 0, 0 |
674 | #define C0_ENTRYLO0 2 | 696 | #define C0_ENTRYLO0 2, 0 |
675 | #define C0_ENTRYLO1 3 | 697 | #define C0_TCBIND 2, 2 |
676 | #define C0_CONTEXT 4 | 698 | #define C0_ENTRYLO1 3, 0 |
677 | #define C0_BADVADDR 8 | 699 | #define C0_CONTEXT 4, 0 |
678 | #define C0_ENTRYHI 10 | 700 | #define C0_BADVADDR 8, 0 |
679 | #define C0_EPC 14 | 701 | #define C0_ENTRYHI 10, 0 |
680 | #define C0_XCONTEXT 20 | 702 | #define C0_EPC 14, 0 |
703 | #define C0_XCONTEXT 20, 0 | ||
681 | 704 | ||
682 | #ifdef CONFIG_64BIT | 705 | #ifdef CONFIG_64BIT |
683 | # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT) | 706 | # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT) |
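
The reason the CP0 defines above grow a second value: mfc0/mtc0 carry a 3-bit "sel" field (hence SET_MASK of 0x7 in the earlier hunk), and some registers are distinguished only by it. TCBind is CP0 register 2, select 2, while EntryLo0 is register 2, select 0. With the (reg, sel) pairs and the variadic i_MFC0 wrappers, a TCBind read synthesizes as:

        i_MFC0(p, K1, C0_TCBIND);       /* expands to i_mfc0(p, K1, 2, 2),
                                           emitting: mfc0 k1, $2, 2       */
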
@@ -951,12 +974,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r, | |||
951 | /* No i_nop needed here, since the next insn doesn't touch TMP. */ | 974 | /* No i_nop needed here, since the next insn doesn't touch TMP. */ |
952 | 975 | ||
953 | #ifdef CONFIG_SMP | 976 | #ifdef CONFIG_SMP |
977 | # ifdef CONFIG_MIPS_MT_SMTC | ||
978 | /* | ||
979 | * SMTC uses TCBind value as "CPU" index | ||
980 | */ | ||
981 | i_mfc0(p, ptr, C0_TCBIND); | ||
982 | i_dsrl(p, ptr, ptr, 19); | ||
983 | # else | ||
954 | /* | 984 | /* |
955 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 | 985 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 |
956 | * stored in CONTEXT. | 986 | * stored in CONTEXT. |
957 | */ | 987 | */ |
958 | i_dmfc0(p, ptr, C0_CONTEXT); | 988 | i_dmfc0(p, ptr, C0_CONTEXT); |
959 | i_dsrl(p, ptr, ptr, 23); | 989 | i_dsrl(p, ptr, ptr, 23); |
990 | #endif | ||
960 | i_LA_mostly(p, tmp, pgdc); | 991 | i_LA_mostly(p, tmp, pgdc); |
961 | i_daddu(p, ptr, ptr, tmp); | 992 | i_daddu(p, ptr, ptr, tmp); |
962 | i_dmfc0(p, tmp, C0_BADVADDR); | 993 | i_dmfc0(p, tmp, C0_BADVADDR); |
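
On the shift in the SMTC branch above (the 32-bit variant in the next hunk uses the same constant): assuming the MT-ASE TCBind layout in which CurTC occupies bits 28:21, an assumption of this note rather than something the patch states, a right shift by 19 leaves the TC number multiplied by four:

        /* Hedged sketch of the arithmetic the synthesized code performs;
         * bits 20:19 below CurTC are reserved-zero, hence no masking. */
        unsigned int tcbind = read_c0_tcbind();
        unsigned int offset = tcbind >> 19;     /* CurTC << 2 */

That is a byte offset sized for the 32-bit pgd_current lookup; whether the same scaling is right for the 64-bit table is not obvious from this hunk alone.
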
@@ -1014,9 +1045,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) | |||
1014 | 1045 | ||
1015 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ | 1046 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ |
1016 | #ifdef CONFIG_SMP | 1047 | #ifdef CONFIG_SMP |
1048 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1049 | /* | ||
1050 | * SMTC uses TCBind value as "CPU" index | ||
1051 | */ | ||
1052 | i_mfc0(p, ptr, C0_TCBIND); | ||
1053 | i_LA_mostly(p, tmp, pgdc); | ||
1054 | i_srl(p, ptr, ptr, 19); | ||
1055 | #else | ||
1056 | /* | ||
1057 | * smp_processor_id() << 3 is stored in CONTEXT. | ||
1058 | */ | ||
1017 | i_mfc0(p, ptr, C0_CONTEXT); | 1059 | i_mfc0(p, ptr, C0_CONTEXT); |
1018 | i_LA_mostly(p, tmp, pgdc); | 1060 | i_LA_mostly(p, tmp, pgdc); |
1019 | i_srl(p, ptr, ptr, 23); | 1061 | i_srl(p, ptr, ptr, 23); |
1062 | #endif | ||
1020 | i_addu(p, ptr, tmp, ptr); | 1063 | i_addu(p, ptr, tmp, ptr); |
1021 | #else | 1064 | #else |
1022 | i_LA_mostly(p, ptr, pgdc); | 1065 | i_LA_mostly(p, ptr, pgdc); |