author     Ralf Baechle <ralf@linux-mips.org>    2006-04-05 04:45:45 -0400
committer  Ralf Baechle <ralf@linux-mips.org>    2006-04-18 22:14:28 -0400
commit     41c594ab65fc89573af296d192aa5235d09717ab
tree       562462512a320f386bdf49eabfbb26bb3ee761fa /include
parent     2600990e640e3bef29ed89d565864cf16ee83833
[MIPS] MT: Improved multithreading support.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include')
 include/asm-mips/asmmacro.h    |  47
 include/asm-mips/cpu-info.h    |  10
 include/asm-mips/hazards.h     |   2
 include/asm-mips/interrupt.h   |  65
 include/asm-mips/irq.h         |  29
 include/asm-mips/mips_mt.h     |  15
 include/asm-mips/mipsmtregs.h  |  11
 include/asm-mips/mipsregs.h    | 133
 include/asm-mips/mmu_context.h | 112
 include/asm-mips/processor.h   |   6
 include/asm-mips/ptrace.h      |   4
 include/asm-mips/r4kcache.h    | 128
 include/asm-mips/smtc.h        |  55
 include/asm-mips/smtc_ipi.h    | 118
 include/asm-mips/smtc_proc.h   |  23
 include/asm-mips/stackframe.h  | 187
 16 files changed, 924 insertions(+), 21 deletions(-)
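
The theme running through the whole patch is that on an SMTC kernel (SMP via MIPS MT Thread Contexts), interrupt masking moves from the global Status.IE bit to the per-thread-context TCStatus.IXMT bit, so one TC can mask itself without blocking its siblings. A minimal C sketch of that distinction follows (illustration only, not part of the patch; the bit values match those used in the diffs below):

    #include <stdint.h>

    #define ST0_IE        0x00000001u  /* CP0 Status.IE: global interrupt enable */
    #define TCSTATUS_IXMT 0x00000400u  /* CP0 TCStatus.IXMT: per-TC exemption    */

    /* Classic MIPS masking: clear the one global enable bit. */
    static inline uint32_t classic_irq_disable(uint32_t status)
    {
            return status & ~ST0_IE;
    }

    /* SMTC masking: leave Status.IE alone and set IXMT, so only the
     * current thread context stops taking interrupts. */
    static inline uint32_t smtc_irq_disable(uint32_t tcstatus)
    {
            return tcstatus | TCSTATUS_IXMT;
    }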
diff --git a/include/asm-mips/asmmacro.h b/include/asm-mips/asmmacro.h
index 30b18ea6cb11..f54aa147ec19 100644
--- a/include/asm-mips/asmmacro.h
+++ b/include/asm-mips/asmmacro.h
@@ -17,7 +17,26 @@ | |||
17 | #ifdef CONFIG_64BIT | 17 | #ifdef CONFIG_64BIT |
18 | #include <asm/asmmacro-64.h> | 18 | #include <asm/asmmacro-64.h> |
19 | #endif | 19 | #endif |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #endif | ||
20 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
25 | .macro local_irq_enable reg=t0 | ||
26 | mfc0 \reg, CP0_TCSTATUS | ||
27 | ori \reg, \reg, TCSTATUS_IXMT | ||
28 | xori \reg, \reg, TCSTATUS_IXMT | ||
29 | mtc0 \reg, CP0_TCSTATUS | ||
30 | ehb | ||
31 | .endm | ||
32 | |||
33 | .macro local_irq_disable reg=t0 | ||
34 | mfc0 \reg, CP0_TCSTATUS | ||
35 | ori \reg, \reg, TCSTATUS_IXMT | ||
36 | mtc0 \reg, CP0_TCSTATUS | ||
37 | ehb | ||
38 | .endm | ||
39 | #else | ||
21 | .macro local_irq_enable reg=t0 | 40 | .macro local_irq_enable reg=t0 |
22 | mfc0 \reg, CP0_STATUS | 41 | mfc0 \reg, CP0_STATUS |
23 | ori \reg, \reg, 1 | 42 | ori \reg, \reg, 1 |
@@ -32,6 +51,7 @@ | |||
32 | mtc0 \reg, CP0_STATUS | 51 | mtc0 \reg, CP0_STATUS |
33 | irq_disable_hazard | 52 | irq_disable_hazard |
34 | .endm | 53 | .endm |
54 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
35 | 55 | ||
36 | #ifdef CONFIG_CPU_SB1 | 56 | #ifdef CONFIG_CPU_SB1 |
37 | .macro fpu_enable_hazard | 57 | .macro fpu_enable_hazard |
@@ -48,4 +68,31 @@ | |||
48 | .endm | 68 | .endm |
49 | #endif | 69 | #endif |
50 | 70 | ||
71 | /* | ||
72 | * Temporary until all versions of gas have MT ASE support | ||
73 | */ | ||
74 | .macro DMT reg=0 | ||
75 | .word (0x41600bc1 | (\reg << 16)) | ||
76 | .endm | ||
77 | |||
78 | .macro EMT reg=0 | ||
79 | .word (0x41600be1 | (\reg << 16)) | ||
80 | .endm | ||
81 | |||
82 | .macro DVPE reg=0 | ||
83 | .word (0x41600001 | (\reg << 16)) | ||
84 | .endm | ||
85 | |||
86 | .macro EVPE reg=0 | ||
87 | .word (0x41600021 | (\reg << 16)) | ||
88 | .endm | ||
89 | |||
90 | .macro MFTR rt=0, rd=0, u=0, sel=0 | ||
91 | .word (0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
92 | .endm | ||
93 | |||
94 | .macro MTTR rt=0, rd=0, u=0, sel=0 | ||
95 | .word (0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
96 | .endm | ||
97 | |||
51 | #endif /* _ASM_ASMMACRO_H */ | 98 | #endif /* _ASM_ASMMACRO_H */ |
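
The .word encodings above exist because assemblers of the era did not yet understand the MT ASE, so the instructions are hand-assembled. As a cross-check on the field layout, the packing can be written out in C (a sketch; the helper names are illustrative):

    #include <stdint.h>

    /* Mirror the MFTR macro: opcode base 0x41000000, rt in bits 20:16,
     * rd in bits 15:11, u in bit 5, sel in bits 2:0.  MTTR is identical
     * apart from its 0x41800000 base. */
    static inline uint32_t encode_mftr(unsigned rt, unsigned rd,
                                       unsigned u, unsigned sel)
    {
            return 0x41000000u | (rt << 16) | (rd << 11) | (u << 5) | sel;
    }

    /* DMT/EMT/DVPE/EVPE carry only the result-register field. */
    static inline uint32_t encode_dmt(unsigned reg)
    {
            return 0x41600bc1u | (reg << 16);
    }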
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index 140be1c67da7..6572ac703662 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -73,6 +73,16 @@ struct cpuinfo_mips { | |||
73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ | 73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ |
74 | struct cache_desc scache; /* Secondary cache */ | 74 | struct cache_desc scache; /* Secondary cache */ |
75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ | 75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ |
76 | #if defined(CONFIG_MIPS_MT_SMTC) | ||
77 | /* | ||
78 | * In the MIPS MT "SMTC" model, each TC is considered | ||
79 | * to be a "CPU" for the purposes of scheduling, but | ||
80 | * exception resources, ASID spaces, etc., are common | ||
81 | * to all TCs within the same VPE. | ||
82 | */ | ||
83 | int vpe_id; /* Virtual Processor number */ | ||
84 | int tc_id; /* Thread Context number */ | ||
85 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
76 | void *data; /* Additional data */ | 86 | void *data; /* Additional data */ |
77 | } __attribute__((aligned(SMP_CACHE_BYTES))); | 87 | } __attribute__((aligned(SMP_CACHE_BYTES))); |
78 | 88 | ||
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index feb29a793888..dadc05188db7 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -284,6 +284,8 @@ do { \ | |||
284 | #define instruction_hazard() do { } while (0) | 284 | #define instruction_hazard() do { } while (0) |
285 | #endif | 285 | #endif |
286 | 286 | ||
287 | extern void mips_ihb(void); | ||
288 | |||
287 | #endif /* __ASSEMBLY__ */ | 289 | #endif /* __ASSEMBLY__ */ |
288 | 290 | ||
289 | #endif /* _ASM_HAZARDS_H */ | 291 | #endif /* _ASM_HAZARDS_H */ |
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index 774348734fa0..4bb9c06f4410 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -19,7 +19,12 @@ __asm__ ( | |||
19 | " .set push \n" | 19 | " .set push \n" |
20 | " .set reorder \n" | 20 | " .set reorder \n" |
21 | " .set noat \n" | 21 | " .set noat \n" |
22 | #ifdef CONFIG_CPU_MIPSR2 | 22 | #ifdef CONFIG_MIPS_MT_SMTC |
23 | " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" | ||
24 | " ori $1, 0x400 \n" | ||
25 | " xori $1, 0x400 \n" | ||
26 | " mtc0 $1, $2, 1 \n" | ||
27 | #elif defined(CONFIG_CPU_MIPSR2) | ||
23 | " ei \n" | 28 | " ei \n" |
24 | #else | 29 | #else |
25 | " mfc0 $1,$12 \n" | 30 | " mfc0 $1,$12 \n" |
@@ -62,7 +67,12 @@ __asm__ ( | |||
62 | " .macro local_irq_disable\n" | 67 | " .macro local_irq_disable\n" |
63 | " .set push \n" | 68 | " .set push \n" |
64 | " .set noat \n" | 69 | " .set noat \n" |
65 | #ifdef CONFIG_CPU_MIPSR2 | 70 | #ifdef CONFIG_MIPS_MT_SMTC |
71 | " mfc0 $1, $2, 1 \n" | ||
72 | " ori $1, 0x400 \n" | ||
73 | " .set noreorder \n" | ||
74 | " mtc0 $1, $2, 1 \n" | ||
75 | #elif defined(CONFIG_CPU_MIPSR2) | ||
66 | " di \n" | 76 | " di \n" |
67 | #else | 77 | #else |
68 | " mfc0 $1,$12 \n" | 78 | " mfc0 $1,$12 \n" |
@@ -88,7 +98,11 @@ __asm__ ( | |||
88 | " .macro local_save_flags flags \n" | 98 | " .macro local_save_flags flags \n" |
89 | " .set push \n" | 99 | " .set push \n" |
90 | " .set reorder \n" | 100 | " .set reorder \n" |
101 | #ifdef CONFIG_MIPS_MT_SMTC | ||
102 | " mfc0 \\flags, $2, 1 \n" | ||
103 | #else | ||
91 | " mfc0 \\flags, $12 \n" | 104 | " mfc0 \\flags, $12 \n" |
105 | #endif | ||
92 | " .set pop \n" | 106 | " .set pop \n" |
93 | " .endm \n"); | 107 | " .endm \n"); |
94 | 108 | ||
@@ -102,7 +116,13 @@ __asm__ ( | |||
102 | " .set push \n" | 116 | " .set push \n" |
103 | " .set reorder \n" | 117 | " .set reorder \n" |
104 | " .set noat \n" | 118 | " .set noat \n" |
105 | #ifdef CONFIG_CPU_MIPSR2 | 119 | #ifdef CONFIG_MIPS_MT_SMTC |
120 | " mfc0 \\result, $2, 1 \n" | ||
121 | " ori $1, \\result, 0x400 \n" | ||
122 | " .set noreorder \n" | ||
123 | " mtc0 $1, $2, 1 \n" | ||
124 | " andi \\result, \\result, 0x400 \n" | ||
125 | #elif defined(CONFIG_CPU_MIPSR2) | ||
106 | " di \\result \n" | 126 | " di \\result \n" |
107 | " andi \\result, 1 \n" | 127 | " andi \\result, 1 \n" |
108 | #else | 128 | #else |
@@ -128,7 +148,14 @@ __asm__ ( | |||
128 | " .set push \n" | 148 | " .set push \n" |
129 | " .set noreorder \n" | 149 | " .set noreorder \n" |
130 | " .set noat \n" | 150 | " .set noat \n" |
131 | #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | 151 | #ifdef CONFIG_MIPS_MT_SMTC |
152 | "mfc0 $1, $2, 1 \n" | ||
153 | "andi \\flags, 0x400 \n" | ||
154 | "ori $1, 0x400 \n" | ||
155 | "xori $1, 0x400 \n" | ||
156 | "or \\flags, $1 \n" | ||
157 | "mtc0 \\flags, $2, 1 \n" | ||
158 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
132 | /* | 159 | /* |
133 | * Slow, but doesn't suffer from a relatively unlikely race | 160 | * Slow, but doesn't suffer from a relatively unlikely race |
134 | * condition we've had since day 1. | 161 | * condition we've had since day 1. |
@@ -167,11 +194,29 @@ do { \ | |||
167 | : "memory"); \ | 194 | : "memory"); \ |
168 | } while(0) | 195 | } while(0) |
169 | 196 | ||
170 | #define irqs_disabled() \ | 197 | static inline int irqs_disabled(void) |
171 | ({ \ | 198 | { |
172 | unsigned long flags; \ | 199 | #ifdef CONFIG_MIPS_MT_SMTC |
173 | local_save_flags(flags); \ | 200 | /* |
174 | !(flags & 1); \ | 201 | * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU |
175 | }) | 202 | */ |
203 | unsigned long __result; | ||
204 | |||
205 | __asm__ __volatile__( | ||
206 | " .set noreorder \n" | ||
207 | " mfc0 %0, $2, 1 \n" | ||
208 | " andi %0, 0x400 \n" | ||
209 | " slt %0, $0, %0 \n" | ||
210 | " .set reorder \n" | ||
211 | : "=r" (__result)); | ||
212 | |||
213 | return __result; | ||
214 | #else | ||
215 | unsigned long flags; | ||
216 | local_save_flags(flags); | ||
217 | |||
218 | return !(flags & 1); | ||
219 | #endif | ||
220 | } | ||
176 | 221 | ||
177 | #endif /* _ASM_INTERRUPT_H */ | 222 | #endif /* _ASM_INTERRUPT_H */ |
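
With this change, "interrupts disabled" becomes a per-TC predicate on TCStatus.IXMT rather than a test of Status.IE. The SMTC branch of irqs_disabled() reduces to the following (a C sketch; read_c0_tcstatus() stands in for the mfc0 of CP0 register $2, select 1 performed by the inline assembly above):

    static inline int smtc_irqs_disabled(void)
    {
            /* Bit 10 (0x400) of TCStatus is IXMT: when set, this thread
             * context is exempt from -- i.e. masked against -- interrupts. */
            return (read_c0_tcstatus() & 0x400) != 0;
    }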
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index d7aecca3b95f..dde677f02bc0 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -11,6 +11,9 @@ | |||
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | |||
15 | #include <asm/mipsmtregs.h> | ||
16 | |||
14 | #include <irq.h> | 17 | #include <irq.h> |
15 | 18 | ||
16 | #ifdef CONFIG_I8259 | 19 | #ifdef CONFIG_I8259 |
@@ -26,6 +29,23 @@ struct pt_regs; | |||
26 | 29 | ||
27 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | 30 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); |
28 | 31 | ||
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | /* | ||
34 | * Clear interrupt mask handling "backstop" if irq_hwmask | ||
35 | * entry so indicates. This implies that the ack() or end() | ||
36 | * functions will take over re-enabling the low-level mask. | ||
37 | * Otherwise it will be done on return from exception. | ||
38 | */ | ||
39 | #define __DO_IRQ_SMTC_HOOK() \ | ||
40 | do { \ | ||
41 | if (irq_hwmask[irq] & 0x0000ff00) \ | ||
42 | write_c0_tccontext(read_c0_tccontext() & \ | ||
43 | ~(irq_hwmask[irq] & 0x0000ff00)); \ | ||
44 | } while (0) | ||
45 | #else | ||
46 | #define __DO_IRQ_SMTC_HOOK() do { } while (0) | ||
47 | #endif | ||
48 | |||
29 | #ifdef CONFIG_PREEMPT | 49 | #ifdef CONFIG_PREEMPT |
30 | 50 | ||
31 | /* | 51 | /* |
@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | |||
39 | #define do_IRQ(irq, regs) \ | 59 | #define do_IRQ(irq, regs) \ |
40 | do { \ | 60 | do { \ |
41 | irq_enter(); \ | 61 | irq_enter(); \ |
62 | __DO_IRQ_SMTC_HOOK(); \ | ||
42 | __do_IRQ((irq), (regs)); \ | 63 | __do_IRQ((irq), (regs)); \ |
43 | irq_exit(); \ | 64 | irq_exit(); \ |
44 | } while (0) | 65 | } while (0) |
@@ -48,4 +69,12 @@ do { \ | |||
48 | extern void arch_init_irq(void); | 69 | extern void arch_init_irq(void); |
49 | extern void spurious_interrupt(struct pt_regs *regs); | 70 | extern void spurious_interrupt(struct pt_regs *regs); |
50 | 71 | ||
72 | #ifdef CONFIG_MIPS_MT_SMTC | ||
73 | struct irqaction; | ||
74 | |||
75 | extern unsigned long irq_hwmask[]; | ||
76 | extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
77 | unsigned long hwmask); | ||
78 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
79 | |||
51 | #endif /* _ASM_IRQ_H */ | 80 | #endif /* _ASM_IRQ_H */ |
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
new file mode 100644
index 000000000000..c31a312b9783
--- /dev/null
+++ b/include/asm-mips/mips_mt.h
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Definitions and declarations for MIPS MT support | ||
3 | * that are common between SMTC, VSMP, and/or AP/SP | ||
4 | * kernel models. | ||
5 | */ | ||
6 | #ifndef __ASM_MIPS_MT_H | ||
7 | #define __ASM_MIPS_MT_H | ||
8 | |||
9 | extern cpumask_t mt_fpu_cpumask; | ||
10 | extern unsigned long mt_fpemul_threshold; | ||
11 | |||
12 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); | ||
13 | extern void mips_mt_set_cpuoptions(void); | ||
14 | |||
15 | #endif /* __ASM_MIPS_MT_H */ | ||
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index a5ac1a62f4f4..f637ce70758f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,7 +165,7 @@ | |||
165 | 165 | ||
166 | #ifndef __ASSEMBLY__ | 166 | #ifndef __ASSEMBLY__ |
167 | 167 | ||
168 | extern void mips_mt_regdump(void); | 168 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); |
169 | 169 | ||
170 | static inline unsigned int dvpe(void) | 170 | static inline unsigned int dvpe(void) |
171 | { | 171 | { |
@@ -282,8 +282,11 @@ static inline void ehb(void) | |||
282 | \ | 282 | \ |
283 | __asm__ __volatile__( \ | 283 | __asm__ __volatile__( \ |
284 | " .set push \n" \ | 284 | " .set push \n" \ |
285 | " .set noat \n" \ | ||
285 | " .set mips32r2 \n" \ | 286 | " .set mips32r2 \n" \ |
286 | " mftgpr %0," #rt " \n" \ | 287 | " # mftgpr $1," #rt " \n" \ |
288 | " .word 0x41000820 | (" #rt " << 16) \n" \ | ||
289 | " move %0, $1 \n" \ | ||
287 | " .set pop \n" \ | 290 | " .set pop \n" \ |
288 | : "=r" (__res)); \ | 291 | : "=r" (__res)); \ |
289 | \ | 292 | \ |
@@ -295,9 +298,7 @@ static inline void ehb(void) | |||
295 | unsigned long __res; \ | 298 | unsigned long __res; \ |
296 | \ | 299 | \ |
297 | __asm__ __volatile__( \ | 300 | __asm__ __volatile__( \ |
298 | ".set noat\n\t" \ | 301 | " mftr %0, " #rt ", " #u ", " #sel " \n" \ |
299 | "mftr\t%0, " #rt ", " #u ", " #sel "\n\t" \ | ||
300 | ".set at\n\t" \ | ||
301 | : "=r" (__res)); \ | 302 | : "=r" (__res)); \ |
302 | \ | 303 | \ |
303 | __res; \ | 304 | __res; \ |
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index e85a42e2ea0c..a2ef579f6b1a 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -861,7 +861,19 @@ do { \ | |||
861 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) | 861 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) |
862 | 862 | ||
863 | #define read_c0_status() __read_32bit_c0_register($12, 0) | 863 | #define read_c0_status() __read_32bit_c0_register($12, 0) |
864 | #ifdef CONFIG_MIPS_MT_SMTC | ||
865 | #define write_c0_status(val) \ | ||
866 | do { \ | ||
867 | __write_32bit_c0_register($12, 0, val); \ | ||
868 | __ehb(); \ | ||
869 | } while (0) | ||
870 | #else | ||
871 | /* | ||
872 | * Legacy non-SMTC code, which may be hazardous | ||
873 | * but which might not support EHB | ||
874 | */ | ||
864 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) | 875 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) |
876 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
865 | 877 | ||
866 | #define read_c0_cause() __read_32bit_c0_register($13, 0) | 878 | #define read_c0_cause() __read_32bit_c0_register($13, 0) |
867 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) | 879 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) |
@@ -1004,6 +1016,9 @@ do { \ | |||
1004 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) | 1016 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) |
1005 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) | 1017 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) |
1006 | 1018 | ||
1019 | #define read_c0_dtaglo() __read_32bit_c0_register($28, 2) | ||
1020 | #define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val) | ||
1021 | |||
1007 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) | 1022 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) |
1008 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) | 1023 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) |
1009 | 1024 | ||
@@ -1357,6 +1372,11 @@ static inline void tlb_write_random(void) | |||
1357 | /* | 1372 | /* |
1358 | * Manipulate bits in a c0 register. | 1373 | * Manipulate bits in a c0 register. |
1359 | */ | 1374 | */ |
1375 | #ifndef CONFIG_MIPS_MT_SMTC | ||
1376 | /* | ||
1377 | * SMTC Linux requires shutting-down microthread scheduling | ||
1378 | * during CP0 register read-modify-write sequences. | ||
1379 | */ | ||
1360 | #define __BUILD_SET_C0(name) \ | 1380 | #define __BUILD_SET_C0(name) \ |
1361 | static inline unsigned int \ | 1381 | static inline unsigned int \ |
1362 | set_c0_##name(unsigned int set) \ | 1382 | set_c0_##name(unsigned int set) \ |
@@ -1395,6 +1415,119 @@ change_c0_##name(unsigned int change, unsigned int new) \ | |||
1395 | return res; \ | 1415 | return res; \ |
1396 | } | 1416 | } |
1397 | 1417 | ||
1418 | #else /* SMTC versions that manage MT scheduling */ | ||
1419 | |||
1420 | #include <asm/interrupt.h> | ||
1421 | |||
1422 | /* | ||
1423 | * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with | ||
1424 | * header file recursion. | ||
1425 | */ | ||
1426 | static inline unsigned int __dmt(void) | ||
1427 | { | ||
1428 | int res; | ||
1429 | |||
1430 | __asm__ __volatile__( | ||
1431 | " .set push \n" | ||
1432 | " .set mips32r2 \n" | ||
1433 | " .set noat \n" | ||
1434 | " .word 0x41610BC1 # dmt $1 \n" | ||
1435 | " ehb \n" | ||
1436 | " move %0, $1 \n" | ||
1437 | " .set pop \n" | ||
1438 | : "=r" (res)); | ||
1439 | |||
1440 | instruction_hazard(); | ||
1441 | |||
1442 | return res; | ||
1443 | } | ||
1444 | |||
1445 | #define __VPECONTROL_TE_SHIFT 15 | ||
1446 | #define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT) | ||
1447 | |||
1448 | #define __EMT_ENABLE __VPECONTROL_TE | ||
1449 | |||
1450 | static inline void __emt(unsigned int previous) | ||
1451 | { | ||
1452 | if ((previous & __EMT_ENABLE)) | ||
1453 | __asm__ __volatile__( | ||
1454 | " .set noreorder \n" | ||
1455 | " .set mips32r2 \n" | ||
1456 | " .word 0x41600be1 # emt \n" | ||
1457 | " ehb \n" | ||
1458 | " .set mips0 \n" | ||
1459 | " .set reorder \n"); | ||
1460 | } | ||
1461 | |||
1462 | static inline void __ehb(void) | ||
1463 | { | ||
1464 | __asm__ __volatile__( | ||
1465 | " ehb \n"); | ||
1466 | } | ||
1467 | |||
1468 | /* | ||
1469 | * Note that local_irq_save/restore affect TC-specific IXMT state, | ||
1470 | * not Status.IE as in non-SMTC kernel. | ||
1471 | */ | ||
1472 | |||
1473 | #define __BUILD_SET_C0(name) \ | ||
1474 | static inline unsigned int \ | ||
1475 | set_c0_##name(unsigned int set) \ | ||
1476 | { \ | ||
1477 | unsigned int res; \ | ||
1478 | unsigned int omt; \ | ||
1479 | unsigned int flags; \ | ||
1480 | \ | ||
1481 | local_irq_save(flags); \ | ||
1482 | omt = __dmt(); \ | ||
1483 | res = read_c0_##name(); \ | ||
1484 | res |= set; \ | ||
1485 | write_c0_##name(res); \ | ||
1486 | __emt(omt); \ | ||
1487 | local_irq_restore(flags); \ | ||
1488 | \ | ||
1489 | return res; \ | ||
1490 | } \ | ||
1491 | \ | ||
1492 | static inline unsigned int \ | ||
1493 | clear_c0_##name(unsigned int clear) \ | ||
1494 | { \ | ||
1495 | unsigned int res; \ | ||
1496 | unsigned int omt; \ | ||
1497 | unsigned int flags; \ | ||
1498 | \ | ||
1499 | local_irq_save(flags); \ | ||
1500 | omt = __dmt(); \ | ||
1501 | res = read_c0_##name(); \ | ||
1502 | res &= ~clear; \ | ||
1503 | write_c0_##name(res); \ | ||
1504 | __emt(omt); \ | ||
1505 | local_irq_restore(flags); \ | ||
1506 | \ | ||
1507 | return res; \ | ||
1508 | } \ | ||
1509 | \ | ||
1510 | static inline unsigned int \ | ||
1511 | change_c0_##name(unsigned int change, unsigned int new) \ | ||
1512 | { \ | ||
1513 | unsigned int res; \ | ||
1514 | unsigned int omt; \ | ||
1515 | unsigned int flags; \ | ||
1516 | \ | ||
1517 | local_irq_save(flags); \ | ||
1518 | \ | ||
1519 | omt = __dmt(); \ | ||
1520 | res = read_c0_##name(); \ | ||
1521 | res &= ~change; \ | ||
1522 | res |= (new & change); \ | ||
1523 | write_c0_##name(res); \ | ||
1524 | __emt(omt); \ | ||
1525 | local_irq_restore(flags); \ | ||
1526 | \ | ||
1527 | return res; \ | ||
1528 | } | ||
1529 | #endif | ||
1530 | |||
1398 | __BUILD_SET_C0(status) | 1531 | __BUILD_SET_C0(status) |
1399 | __BUILD_SET_C0(cause) | 1532 | __BUILD_SET_C0(cause) |
1400 | __BUILD_SET_C0(config) | 1533 | __BUILD_SET_C0(config) |
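
The point of the SMTC __BUILD_SET_C0 variant is that a CP0 read-modify-write is no longer atomic once several TCs share a VPE, so the whole sequence is bracketed by DMT/EMT on top of per-TC IRQ masking. Expanded by hand for the status register, the generated function behaves roughly as follows (a sketch, not literal preprocessor output):

    static inline unsigned int set_c0_status(unsigned int set)
    {
            unsigned int res, omt, flags;

            local_irq_save(flags);    /* per-TC: sets TCStatus.IXMT        */
            omt = __dmt();            /* stop the other TCs on this VPE    */
            res = read_c0_status();
            res |= set;
            write_c0_status(res);     /* SMTC variant also issues an ehb   */
            __emt(omt);               /* restart MT only if it was running */
            local_irq_restore(flags);

            return res;
    }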
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 61cf22588137..6e09f4c87211 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -17,6 +17,10 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #include <asm/smtc.h> | ||
23 | #endif /* SMTC */ | ||
20 | 24 | ||
21 | /* | 25 | /* |
22 | * For the fast tlb miss handlers, we keep a per cpu array of pointers | 26 | * For the fast tlb miss handlers, we keep a per cpu array of pointers |
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[]; | |||
54 | #define ASID_INC 0x1 | 58 | #define ASID_INC 0x1 |
55 | #define ASID_MASK 0xfff | 59 | #define ASID_MASK 0xfff |
56 | 60 | ||
61 | /* SMTC/34K debug hack - but maybe we'll keep it */ | ||
62 | #elif defined(CONFIG_MIPS_MT_SMTC) | ||
63 | |||
64 | #define ASID_INC 0x1 | ||
65 | extern unsigned long smtc_asid_mask; | ||
66 | #define ASID_MASK (smtc_asid_mask) | ||
67 | #define HW_ASID_MASK 0xff | ||
68 | /* End SMTC/34K debug hack */ | ||
57 | #else /* FIXME: not correct for R6000 */ | 69 | #else /* FIXME: not correct for R6000 */ |
58 | 70 | ||
59 | #define ASID_INC 0x1 | 71 | #define ASID_INC 0x1 |
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
76 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) | 88 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) |
77 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) | 89 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) |
78 | 90 | ||
91 | #ifndef CONFIG_MIPS_MT_SMTC | ||
92 | /* Normal, classic MIPS get_new_mmu_context */ | ||
79 | static inline void | 93 | static inline void |
80 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | 94 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) |
81 | { | 95 | { |
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
91 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | 105 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; |
92 | } | 106 | } |
93 | 107 | ||
108 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
109 | |||
110 | #define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu)) | ||
111 | |||
112 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
113 | |||
94 | /* | 114 | /* |
95 | * Initialize the context related info for a new mm_struct | 115 | * Initialize the context related info for a new mm_struct |
96 | * instance. | 116 | * instance. |
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
111 | { | 131 | { |
112 | unsigned int cpu = smp_processor_id(); | 132 | unsigned int cpu = smp_processor_id(); |
113 | unsigned long flags; | 133 | unsigned long flags; |
114 | 134 | #ifdef CONFIG_MIPS_MT_SMTC | |
135 | unsigned long oldasid; | ||
136 | unsigned long mtflags; | ||
137 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
115 | local_irq_save(flags); | 138 | local_irq_save(flags); |
139 | mtflags = dvpe(); | ||
140 | #else /* Not SMTC */ | ||
141 | local_irq_save(flags); | ||
142 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
116 | 143 | ||
117 | /* Check if our ASID is of an older version and thus invalid */ | 144 | /* Check if our ASID is of an older version and thus invalid */ |
118 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | 145 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) |
119 | get_new_mmu_context(next, cpu); | 146 | get_new_mmu_context(next, cpu); |
120 | 147 | #ifdef CONFIG_MIPS_MT_SMTC | |
148 | /* | ||
149 | * If the EntryHi ASID being replaced happens to be | ||
150 | * the value flagged at ASID recycling time as having | ||
151 | * an extended life, clear the bit showing it being | ||
152 | * in use by this "CPU", and if that's the last bit, | ||
153 | * free up the ASID value for use and flush any old | ||
154 | * instances of it from the TLB. | ||
155 | */ | ||
156 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
157 | if(smtc_live_asid[mytlb][oldasid]) { | ||
158 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
159 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
160 | smtc_flush_tlb_asid(oldasid); | ||
161 | } | ||
162 | /* | ||
163 | * Tread softly on EntryHi, and so long as we support | ||
164 | * having ASID_MASK smaller than the hardware maximum, | ||
165 | * make sure no "soft" bits become "hard"... | ||
166 | */ | ||
167 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
168 | | (cpu_context(cpu, next) & ASID_MASK)); | ||
169 | ehb(); /* Make sure it propagates to TCStatus */ | ||
170 | evpe(mtflags); | ||
171 | #else | ||
121 | write_c0_entryhi(cpu_context(cpu, next)); | 172 | write_c0_entryhi(cpu_context(cpu, next)); |
173 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
122 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 174 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
123 | 175 | ||
124 | /* | 176 | /* |
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
151 | unsigned long flags; | 203 | unsigned long flags; |
152 | unsigned int cpu = smp_processor_id(); | 204 | unsigned int cpu = smp_processor_id(); |
153 | 205 | ||
206 | #ifdef CONFIG_MIPS_MT_SMTC | ||
207 | unsigned long oldasid; | ||
208 | unsigned long mtflags; | ||
209 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
210 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
211 | |||
154 | local_irq_save(flags); | 212 | local_irq_save(flags); |
155 | 213 | ||
156 | /* Unconditionally get a new ASID. */ | 214 | /* Unconditionally get a new ASID. */ |
157 | get_new_mmu_context(next, cpu); | 215 | get_new_mmu_context(next, cpu); |
158 | 216 | ||
217 | #ifdef CONFIG_MIPS_MT_SMTC | ||
218 | /* See comments for similar code above */ | ||
219 | mtflags = dvpe(); | ||
220 | oldasid = read_c0_entryhi() & ASID_MASK; | ||
221 | if(smtc_live_asid[mytlb][oldasid]) { | ||
222 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
223 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
224 | smtc_flush_tlb_asid(oldasid); | ||
225 | } | ||
226 | /* See comments for similar code above */ | ||
227 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | ||
228 | (cpu_context(cpu, next) & ASID_MASK)); | ||
229 | ehb(); /* Make sure it propagates to TCStatus */ | ||
230 | evpe(mtflags); | ||
231 | #else | ||
159 | write_c0_entryhi(cpu_context(cpu, next)); | 232 | write_c0_entryhi(cpu_context(cpu, next)); |
233 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
160 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 234 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
161 | 235 | ||
162 | /* mark mmu ownership change */ | 236 | /* mark mmu ownership change */ |
@@ -174,17 +248,49 @@ static inline void | |||
174 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) | 248 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) |
175 | { | 249 | { |
176 | unsigned long flags; | 250 | unsigned long flags; |
251 | #ifdef CONFIG_MIPS_MT_SMTC | ||
252 | unsigned long oldasid; | ||
253 | /* Can't use spinlock because called from TLB flush within DVPE */ | ||
254 | unsigned int prevvpe; | ||
255 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
256 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
177 | 257 | ||
178 | local_irq_save(flags); | 258 | local_irq_save(flags); |
179 | 259 | ||
180 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { | 260 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { |
181 | get_new_mmu_context(mm, cpu); | 261 | get_new_mmu_context(mm, cpu); |
262 | #ifdef CONFIG_MIPS_MT_SMTC | ||
263 | /* See comments for similar code above */ | ||
264 | prevvpe = dvpe(); | ||
265 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
266 | if(smtc_live_asid[mytlb][oldasid]) { | ||
267 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
268 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
269 | smtc_flush_tlb_asid(oldasid); | ||
270 | } | ||
271 | /* See comments for similar code above */ | ||
272 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
273 | | cpu_asid(cpu, mm)); | ||
274 | ehb(); /* Make sure it propagates to TCStatus */ | ||
275 | evpe(prevvpe); | ||
276 | #else /* not CONFIG_MIPS_MT_SMTC */ | ||
182 | write_c0_entryhi(cpu_asid(cpu, mm)); | 277 | write_c0_entryhi(cpu_asid(cpu, mm)); |
278 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
183 | } else { | 279 | } else { |
184 | /* will get a new context next time */ | 280 | /* will get a new context next time */ |
281 | #ifndef CONFIG_MIPS_MT_SMTC | ||
185 | cpu_context(cpu, mm) = 0; | 282 | cpu_context(cpu, mm) = 0; |
283 | #else /* SMTC */ | ||
284 | int i; | ||
285 | |||
286 | /* SMTC shares the TLB (and ASIDs) across VPEs */ | ||
287 | for (i = 0; i < num_online_cpus(); i++) { | ||
288 | if((smtc_status & SMTC_TLB_SHARED) | ||
289 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
290 | cpu_context(i, mm) = 0; | ||
291 | } | ||
292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
186 | } | 293 | } |
187 | |||
188 | local_irq_restore(flags); | 294 | local_irq_restore(flags); |
189 | } | 295 | } |
190 | 296 | ||
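
The SMTC blocks repeated in switch_mm(), activate_mm() and drop_mmu_context() all perform the same "live ASID" release: each bit of an smtc_live_asid[tlb][asid] entry marks one CPU (TC) still using that ASID, and the TLB is flushed for that ASID only when the last user drops it. Factored out for clarity (the helper name is illustrative; the patch open-codes this at each site):

    /* Release this CPU's claim on the ASID currently in EntryHi and
     * flush stale translations once nobody holds it any more. */
    static inline void smtc_release_asid(int mytlb, unsigned long oldasid,
                                         unsigned int cpu)
    {
            if (smtc_live_asid[mytlb][oldasid]) {
                    smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                    if (smtc_live_asid[mytlb][oldasid] == 0)
                            smtc_flush_tlb_asid(oldasid);
            }
    }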
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 39d2bd50fece..786651340de1 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -12,6 +12,7 @@ | |||
12 | #define _ASM_PROCESSOR_H | 12 | #define _ASM_PROCESSOR_H |
13 | 13 | ||
14 | #include <linux/config.h> | 14 | #include <linux/config.h> |
15 | #include <linux/cpumask.h> | ||
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | 17 | ||
17 | #include <asm/cachectl.h> | 18 | #include <asm/cachectl.h> |
@@ -107,6 +108,10 @@ struct mips_dsp_state { | |||
107 | 108 | ||
108 | #define INIT_DSP {{0,},} | 109 | #define INIT_DSP {{0,},} |
109 | 110 | ||
111 | #define INIT_CPUMASK { \ | ||
112 | {0,} \ | ||
113 | } | ||
114 | |||
110 | typedef struct { | 115 | typedef struct { |
111 | unsigned long seg; | 116 | unsigned long seg; |
112 | } mm_segment_t; | 117 | } mm_segment_t; |
@@ -142,6 +147,7 @@ struct thread_struct { | |||
142 | #define MF_LOGADE 2 /* Log address errors to syslog */ | 147 | #define MF_LOGADE 2 /* Log address errors to syslog */ |
143 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ | 148 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ |
144 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ | 149 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ |
150 | #define MF_FPUBOUND 0x10 /* thread bound to FPU-full CPU set */ | ||
145 | unsigned long mflags; | 151 | unsigned long mflags; |
146 | unsigned long irix_trampoline; /* Wheee... */ | 152 | unsigned long irix_trampoline; /* Wheee... */ |
147 | unsigned long irix_oldctx; | 153 | unsigned long irix_oldctx; |
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 95c5839ac465..fa9d8713c12a 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -45,6 +45,10 @@ struct pt_regs { | |||
45 | unsigned long cp0_badvaddr; | 45 | unsigned long cp0_badvaddr; |
46 | unsigned long cp0_cause; | 46 | unsigned long cp0_cause; |
47 | unsigned long cp0_epc; | 47 | unsigned long cp0_epc; |
48 | #ifdef CONFIG_MIPS_MT_SMTC | ||
49 | unsigned long cp0_tcstatus; | ||
50 | unsigned long smtc_pad; | ||
51 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
48 | }; | 52 | }; |
49 | 53 | ||
50 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | 54 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ |
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index 2f2eb95387f6..3c8e3c8d1a9a 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/asm.h> | 15 | #include <asm/asm.h> |
16 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
17 | #include <asm/cpu-features.h> | 17 | #include <asm/cpu-features.h> |
18 | #include <asm/mipsmtregs.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * This macro returns a properly sign-extended address suitable as a base address | 21 | * This macro returns a properly sign-extended address suitable as a base address |
@@ -39,14 +40,118 @@ | |||
39 | : \ | 40 | : \ |
40 | : "i" (op), "R" (*(unsigned char *)(addr))) | 41 | : "i" (op), "R" (*(unsigned char *)(addr))) |
41 | 42 | ||
43 | #ifdef CONFIG_MIPS_MT | ||
44 | /* | ||
45 | * Temporary hacks for SMTC debug. Optionally force single-threaded | ||
46 | * execution during I-cache flushes. | ||
47 | */ | ||
48 | |||
49 | #define PROTECT_CACHE_FLUSHES 1 | ||
50 | |||
51 | #ifdef PROTECT_CACHE_FLUSHES | ||
52 | |||
53 | extern int mt_protiflush; | ||
54 | extern int mt_protdflush; | ||
55 | extern void mt_cflush_lockdown(void); | ||
56 | extern void mt_cflush_release(void); | ||
57 | |||
58 | #define BEGIN_MT_IPROT \ | ||
59 | unsigned long flags = 0; \ | ||
60 | unsigned long mtflags = 0; \ | ||
61 | if(mt_protiflush) { \ | ||
62 | local_irq_save(flags); \ | ||
63 | ehb(); \ | ||
64 | mtflags = dvpe(); \ | ||
65 | mt_cflush_lockdown(); \ | ||
66 | } | ||
67 | |||
68 | #define END_MT_IPROT \ | ||
69 | if(mt_protiflush) { \ | ||
70 | mt_cflush_release(); \ | ||
71 | evpe(mtflags); \ | ||
72 | local_irq_restore(flags); \ | ||
73 | } | ||
74 | |||
75 | #define BEGIN_MT_DPROT \ | ||
76 | unsigned long flags = 0; \ | ||
77 | unsigned long mtflags = 0; \ | ||
78 | if(mt_protdflush) { \ | ||
79 | local_irq_save(flags); \ | ||
80 | ehb(); \ | ||
81 | mtflags = dvpe(); \ | ||
82 | mt_cflush_lockdown(); \ | ||
83 | } | ||
84 | |||
85 | #define END_MT_DPROT \ | ||
86 | if(mt_protdflush) { \ | ||
87 | mt_cflush_release(); \ | ||
88 | evpe(mtflags); \ | ||
89 | local_irq_restore(flags); \ | ||
90 | } | ||
91 | |||
92 | #else | ||
93 | |||
94 | #define BEGIN_MT_IPROT | ||
95 | #define BEGIN_MT_DPROT | ||
96 | #define END_MT_IPROT | ||
97 | #define END_MT_DPROT | ||
98 | |||
99 | #endif /* PROTECT_CACHE_FLUSHES */ | ||
100 | |||
101 | #define __iflush_prologue \ | ||
102 | unsigned long redundance; \ | ||
103 | extern int mt_n_iflushes; \ | ||
104 | BEGIN_MT_IPROT \ | ||
105 | for (redundance = 0; redundance < mt_n_iflushes; redundance++) { | ||
106 | |||
107 | #define __iflush_epilogue \ | ||
108 | END_MT_IPROT \ | ||
109 | } | ||
110 | |||
111 | #define __dflush_prologue \ | ||
112 | unsigned long redundance; \ | ||
113 | extern int mt_n_dflushes; \ | ||
114 | BEGIN_MT_DPROT \ | ||
115 | for (redundance = 0; redundance < mt_n_dflushes; redundance++) { | ||
116 | |||
117 | #define __dflush_epilogue \ | ||
118 | END_MT_DPROT \ | ||
119 | } | ||
120 | |||
121 | #define __inv_dflush_prologue __dflush_prologue | ||
122 | #define __inv_dflush_epilogue __dflush_epilogue | ||
123 | #define __sflush_prologue { | ||
124 | #define __sflush_epilogue } | ||
125 | #define __inv_sflush_prologue __sflush_prologue | ||
126 | #define __inv_sflush_epilogue __sflush_epilogue | ||
127 | |||
128 | #else /* CONFIG_MIPS_MT */ | ||
129 | |||
130 | #define __iflush_prologue { | ||
131 | #define __iflush_epilogue } | ||
132 | #define __dflush_prologue { | ||
133 | #define __dflush_epilogue } | ||
134 | #define __inv_dflush_prologue { | ||
135 | #define __inv_dflush_epilogue } | ||
136 | #define __sflush_prologue { | ||
137 | #define __sflush_epilogue } | ||
138 | #define __inv_sflush_prologue { | ||
139 | #define __inv_sflush_epilogue } | ||
140 | |||
141 | #endif /* CONFIG_MIPS_MT */ | ||
142 | |||
42 | static inline void flush_icache_line_indexed(unsigned long addr) | 143 | static inline void flush_icache_line_indexed(unsigned long addr) |
43 | { | 144 | { |
145 | __iflush_prologue | ||
44 | cache_op(Index_Invalidate_I, addr); | 146 | cache_op(Index_Invalidate_I, addr); |
147 | __iflush_epilogue | ||
45 | } | 148 | } |
46 | 149 | ||
47 | static inline void flush_dcache_line_indexed(unsigned long addr) | 150 | static inline void flush_dcache_line_indexed(unsigned long addr) |
48 | { | 151 | { |
152 | __dflush_prologue | ||
49 | cache_op(Index_Writeback_Inv_D, addr); | 153 | cache_op(Index_Writeback_Inv_D, addr); |
154 | __dflush_epilogue | ||
50 | } | 155 | } |
51 | 156 | ||
52 | static inline void flush_scache_line_indexed(unsigned long addr) | 157 | static inline void flush_scache_line_indexed(unsigned long addr) |
@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr) | |||
56 | 161 | ||
57 | static inline void flush_icache_line(unsigned long addr) | 162 | static inline void flush_icache_line(unsigned long addr) |
58 | { | 163 | { |
164 | __iflush_prologue | ||
59 | cache_op(Hit_Invalidate_I, addr); | 165 | cache_op(Hit_Invalidate_I, addr); |
166 | __iflush_epilogue | ||
60 | } | 167 | } |
61 | 168 | ||
62 | static inline void flush_dcache_line(unsigned long addr) | 169 | static inline void flush_dcache_line(unsigned long addr) |
63 | { | 170 | { |
171 | __dflush_prologue | ||
64 | cache_op(Hit_Writeback_Inv_D, addr); | 172 | cache_op(Hit_Writeback_Inv_D, addr); |
173 | __dflush_epilogue | ||
65 | } | 174 | } |
66 | 175 | ||
67 | static inline void invalidate_dcache_line(unsigned long addr) | 176 | static inline void invalidate_dcache_line(unsigned long addr) |
68 | { | 177 | { |
178 | __dflush_prologue | ||
69 | cache_op(Hit_Invalidate_D, addr); | 179 | cache_op(Hit_Invalidate_D, addr); |
180 | __dflush_epilogue | ||
70 | } | 181 | } |
71 | 182 | ||
72 | static inline void invalidate_scache_line(unsigned long addr) | 183 | static inline void invalidate_scache_line(unsigned long addr) |
@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void) \ | |||
239 | current_cpu_data.desc.waybit; \ | 350 | current_cpu_data.desc.waybit; \ |
240 | unsigned long ws, addr; \ | 351 | unsigned long ws, addr; \ |
241 | \ | 352 | \ |
353 | __##pfx##flush_prologue \ | ||
354 | \ | ||
242 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 355 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
243 | for (addr = start; addr < end; addr += lsize * 32) \ | 356 | for (addr = start; addr < end; addr += lsize * 32) \ |
244 | cache##lsize##_unroll32(addr|ws,indexop); \ | 357 | cache##lsize##_unroll32(addr|ws,indexop); \ |
358 | \ | ||
359 | __##pfx##flush_epilogue \ | ||
245 | } \ | 360 | } \ |
246 | \ | 361 | \ |
247 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | 362 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ |
@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | |||
249 | unsigned long start = page; \ | 364 | unsigned long start = page; \ |
250 | unsigned long end = page + PAGE_SIZE; \ | 365 | unsigned long end = page + PAGE_SIZE; \ |
251 | \ | 366 | \ |
367 | __##pfx##flush_prologue \ | ||
368 | \ | ||
252 | do { \ | 369 | do { \ |
253 | cache##lsize##_unroll32(start,hitop); \ | 370 | cache##lsize##_unroll32(start,hitop); \ |
254 | start += lsize * 32; \ | 371 | start += lsize * 32; \ |
255 | } while (start < end); \ | 372 | } while (start < end); \ |
373 | \ | ||
374 | __##pfx##flush_epilogue \ | ||
256 | } \ | 375 | } \ |
257 | \ | 376 | \ |
258 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ | 377 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ |
@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) | |||
265 | current_cpu_data.desc.waybit; \ | 384 | current_cpu_data.desc.waybit; \ |
266 | unsigned long ws, addr; \ | 385 | unsigned long ws, addr; \ |
267 | \ | 386 | \ |
387 | __##pfx##flush_prologue \ | ||
388 | \ | ||
268 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 389 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
269 | for (addr = start; addr < end; addr += lsize * 32) \ | 390 | for (addr = start; addr < end; addr += lsize * 32) \ |
270 | cache##lsize##_unroll32(addr|ws,indexop); \ | 391 | cache##lsize##_unroll32(addr|ws,indexop); \ |
392 | \ | ||
393 | __##pfx##flush_epilogue \ | ||
271 | } | 394 | } |
272 | 395 | ||
273 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) | 396 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) |
@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ | |||
288 | unsigned long lsize = cpu_##desc##_line_size(); \ | 411 | unsigned long lsize = cpu_##desc##_line_size(); \ |
289 | unsigned long addr = start & ~(lsize - 1); \ | 412 | unsigned long addr = start & ~(lsize - 1); \ |
290 | unsigned long aend = (end - 1) & ~(lsize - 1); \ | 413 | unsigned long aend = (end - 1) & ~(lsize - 1); \ |
414 | \ | ||
415 | __##pfx##flush_prologue \ | ||
416 | \ | ||
291 | while (1) { \ | 417 | while (1) { \ |
292 | prot##cache_op(hitop, addr); \ | 418 | prot##cache_op(hitop, addr); \ |
293 | if (addr == aend) \ | 419 | if (addr == aend) \ |
294 | break; \ | 420 | break; \ |
295 | addr += lsize; \ | 421 | addr += lsize; \ |
296 | } \ | 422 | } \ |
423 | \ | ||
424 | __##pfx##flush_epilogue \ | ||
297 | } | 425 | } |
298 | 426 | ||
299 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) | 427 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) |
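
Under CONFIG_MIPS_MT every cache-flush primitive is wrapped in the prologue/epilogue scaffolding above: the operation is repeated mt_n_dflushes (or mt_n_iflushes) times and, when the mt_prot*flush knobs are set, runs with the other thread contexts quiesced. Expanded by hand, flush_dcache_line() comes out roughly as below (a sketch; BEGIN/END_MT_DPROT left unexpanded, and note the epilogue supplies the brace that closes the for loop opened by the prologue):

    static inline void flush_dcache_line(unsigned long addr)
    {
            unsigned long redundance;
            extern int mt_n_dflushes;

            BEGIN_MT_DPROT                  /* from __dflush_prologue */
            for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
                    cache_op(Hit_Writeback_Inv_D, addr);
                    END_MT_DPROT            /* from __dflush_epilogue */
            }
    }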
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
new file mode 100644
index 000000000000..e1941d1b8726
--- /dev/null
+++ b/include/asm-mips/smtc.h
@@ -0,0 +1,55 @@ | |||
1 | #ifndef _ASM_SMTC_MT_H | ||
2 | #define _ASM_SMTC_MT_H | ||
3 | |||
4 | /* | ||
5 | * Definitions for SMTC multitasking on MIPS MT cores | ||
6 | */ | ||
7 | |||
8 | #include <asm/mips_mt.h> | ||
9 | |||
10 | /* | ||
11 | * System-wide SMTC status information | ||
12 | */ | ||
13 | |||
14 | extern unsigned int smtc_status; | ||
15 | |||
16 | #define SMTC_TLB_SHARED 0x00000001 | ||
17 | #define SMTC_MTC_ACTIVE 0x00000002 | ||
18 | |||
19 | /* | ||
20 | * TLB/ASID Management information | ||
21 | */ | ||
22 | |||
23 | #define MAX_SMTC_TLBS 2 | ||
24 | #define MAX_SMTC_ASIDS 256 | ||
25 | #if NR_CPUS <= 8 | ||
26 | typedef char asiduse; | ||
27 | #else | ||
28 | #if NR_CPUS <= 16 | ||
29 | typedef short asiduse; | ||
30 | #else | ||
31 | typedef long asiduse; | ||
32 | #endif | ||
33 | #endif | ||
34 | |||
35 | extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
36 | |||
37 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | ||
38 | |||
39 | void smtc_flush_tlb_asid(unsigned long asid); | ||
40 | extern int mipsmt_build_cpu_map(int startslot); | ||
41 | extern void mipsmt_prepare_cpus(void); | ||
42 | extern void smtc_smp_finish(void); | ||
43 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | ||
44 | |||
45 | /* | ||
46 | * Sharing the TLB between multiple VPEs means that the | ||
47 | * "random" index selection function is not allowed to | ||
48 | * select the current value of the Index register. To | ||
49 | * avoid additional TLB pressure, the Index registers | ||
50 | * are "parked" with an non-Valid value. | ||
51 | */ | ||
52 | |||
53 | #define PARKED_INDEX ((unsigned int)0x80000000) | ||
54 | |||
55 | #endif /* _ASM_SMTC_MT_H */ | ||
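
The asiduse typedef is sized so that one bit per possible CPU fits in a single integer: char for NR_CPUS <= 8, short for <= 16, long beyond that. A sketch of how an entry is read as a per-CPU bitmask (illustrative helper, not in the header):

    /* True if any CPU other than 'cpu' still holds 'asid' on TLB 'tlb'. */
    static inline int asid_still_shared(int tlb, int asid, int cpu)
    {
            return (smtc_live_asid[tlb][asid] & ~(1UL << cpu)) != 0;
    }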
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
new file mode 100644
index 000000000000..f22c3e2f993a
--- /dev/null
+++ b/include/asm-mips/smtc_ipi.h
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. | ||
3 | */ | ||
4 | #ifndef __ASM_SMTC_IPI_H | ||
5 | #define __ASM_SMTC_IPI_H | ||
6 | |||
7 | //#define SMTC_IPI_DEBUG | ||
8 | |||
9 | #ifdef SMTC_IPI_DEBUG | ||
10 | #include <asm/mipsregs.h> | ||
11 | #include <asm/mipsmtregs.h> | ||
12 | #endif /* SMTC_IPI_DEBUG */ | ||
13 | |||
14 | /* | ||
15 | * An IPI "message" | ||
16 | */ | ||
17 | |||
18 | struct smtc_ipi { | ||
19 | struct smtc_ipi *flink; | ||
20 | int type; | ||
21 | void *arg; | ||
22 | int dest; | ||
23 | #ifdef SMTC_IPI_DEBUG | ||
24 | int sender; | ||
25 | long stamp; | ||
26 | #endif /* SMTC_IPI_DEBUG */ | ||
27 | }; | ||
28 | |||
29 | /* | ||
30 | * Defined IPI Types | ||
31 | */ | ||
32 | |||
33 | #define LINUX_SMP_IPI 1 | ||
34 | #define SMTC_CLOCK_TICK 2 | ||
35 | |||
36 | /* | ||
37 | * A queue of IPI messages | ||
38 | */ | ||
39 | |||
40 | struct smtc_ipi_q { | ||
41 | struct smtc_ipi *head; | ||
42 | spinlock_t lock; | ||
43 | struct smtc_ipi *tail; | ||
44 | int depth; | ||
45 | }; | ||
46 | |||
47 | extern struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
48 | extern struct smtc_ipi_q freeIPIq; | ||
49 | |||
50 | static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
51 | { | ||
52 | long flags; | ||
53 | |||
54 | spin_lock_irqsave(&q->lock, flags); | ||
55 | if (q->head == NULL) | ||
56 | q->head = q->tail = p; | ||
57 | else | ||
58 | q->tail->flink = p; | ||
59 | p->flink = NULL; | ||
60 | q->tail = p; | ||
61 | q->depth++; | ||
62 | #ifdef SMTC_IPI_DEBUG | ||
63 | p->sender = read_c0_tcbind(); | ||
64 | p->stamp = read_c0_count(); | ||
65 | #endif /* SMTC_IPI_DEBUG */ | ||
66 | spin_unlock_irqrestore(&q->lock, flags); | ||
67 | } | ||
68 | |||
69 | static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) | ||
70 | { | ||
71 | struct smtc_ipi *p; | ||
72 | long flags; | ||
73 | |||
74 | spin_lock_irqsave(&q->lock, flags); | ||
75 | if (q->head == NULL) | ||
76 | p = NULL; | ||
77 | else { | ||
78 | p = q->head; | ||
79 | q->head = q->head->flink; | ||
80 | q->depth--; | ||
81 | /* Arguably unnecessary, but leaves queue cleaner */ | ||
82 | if (q->head == NULL) | ||
83 | q->tail = NULL; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&q->lock, flags); | ||
86 | return p; | ||
87 | } | ||
88 | |||
89 | static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
90 | { | ||
91 | long flags; | ||
92 | |||
93 | spin_lock_irqsave(&q->lock, flags); | ||
94 | if (q->head == NULL) { | ||
95 | q->head = q->tail = p; | ||
96 | p->flink = NULL; | ||
97 | } else { | ||
98 | p->flink = q->head; | ||
99 | q->head = p; | ||
100 | } | ||
101 | q->depth++; | ||
102 | spin_unlock_irqrestore(&q->lock, flags); | ||
103 | } | ||
104 | |||
105 | static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) | ||
106 | { | ||
107 | long flags; | ||
108 | int retval; | ||
109 | |||
110 | spin_lock_irqsave(&q->lock, flags); | ||
111 | retval = q->depth; | ||
112 | spin_unlock_irqrestore(&q->lock, flags); | ||
113 | return retval; | ||
114 | } | ||
115 | |||
116 | extern void smtc_send_ipi(int cpu, int type, unsigned int action); | ||
117 | |||
118 | #endif /* __ASM_SMTC_IPI_H */ | ||
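
Putting the primitives together, the expected producer pattern is: dequeue a free descriptor from freeIPIq, fill it in, and enqueue it on the target CPU's IPIQ entry. A usage sketch (error handling elided; the allocation fallback and the final "poke" of the target TC are assumptions about the .c side, which this header does not show):

    static void send_example_ipi(int cpu, void *arg)
    {
            struct smtc_ipi *p = smtc_ipi_dq(&freeIPIq);

            if (p == NULL)
                    return;         /* a real sender would allocate or complain */

            p->type = LINUX_SMP_IPI;
            p->arg = arg;
            p->dest = cpu;
            smtc_ipi_nq(&IPIQ[cpu], p);
            /* ...then interrupt the target TC so it drains IPIQ[cpu]. */
    }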
diff --git a/include/asm-mips/smtc_proc.h b/include/asm-mips/smtc_proc.h
new file mode 100644
index 000000000000..25da651f1f5f
--- /dev/null
+++ b/include/asm-mips/smtc_proc.h
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Definitions for SMTC /proc entries | ||
3 | * Copyright(C) 2005 MIPS Technologies Inc. | ||
4 | */ | ||
5 | #ifndef __ASM_SMTC_PROC_H | ||
6 | #define __ASM_SMTC_PROC_H | ||
7 | |||
8 | /* | ||
9 | * per-"CPU" statistics | ||
10 | */ | ||
11 | |||
12 | struct smtc_cpu_proc { | ||
13 | unsigned long timerints; | ||
14 | unsigned long selfipis; | ||
15 | }; | ||
16 | |||
17 | extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
18 | |||
19 | /* Count of number of recoveries of "stolen" FPU access rights on 34K */ | ||
20 | |||
21 | extern atomic_t smtc_fpu_recoveries; | ||
22 | |||
23 | #endif /* __ASM_SMTC_PROC_H */ | ||
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 2acf3e844f00..c4856a874965 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -14,9 +14,14 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | 15 | ||
16 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
17 | #include <asm/asmmacro.h> | ||
17 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
18 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
19 | 20 | ||
21 | #ifdef CONFIG_MIPS_MT_SMTC | ||
22 | #include <asm/mipsmtregs.h> | ||
23 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
24 | |||
20 | .macro SAVE_AT | 25 | .macro SAVE_AT |
21 | .set push | 26 | .set push |
22 | .set noat | 27 | .set noat |
@@ -57,13 +62,30 @@ | |||
57 | #ifdef CONFIG_SMP | 62 | #ifdef CONFIG_SMP |
58 | .macro get_saved_sp /* SMP variation */ | 63 | .macro get_saved_sp /* SMP variation */ |
59 | #ifdef CONFIG_32BIT | 64 | #ifdef CONFIG_32BIT |
65 | #ifdef CONFIG_MIPS_MT_SMTC | ||
66 | .set mips32 | ||
67 | mfc0 k0, CP0_TCBIND | ||
68 | .set mips0 | ||
69 | lui k1, %hi(kernelsp) | ||
70 | srl k0, k0, 19 | ||
71 | /* No need to shift down and up to clear bits 0-1 */ | ||
72 | #else | ||
60 | mfc0 k0, CP0_CONTEXT | 73 | mfc0 k0, CP0_CONTEXT |
61 | lui k1, %hi(kernelsp) | 74 | lui k1, %hi(kernelsp) |
62 | srl k0, k0, 23 | 75 | srl k0, k0, 23 |
76 | #endif | ||
63 | addu k1, k0 | 77 | addu k1, k0 |
64 | LONG_L k1, %lo(kernelsp)(k1) | 78 | LONG_L k1, %lo(kernelsp)(k1) |
65 | #endif | 79 | #endif |
66 | #ifdef CONFIG_64BIT | 80 | #ifdef CONFIG_64BIT |
81 | #ifdef CONFIG_MIPS_MT_SMTC | ||
82 | .set mips64 | ||
83 | mfc0 k1, CP0_TCBIND | ||
84 | .set mips0 | ||
85 | lui k0, %highest(kernelsp) | ||
86 | dsrl k1, 19 | ||
87 | /* No need to shift down and up to clear bits 0-2 */ | ||
88 | #else | ||
67 | MFC0 k1, CP0_CONTEXT | 89 | MFC0 k1, CP0_CONTEXT |
68 | lui k0, %highest(kernelsp) | 90 | lui k0, %highest(kernelsp) |
69 | dsrl k1, 23 | 91 | dsrl k1, 23 |
@@ -71,20 +93,31 @@ | |||
71 | dsll k0, k0, 16 | 93 | dsll k0, k0, 16 |
72 | daddiu k0, %hi(kernelsp) | 94 | daddiu k0, %hi(kernelsp) |
73 | dsll k0, k0, 16 | 95 | dsll k0, k0, 16 |
96 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
74 | daddu k1, k1, k0 | 97 | daddu k1, k1, k0 |
75 | LONG_L k1, %lo(kernelsp)(k1) | 98 | LONG_L k1, %lo(kernelsp)(k1) |
76 | #endif | 99 | #endif /* CONFIG_64BIT */ |
77 | .endm | 100 | .endm |
78 | 101 | ||
79 | .macro set_saved_sp stackp temp temp2 | 102 | .macro set_saved_sp stackp temp temp2 |
80 | #ifdef CONFIG_32BIT | 103 | #ifdef CONFIG_32BIT |
104 | #ifdef CONFIG_MIPS_MT_SMTC | ||
105 | mfc0 \temp, CP0_TCBIND | ||
106 | srl \temp, 19 | ||
107 | #else | ||
81 | mfc0 \temp, CP0_CONTEXT | 108 | mfc0 \temp, CP0_CONTEXT |
82 | srl \temp, 23 | 109 | srl \temp, 23 |
83 | #endif | 110 | #endif |
111 | #endif | ||
84 | #ifdef CONFIG_64BIT | 112 | #ifdef CONFIG_64BIT |
113 | #ifdef CONFIG_MIPS_MT_SMTC | ||
114 | mfc0 \temp, CP0_TCBIND | ||
115 | dsrl \temp, 19 | ||
116 | #else | ||
85 | MFC0 \temp, CP0_CONTEXT | 117 | MFC0 \temp, CP0_CONTEXT |
86 | dsrl \temp, 23 | 118 | dsrl \temp, 23 |
87 | #endif | 119 | #endif |
120 | #endif | ||
88 | LONG_S \stackp, kernelsp(\temp) | 121 | LONG_S \stackp, kernelsp(\temp) |
89 | .endm | 122 | .endm |
90 | #else | 123 | #else |
@@ -122,10 +155,25 @@ | |||
122 | PTR_SUBU sp, k1, PT_SIZE | 155 | PTR_SUBU sp, k1, PT_SIZE |
123 | LONG_S k0, PT_R29(sp) | 156 | LONG_S k0, PT_R29(sp) |
124 | LONG_S $3, PT_R3(sp) | 157 | LONG_S $3, PT_R3(sp) |
158 | /* | ||
159 | * You might think that you don't need to save $0, | ||
160 | * but the FPU emulator and gdb remote debug stub | ||
161 | * need it to operate correctly | ||
162 | */ | ||
125 | LONG_S $0, PT_R0(sp) | 163 | LONG_S $0, PT_R0(sp) |
126 | mfc0 v1, CP0_STATUS | 164 | mfc0 v1, CP0_STATUS |
127 | LONG_S $2, PT_R2(sp) | 165 | LONG_S $2, PT_R2(sp) |
128 | LONG_S v1, PT_STATUS(sp) | 166 | LONG_S v1, PT_STATUS(sp) |
167 | #ifdef CONFIG_MIPS_MT_SMTC | ||
168 | /* | ||
169 | * Ideally, these instructions would be shuffled in | ||
170 | * to cover the pipeline delay. | ||
171 | */ | ||
172 | .set mips32 | ||
173 | mfc0 v1, CP0_TCSTATUS | ||
174 | .set mips0 | ||
175 | LONG_S v1, PT_TCSTATUS(sp) | ||
176 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
129 | LONG_S $4, PT_R4(sp) | 177 | LONG_S $4, PT_R4(sp) |
130 | mfc0 v1, CP0_CAUSE | 178 | mfc0 v1, CP0_CAUSE |
131 | LONG_S $5, PT_R5(sp) | 179 | LONG_S $5, PT_R5(sp) |
@@ -234,14 +282,36 @@ | |||
234 | .endm | 282 | .endm |
235 | 283 | ||
236 | #else | 284 | #else |
285 | /* | ||
286 | * For SMTC kernel, global IE should be left set, and interrupts | ||
287 | * controlled exclusively via IXMT. | ||
288 | */ | ||
237 | 289 | ||
290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
291 | #define STATMASK 0x1e | ||
292 | #else | ||
293 | #define STATMASK 0x1f | ||
294 | #endif | ||
238 | .macro RESTORE_SOME | 295 | .macro RESTORE_SOME |
239 | .set push | 296 | .set push |
240 | .set reorder | 297 | .set reorder |
241 | .set noat | 298 | .set noat |
299 | #ifdef CONFIG_MIPS_MT_SMTC | ||
300 | .set mips32r2 | ||
301 | /* | ||
302 | * This may not really be necessary if ints are already | ||
303 | * inhibited here. | ||
304 | */ | ||
305 | mfc0 v0, CP0_TCSTATUS | ||
306 | ori v0, TCSTATUS_IXMT | ||
307 | mtc0 v0, CP0_TCSTATUS | ||
308 | ehb | ||
309 | DMT 5 # dmt a1 | ||
310 | jal mips_ihb | ||
311 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
242 | mfc0 a0, CP0_STATUS | 312 | mfc0 a0, CP0_STATUS |
243 | ori a0, 0x1f | 313 | ori a0, STATMASK |
244 | xori a0, 0x1f | 314 | xori a0, STATMASK |
245 | mtc0 a0, CP0_STATUS | 315 | mtc0 a0, CP0_STATUS |
246 | li v1, 0xff00 | 316 | li v1, 0xff00 |
247 | and a0, v1 | 317 | and a0, v1 |
@@ -250,6 +320,26 @@ | |||
250 | and v0, v1 | 320 | and v0, v1 |
251 | or v0, a0 | 321 | or v0, a0 |
252 | mtc0 v0, CP0_STATUS | 322 | mtc0 v0, CP0_STATUS |
323 | #ifdef CONFIG_MIPS_MT_SMTC | ||
324 | /* | ||
325 | * Only after EXL/ERL have been restored to status can we | ||
326 | * restore TCStatus.IXMT. | ||
327 | */ | ||
328 | LONG_L v1, PT_TCSTATUS(sp) | ||
329 | ehb | ||
330 | mfc0 v0, CP0_TCSTATUS | ||
331 | andi v1, TCSTATUS_IXMT | ||
332 | /* We know that TCStatus.IXMT should be set from above */ | ||
333 | xori v0, v0, TCSTATUS_IXMT | ||
334 | or v0, v0, v1 | ||
335 | mtc0 v0, CP0_TCSTATUS | ||
336 | ehb | ||
337 | andi a1, a1, VPECONTROL_TE | ||
338 | beqz a1, 1f | ||
339 | emt | ||
340 | 1: | ||
341 | .set mips0 | ||
342 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
253 | LONG_L v1, PT_EPC(sp) | 343 | LONG_L v1, PT_EPC(sp) |
254 | MTC0 v1, CP0_EPC | 344 | MTC0 v1, CP0_EPC |
255 | LONG_L $31, PT_R31(sp) | 345 | LONG_L $31, PT_R31(sp) |
@@ -302,11 +392,33 @@ | |||
302 | * Set cp0 enable bit as sign that we're running on the kernel stack | 392 | * Set cp0 enable bit as sign that we're running on the kernel stack |
303 | */ | 393 | */ |
304 | .macro CLI | 394 | .macro CLI |
395 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
305 | mfc0 t0, CP0_STATUS | 396 | mfc0 t0, CP0_STATUS |
306 | li t1, ST0_CU0 | 0x1f | 397 | li t1, ST0_CU0 | 0x1f |
307 | or t0, t1 | 398 | or t0, t1 |
308 | xori t0, 0x1f | 399 | xori t0, 0x1f |
309 | mtc0 t0, CP0_STATUS | 400 | mtc0 t0, CP0_STATUS |
401 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
402 | /* | ||
403 | * For SMTC, we need to set privilege | ||
404 | * and disable interrupts only for the | ||
405 | * current TC, using the TCStatus register. | ||
406 | */ | ||
407 | mfc0 t0,CP0_TCSTATUS | ||
408 | /* Fortunately CU 0 is in the same place in both registers */ | ||
409 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
410 | li t1, ST0_CU0 | 0x08001c00 | ||
411 | or t0,t1 | ||
412 | /* Clear TKSU, leave IXMT */ | ||
413 | xori t0, 0x00001800 | ||
414 | mtc0 t0, CP0_TCSTATUS | ||
415 | ehb | ||
416 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
417 | mfc0 t0, CP0_STATUS | ||
418 | ori t0, ST0_EXL | ST0_ERL | ||
419 | xori t0, ST0_EXL | ST0_ERL | ||
420 | mtc0 t0, CP0_STATUS | ||
421 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
310 | irq_disable_hazard | 422 | irq_disable_hazard |
311 | .endm | 423 | .endm |
312 | 424 | ||
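
The SMTC CLI above is the per-TC analogue of the classic Status update: set TCU0 plus the TMX, TKSU and IXMT fields of TCStatus, then xor TKSU back out, leaving kernel mode with interrupts masked for this TC only. As arithmetic on the bit patterns taken from the macro (a C sketch, not part of the patch):

    #include <stdint.h>

    #define TCSTATUS_TCU0 0x10000000u  /* same position as Status.CU0 */
    #define TCSTATUS_TMX  0x08000000u
    #define TCSTATUS_TKSU 0x00001800u
    #define TCSTATUS_IXMT 0x00000400u

    static inline uint32_t smtc_cli(uint32_t tcstatus)
    {
            tcstatus |= TCSTATUS_TCU0 | 0x08001c00u; /* TMX | TKSU | IXMT   */
            tcstatus ^= TCSTATUS_TKSU;               /* clear TKSU: kernel  */
            return tcstatus;                         /* IXMT stays set      */
    }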
@@ -315,11 +427,35 @@ | |||
315 | * Set cp0 enable bit as sign that we're running on the kernel stack | 427 | * Set cp0 enable bit as sign that we're running on the kernel stack |
316 | */ | 428 | */ |
317 | .macro STI | 429 | .macro STI |
430 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
318 | mfc0 t0, CP0_STATUS | 431 | mfc0 t0, CP0_STATUS |
319 | li t1, ST0_CU0 | 0x1f | 432 | li t1, ST0_CU0 | 0x1f |
320 | or t0, t1 | 433 | or t0, t1 |
321 | xori t0, 0x1e | 434 | xori t0, 0x1e |
322 | mtc0 t0, CP0_STATUS | 435 | mtc0 t0, CP0_STATUS |
436 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
437 | /* | ||
438 | * For SMTC, we need to set privilege | ||
439 | * and enable interrupts only for the | ||
440 | * current TC, using the TCStatus register. | ||
441 | */ | ||
442 | ehb | ||
443 | mfc0 t0,CP0_TCSTATUS | ||
444 | /* Fortunately CU 0 is in the same place in both registers */ | ||
445 | /* Set TCU0, TKSU (for later inversion) and IXMT */ | ||
446 | li t1, ST0_CU0 | 0x08001c00 | ||
447 | or t0,t1 | ||
448 | /* Clear TKSU *and* IXMT */ | ||
449 | xori t0, 0x00001c00 | ||
450 | mtc0 t0, CP0_TCSTATUS | ||
451 | ehb | ||
452 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
453 | mfc0 t0, CP0_STATUS | ||
454 | ori t0, ST0_EXL | ||
455 | xori t0, ST0_EXL | ||
456 | mtc0 t0, CP0_STATUS | ||
457 | /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ | ||
458 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
323 | irq_enable_hazard | 459 | irq_enable_hazard |
324 | .endm | 460 | .endm |
325 | 461 | ||
@@ -328,11 +464,56 @@ | |||
328 | * Set cp0 enable bit as sign that we're running on the kernel stack | 464 | * Set cp0 enable bit as sign that we're running on the kernel stack |
329 | */ | 465 | */ |
330 | .macro KMODE | 466 | .macro KMODE |
467 | #ifdef CONFIG_MIPS_MT_SMTC | ||
468 | /* | ||
469 | * This gets baroque in SMTC. We want to | ||
470 | * protect the non-atomic clearing of EXL | ||
471 | * with DMT/EMT, but we don't want to take | ||
472 | * an interrupt while DMT is still in effect. | ||
473 | */ | ||
474 | |||
475 | /* KMODE gets invoked from both reorder and noreorder code */ | ||
476 | .set push | ||
477 | .set mips32r2 | ||
478 | .set noreorder | ||
479 | mfc0 v0, CP0_TCSTATUS | ||
480 | andi v1, v0, TCSTATUS_IXMT | ||
481 | ori v0, TCSTATUS_IXMT | ||
482 | mtc0 v0, CP0_TCSTATUS | ||
483 | ehb | ||
484 | DMT 2 # dmt v0 | ||
485 | /* | ||
486 | * We don't know a priori if ra is "live" | ||
487 | */ | ||
488 | move t0, ra | ||
489 | jal mips_ihb | ||
490 | nop /* delay slot */ | ||
491 | move ra, t0 | ||
492 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
331 | mfc0 t0, CP0_STATUS | 493 | mfc0 t0, CP0_STATUS |
332 | li t1, ST0_CU0 | 0x1e | 494 | li t1, ST0_CU0 | 0x1e |
333 | or t0, t1 | 495 | or t0, t1 |
334 | xori t0, 0x1e | 496 | xori t0, 0x1e |
335 | mtc0 t0, CP0_STATUS | 497 | mtc0 t0, CP0_STATUS |
498 | #ifdef CONFIG_MIPS_MT_SMTC | ||
499 | ehb | ||
500 | andi v0, v0, VPECONTROL_TE | ||
501 | beqz v0, 2f | ||
502 | nop /* delay slot */ | ||
503 | emt | ||
504 | 2: | ||
505 | mfc0 v0, CP0_TCSTATUS | ||
506 | /* Clear IXMT, then OR in previous value */ | ||
507 | ori v0, TCSTATUS_IXMT | ||
508 | xori v0, TCSTATUS_IXMT | ||
509 | or v0, v1, v0 | ||
510 | mtc0 v0, CP0_TCSTATUS | ||
511 | /* | ||
512 | * irq_disable_hazard below should expand to EHB | ||
513 | * on 24K/34K CPUS | ||
514 | */ | ||
515 | .set pop | ||
516 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
336 | irq_disable_hazard | 517 | irq_disable_hazard |
337 | .endm | 518 | .endm |
338 | 519 | ||