author     Ralf Baechle <ralf@linux-mips.org>  2006-04-05 04:45:45 -0400
committer  Ralf Baechle <ralf@linux-mips.org>  2006-04-18 22:14:28 -0400
commit     41c594ab65fc89573af296d192aa5235d09717ab (patch)
tree       562462512a320f386bdf49eabfbb26bb3ee761fa /include/asm-mips/mmu_context.h
parent     2600990e640e3bef29ed89d565864cf16ee83833 (diff)
[MIPS] MT: Improved multithreading support.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include/asm-mips/mmu_context.h')
-rw-r--r--  include/asm-mips/mmu_context.h  112
1 file changed, 109 insertions(+), 3 deletions(-)
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 61cf22588137..6e09f4c87211 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -17,6 +17,10 @@
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
+#endif /* SMTC */
 
 /*
  * For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[];
 #define ASID_INC	0x1
 #define ASID_MASK	0xfff
 
+/* SMTC/34K debug hack - but maybe we'll keep it */
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC	0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK	(smtc_asid_mask)
+#define HW_ASID_MASK	0xff
+/* End SMTC/34K debug hack */
 #else /* FIXME: not correct for R6000 */
 
 #define ASID_INC	0x1
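Note: the SMTC branch deliberately allows the software ASID mask (smtc_asid_mask) to be narrower than the 8-bit field the hardware actually implements (HW_ASID_MASK), and the EntryHi updates later in this patch therefore clear the full hardware field before OR-ing in the soft bits. A minimal standalone model of that masking rule, in plain user-space C; the EntryHi and context values below are invented for illustration, and smtc_asid_mask is given an arbitrary narrow value:

#include <stdio.h>

#define HW_ASID_MASK	0xff
static unsigned long smtc_asid_mask = 0xf;	/* illustrative, narrower than 0xff */
#define ASID_MASK	(smtc_asid_mask)

int main(void)
{
	unsigned long entryhi = 0x2000ab;	/* made-up current EntryHi value */
	unsigned long next_ctx = 0x100007;	/* made-up cpu_context(cpu, next) */

	/* Clear the whole hardware ASID field, then install only the
	 * soft bits, so no "soft" bit ever becomes "hard". */
	entryhi = (entryhi & ~HW_ASID_MASK) | (next_ctx & ASID_MASK);
	printf("new EntryHi: %#lx\n", entryhi);	/* prints 0x200007 */
	return 0;
}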
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
+#ifndef CONFIG_MIPS_MT_SMTC
+/* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
+#else /* CONFIG_MIPS_MT_SMTC */
+
+#define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu))
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
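Note: the classic get_new_mmu_context() that the new #ifndef guards (only its last line is visible in this hunk) allocates ASIDs from a per-CPU counter whose high bits act as a generation ("version") number; when the low ASID field wraps, the kernel flushes the TLB and starts a new version so stale cpu_context() values can be detected. A simplified user-space sketch of that generation scheme, reusing the mask macros above; this is a model of the idea, not the kernel function:

#include <stdio.h>

#define ASID_INC	0x1
#define ASID_MASK	0xfff
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

static unsigned long asid_cache;	/* per-CPU in the kernel */

static unsigned long get_new_asid(void)
{
	unsigned long asid = asid_cache;

	if (!((asid += ASID_INC) & ASID_MASK)) {
		/* ASID field wrapped: the kernel flushes the TLB at
		 * this point before handing out the new generation. */
		if (!asid)	/* counter itself wrapped: restart versions */
			asid = ASID_FIRST_VERSION;
	}
	return asid_cache = asid;
}

int main(void)
{
	asid_cache = ASID_FIRST_VERSION + ASID_MASK - 1;  /* near rollover */
	printf("%#lx\n", get_new_asid());  /* 0x1fff: last ASID of version 1 */
	printf("%#lx\n", get_new_asid());  /* 0x2000: wraps into version 2 */
	return 0;
}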
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
-
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long oldasid;
+	unsigned long mtflags;
+	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
 	local_irq_save(flags);
+	mtflags = dvpe();
+#else /* Not SMTC */
+	local_irq_save(flags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
-
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If the EntryHi ASID being replaced happens to be
+	 * the value flagged at ASID recycling time as having
+	 * an extended life, clear the bit showing it being
+	 * in use by this "CPU", and if that's the last bit,
+	 * free up the ASID value for use and flush any old
+	 * instances of it from the TLB.
+	 */
+	oldasid = (read_c0_entryhi() & ASID_MASK);
+	if(smtc_live_asid[mytlb][oldasid]) {
+		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+		if(smtc_live_asid[mytlb][oldasid] == 0)
+			smtc_flush_tlb_asid(oldasid);
+	}
+	/*
+	 * Tread softly on EntryHi, and so long as we support
+	 * having ASID_MASK smaller than the hardware maximum,
+	 * make sure no "soft" bits become "hard"...
+	 */
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+			| (cpu_context(cpu, next) & ASID_MASK));
+	ehb(); /* Make sure it propagates to TCStatus */
+	evpe(mtflags);
+#else
 	write_c0_entryhi(cpu_context(cpu, next));
+#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/*
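Note: smtc_live_asid[][] behaves like a reference count kept as a bitmask: each bit records one virtual CPU still holding the ASID live in the (possibly shared) TLB, and the last CPU to drop its bit triggers the flush. A standalone C model of that bookkeeping; the array sizes and the printf standing in for the flush are illustrative, not the kernel's:

#include <stdio.h>

#define NR_TLBS		2
#define NR_ASIDS	256

static unsigned long live_asid[NR_TLBS][NR_ASIDS];

static void flush_tlb_asid(unsigned long asid)
{
	printf("flushing stale TLB entries for ASID %#lx\n", asid);
}

static void retire_asid(int mytlb, unsigned long oldasid, int cpu)
{
	if (live_asid[mytlb][oldasid]) {
		live_asid[mytlb][oldasid] &= ~(0x1UL << cpu);
		/* Last holder gone: the ASID may now be recycled, so
		 * purge any old translations still tagged with it. */
		if (live_asid[mytlb][oldasid] == 0)
			flush_tlb_asid(oldasid);
	}
}

int main(void)
{
	live_asid[0][0x5] = (1UL << 0) | (1UL << 1);  /* CPUs 0 and 1 hold it */
	retire_asid(0, 0x5, 0);  /* CPU 1 still live: no flush */
	retire_asid(0, 0x5, 1);  /* last holder drops out: flush happens */
	return 0;
}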
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long oldasid;
+	unsigned long mtflags;
+	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	local_irq_save(flags);
 
 	/* Unconditionally get a new ASID. */
 	get_new_mmu_context(next, cpu);
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* See comments for similar code above */
+	mtflags = dvpe();
+	oldasid = read_c0_entryhi() & ASID_MASK;
+	if(smtc_live_asid[mytlb][oldasid]) {
+		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+		if(smtc_live_asid[mytlb][oldasid] == 0)
+			smtc_flush_tlb_asid(oldasid);
+	}
+	/* See comments for similar code above */
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+			 (cpu_context(cpu, next) & ASID_MASK));
+	ehb(); /* Make sure it propagates to TCStatus */
+	evpe(mtflags);
+#else
 	write_c0_entryhi(cpu_context(cpu, next));
+#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/* mark mmu ownership change */
@@ -174,17 +248,49 @@ static inline void
 drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 {
 	unsigned long flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long oldasid;
+	/* Can't use spinlock because called from TLB flush within DVPE */
+	unsigned int prevvpe;
+	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	local_irq_save(flags);
 
 	if (cpu_isset(cpu, mm->cpu_vm_mask)) {
 		get_new_mmu_context(mm, cpu);
+#ifdef CONFIG_MIPS_MT_SMTC
+		/* See comments for similar code above */
+		prevvpe = dvpe();
+		oldasid = (read_c0_entryhi() & ASID_MASK);
+		if(smtc_live_asid[mytlb][oldasid]) {
+			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+			if(smtc_live_asid[mytlb][oldasid] == 0)
+				smtc_flush_tlb_asid(oldasid);
+		}
+		/* See comments for similar code above */
+		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+				| cpu_asid(cpu, mm));
+		ehb(); /* Make sure it propagates to TCStatus */
+		evpe(prevvpe);
+#else /* not CONFIG_MIPS_MT_SMTC */
 		write_c0_entryhi(cpu_asid(cpu, mm));
+#endif /* CONFIG_MIPS_MT_SMTC */
 	} else {
 		/* will get a new context next time */
+#ifndef CONFIG_MIPS_MT_SMTC
 		cpu_context(cpu, mm) = 0;
+#else /* SMTC */
+		int i;
+
+		/* SMTC shares the TLB (and ASIDs) across VPEs */
+		for (i = 0; i < num_online_cpus(); i++) {
+			if((smtc_status & SMTC_TLB_SHARED)
+			|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
+				cpu_context(i, mm) = 0;
+		}
+#endif /* CONFIG_MIPS_MT_SMTC */
 	}
-
 	local_irq_restore(flags);
 }
 
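Note: the new else-branch in drop_mmu_context() reflects that under SMTC several virtual CPUs can share one TLB, so invalidating a context must zero cpu_context() for every CPU that can see the same translations, not just the caller. A standalone C model of that loop; NR_CPUS, the VPE map, and the context values are illustrative stand-ins:

#include <stdio.h>

#define NR_CPUS	4

static int tlb_shared = 1;	/* models smtc_status & SMTC_TLB_SHARED */
static int vpe_id[NR_CPUS] = { 0, 0, 1, 1 };
static unsigned long context[NR_CPUS];

static void invalidate_context(int cpu)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		/* Zero the context for every CPU that shares the same
		 * TLB: all of them if sharing, else same-VPE siblings. */
		if (tlb_shared || vpe_id[i] == vpe_id[cpu])
			context[i] = 0;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		context[i] = 0x1000 + i;
	invalidate_context(0);
	for (i = 0; i < NR_CPUS; i++)
		printf("cpu%d context %#lx\n", i, context[i]);
	return 0;
}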