Diffstat (limited to 'arch/mips/kernel/smp-mt.c'):

 arch/mips/kernel/smp-mt.c | 156
 1 file changed, 87 insertions(+), 69 deletions(-)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3b5f3b63262..1ee689c0e0c 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -140,15 +140,90 @@ static struct irqaction irq_call = {
 	.name		= "IPI_call"
 };
 
+static void __init smp_copy_vpe_config(void)
+{
+	write_vpe_c0_status(
+		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+	/* set config to be the same as vpe0, particularly kseg0 coherency alg */
+	write_vpe_c0_config(read_c0_config());
+
+	/* make sure there are no software interrupts pending */
+	write_vpe_c0_cause(0);
+
+	/* Propagate Config7 */
+	write_vpe_c0_config7(read_c0_config7());
+
+	write_vpe_c0_count(read_c0_count());
+}
+
+static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+					unsigned int ncpu)
+{
+	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+		return ncpu;
+
+	/* Deactivate all but VPE 0 */
+	if (tc != 0) {
+		unsigned long tmp = read_vpe_c0_vpeconf0();
+
+		tmp &= ~VPECONF0_VPA;
+
+		/* master VPE */
+		tmp |= VPECONF0_MVP;
+		write_vpe_c0_vpeconf0(tmp);
+
+		/* Record this as available CPU */
+		cpu_set(tc, phys_cpu_present_map);
+		__cpu_number_map[tc] = ++ncpu;
+		__cpu_logical_map[ncpu] = tc;
+	}
+
+	/* Disable multi-threading with TC's */
+	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+	if (tc != 0)
+		smp_copy_vpe_config();
+
+	return ncpu;
+}
+
+static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+{
+	unsigned long tmp;
+
+	if (!tc)
+		return;
+
+	/* bind a TC to each VPE, May as well put all excess TC's
+	   on the last VPE */
+	if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+	else {
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
+
+		/* and set XTC */
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+	}
+
+	tmp = read_tc_c0_tcstatus();
+
+	/* mark not allocated and not dynamically allocatable */
+	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
+	write_tc_c0_tcstatus(tmp);
+
+	write_tc_c0_tchalt(TCHALT_H);
+}
+
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
  * secondarys
  */
-void plat_smp_setup(void)
+void __init plat_smp_setup(void)
 {
-	unsigned long val;
-	int i, num;
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -167,75 +242,16 @@ void plat_smp_setup(void)
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	val = read_c0_mvpconf0();
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
 
 	/* we'll always have more TC's than VPE's, so loop setting everything
 	   to a sensible state */
-	for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
-		settc(i);
-
-		/* VPE's */
-		if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
-
-			/* deactivate all but vpe0 */
-			if (i != 0) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-
-				/* Record this as available CPU */
-				cpu_set(i, phys_cpu_present_map);
-				__cpu_number_map[i] = ++num;
-				__cpu_logical_map[num] = i;
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (i != 0) {
-				write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
 
-				/* set config to be the same as vpe0, particularly kseg0 coherency alg */
-				write_vpe_c0_config( read_c0_config());
-
-				/* make sure there are no software interrupts pending */
-				write_vpe_c0_cause(0);
-
-				/* Propagate Config7 */
-				write_vpe_c0_config7(read_c0_config7());
-			}
-
-		}
-
-		/* TC's */
-
-		if (i != 0) {
-			unsigned long tmp;
-
-			/* bind a TC to each VPE, May as well put all excess TC's
-			   on the last VPE */
-			if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
-				write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
-			else {
-				write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
-
-				/* and set XTC */
-				write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
-			}
-
-			tmp = read_tc_c0_tcstatus();
-
-			/* mark not allocated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
-
-			write_tc_c0_tchalt(TCHALT_H);
-		}
+		smp_tc_init(tc, mvpconf0);
+		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
 	}
 
 	/* Release config state */
@@ -243,7 +259,7 @@ void plat_smp_setup(void)
 
 	/* We'll wait until starting the secondaries before starting MVPE */
 
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
 }
 
 void __init plat_prepare_cpus(unsigned int max_cpus)
@@ -262,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 
 	/* need to mark IPI's as IRQ_PER_CPU */
 	irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
 	irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 /*
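
Taken together, the patch splits the old monolithic per-TC loop into two __init helpers: smp_tc_init() binds each TC to a VPE and leaves it unallocated, interrupt-exempt and halted, while smp_vpe_init() deactivates every VPE but VPE0, records each extra VPE as an available CPU, and, via smp_copy_vpe_config(), clones VPE0's Status, Config, Config7 and (newly in this patch) Count registers into it. A minimal sketch of the resulting loop, reconstructed from the hunks above rather than copied from the post-patch file:

	unsigned int mvpconf0, ntc, tc, ncpu = 0;

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);	/* select which TC the tc_/vpe_ accessors target */

		smp_tc_init(tc, mvpconf0);		  /* halt and bind the TC */
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);  /* activate and count its VPE */
	}

	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

The closing clear_c0_mvpcontrol() call is an assumption: it falls between the second and third hunks and is not shown above, but it is the standard counterpart of set_c0_mvpcontrol(). Note that the running CPU count now threads through smp_vpe_init() as ncpu instead of the old loop-local num. The plat_prepare_cpus() hunk is independent: having flagged the two IPIs as IRQ_PER_CPU, it now also installs handle_percpu_irq, the generic flow handler for per-CPU interrupts, on both of them.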