path: root/arch/mips/kernel/smp-mt.c
Diffstat (limited to 'arch/mips/kernel/smp-mt.c')
-rw-r--r--  arch/mips/kernel/smp-mt.c  193
1 file changed, 108 insertions(+), 85 deletions(-)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 94e210cc6cb6..89e6f6aa5166 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -22,6 +22,7 @@
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/compiler.h>
+#include <linux/smp.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -30,7 +31,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
@@ -215,68 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
         write_tc_c0_tchalt(TCHALT_H);
 }
 
-/*
- * Common setup before any secondaries are started
- * Make sure all CPUs are in a sensible state before we boot any of the
- * secondaries
- */
-void __init plat_smp_setup(void)
+static void vsmp_send_ipi_single(int cpu, unsigned int action)
 {
-        unsigned int mvpconf0, ntc, tc, ncpu = 0;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-        /* If we have an FPU, enroll ourselves in the FPU-full mask */
-        if (cpu_has_fpu)
-                cpu_set(0, mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-        if (!cpu_has_mipsmt)
-                return;
-
-        /* disable MT so we can configure */
-        dvpe();
-        dmt();
+        int i;
+        unsigned long flags;
+        int vpflags;
 
-        /* Put MVPEs into 'configuration state' */
-        set_c0_mvpcontrol(MVPCONTROL_VPC);
+        local_irq_save(flags);
 
-        mvpconf0 = read_c0_mvpconf0();
-        ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+        vpflags = dvpe();       /* can't access the other CPU's registers whilst MVPE enabled */
 
-        /* we'll always have more TCs than VPEs, so loop setting everything
-           to a sensible state */
-        for (tc = 0; tc <= ntc; tc++) {
-                settc(tc);
+        switch (action) {
+        case SMP_CALL_FUNCTION:
+                i = C_SW1;
+                break;
 
-                smp_tc_init(tc, mvpconf0);
-                ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+        case SMP_RESCHEDULE_YOURSELF:
+        default:
+                i = C_SW0;
+                break;
         }
 
-        /* Release config state */
-        clear_c0_mvpcontrol(MVPCONTROL_VPC);
+        /* 1:1 mapping of vpe and tc... */
+        settc(cpu);
+        write_vpe_c0_cause(read_vpe_c0_cause() | i);
+        evpe(vpflags);
 
-        /* We'll wait until starting the secondaries before starting MVPE */
+        local_irq_restore(flags);
+}
 
-        printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+        unsigned int i;
+
+        for_each_cpu_mask(i, mask)
+                vsmp_send_ipi_single(i, action);
 }
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void __cpuinit vsmp_init_secondary(void)
 {
-        mips_mt_set_cpuoptions();
+        /* Enable per-cpu interrupts */
 
-        /* set up ipi interrupts */
-        if (cpu_has_vint) {
-                set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-                set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-        }
+        /* This is Malta specific: IPI, performance and timer interrupts */
+        write_c0_status((read_c0_status() & ~ST0_IM) |
+                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+}
 
-        cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-        cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+static void __cpuinit vsmp_smp_finish(void)
+{
+        write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
 
-        setup_irq(cpu_ipi_resched_irq, &irq_resched);
-        setup_irq(cpu_ipi_call_irq, &irq_call);
+#ifdef CONFIG_MIPS_MT_FPAFF
+        /* If we have an FPU, enroll ourselves in the FPU-full mask */
+        if (cpu_has_fpu)
+                cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
 
-        set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-        set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+        local_irq_enable();
+}
+
+static void vsmp_cpus_done(void)
+{
 }
 
 /*
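Note that the functions above are no longer exported under fixed names (core_send_ipi, prom_init_secondary, and so on); they become static and are only reachable through the plat_smp_ops table added at the end of this patch. Below is a minimal, self-contained sketch of that ops-table dispatch pattern; the mp_ops pointer and the generic caller are assumptions about the arch/mips/kernel/smp.c side, which this diff does not show, and only the field names mirror the real struct plat_smp_ops.

/* Toy user-space model of the ops-table dispatch this patch converts to.
 * Illustrative only; not kernel code. */
#include <stdio.h>

struct plat_smp_ops {
        void (*send_ipi_single)(int cpu, unsigned int action);
        void (*init_secondary)(void);
};

/* Platform implementation, analogous to vsmp_send_ipi_single() above. */
static void toy_send_ipi_single(int cpu, unsigned int action)
{
        printf("IPI action %u -> cpu %d\n", action, cpu);
}

static void toy_init_secondary(void)
{
        printf("secondary CPU interrupt setup\n");
}

static struct plat_smp_ops toy_smp_ops = {
        .send_ipi_single = toy_send_ipi_single,
        .init_secondary  = toy_init_secondary,
};

/* Stands in for the registered ops pointer held by the generic SMP code. */
static struct plat_smp_ops *mp_ops = &toy_smp_ops;

int main(void)
{
        mp_ops->init_secondary();
        mp_ops->send_ipi_single(1, 0);  /* e.g. a reschedule IPI */
        return 0;
}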
@@ -287,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
  * (unsigned long)idle->thread_info the gp
  * assumes a 1:1 mapping of TC => VPE
  */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 {
         struct thread_info *gp = task_thread_info(idle);
         dvpe();
@@ -321,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
         evpe(EVPE_ENABLE);
 }
 
-void __cpuinit prom_init_secondary(void)
-{
-        /* Enable per-cpu interrupts */
-
-        /* This is Malta specific: IPI, performance and timer interrupts */
-        write_c0_status((read_c0_status() & ~ST0_IM) |
-                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
-}
-
-void __cpuinit prom_smp_finish(void)
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPUs are in a sensible state before we boot any of the
+ * secondaries
+ */
+static void __init vsmp_smp_setup(void)
 {
-        write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+        unsigned int mvpconf0, ntc, tc, ncpu = 0;
+        unsigned int nvpe;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
         /* If we have an FPU, enroll ourselves in the FPU-full mask */
         if (cpu_has_fpu)
-                cpu_set(smp_processor_id(), mt_fpu_cpumask);
+                cpu_set(0, mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
+        if (!cpu_has_mipsmt)
+                return;
 
-        local_irq_enable();
-}
+        /* disable MT so we can configure */
+        dvpe();
+        dmt();
 
-void prom_cpus_done(void)
-{
-}
+        /* Put MVPEs into 'configuration state' */
+        set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-void core_send_ipi(int cpu, unsigned int action)
-{
-        int i;
-        unsigned long flags;
-        int vpflags;
+        mvpconf0 = read_c0_mvpconf0();
+        ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
 
-        local_irq_save(flags);
+        nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+        smp_num_siblings = nvpe;
 
-        vpflags = dvpe();       /* can't access the other CPU's registers whilst MVPE enabled */
+        /* we'll always have more TCs than VPEs, so loop setting everything
+           to a sensible state */
+        for (tc = 0; tc <= ntc; tc++) {
+                settc(tc);
 
-        switch (action) {
-        case SMP_CALL_FUNCTION:
-                i = C_SW1;
-                break;
+                smp_tc_init(tc, mvpconf0);
+                ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+        }
 
-        case SMP_RESCHEDULE_YOURSELF:
-        default:
-                i = C_SW0;
-                break;
+        /* Release config state */
+        clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+        /* We'll wait until starting the secondaries before starting MVPE */
+
+        printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+        mips_mt_set_cpuoptions();
+
+        /* set up ipi interrupts */
+        if (cpu_has_vint) {
+                set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+                set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
         }
 
-        /* 1:1 mapping of vpe and tc... */
-        settc(cpu);
-        write_vpe_c0_cause(read_vpe_c0_cause() | i);
-        evpe(vpflags);
+        cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+        cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
 
-        local_irq_restore(flags);
+        setup_irq(cpu_ipi_resched_irq, &irq_resched);
+        setup_irq(cpu_ipi_call_irq, &irq_call);
+
+        set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+        set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
+
+struct plat_smp_ops vsmp_smp_ops = {
+        .send_ipi_single = vsmp_send_ipi_single,
+        .send_ipi_mask   = vsmp_send_ipi_mask,
+        .init_secondary  = vsmp_init_secondary,
+        .smp_finish      = vsmp_smp_finish,
+        .cpus_done       = vsmp_cpus_done,
+        .boot_secondary  = vsmp_boot_secondary,
+        .smp_setup       = vsmp_smp_setup,
+        .prepare_cpus    = vsmp_prepare_cpus,
+};
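Nothing in this file uses vsmp_smp_ops itself; a board port is expected to register the table during early setup. A hedged sketch follows, assuming the register_smp_ops() helper that accompanies struct plat_smp_ops in this conversion; the actual call site (for example Malta's platform init) is outside this diff.

/* Sketch only: platform setup code selecting the MT/VSMP ops.
 * register_smp_ops() and this call site are assumptions, not part of this diff. */
#ifdef CONFIG_MIPS_MT_SMP
        if (cpu_has_mipsmt)
                register_smp_ops(&vsmp_smp_ops);
#endif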