Diffstat (limited to 'arch/blackfin/mach-common/smp.c')
-rw-r--r--   arch/blackfin/mach-common/smp.c | 87
1 file changed, 66 insertions(+), 21 deletions(-)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 369e687582b..7803f22d2ca 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -122,9 +122,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
 	wait = msg->call_struct.wait;
 	cpu_clear(cpu, msg->call_struct.pending);
 	func(info);
-	if (wait)
+	if (wait) {
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * 'wait' usually means synchronization between CPUs.
+		 * Invalidate D cache in case shared data was changed
+		 * by func() to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		cpu_clear(cpu, msg->call_struct.waitmask);
-	else
+	} else
 		kfree(msg);
 }
 
@@ -219,6 +227,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 		blackfin_dcache_invalidate_range(
 			(unsigned long)(&msg->call_struct.waitmask),
 			(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * Invalidate D cache in case shared data was changed by
+		 * other processors to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		kfree(msg);
 	}
 	return 0;
@@ -261,6 +276,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 		blackfin_dcache_invalidate_range(
 			(unsigned long)(&msg->call_struct.waitmask),
 			(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * Invalidate D cache in case shared data was changed by
+		 * other processors to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		kfree(msg);
 	}
 	return 0;
@@ -322,8 +344,11 @@ void smp_send_stop(void)
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
 	int ret;
+	static struct task_struct *idle;
+
+	if (idle)
+		free_task(idle);
 
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle)) {
@@ -332,7 +357,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	}
 
 	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
-	smp_wmb();
 
 	ret = platform_boot_secondary(cpu, idle);
 
@@ -343,9 +367,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 static void __cpuinit setup_secondary(unsigned int cpu)
 {
-#if !defined(CONFIG_TICKSOURCE_GPTMR0)
-	struct irq_desc *timer_desc;
-#endif
 	unsigned long ilat;
 
 	bfin_write_IMASK(0);
@@ -360,17 +381,6 @@ static void __cpuinit setup_secondary(unsigned int cpu)
 	bfin_irq_flags |= IMASK_IVG15 |
 		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
 		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
-	/* Power down the core timer, just to play safe. */
-	bfin_write_TCNTL(0);
-
-	/* system timer0 has been setup by CoreA. */
-#else
-	timer_desc = irq_desc + IRQ_CORETMR;
-	setup_core_timer();
-	timer_desc->chip->enable(IRQ_CORETMR);
-#endif
 }
 
 void __cpuinit secondary_start_kernel(void)
@@ -405,7 +415,6 @@ void __cpuinit secondary_start_kernel(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */
 
 	preempt_disable();
 
@@ -413,6 +422,9 @@ void __cpuinit secondary_start_kernel(void)
 
 	platform_secondary_init(cpu);
 
+	/* setup local core timer */
+	bfin_local_timer_setup();
+
 	local_irq_enable();
 
 	/*
@@ -462,25 +474,58 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
 #ifdef __ARCH_SYNC_CORE_ICACHE
+unsigned long icache_invld_count[NR_CPUS];
 void resync_core_icache(void)
 {
 	unsigned int cpu = get_cpu();
 	blackfin_invalidate_entire_icache();
-	++per_cpu(cpu_data, cpu).icache_invld_count;
+	icache_invld_count[cpu]++;
 	put_cpu();
 }
 EXPORT_SYMBOL(resync_core_icache);
 #endif
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
+unsigned long dcache_invld_count[NR_CPUS];
 unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
 
 void resync_core_dcache(void)
 {
 	unsigned int cpu = get_cpu();
 	blackfin_invalidate_entire_dcache();
-	++per_cpu(cpu_data, cpu).dcache_invld_count;
+	dcache_invld_count[cpu]++;
 	put_cpu();
 }
 EXPORT_SYMBOL(resync_core_dcache);
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == 0)
+		return -EPERM;
+
+	set_cpu_online(cpu, false);
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_killed);
+
+int __cpuexit __cpu_die(unsigned int cpu)
+{
+	return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+void cpu_die(void)
+{
+	complete(&cpu_killed);
+
+	atomic_dec(&init_mm.mm_users);
+	atomic_dec(&init_mm.mm_count);
+
+	local_irq_disable();
+	platform_cpu_die();
+}
+#endif