Diffstat (limited to 'arch/blackfin/mach-common/smp.c')

-rw-r--r--  arch/blackfin/mach-common/smp.c | 92
1 file changed, 69 insertions(+), 23 deletions(-)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 369e687582b7..a17107a700d5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/slab.h>
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
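The new <linux/slab.h> include is there because this file allocates and frees its IPI messages with kmalloc()/kfree(); previously the declarations were presumably picked up through indirect includes. A minimal sketch of the allocation pattern the rest of the diff keeps touching (message setup and error paths abbreviated, GFP_ATOMIC assumed as in the file's senders):

    #include <linux/slab.h>

    struct ipi_message *msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
    if (!msg)
            return -ENOMEM;
    /* ... fill in msg->type, queue it, raise the IPI ... */
    kfree(msg);     /* freed by sender or receiver depending on 'wait' */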
@@ -122,9 +123,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
         wait = msg->call_struct.wait;
         cpu_clear(cpu, msg->call_struct.pending);
         func(info);
-        if (wait)
+        if (wait) {
+#ifdef __ARCH_SYNC_CORE_DCACHE
+                /*
+                 * 'wait' usually means synchronization between CPUs.
+                 * Invalidate D cache in case shared data was changed
+                 * by func() to ensure cache coherence.
+                 */
+                resync_core_dcache();
+#endif
                 cpu_clear(cpu, msg->call_struct.waitmask);
-        else
+        } else
                 kfree(msg);
 }
 
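Blackfin's dual-core SMP parts have no hardware D-cache coherence, so a cross-CPU synchronization point must also be a cache synchronization point; that is why the receiver resyncs before clearing its bit in waitmask. An illustrative model of the ordering on the receiving CPU (ipi_run_and_ack() and invalidate_dcache() are hypothetical stand-ins, the latter for resync_core_dcache()):

    static void invalidate_dcache(void)
    {
            /* stand-in for resync_core_dcache() */
    }

    /* Receiver side: run the function, drop any stale cached copies of
     * shared data, and only then publish completion. */
    static void ipi_run_and_ack(void (*func)(void *), void *info,
                                volatile unsigned long *waitmask, int cpu)
    {
            func(info);                  /* may touch data shared with the sender */
            invalidate_dcache();         /* cache sync before the handshake */
            *waitmask &= ~(1UL << cpu);  /* the sender polls this word */
    }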
@@ -161,8 +170,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
                         kfree(msg);
                         break;
                 default:
-                        printk(KERN_CRIT "CPU%u: Unknown IPI message \
-                        0x%lx\n", cpu, msg->type);
+                        printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
+                               cpu, msg->type);
                         kfree(msg);
                         break;
                 }
@@ -219,6 +228,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
                         blackfin_dcache_invalidate_range(
                                 (unsigned long)(&msg->call_struct.waitmask),
                                 (unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+                /*
+                 * Invalidate D cache in case shared data was changed by
+                 * other processors to ensure cache coherence.
+                 */
+                resync_core_dcache();
+#endif
                 kfree(msg);
         }
         return 0;
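The context lines above sit inside the sender's polling loop: it repeatedly invalidates the cache line holding waitmask so it can observe the receivers clearing their bits, then resyncs its own D-cache before reading anything func() may have written on another core. Condensed and slightly simplified from the surrounding function:

    while (!cpus_empty(msg->call_struct.waitmask))
            blackfin_dcache_invalidate_range(
                    (unsigned long)(&msg->call_struct.waitmask),
                    (unsigned long)(&msg->call_struct.waitmask));
    #ifdef __ARCH_SYNC_CORE_DCACHE
    resync_core_dcache();   /* drop stale copies of data the receivers wrote */
    #endif
    kfree(msg);             /* with 'wait', the sender owns the message */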
@@ -261,6 +277,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                         blackfin_dcache_invalidate_range(
                                 (unsigned long)(&msg->call_struct.waitmask),
                                 (unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+                /*
+                 * Invalidate D cache in case shared data was changed by
+                 * other processors to ensure cache coherence.
+                 */
+                resync_core_dcache();
+#endif
                 kfree(msg);
         }
         return 0;
@@ -322,8 +345,11 @@ void smp_send_stop(void)
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-        struct task_struct *idle;
         int ret;
+        static struct task_struct *idle;
+
+        if (idle)
+                free_task(idle);
 
         idle = fork_idle(cpu);
         if (IS_ERR(idle)) {
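fork_idle() allocates a fresh idle task each time the CPU comes up, so before this change every unplug/replug cycle leaked one task; making the pointer static and freeing the previous task on re-entry caps the cost at one cached task. The same pattern as a self-contained userspace sketch (alloc_idle() is a hypothetical stand-in for fork_idle()):

    #include <stdlib.h>

    struct task { char stack[8192]; };

    static struct task *alloc_idle(void)    /* stand-in for fork_idle() */
    {
            return malloc(sizeof(struct task));
    }

    struct task *bring_up_cpu(void)
    {
            static struct task *idle;       /* survives across bring-ups */

            if (idle)
                    free(idle);             /* reclaim last cycle's task */
            idle = alloc_idle();
            return idle;                    /* caller must check for NULL */
    }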
@@ -332,7 +358,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
         }
 
         secondary_stack = task_stack_page(idle) + THREAD_SIZE;
-        smp_wmb();
 
         ret = platform_boot_secondary(cpu, idle);
 
@@ -343,9 +368,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 static void __cpuinit setup_secondary(unsigned int cpu)
 {
-#if !defined(CONFIG_TICKSOURCE_GPTMR0)
-        struct irq_desc *timer_desc;
-#endif
         unsigned long ilat;
 
         bfin_write_IMASK(0);
@@ -360,17 +382,6 @@ static void __cpuinit setup_secondary(unsigned int cpu)
         bfin_irq_flags |= IMASK_IVG15 |
                 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
                 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
-        /* Power down the core timer, just to play safe. */
-        bfin_write_TCNTL(0);
-
-        /* system timer0 has been setup by CoreA. */
-#else
-        timer_desc = irq_desc + IRQ_CORETMR;
-        setup_core_timer();
-        timer_desc->chip->enable(IRQ_CORETMR);
-#endif
 }
 
 void __cpuinit secondary_start_kernel(void)
@@ -405,7 +416,6 @@ void __cpuinit secondary_start_kernel(void)
         atomic_inc(&mm->mm_users);
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
-        BUG_ON(current->mm);    /* Can't be, but better be safe than sorry. */
 
         preempt_disable();
 
@@ -413,6 +423,9 @@ void __cpuinit secondary_start_kernel(void)
 
         platform_secondary_init(cpu);
 
+        /* setup local core timer */
+        bfin_local_timer_setup();
+
         local_irq_enable();
 
         /*
@@ -462,25 +475,58 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
 #ifdef __ARCH_SYNC_CORE_ICACHE
+unsigned long icache_invld_count[NR_CPUS];
 void resync_core_icache(void)
 {
         unsigned int cpu = get_cpu();
         blackfin_invalidate_entire_icache();
-        ++per_cpu(cpu_data, cpu).icache_invld_count;
+        icache_invld_count[cpu]++;
         put_cpu();
 }
 EXPORT_SYMBOL(resync_core_icache);
 #endif
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
+unsigned long dcache_invld_count[NR_CPUS];
 unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
 
 void resync_core_dcache(void)
 {
         unsigned int cpu = get_cpu();
         blackfin_invalidate_entire_dcache();
-        ++per_cpu(cpu_data, cpu).dcache_invld_count;
+        dcache_invld_count[cpu]++;
         put_cpu();
 }
 EXPORT_SYMBOL(resync_core_dcache);
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpuexit __cpu_disable(void)
+{
+        unsigned int cpu = smp_processor_id();
+
+        if (cpu == 0)
+                return -EPERM;
+
+        set_cpu_online(cpu, false);
+        return 0;
+}
+
+static DECLARE_COMPLETION(cpu_killed);
+
+int __cpuexit __cpu_die(unsigned int cpu)
+{
+        return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+void cpu_die(void)
+{
+        complete(&cpu_killed);
+
+        atomic_dec(&init_mm.mm_users);
+        atomic_dec(&init_mm.mm_count);
+
+        local_irq_disable();
+        platform_cpu_die();
+}
+#endif
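The new hotplug path is a two-sided handshake: the dying CPU signals complete(&cpu_killed) from cpu_die() and parks itself in platform_cpu_die(), while the controlling CPU blocks in __cpu_die() via wait_for_completion_timeout() (the 5000 is in jiffies). A runnable userspace analogue of that handshake, using a condition variable in place of the kernel's completion and with the timeout elided; the names are ours, not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cpu_killed = PTHREAD_COND_INITIALIZER;
    static int killed;

    static void *dying_cpu(void *unused)
    {
            /* ... per-CPU teardown would happen here ... */
            pthread_mutex_lock(&lock);
            killed = 1;                     /* complete(&cpu_killed) */
            pthread_cond_signal(&cpu_killed);
            pthread_mutex_unlock(&lock);
            return NULL;                    /* kernel: platform_cpu_die() never returns */
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, dying_cpu, NULL);

            pthread_mutex_lock(&lock);      /* __cpu_die(): wait for the ack */
            while (!killed)
                    pthread_cond_wait(&cpu_killed, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            puts("cpu is down");
            return 0;
    }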