-rw-r--r--  arch/arm/Kconfig       |   1
-rw-r--r--  arch/arm/kernel/smp.c  | 157
-rw-r--r--  include/asm-arm/smp.h  |   3
3 files changed, 19 insertions, 142 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b786e68914d4..c72dae633f60 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -650,6 +650,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP)
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
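Selecting USE_GENERIC_SMP_HELPERS pulls the cross-CPU call implementation in from kernel/smp.c, so the architecture only has to raise the IPIs and hand them back to the generic code. A minimal sketch of that contract, using the names this patch introduces (the handler below is illustrative; the real ARM dispatch is in the do_IPI() hunk further down):

/* Arch side of the generic helpers: two senders plus two handler calls. */
void arch_send_call_function_ipi(cpumask_t mask);	/* kick a set of CPUs */
void arch_send_call_function_single_ipi(int cpu);	/* kick one CPU */

/* Illustrative handler on a CPU that received one of those IPIs. */
static void handle_call_ipi(unsigned int ipinr)
{
	if (ipinr == IPI_CALL_FUNC)
		generic_smp_call_function_interrupt();
	else if (ipinr == IPI_CALL_FUNC_SINGLE)
		generic_smp_call_function_single_interrupt();
}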
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1de334c..6344466b2113 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -68,20 +68,10 @@ enum ipi_msg_type {
 	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t pending;
-	cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	local_irq_restore(flags);
 }
 
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
-				    int retry, int wait, cpumask_t callmap)
-{
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int ret = 0;
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		goto out;
-
-	data.pending = callmap;
-	if (wait)
-		data.unfinished = callmap;
-
-	/*
-	 * try to get the mutex on smp_call_function_data
-	 */
-	spin_lock(&smp_call_function_lock);
-	smp_call_function_data = &data;
-
-	send_ipi_message(callmap, IPI_CALL_FUNC);
-
-	timeout = jiffies + HZ;
-	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-		barrier();
-
-	/*
-	 * did we time out?
-	 */
-	if (!cpus_empty(data.pending)) {
-		/*
-		 * this may be causing our panic - report it
-		 */
-		printk(KERN_CRIT
-		       "CPU%u: smp_call_function timeout for %p(%p)\n"
-		       "      callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, *cpus_addr(callmap),
-		       *cpus_addr(data.pending), wait ? "" : "no ");
-
-		/*
-		 * TRACE
-		 */
-		timeout = jiffies + (5 * HZ);
-		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-			barrier();
-
-		if (cpus_empty(data.pending))
-			printk(KERN_CRIT "     RESOLVED\n");
-		else
-			printk(KERN_CRIT "     STILL STUCK\n");
-	}
-
-	/*
-	 * whatever happened, we're done with the data, so release it
-	 */
-	smp_call_function_data = NULL;
-	spin_unlock(&smp_call_function_lock);
-
-	if (!cpus_empty(data.pending)) {
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (wait)
-		while (!cpus_empty(data.unfinished))
-			barrier();
- out:
-
-	return 0;
-}
-
-int smp_call_function(void (*func)(void *info), void *info, int retry,
-		      int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_call_function_on_cpu(func, info, retry, wait,
-					cpu_online_map);
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-			     int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	/* prevent preemption and reschedule on another processor */
-	int current_cpu = get_cpu();
-	int ret = 0;
-
-	if (cpu == current_cpu) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	} else
-		ret = smp_call_function_on_cpu(func, info, retry, wait,
-					       cpumask_of_cpu(cpu));
-
-	put_cpu();
-
-	return ret;
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void show_ipi_list(struct seq_file *p)
 {
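The hundred-plus lines of hand-rolled cross-call machinery removed above shrink to the two one-line senders added here: queueing, completion and waiting now live in kernel/smp.c. Roughly how the generic layer drives the single-CPU hook, as a simplified model only (the one-slot pending array stands in for the real per-CPU queue and its locking):

/* Simplified caller-side model (illustrative, not the kernel/smp.c code). */
struct call_entry {
	void (*func)(void *);
	void *info;
	volatile int done;
};

static struct call_entry *pending[NR_CPUS];		/* stand-in per-CPU slot */

static void call_on_cpu(int cpu, struct call_entry *e, int wait)
{
	pending[cpu] = e;				/* publish the work */
	arch_send_call_function_single_ipi(cpu);	/* the hook added above */
	while (wait && !e->done)
		cpu_relax();		/* spin until the target has run e->func */
}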
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
-	struct smp_call_struct *data = smp_call_function_data;
-	void (*func)(void *info) = data->func;
-	void *info = data->info;
-	int wait = data->wait;
-
-	cpu_clear(cpu, data->pending);
-
-	func(info);
-
-	if (wait)
-		cpu_clear(cpu, data->unfinished);
-}
-
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-			ipi_call_function(cpu);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
 
 		case IPI_CPU_STOP:
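On the receiving CPU the generic handlers do the other half of the sketch above: run whatever was queued for this CPU and flag completion so a waiting caller can return. Again a simplified model mirroring call_on_cpu(), not the real kernel/smp.c internals:

/* Illustrative receive side for the model above. */
static void handle_call_single(int cpu)
{
	struct call_entry *e = pending[cpu];

	pending[cpu] = NULL;
	e->func(e->info);	/* run the requested function on this CPU */
	e->done = 1;		/* let a waiting caller proceed */
}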
@@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
-		 cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
 {
 	int ret = 0;
 
 	preempt_disable();
 
-	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+	ret = smp_call_function_mask(mask, func, info, wait);
 	if (cpu_isset(smp_processor_id(), mask))
 		func(info);
 
@@ -738,7 +611,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	cpumask_t mask = mm->cpu_vm_mask;
 
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
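The TLB-flush callers only drop the now-removed retry argument; the wrapper keeps its explicit local func(info) because the mask-based cross call, like the old smp_call_function_on_cpu(), only targets the other CPUs. A hypothetical user of the wrapper, for comparison with the flush_tlb_* sites above:

/* Hypothetical on_each_cpu_mask() caller: run a callback on every CPU
 * that has used this mm, including the current one, and wait for it. */
static void ipi_example(void *arg)
{
	/* per-CPU work for the mm passed in arg */
}

static void example_broadcast(struct mm_struct *mm)
{
	on_each_cpu_mask(ipi_example, mm, 1, mm->cpu_vm_mask);
}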
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index af99636db400..7fffa2404b8e 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu);
 extern int platform_cpu_kill(unsigned int cpu);
 extern void platform_cpu_enable(unsigned int cpu);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 /*
  * Local timer interrupt handling function (can be IPI'ed).
  */
