| field     | value                                    | date                      |
|-----------|------------------------------------------|---------------------------|
| author    | Ingo Molnar <mingo@elte.hu>              | 2008-07-15 15:55:59 -0400 |
| committer | Ingo Molnar <mingo@elte.hu>              | 2008-07-15 15:55:59 -0400 |
| commit    | 1a781a777b2f6ac46523fe92396215762ced624d |                           |
| tree      | 4f34bb4aade85c0eb364b53d664ec7f6ab959006 |                           |
| parent    | b9d2252c1e44fa83a4e65fdc9eb93db6297c55af |                           |
| parent    | 42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c |                           |
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:
arch/powerpc/Kconfig
arch/s390/kernel/time.c
arch/x86/kernel/apic_32.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/i8259_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi_64.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
include/asm-x86/hw_irq_32.h
include/asm-x86/hw_irq_64.h
include/asm-x86/mach-default/irq_vectors.h
include/asm-x86/mach-voyager/irq_vectors.h
include/asm-x86/smp.h
kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
125 files changed, 881 insertions, 1787 deletions
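The substance of this merge is the switch to the generic cross-CPU call helpers in kernel/smp.c: the unused `nonatomic`/`retry` argument disappears from `smp_call_function()`, `smp_call_function_single()` and `on_each_cpu()`, and the architectures keep only the IPI plumbing. A minimal caller-side sketch of the new signatures follows; the callback name and its per-CPU work are made up for illustration.

```c
#include <linux/smp.h>

/* Hypothetical callback: must be fast, non-blocking and IRQ-safe. */
static void drain_local_counters(void *info)
{
        /* per-CPU work would go here */
}

static void drain_all_counters(void)
{
        /*
         * Old API:  smp_call_function(drain_local_counters, NULL, 0, 1);
         * New API:  the "nonatomic"/"retry" argument is gone, only
         *           "wait" remains.
         */
        smp_call_function(drain_local_counters, NULL, 1);            /* all other CPUs */
        smp_call_function_single(0, drain_local_counters, NULL, 1);  /* one specific CPU */
        on_each_cpu(drain_local_counters, NULL, 1);                  /* all CPUs, incl. self */
}
```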
diff --git a/arch/Kconfig b/arch/Kconfig
index 3ea332b009e5..ad89a33d8c6e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
| @@ -39,3 +39,6 @@ config HAVE_KRETPROBES | |||
| 39 | 39 | ||
| 40 | config HAVE_DMA_ATTRS | 40 | config HAVE_DMA_ATTRS |
| 41 | def_bool n | 41 | def_bool n |
| 42 | |||
| 43 | config USE_GENERIC_SMP_HELPERS | ||
| 44 | def_bool n | ||
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 729cdbdf8036..dbe8c280fea9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
| @@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC | |||
| 528 | config SMP | 528 | config SMP |
| 529 | bool "Symmetric multi-processing support" | 529 | bool "Symmetric multi-processing support" |
| 530 | depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL | 530 | depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL |
| 531 | select USE_GENERIC_SMP_HELPERS | ||
| 531 | ---help--- | 532 | ---help--- |
| 532 | This enables support for systems with more than one CPU. If you have | 533 | This enables support for systems with more than one CPU. If you have |
| 533 | a system with only one CPU, like most personal computers, say N. If | 534 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b04f1feb1dda..04dcc5e5d4c1 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
| @@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write) | |||
| 660 | 660 | ||
| 661 | #ifdef CONFIG_SMP | 661 | #ifdef CONFIG_SMP |
| 662 | if (smp_processor_id() != boot_cpuid) | 662 | if (smp_processor_id() != boot_cpuid) |
| 663 | smp_call_function_on_cpu(__marvel_access_rtc, | 663 | smp_call_function_single(boot_cpuid, |
| 664 | &rtc_access, 1, 1, | 664 | __marvel_access_rtc, |
| 665 | cpumask_of_cpu(boot_cpuid)); | 665 | &rtc_access, 1); |
| 666 | else | 666 | else |
| 667 | __marvel_access_rtc(&rtc_access); | 667 | __marvel_access_rtc(&rtc_access); |
| 668 | #else | 668 | #else |
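The core_marvel.c hunk above is the typical single-target conversion: the alpha-private `smp_call_function_on_cpu(func, info, retry, wait, mask)` collapses into the generic `smp_call_function_single()`. A sketch of the same pattern with a hypothetical callback and request structure; it assumes the caller already runs with preemption disabled, as the RTC path does.

```c
#include <linux/smp.h>

struct rtc_req {
        int value;              /* stand-in for the real RTC payload */
};

/* Hypothetical callback that must execute on the boot CPU. */
static void do_rtc_access(void *info)
{
        struct rtc_req *req = info;

        req->value = 42;        /* stand-in for the real register access */
}

static void rtc_access_on_boot_cpu(int boot_cpu, struct rtc_req *req)
{
        if (smp_processor_id() != boot_cpu)
                /* Run it over there and wait for completion (wait == 1). */
                smp_call_function_single(boot_cpu, do_rtc_access, req, 1);
        else
                do_rtc_access(req);
}
```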
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 96ed82fd9eef..351407e07e71 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
| @@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd) | |||
| 160 | struct halt_info args; | 160 | struct halt_info args; |
| 161 | args.mode = mode; | 161 | args.mode = mode; |
| 162 | args.restart_cmd = restart_cmd; | 162 | args.restart_cmd = restart_cmd; |
| 163 | on_each_cpu(common_shutdown_1, &args, 1, 0); | 163 | on_each_cpu(common_shutdown_1, &args, 0); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | void | 166 | void |
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 2525692db0ab..83df541650fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
| @@ -62,6 +62,7 @@ static struct { | |||
| 62 | enum ipi_message_type { | 62 | enum ipi_message_type { |
| 63 | IPI_RESCHEDULE, | 63 | IPI_RESCHEDULE, |
| 64 | IPI_CALL_FUNC, | 64 | IPI_CALL_FUNC, |
| 65 | IPI_CALL_FUNC_SINGLE, | ||
| 65 | IPI_CPU_STOP, | 66 | IPI_CPU_STOP, |
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| @@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation) | |||
| 558 | wripir(i); | 559 | wripir(i); |
| 559 | } | 560 | } |
| 560 | 561 | ||
| 561 | /* Structure and data for smp_call_function. This is designed to | ||
| 562 | minimize static memory requirements. Plus it looks cleaner. */ | ||
| 563 | |||
| 564 | struct smp_call_struct { | ||
| 565 | void (*func) (void *info); | ||
| 566 | void *info; | ||
| 567 | long wait; | ||
| 568 | atomic_t unstarted_count; | ||
| 569 | atomic_t unfinished_count; | ||
| 570 | }; | ||
| 571 | |||
| 572 | static struct smp_call_struct *smp_call_function_data; | ||
| 573 | |||
| 574 | /* Atomicly drop data into a shared pointer. The pointer is free if | ||
| 575 | it is initially locked. If retry, spin until free. */ | ||
| 576 | |||
| 577 | static int | ||
| 578 | pointer_lock (void *lock, void *data, int retry) | ||
| 579 | { | ||
| 580 | void *old, *tmp; | ||
| 581 | |||
| 582 | mb(); | ||
| 583 | again: | ||
| 584 | /* Compare and swap with zero. */ | ||
| 585 | asm volatile ( | ||
| 586 | "1: ldq_l %0,%1\n" | ||
| 587 | " mov %3,%2\n" | ||
| 588 | " bne %0,2f\n" | ||
| 589 | " stq_c %2,%1\n" | ||
| 590 | " beq %2,1b\n" | ||
| 591 | "2:" | ||
| 592 | : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp) | ||
| 593 | : "r"(data) | ||
| 594 | : "memory"); | ||
| 595 | |||
| 596 | if (old == 0) | ||
| 597 | return 0; | ||
| 598 | if (! retry) | ||
| 599 | return -EBUSY; | ||
| 600 | |||
| 601 | while (*(void **)lock) | ||
| 602 | barrier(); | ||
| 603 | goto again; | ||
| 604 | } | ||
| 605 | |||
| 606 | void | 562 | void |
| 607 | handle_ipi(struct pt_regs *regs) | 563 | handle_ipi(struct pt_regs *regs) |
| 608 | { | 564 | { |
| @@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs) | |||
| 632 | break; | 588 | break; |
| 633 | 589 | ||
| 634 | case IPI_CALL_FUNC: | 590 | case IPI_CALL_FUNC: |
| 635 | { | 591 | generic_smp_call_function_interrupt(); |
| 636 | struct smp_call_struct *data; | 592 | break; |
| 637 | void (*func)(void *info); | 593 | |
| 638 | void *info; | 594 | case IPI_CALL_FUNC_SINGLE: |
| 639 | int wait; | 595 | generic_smp_call_function_single_interrupt(); |
| 640 | |||
| 641 | data = smp_call_function_data; | ||
| 642 | func = data->func; | ||
| 643 | info = data->info; | ||
| 644 | wait = data->wait; | ||
| 645 | |||
| 646 | /* Notify the sending CPU that the data has been | ||
| 647 | received, and execution is about to begin. */ | ||
| 648 | mb(); | ||
| 649 | atomic_dec (&data->unstarted_count); | ||
| 650 | |||
| 651 | /* At this point the structure may be gone unless | ||
| 652 | wait is true. */ | ||
| 653 | (*func)(info); | ||
| 654 | |||
| 655 | /* Notify the sending CPU that the task is done. */ | ||
| 656 | mb(); | ||
| 657 | if (wait) atomic_dec (&data->unfinished_count); | ||
| 658 | break; | 596 | break; |
| 659 | } | ||
| 660 | 597 | ||
| 661 | case IPI_CPU_STOP: | 598 | case IPI_CPU_STOP: |
| 662 | halt(); | 599 | halt(); |
| @@ -700,102 +637,15 @@ smp_send_stop(void) | |||
| 700 | send_ipi_message(to_whom, IPI_CPU_STOP); | 637 | send_ipi_message(to_whom, IPI_CPU_STOP); |
| 701 | } | 638 | } |
| 702 | 639 | ||
| 703 | /* | 640 | void arch_send_call_function_ipi(cpumask_t mask) |
| 704 | * Run a function on all other CPUs. | ||
| 705 | * <func> The function to run. This must be fast and non-blocking. | ||
| 706 | * <info> An arbitrary pointer to pass to the function. | ||
| 707 | * <retry> If true, keep retrying until ready. | ||
| 708 | * <wait> If true, wait until function has completed on other CPUs. | ||
| 709 | * [RETURNS] 0 on success, else a negative status code. | ||
| 710 | * | ||
| 711 | * Does not return until remote CPUs are nearly ready to execute <func> | ||
| 712 | * or are or have executed. | ||
| 713 | * You must not call this function with disabled interrupts or from a | ||
| 714 | * hardware interrupt handler or from a bottom half handler. | ||
| 715 | */ | ||
| 716 | |||
| 717 | int | ||
| 718 | smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry, | ||
| 719 | int wait, cpumask_t to_whom) | ||
| 720 | { | 641 | { |
| 721 | struct smp_call_struct data; | 642 | send_ipi_message(mask, IPI_CALL_FUNC); |
| 722 | unsigned long timeout; | ||
| 723 | int num_cpus_to_call; | ||
| 724 | |||
| 725 | /* Can deadlock when called with interrupts disabled */ | ||
| 726 | WARN_ON(irqs_disabled()); | ||
| 727 | |||
| 728 | data.func = func; | ||
| 729 | data.info = info; | ||
| 730 | data.wait = wait; | ||
| 731 | |||
| 732 | cpu_clear(smp_processor_id(), to_whom); | ||
| 733 | num_cpus_to_call = cpus_weight(to_whom); | ||
| 734 | |||
| 735 | atomic_set(&data.unstarted_count, num_cpus_to_call); | ||
| 736 | atomic_set(&data.unfinished_count, num_cpus_to_call); | ||
| 737 | |||
| 738 | /* Acquire the smp_call_function_data mutex. */ | ||
| 739 | if (pointer_lock(&smp_call_function_data, &data, retry)) | ||
| 740 | return -EBUSY; | ||
| 741 | |||
| 742 | /* Send a message to the requested CPUs. */ | ||
| 743 | send_ipi_message(to_whom, IPI_CALL_FUNC); | ||
| 744 | |||
| 745 | /* Wait for a minimal response. */ | ||
| 746 | timeout = jiffies + HZ; | ||
| 747 | while (atomic_read (&data.unstarted_count) > 0 | ||
| 748 | && time_before (jiffies, timeout)) | ||
| 749 | barrier(); | ||
| 750 | |||
| 751 | /* If there's no response yet, log a message but allow a longer | ||
| 752 | * timeout period -- if we get a response this time, log | ||
| 753 | * a message saying when we got it.. | ||
| 754 | */ | ||
| 755 | if (atomic_read(&data.unstarted_count) > 0) { | ||
| 756 | long start_time = jiffies; | ||
| 757 | printk(KERN_ERR "%s: initial timeout -- trying long wait\n", | ||
| 758 | __func__); | ||
| 759 | timeout = jiffies + 30 * HZ; | ||
| 760 | while (atomic_read(&data.unstarted_count) > 0 | ||
| 761 | && time_before(jiffies, timeout)) | ||
| 762 | barrier(); | ||
| 763 | if (atomic_read(&data.unstarted_count) <= 0) { | ||
| 764 | long delta = jiffies - start_time; | ||
| 765 | printk(KERN_ERR | ||
| 766 | "%s: response %ld.%ld seconds into long wait\n", | ||
| 767 | __func__, delta / HZ, | ||
| 768 | (100 * (delta - ((delta / HZ) * HZ))) / HZ); | ||
| 769 | } | ||
| 770 | } | ||
| 771 | |||
| 772 | /* We either got one or timed out -- clear the lock. */ | ||
| 773 | mb(); | ||
| 774 | smp_call_function_data = NULL; | ||
| 775 | |||
| 776 | /* | ||
| 777 | * If after both the initial and long timeout periods we still don't | ||
| 778 | * have a response, something is very wrong... | ||
| 779 | */ | ||
| 780 | BUG_ON(atomic_read (&data.unstarted_count) > 0); | ||
| 781 | |||
| 782 | /* Wait for a complete response, if needed. */ | ||
| 783 | if (wait) { | ||
| 784 | while (atomic_read (&data.unfinished_count) > 0) | ||
| 785 | barrier(); | ||
| 786 | } | ||
| 787 | |||
| 788 | return 0; | ||
| 789 | } | 643 | } |
| 790 | EXPORT_SYMBOL(smp_call_function_on_cpu); | ||
| 791 | 644 | ||
| 792 | int | 645 | void arch_send_call_function_single_ipi(int cpu) |
| 793 | smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | ||
| 794 | { | 646 | { |
| 795 | return smp_call_function_on_cpu (func, info, retry, wait, | 647 | send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); |
| 796 | cpu_online_map); | ||
| 797 | } | 648 | } |
| 798 | EXPORT_SYMBOL(smp_call_function); | ||
| 799 | 649 | ||
| 800 | static void | 650 | static void |
| 801 | ipi_imb(void *ignored) | 651 | ipi_imb(void *ignored) |
| @@ -807,7 +657,7 @@ void | |||
| 807 | smp_imb(void) | 657 | smp_imb(void) |
| 808 | { | 658 | { |
| 809 | /* Must wait other processors to flush their icache before continue. */ | 659 | /* Must wait other processors to flush their icache before continue. */ |
| 810 | if (on_each_cpu(ipi_imb, NULL, 1, 1)) | 660 | if (on_each_cpu(ipi_imb, NULL, 1)) |
| 811 | printk(KERN_CRIT "smp_imb: timed out\n"); | 661 | printk(KERN_CRIT "smp_imb: timed out\n"); |
| 812 | } | 662 | } |
| 813 | EXPORT_SYMBOL(smp_imb); | 663 | EXPORT_SYMBOL(smp_imb); |
| @@ -823,7 +673,7 @@ flush_tlb_all(void) | |||
| 823 | { | 673 | { |
| 824 | /* Although we don't have any data to pass, we do want to | 674 | /* Although we don't have any data to pass, we do want to |
| 825 | synchronize with the other processors. */ | 675 | synchronize with the other processors. */ |
| 826 | if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) { | 676 | if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) { |
| 827 | printk(KERN_CRIT "flush_tlb_all: timed out\n"); | 677 | printk(KERN_CRIT "flush_tlb_all: timed out\n"); |
| 828 | } | 678 | } |
| 829 | } | 679 | } |
| @@ -860,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm) | |||
| 860 | } | 710 | } |
| 861 | } | 711 | } |
| 862 | 712 | ||
| 863 | if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { | 713 | if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { |
| 864 | printk(KERN_CRIT "flush_tlb_mm: timed out\n"); | 714 | printk(KERN_CRIT "flush_tlb_mm: timed out\n"); |
| 865 | } | 715 | } |
| 866 | 716 | ||
| @@ -913,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | |||
| 913 | data.mm = mm; | 763 | data.mm = mm; |
| 914 | data.addr = addr; | 764 | data.addr = addr; |
| 915 | 765 | ||
| 916 | if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { | 766 | if (smp_call_function(ipi_flush_tlb_page, &data, 1)) { |
| 917 | printk(KERN_CRIT "flush_tlb_page: timed out\n"); | 767 | printk(KERN_CRIT "flush_tlb_page: timed out\n"); |
| 918 | } | 768 | } |
| 919 | 769 | ||
| @@ -965,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | |||
| 965 | } | 815 | } |
| 966 | } | 816 | } |
| 967 | 817 | ||
| 968 | if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { | 818 | if (smp_call_function(ipi_flush_icache_page, mm, 1)) { |
| 969 | printk(KERN_CRIT "flush_icache_page: timed out\n"); | 819 | printk(KERN_CRIT "flush_icache_page: timed out\n"); |
| 970 | } | 820 | } |
| 971 | 821 | ||
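Every architecture converted in this merge follows the pattern visible in the alpha smp.c diff above: the private call_data bookkeeping goes away, the IPI handler dispatches to the generic interrupt handlers, and two small hooks let kernel/smp.c raise the IPIs. A condensed sketch of that glue for a hypothetical port; `send_ipi_message()` and the IPI enum stand in for whatever mechanism the port already provides.

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

/* Assumed to exist in the port already: raise an IPI on every CPU in mask. */
extern void send_ipi_message(cpumask_t mask, enum ipi_message_type op);

/* Hooks the generic smp_call_function*() code uses to kick remote CPUs. */
void arch_send_call_function_ipi(cpumask_t mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

/* In the port's IPI handler, dispatch to the generic queues. */
static void handle_one_ipi(enum ipi_message_type op)
{
        switch (op) {
        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;
        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        default:
                break;
        }
}
```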
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 9fc0eeb4f0ab..7c3d5ec6ec67 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
| @@ -65,7 +65,7 @@ op_axp_setup(void) | |||
| 65 | model->reg_setup(®, ctr, &sys); | 65 | model->reg_setup(®, ctr, &sys); |
| 66 | 66 | ||
| 67 | /* Configure the registers on all cpus. */ | 67 | /* Configure the registers on all cpus. */ |
| 68 | (void)smp_call_function(model->cpu_setup, ®, 0, 1); | 68 | (void)smp_call_function(model->cpu_setup, ®, 1); |
| 69 | model->cpu_setup(®); | 69 | model->cpu_setup(®); |
| 70 | return 0; | 70 | return 0; |
| 71 | } | 71 | } |
| @@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy) | |||
| 86 | static int | 86 | static int |
| 87 | op_axp_start(void) | 87 | op_axp_start(void) |
| 88 | { | 88 | { |
| 89 | (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1); | 89 | (void)smp_call_function(op_axp_cpu_start, NULL, 1); |
| 90 | op_axp_cpu_start(NULL); | 90 | op_axp_cpu_start(NULL); |
| 91 | return 0; | 91 | return 0; |
| 92 | } | 92 | } |
| @@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy) | |||
| 101 | static void | 101 | static void |
| 102 | op_axp_stop(void) | 102 | op_axp_stop(void) |
| 103 | { | 103 | { |
| 104 | (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1); | 104 | (void)smp_call_function(op_axp_cpu_stop, NULL, 1); |
| 105 | op_axp_cpu_stop(NULL); | 105 | op_axp_cpu_stop(NULL); |
| 106 | } | 106 | } |
| 107 | 107 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 258f1369fb0c..c7ad324ddf2c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
| @@ -701,6 +701,7 @@ source "kernel/time/Kconfig" | |||
| 701 | config SMP | 701 | config SMP |
| 702 | bool "Symmetric Multi-Processing (EXPERIMENTAL)" | 702 | bool "Symmetric Multi-Processing (EXPERIMENTAL)" |
| 703 | depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP) | 703 | depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP) |
| 704 | select USE_GENERIC_SMP_HELPERS | ||
| 704 | help | 705 | help |
| 705 | This enables support for systems with more than one CPU. If you have | 706 | This enables support for systems with more than one CPU. If you have |
| 706 | a system with only one CPU, like most personal computers, say N. If | 707 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1de334c..5a7c09564d13 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
| @@ -68,20 +68,10 @@ enum ipi_msg_type { | |||
| 68 | IPI_TIMER, | 68 | IPI_TIMER, |
| 69 | IPI_RESCHEDULE, | 69 | IPI_RESCHEDULE, |
| 70 | IPI_CALL_FUNC, | 70 | IPI_CALL_FUNC, |
| 71 | IPI_CALL_FUNC_SINGLE, | ||
| 71 | IPI_CPU_STOP, | 72 | IPI_CPU_STOP, |
| 72 | }; | 73 | }; |
| 73 | 74 | ||
| 74 | struct smp_call_struct { | ||
| 75 | void (*func)(void *info); | ||
| 76 | void *info; | ||
| 77 | int wait; | ||
| 78 | cpumask_t pending; | ||
| 79 | cpumask_t unfinished; | ||
| 80 | }; | ||
| 81 | |||
| 82 | static struct smp_call_struct * volatile smp_call_function_data; | ||
| 83 | static DEFINE_SPINLOCK(smp_call_function_lock); | ||
| 84 | |||
| 85 | int __cpuinit __cpu_up(unsigned int cpu) | 75 | int __cpuinit __cpu_up(unsigned int cpu) |
| 86 | { | 76 | { |
| 87 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); | 77 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
| @@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) | |||
| 366 | local_irq_restore(flags); | 356 | local_irq_restore(flags); |
| 367 | } | 357 | } |
| 368 | 358 | ||
| 369 | /* | 359 | void arch_send_call_function_ipi(cpumask_t mask) |
| 370 | * You must not call this function with disabled interrupts, from a | ||
| 371 | * hardware interrupt handler, nor from a bottom half handler. | ||
| 372 | */ | ||
| 373 | static int smp_call_function_on_cpu(void (*func)(void *info), void *info, | ||
| 374 | int retry, int wait, cpumask_t callmap) | ||
| 375 | { | ||
| 376 | struct smp_call_struct data; | ||
| 377 | unsigned long timeout; | ||
| 378 | int ret = 0; | ||
| 379 | |||
| 380 | data.func = func; | ||
| 381 | data.info = info; | ||
| 382 | data.wait = wait; | ||
| 383 | |||
| 384 | cpu_clear(smp_processor_id(), callmap); | ||
| 385 | if (cpus_empty(callmap)) | ||
| 386 | goto out; | ||
| 387 | |||
| 388 | data.pending = callmap; | ||
| 389 | if (wait) | ||
| 390 | data.unfinished = callmap; | ||
| 391 | |||
| 392 | /* | ||
| 393 | * try to get the mutex on smp_call_function_data | ||
| 394 | */ | ||
| 395 | spin_lock(&smp_call_function_lock); | ||
| 396 | smp_call_function_data = &data; | ||
| 397 | |||
| 398 | send_ipi_message(callmap, IPI_CALL_FUNC); | ||
| 399 | |||
| 400 | timeout = jiffies + HZ; | ||
| 401 | while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) | ||
| 402 | barrier(); | ||
| 403 | |||
| 404 | /* | ||
| 405 | * did we time out? | ||
| 406 | */ | ||
| 407 | if (!cpus_empty(data.pending)) { | ||
| 408 | /* | ||
| 409 | * this may be causing our panic - report it | ||
| 410 | */ | ||
| 411 | printk(KERN_CRIT | ||
| 412 | "CPU%u: smp_call_function timeout for %p(%p)\n" | ||
| 413 | " callmap %lx pending %lx, %swait\n", | ||
| 414 | smp_processor_id(), func, info, *cpus_addr(callmap), | ||
| 415 | *cpus_addr(data.pending), wait ? "" : "no "); | ||
| 416 | |||
| 417 | /* | ||
| 418 | * TRACE | ||
| 419 | */ | ||
| 420 | timeout = jiffies + (5 * HZ); | ||
| 421 | while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) | ||
| 422 | barrier(); | ||
| 423 | |||
| 424 | if (cpus_empty(data.pending)) | ||
| 425 | printk(KERN_CRIT " RESOLVED\n"); | ||
| 426 | else | ||
| 427 | printk(KERN_CRIT " STILL STUCK\n"); | ||
| 428 | } | ||
| 429 | |||
| 430 | /* | ||
| 431 | * whatever happened, we're done with the data, so release it | ||
| 432 | */ | ||
| 433 | smp_call_function_data = NULL; | ||
| 434 | spin_unlock(&smp_call_function_lock); | ||
| 435 | |||
| 436 | if (!cpus_empty(data.pending)) { | ||
| 437 | ret = -ETIMEDOUT; | ||
| 438 | goto out; | ||
| 439 | } | ||
| 440 | |||
| 441 | if (wait) | ||
| 442 | while (!cpus_empty(data.unfinished)) | ||
| 443 | barrier(); | ||
| 444 | out: | ||
| 445 | |||
| 446 | return 0; | ||
| 447 | } | ||
| 448 | |||
| 449 | int smp_call_function(void (*func)(void *info), void *info, int retry, | ||
| 450 | int wait) | ||
| 451 | { | 360 | { |
| 452 | return smp_call_function_on_cpu(func, info, retry, wait, | 361 | send_ipi_message(mask, IPI_CALL_FUNC); |
| 453 | cpu_online_map); | ||
| 454 | } | 362 | } |
| 455 | EXPORT_SYMBOL_GPL(smp_call_function); | ||
| 456 | 363 | ||
| 457 | int smp_call_function_single(int cpu, void (*func)(void *info), void *info, | 364 | void arch_send_call_function_single_ipi(int cpu) |
| 458 | int retry, int wait) | ||
| 459 | { | 365 | { |
| 460 | /* prevent preemption and reschedule on another processor */ | 366 | send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); |
| 461 | int current_cpu = get_cpu(); | ||
| 462 | int ret = 0; | ||
| 463 | |||
| 464 | if (cpu == current_cpu) { | ||
| 465 | local_irq_disable(); | ||
| 466 | func(info); | ||
| 467 | local_irq_enable(); | ||
| 468 | } else | ||
| 469 | ret = smp_call_function_on_cpu(func, info, retry, wait, | ||
| 470 | cpumask_of_cpu(cpu)); | ||
| 471 | |||
| 472 | put_cpu(); | ||
| 473 | |||
| 474 | return ret; | ||
| 475 | } | 367 | } |
| 476 | EXPORT_SYMBOL_GPL(smp_call_function_single); | ||
| 477 | 368 | ||
| 478 | void show_ipi_list(struct seq_file *p) | 369 | void show_ipi_list(struct seq_file *p) |
| 479 | { | 370 | { |
| @@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs) | |||
| 521 | } | 412 | } |
| 522 | #endif | 413 | #endif |
| 523 | 414 | ||
| 524 | /* | ||
| 525 | * ipi_call_function - handle IPI from smp_call_function() | ||
| 526 | * | ||
| 527 | * Note that we copy data out of the cross-call structure and then | ||
| 528 | * let the caller know that we're here and have done with their data | ||
| 529 | */ | ||
| 530 | static void ipi_call_function(unsigned int cpu) | ||
| 531 | { | ||
| 532 | struct smp_call_struct *data = smp_call_function_data; | ||
| 533 | void (*func)(void *info) = data->func; | ||
| 534 | void *info = data->info; | ||
| 535 | int wait = data->wait; | ||
| 536 | |||
| 537 | cpu_clear(cpu, data->pending); | ||
| 538 | |||
| 539 | func(info); | ||
| 540 | |||
| 541 | if (wait) | ||
| 542 | cpu_clear(cpu, data->unfinished); | ||
| 543 | } | ||
| 544 | |||
| 545 | static DEFINE_SPINLOCK(stop_lock); | 415 | static DEFINE_SPINLOCK(stop_lock); |
| 546 | 416 | ||
| 547 | /* | 417 | /* |
| @@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs) | |||
| 611 | break; | 481 | break; |
| 612 | 482 | ||
| 613 | case IPI_CALL_FUNC: | 483 | case IPI_CALL_FUNC: |
| 614 | ipi_call_function(cpu); | 484 | generic_smp_call_function_interrupt(); |
| 485 | break; | ||
| 486 | |||
| 487 | case IPI_CALL_FUNC_SINGLE: | ||
| 488 | generic_smp_call_function_single_interrupt(); | ||
| 615 | break; | 489 | break; |
| 616 | 490 | ||
| 617 | case IPI_CPU_STOP: | 491 | case IPI_CPU_STOP: |
| @@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier) | |||
| 662 | } | 536 | } |
| 663 | 537 | ||
| 664 | static int | 538 | static int |
| 665 | on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait, | 539 | on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) |
| 666 | cpumask_t mask) | ||
| 667 | { | 540 | { |
| 668 | int ret = 0; | 541 | int ret = 0; |
| 669 | 542 | ||
| 670 | preempt_disable(); | 543 | preempt_disable(); |
| 671 | 544 | ||
| 672 | ret = smp_call_function_on_cpu(func, info, retry, wait, mask); | 545 | ret = smp_call_function_mask(mask, func, info, wait); |
| 673 | if (cpu_isset(smp_processor_id(), mask)) | 546 | if (cpu_isset(smp_processor_id(), mask)) |
| 674 | func(info); | 547 | func(info); |
| 675 | 548 | ||
| @@ -731,14 +604,14 @@ static inline void ipi_flush_tlb_kernel_range(void *arg) | |||
| 731 | 604 | ||
| 732 | void flush_tlb_all(void) | 605 | void flush_tlb_all(void) |
| 733 | { | 606 | { |
| 734 | on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1); | 607 | on_each_cpu(ipi_flush_tlb_all, NULL, 1); |
| 735 | } | 608 | } |
| 736 | 609 | ||
| 737 | void flush_tlb_mm(struct mm_struct *mm) | 610 | void flush_tlb_mm(struct mm_struct *mm) |
| 738 | { | 611 | { |
| 739 | cpumask_t mask = mm->cpu_vm_mask; | 612 | cpumask_t mask = mm->cpu_vm_mask; |
| 740 | 613 | ||
| 741 | on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask); | 614 | on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); |
| 742 | } | 615 | } |
| 743 | 616 | ||
| 744 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | 617 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) |
| @@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |||
| 749 | ta.ta_vma = vma; | 622 | ta.ta_vma = vma; |
| 750 | ta.ta_start = uaddr; | 623 | ta.ta_start = uaddr; |
| 751 | 624 | ||
| 752 | on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask); | 625 | on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); |
| 753 | } | 626 | } |
| 754 | 627 | ||
| 755 | void flush_tlb_kernel_page(unsigned long kaddr) | 628 | void flush_tlb_kernel_page(unsigned long kaddr) |
| @@ -758,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) | |||
| 758 | 631 | ||
| 759 | ta.ta_start = kaddr; | 632 | ta.ta_start = kaddr; |
| 760 | 633 | ||
| 761 | on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1); | 634 | on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); |
| 762 | } | 635 | } |
| 763 | 636 | ||
| 764 | void flush_tlb_range(struct vm_area_struct *vma, | 637 | void flush_tlb_range(struct vm_area_struct *vma, |
| @@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma, | |||
| 771 | ta.ta_start = start; | 644 | ta.ta_start = start; |
| 772 | ta.ta_end = end; | 645 | ta.ta_end = end; |
| 773 | 646 | ||
| 774 | on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask); | 647 | on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); |
| 775 | } | 648 | } |
| 776 | 649 | ||
| 777 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 650 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
| @@ -781,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 781 | ta.ta_start = start; | 654 | ta.ta_start = start; |
| 782 | ta.ta_end = end; | 655 | ta.ta_end = end; |
| 783 | 656 | ||
| 784 | on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1); | 657 | on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); |
| 785 | } | 658 | } |
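With the private `smp_call_function_on_cpu()` removed, ARM rebuilds its local `on_each_cpu_mask()` helper on the generic `smp_call_function_mask()`, as the hunk above shows. A sketch of how a TLB-flush style caller can use that primitive directly; the function names here are illustrative, and `local_flush_tlb_mm()` is assumed to be the port's per-CPU flush.

```c
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>       /* assumed: provides local_flush_tlb_mm() */

static void ipi_flush_tlb_mm(void *arg)
{
        local_flush_tlb_mm((struct mm_struct *)arg);
}

/* Hypothetical caller: flush an mm on every CPU that has ever run it. */
void remote_flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t mask = mm->cpu_vm_mask;

        preempt_disable();
        /* The generic code strips the local CPU from the mask itself. */
        smp_call_function_mask(mask, ipi_flush_tlb_mm, mm, 1);
        if (cpu_isset(smp_processor_id(), mask))
                ipi_flush_tlb_mm(mm);
        preempt_enable();
}
```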
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 74fae6045650..4458705021e0 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
| @@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void)) | |||
| 201 | data.ret = 0; | 201 | data.ret = 0; |
| 202 | 202 | ||
| 203 | preempt_disable(); | 203 | preempt_disable(); |
| 204 | smp_call_function(em_func, &data, 1, 1); | 204 | smp_call_function(em_func, &data, 1); |
| 205 | em_func(&data); | 205 | em_func(&data); |
| 206 | preempt_enable(); | 206 | preempt_enable(); |
| 207 | 207 | ||
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 32455c633f1c..c0d2c9bb952b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
| @@ -352,7 +352,7 @@ static int __init vfp_init(void) | |||
| 352 | else if (vfpsid & FPSID_NODOUBLE) { | 352 | else if (vfpsid & FPSID_NODOUBLE) { |
| 353 | printk("no double precision support\n"); | 353 | printk("no double precision support\n"); |
| 354 | } else { | 354 | } else { |
| 355 | smp_call_function(vfp_enable, NULL, 1, 1); | 355 | smp_call_function(vfp_enable, NULL, 1); |
| 356 | 356 | ||
| 357 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ | 357 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ |
| 358 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", | 358 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", |
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index a9c3334e46c9..952a24b2f5a9 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
| @@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy) | |||
| 194 | /* Other calls */ | 194 | /* Other calls */ |
| 195 | void smp_send_stop(void) | 195 | void smp_send_stop(void) |
| 196 | { | 196 | { |
| 197 | smp_call_function(stop_this_cpu, NULL, 1, 0); | 197 | smp_call_function(stop_this_cpu, NULL, 0); |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | int setup_profiling_timer(unsigned int multiplier) | 200 | int setup_profiling_timer(unsigned int multiplier) |
| @@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask) | |||
| 316 | * You must not call this function with disabled interrupts or from a | 316 | * You must not call this function with disabled interrupts or from a |
| 317 | * hardware interrupt handler or from a bottom half handler. | 317 | * hardware interrupt handler or from a bottom half handler. |
| 318 | */ | 318 | */ |
| 319 | int smp_call_function(void (*func)(void *info), void *info, | 319 | int smp_call_function(void (*func)(void *info), void *info, int wait) |
| 320 | int nonatomic, int wait) | ||
| 321 | { | 320 | { |
| 322 | cpumask_t cpu_mask = CPU_MASK_ALL; | 321 | cpumask_t cpu_mask = CPU_MASK_ALL; |
| 323 | struct call_data_struct data; | 322 | struct call_data_struct data; |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5b..18bcc10903b4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
| @@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING | |||
| 303 | 303 | ||
| 304 | config SMP | 304 | config SMP |
| 305 | bool "Symmetric multi-processing support" | 305 | bool "Symmetric multi-processing support" |
| 306 | select USE_GENERIC_SMP_HELPERS | ||
| 306 | help | 307 | help |
| 307 | This enables support for systems with more than one CPU. If you have | 308 | This enables support for systems with more than one CPU. If you have |
| 308 | a system with only one CPU, say N. If you have a system with more | 309 | a system with only one CPU, say N. If you have a system with more |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
| @@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy) | |||
| 707 | static void | 707 | static void |
| 708 | ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) | 708 | ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) |
| 709 | { | 709 | { |
| 710 | on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); | 710 | on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0); |
| 711 | } | 711 | } |
| 712 | 712 | ||
| 713 | /* | 713 | /* |
| @@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) | |||
| 719 | static void | 719 | static void |
| 720 | ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) | 720 | ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) |
| 721 | { | 721 | { |
| 722 | on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); | 722 | on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0); |
| 723 | } | 723 | } |
| 724 | 724 | ||
| 725 | /* | 725 | /* |
| @@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, | |||
| 1881 | case CPU_ONLINE: | 1881 | case CPU_ONLINE: |
| 1882 | case CPU_ONLINE_FROZEN: | 1882 | case CPU_ONLINE_FROZEN: |
| 1883 | smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, | 1883 | smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, |
| 1884 | NULL, 1, 0); | 1884 | NULL, 0); |
| 1885 | break; | 1885 | break; |
| 1886 | } | 1886 | } |
| 1887 | return NOTIFY_OK; | 1887 | return NOTIFY_OK; |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
| @@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) | |||
| 921 | 921 | ||
| 922 | 922 | ||
| 923 | /* will send IPI to other CPU and wait for completion of remote call */ | 923 | /* will send IPI to other CPU and wait for completion of remote call */ |
| 924 | if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) { | 924 | if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) { |
| 925 | printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " | 925 | printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " |
| 926 | "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); | 926 | "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); |
| 927 | return 0; | 927 | return 0; |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b0104..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
| @@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) | |||
| 1820 | int ret; | 1820 | int ret; |
| 1821 | 1821 | ||
| 1822 | DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); | 1822 | DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); |
| 1823 | ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); | 1823 | ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); |
| 1824 | DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); | 1824 | DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); |
| 1825 | } | 1825 | } |
| 1826 | #endif /* CONFIG_SMP */ | 1826 | #endif /* CONFIG_SMP */ |
| @@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |||
| 6508 | } | 6508 | } |
| 6509 | 6509 | ||
| 6510 | /* save the current system wide pmu states */ | 6510 | /* save the current system wide pmu states */ |
| 6511 | ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1); | 6511 | ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); |
| 6512 | if (ret) { | 6512 | if (ret) { |
| 6513 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | 6513 | DPRINT(("on_each_cpu() failed: %d\n", ret)); |
| 6514 | goto cleanup_reserve; | 6514 | goto cleanup_reserve; |
| @@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |||
| 6553 | 6553 | ||
| 6554 | pfm_alt_intr_handler = NULL; | 6554 | pfm_alt_intr_handler = NULL; |
| 6555 | 6555 | ||
| 6556 | ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1); | 6556 | ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); |
| 6557 | if (ret) { | 6557 | if (ret) { |
| 6558 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | 6558 | DPRINT(("on_each_cpu() failed: %d\n", ret)); |
| 6559 | } | 6559 | } |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..fabaf08d9a69 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
| @@ -286,7 +286,7 @@ void cpu_idle_wait(void) | |||
| 286 | { | 286 | { |
| 287 | smp_mb(); | 287 | smp_mb(); |
| 288 | /* kick all the CPUs so that they exit out of pm_idle */ | 288 | /* kick all the CPUs so that they exit out of pm_idle */ |
| 289 | smp_call_function(do_nothing, NULL, 0, 1); | 289 | smp_call_function(do_nothing, NULL, 1); |
| 290 | } | 290 | } |
| 291 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 291 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
| 292 | 292 | ||
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
| @@ -60,25 +60,9 @@ static struct local_tlb_flush_counts { | |||
| 60 | 60 | ||
| 61 | static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; | 61 | static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; |
| 62 | 62 | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Structure and data for smp_call_function(). This is designed to minimise static memory | ||
| 66 | * requirements. It also looks cleaner. | ||
| 67 | */ | ||
| 68 | static __cacheline_aligned DEFINE_SPINLOCK(call_lock); | ||
| 69 | |||
| 70 | struct call_data_struct { | ||
| 71 | void (*func) (void *info); | ||
| 72 | void *info; | ||
| 73 | long wait; | ||
| 74 | atomic_t started; | ||
| 75 | atomic_t finished; | ||
| 76 | }; | ||
| 77 | |||
| 78 | static volatile struct call_data_struct *call_data; | ||
| 79 | |||
| 80 | #define IPI_CALL_FUNC 0 | 63 | #define IPI_CALL_FUNC 0 |
| 81 | #define IPI_CPU_STOP 1 | 64 | #define IPI_CPU_STOP 1 |
| 65 | #define IPI_CALL_FUNC_SINGLE 2 | ||
| 82 | #define IPI_KDUMP_CPU_STOP 3 | 66 | #define IPI_KDUMP_CPU_STOP 3 |
| 83 | 67 | ||
| 84 | /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ | 68 | /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ |
| @@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation); | |||
| 86 | 70 | ||
| 87 | extern void cpu_halt (void); | 71 | extern void cpu_halt (void); |
| 88 | 72 | ||
| 89 | void | ||
| 90 | lock_ipi_calllock(void) | ||
| 91 | { | ||
| 92 | spin_lock_irq(&call_lock); | ||
| 93 | } | ||
| 94 | |||
| 95 | void | ||
| 96 | unlock_ipi_calllock(void) | ||
| 97 | { | ||
| 98 | spin_unlock_irq(&call_lock); | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline void | ||
| 102 | handle_call_data(void) | ||
| 103 | { | ||
| 104 | struct call_data_struct *data; | ||
| 105 | void (*func)(void *info); | ||
| 106 | void *info; | ||
| 107 | int wait; | ||
| 108 | |||
| 109 | /* release the 'pointer lock' */ | ||
| 110 | data = (struct call_data_struct *)call_data; | ||
| 111 | func = data->func; | ||
| 112 | info = data->info; | ||
| 113 | wait = data->wait; | ||
| 114 | |||
| 115 | mb(); | ||
| 116 | atomic_inc(&data->started); | ||
| 117 | /* At this point the structure may be gone unless wait is true. */ | ||
| 118 | (*func)(info); | ||
| 119 | |||
| 120 | /* Notify the sending CPU that the task is done. */ | ||
| 121 | mb(); | ||
| 122 | if (wait) | ||
| 123 | atomic_inc(&data->finished); | ||
| 124 | } | ||
| 125 | |||
| 126 | static void | 73 | static void |
| 127 | stop_this_cpu(void) | 74 | stop_this_cpu(void) |
| 128 | { | 75 | { |
| @@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id) | |||
| 163 | ops &= ~(1 << which); | 110 | ops &= ~(1 << which); |
| 164 | 111 | ||
| 165 | switch (which) { | 112 | switch (which) { |
| 166 | case IPI_CALL_FUNC: | ||
| 167 | handle_call_data(); | ||
| 168 | break; | ||
| 169 | |||
| 170 | case IPI_CPU_STOP: | 113 | case IPI_CPU_STOP: |
| 171 | stop_this_cpu(); | 114 | stop_this_cpu(); |
| 172 | break; | 115 | break; |
| 116 | case IPI_CALL_FUNC: | ||
| 117 | generic_smp_call_function_interrupt(); | ||
| 118 | break; | ||
| 119 | case IPI_CALL_FUNC_SINGLE: | ||
| 120 | generic_smp_call_function_single_interrupt(); | ||
| 121 | break; | ||
| 173 | #ifdef CONFIG_KEXEC | 122 | #ifdef CONFIG_KEXEC |
| 174 | case IPI_KDUMP_CPU_STOP: | 123 | case IPI_KDUMP_CPU_STOP: |
| 175 | unw_init_running(kdump_cpu_freeze, NULL); | 124 | unw_init_running(kdump_cpu_freeze, NULL); |
| @@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id) | |||
| 187 | return IRQ_HANDLED; | 136 | return IRQ_HANDLED; |
| 188 | } | 137 | } |
| 189 | 138 | ||
| 139 | |||
| 140 | |||
| 190 | /* | 141 | /* |
| 191 | * Called with preemption disabled. | 142 | * Called with preemption disabled. |
| 192 | */ | 143 | */ |
| @@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask) | |||
| 334 | void | 285 | void |
| 335 | smp_flush_tlb_all (void) | 286 | smp_flush_tlb_all (void) |
| 336 | { | 287 | { |
| 337 | on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1); | 288 | on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1); |
| 338 | } | 289 | } |
| 339 | 290 | ||
| 340 | void | 291 | void |
| @@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm) | |||
| 357 | * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is | 308 | * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is |
| 358 | * rather trivial. | 309 | * rather trivial. |
| 359 | */ | 310 | */ |
| 360 | on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1); | 311 | on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1); |
| 361 | } | 312 | } |
| 362 | 313 | ||
| 363 | /* | 314 | void arch_send_call_function_single_ipi(int cpu) |
| 364 | * Run a function on a specific CPU | ||
| 365 | * <func> The function to run. This must be fast and non-blocking. | ||
| 366 | * <info> An arbitrary pointer to pass to the function. | ||
| 367 | * <nonatomic> Currently unused. | ||
| 368 | * <wait> If true, wait until function has completed on other CPUs. | ||
| 369 | * [RETURNS] 0 on success, else a negative status code. | ||
| 370 | * | ||
| 371 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
| 372 | * or is or has executed. | ||
| 373 | */ | ||
| 374 | |||
| 375 | int | ||
| 376 | smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic, | ||
| 377 | int wait) | ||
| 378 | { | ||
| 379 | struct call_data_struct data; | ||
| 380 | int cpus = 1; | ||
| 381 | int me = get_cpu(); /* prevent preemption and reschedule on another processor */ | ||
| 382 | |||
| 383 | if (cpuid == me) { | ||
| 384 | local_irq_disable(); | ||
| 385 | func(info); | ||
| 386 | local_irq_enable(); | ||
| 387 | put_cpu(); | ||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | data.func = func; | ||
| 392 | data.info = info; | ||
| 393 | atomic_set(&data.started, 0); | ||
| 394 | data.wait = wait; | ||
| 395 | if (wait) | ||
| 396 | atomic_set(&data.finished, 0); | ||
| 397 | |||
| 398 | spin_lock_bh(&call_lock); | ||
| 399 | |||
| 400 | call_data = &data; | ||
| 401 | mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ | ||
| 402 | send_IPI_single(cpuid, IPI_CALL_FUNC); | ||
| 403 | |||
| 404 | /* Wait for response */ | ||
| 405 | while (atomic_read(&data.started) != cpus) | ||
| 406 | cpu_relax(); | ||
| 407 | |||
| 408 | if (wait) | ||
| 409 | while (atomic_read(&data.finished) != cpus) | ||
| 410 | cpu_relax(); | ||
| 411 | call_data = NULL; | ||
| 412 | |||
| 413 | spin_unlock_bh(&call_lock); | ||
| 414 | put_cpu(); | ||
| 415 | return 0; | ||
| 416 | } | ||
| 417 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 418 | |||
| 419 | /** | ||
| 420 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
| 421 | * <mask> The set of cpus to run on. Must not include the current cpu. | ||
| 422 | * <func> The function to run. This must be fast and non-blocking. | ||
| 423 | * <info> An arbitrary pointer to pass to the function. | ||
| 424 | * <wait> If true, wait (atomically) until function | ||
| 425 | * has completed on other CPUs. | ||
| 426 | * | ||
| 427 | * Returns 0 on success, else a negative status code. | ||
| 428 | * | ||
| 429 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 430 | * it returns just before the target cpu calls @func. | ||
| 431 | * | ||
| 432 | * You must not call this function with disabled interrupts or from a | ||
| 433 | * hardware interrupt handler or from a bottom half handler. | ||
| 434 | */ | ||
| 435 | int smp_call_function_mask(cpumask_t mask, | ||
| 436 | void (*func)(void *), void *info, | ||
| 437 | int wait) | ||
| 438 | { | 315 | { |
| 439 | struct call_data_struct data; | 316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); |
| 440 | cpumask_t allbutself; | ||
| 441 | int cpus; | ||
| 442 | |||
| 443 | spin_lock(&call_lock); | ||
| 444 | allbutself = cpu_online_map; | ||
| 445 | cpu_clear(smp_processor_id(), allbutself); | ||
| 446 | |||
| 447 | cpus_and(mask, mask, allbutself); | ||
| 448 | cpus = cpus_weight(mask); | ||
| 449 | if (!cpus) { | ||
| 450 | spin_unlock(&call_lock); | ||
| 451 | return 0; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* Can deadlock when called with interrupts disabled */ | ||
| 455 | WARN_ON(irqs_disabled()); | ||
| 456 | |||
| 457 | data.func = func; | ||
| 458 | data.info = info; | ||
| 459 | atomic_set(&data.started, 0); | ||
| 460 | data.wait = wait; | ||
| 461 | if (wait) | ||
| 462 | atomic_set(&data.finished, 0); | ||
| 463 | |||
| 464 | call_data = &data; | ||
| 465 | mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/ | ||
| 466 | |||
| 467 | /* Send a message to other CPUs */ | ||
| 468 | if (cpus_equal(mask, allbutself)) | ||
| 469 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
| 470 | else | ||
| 471 | send_IPI_mask(mask, IPI_CALL_FUNC); | ||
| 472 | |||
| 473 | /* Wait for response */ | ||
| 474 | while (atomic_read(&data.started) != cpus) | ||
| 475 | cpu_relax(); | ||
| 476 | |||
| 477 | if (wait) | ||
| 478 | while (atomic_read(&data.finished) != cpus) | ||
| 479 | cpu_relax(); | ||
| 480 | call_data = NULL; | ||
| 481 | |||
| 482 | spin_unlock(&call_lock); | ||
| 483 | return 0; | ||
| 484 | |||
| 485 | } | 317 | } |
| 486 | EXPORT_SYMBOL(smp_call_function_mask); | ||
| 487 | 318 | ||
| 488 | /* | 319 | void arch_send_call_function_ipi(cpumask_t mask) |
| 489 | * this function sends a 'generic call function' IPI to all other CPUs | ||
| 490 | * in the system. | ||
| 491 | */ | ||
| 492 | |||
| 493 | /* | ||
| 494 | * [SUMMARY] Run a function on all other CPUs. | ||
| 495 | * <func> The function to run. This must be fast and non-blocking. | ||
| 496 | * <info> An arbitrary pointer to pass to the function. | ||
| 497 | * <nonatomic> currently unused. | ||
| 498 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
| 499 | * [RETURNS] 0 on success, else a negative status code. | ||
| 500 | * | ||
| 501 | * Does not return until remote CPUs are nearly ready to execute <func> or are or have | ||
| 502 | * executed. | ||
| 503 | * | ||
| 504 | * You must not call this function with disabled interrupts or from a | ||
| 505 | * hardware interrupt handler or from a bottom half handler. | ||
| 506 | */ | ||
| 507 | int | ||
| 508 | smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) | ||
| 509 | { | 320 | { |
| 510 | struct call_data_struct data; | 321 | send_IPI_mask(mask, IPI_CALL_FUNC); |
| 511 | int cpus; | ||
| 512 | |||
| 513 | spin_lock(&call_lock); | ||
| 514 | cpus = num_online_cpus() - 1; | ||
| 515 | if (!cpus) { | ||
| 516 | spin_unlock(&call_lock); | ||
| 517 | return 0; | ||
| 518 | } | ||
| 519 | |||
| 520 | /* Can deadlock when called with interrupts disabled */ | ||
| 521 | WARN_ON(irqs_disabled()); | ||
| 522 | |||
| 523 | data.func = func; | ||
| 524 | data.info = info; | ||
| 525 | atomic_set(&data.started, 0); | ||
| 526 | data.wait = wait; | ||
| 527 | if (wait) | ||
| 528 | atomic_set(&data.finished, 0); | ||
| 529 | |||
| 530 | call_data = &data; | ||
| 531 | mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ | ||
| 532 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
| 533 | |||
| 534 | /* Wait for response */ | ||
| 535 | while (atomic_read(&data.started) != cpus) | ||
| 536 | cpu_relax(); | ||
| 537 | |||
| 538 | if (wait) | ||
| 539 | while (atomic_read(&data.finished) != cpus) | ||
| 540 | cpu_relax(); | ||
| 541 | call_data = NULL; | ||
| 542 | |||
| 543 | spin_unlock(&call_lock); | ||
| 544 | return 0; | ||
| 545 | } | 322 | } |
| 546 | EXPORT_SYMBOL(smp_call_function); | ||
| 547 | 323 | ||
| 548 | /* | 324 | /* |
| 549 | * this function calls the 'stop' function on all other CPUs in the system. | 325 | * this function calls the 'stop' function on all other CPUs in the system. |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d7ad42b77d41..9d1d429c6c59 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
| @@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master) | |||
| 317 | 317 | ||
| 318 | go[MASTER] = 1; | 318 | go[MASTER] = 1; |
| 319 | 319 | ||
| 320 | if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) { | 320 | if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { |
| 321 | printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); | 321 | printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); |
| 322 | return; | 322 | return; |
| 323 | } | 323 | } |
| @@ -395,14 +395,14 @@ smp_callin (void) | |||
| 395 | 395 | ||
| 396 | fix_b0_for_bsp(); | 396 | fix_b0_for_bsp(); |
| 397 | 397 | ||
| 398 | lock_ipi_calllock(); | 398 | ipi_call_lock_irq(); |
| 399 | spin_lock(&vector_lock); | 399 | spin_lock(&vector_lock); |
| 400 | /* Setup the per cpu irq handling data structures */ | 400 | /* Setup the per cpu irq handling data structures */ |
| 401 | __setup_vector_irq(cpuid); | 401 | __setup_vector_irq(cpuid); |
| 402 | cpu_set(cpuid, cpu_online_map); | 402 | cpu_set(cpuid, cpu_online_map); |
| 403 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; | 403 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; |
| 404 | spin_unlock(&vector_lock); | 404 | spin_unlock(&vector_lock); |
| 405 | unlock_ipi_calllock(); | 405 | ipi_call_unlock_irq(); |
| 406 | 406 | ||
| 407 | smp_setup_percpu_timer(); | 407 | smp_setup_percpu_timer(); |
| 408 | 408 | ||
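With the per-arch call lock gone, the ia64 secondary bring-up path above switches to the generic `ipi_call_lock_irq()`/`ipi_call_unlock_irq()` so the call-function queue stays consistent while the new CPU becomes visible in cpu_online_map. A simplified, hedged sketch of that step; the surrounding bring-up code and the helper name are illustrative.

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical step in a secondary CPU start-up path. */
static void mark_self_online(int cpu)
{
        /*
         * Hold the generic call-function lock while the CPU appears in
         * cpu_online_map, so smp_call_function() never targets a CPU
         * that is not yet ready to take the IPI.
         */
        ipi_call_lock_irq();
        cpu_set(cpu, cpu_online_map);
        ipi_call_unlock_irq();
}
```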
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
| @@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
| 123 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | 123 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); |
| 124 | if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { | 124 | if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { |
| 125 | atomic_set(&uc_pool->status, 0); | 125 | atomic_set(&uc_pool->status, 0); |
| 126 | status = smp_call_function(uncached_ipi_visibility, uc_pool, | 126 | status = smp_call_function(uncached_ipi_visibility, uc_pool, 1); |
| 127 | 0, 1); | ||
| 128 | if (status || atomic_read(&uc_pool->status)) | 127 | if (status || atomic_read(&uc_pool->status)) |
| 129 | goto failed; | 128 | goto failed; |
| 130 | } else if (status != PAL_VISIBILITY_OK) | 129 | } else if (status != PAL_VISIBILITY_OK) |
| @@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
| 146 | if (status != PAL_STATUS_SUCCESS) | 145 | if (status != PAL_STATUS_SUCCESS) |
| 147 | goto failed; | 146 | goto failed; |
| 148 | atomic_set(&uc_pool->status, 0); | 147 | atomic_set(&uc_pool->status, 0); |
| 149 | status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); | 148 | status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); |
| 150 | if (status || atomic_read(&uc_pool->status)) | 149 | if (status || atomic_read(&uc_pool->status)) |
| 151 | goto failed; | 150 | goto failed; |
| 152 | 151 | ||
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d89..636588e7e068 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
| @@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) | |||
| 629 | if (use_ipi) { | 629 | if (use_ipi) { |
| 630 | /* use an interprocessor interrupt to call SAL */ | 630 | /* use an interprocessor interrupt to call SAL */ |
| 631 | smp_call_function_single(cpu, sn_hwperf_call_sal, | 631 | smp_call_function_single(cpu, sn_hwperf_call_sal, |
| 632 | op_info, 1, 1); | 632 | op_info, 1); |
| 633 | } | 633 | } |
| 634 | else { | 634 | else { |
| 635 | /* migrate the task before calling SAL */ | 635 | /* migrate the task before calling SAL */ |
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index de153de2ea9f..a5f864c445b2 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
| @@ -296,6 +296,7 @@ config PREEMPT | |||
| 296 | 296 | ||
| 297 | config SMP | 297 | config SMP |
| 298 | bool "Symmetric multi-processing support" | 298 | bool "Symmetric multi-processing support" |
| 299 | select USE_GENERIC_SMP_HELPERS | ||
| 299 | ---help--- | 300 | ---help--- |
| 300 | This enables support for systems with more than one CPU. If you have | 301 | This enables support for systems with more than one CPU. If you have |
| 301 | a system with only one CPU, like most personal computers, say N. If | 302 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index e6709fe950ba..16bcb189a383 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
| @@ -43,9 +43,6 @@ EXPORT_SYMBOL(dcache_dummy); | |||
| 43 | #endif | 43 | #endif |
| 44 | EXPORT_SYMBOL(cpu_data); | 44 | EXPORT_SYMBOL(cpu_data); |
| 45 | 45 | ||
| 46 | /* Global SMP stuff */ | ||
| 47 | EXPORT_SYMBOL(smp_call_function); | ||
| 48 | |||
| 49 | /* TLB flushing */ | 46 | /* TLB flushing */ |
| 50 | EXPORT_SYMBOL(smp_flush_tlb_page); | 47 | EXPORT_SYMBOL(smp_flush_tlb_page); |
| 51 | #endif | 48 | #endif |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc13b015..7577f971ea4e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
| @@ -35,22 +35,6 @@ | |||
| 35 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 35 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
| 36 | 36 | ||
| 37 | /* | 37 | /* |
| 38 | * Structure and data for smp_call_function(). This is designed to minimise | ||
| 39 | * static memory requirements. It also looks cleaner. | ||
| 40 | */ | ||
| 41 | static DEFINE_SPINLOCK(call_lock); | ||
| 42 | |||
| 43 | struct call_data_struct { | ||
| 44 | void (*func) (void *info); | ||
| 45 | void *info; | ||
| 46 | atomic_t started; | ||
| 47 | atomic_t finished; | ||
| 48 | int wait; | ||
| 49 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
| 50 | |||
| 51 | static struct call_data_struct *call_data; | ||
| 52 | |||
| 53 | /* | ||
| 54 | * For flush_cache_all() | 38 | * For flush_cache_all() |
| 55 | */ | 39 | */ |
| 56 | static DEFINE_SPINLOCK(flushcache_lock); | 40 | static DEFINE_SPINLOCK(flushcache_lock); |
| @@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void); | |||
| 96 | void smp_send_stop(void); | 80 | void smp_send_stop(void); |
| 97 | static void stop_this_cpu(void *); | 81 | static void stop_this_cpu(void *); |
| 98 | 82 | ||
| 99 | int smp_call_function(void (*) (void *), void *, int, int); | ||
| 100 | void smp_call_function_interrupt(void); | ||
| 101 | |||
| 102 | void smp_send_timer(void); | 83 | void smp_send_timer(void); |
| 103 | void smp_ipi_timer_interrupt(struct pt_regs *); | 84 | void smp_ipi_timer_interrupt(struct pt_regs *); |
| 104 | void smp_local_timer_interrupt(void); | 85 | void smp_local_timer_interrupt(void); |
| @@ -231,7 +212,7 @@ void smp_flush_tlb_all(void) | |||
| 231 | local_irq_save(flags); | 212 | local_irq_save(flags); |
| 232 | __flush_tlb_all(); | 213 | __flush_tlb_all(); |
| 233 | local_irq_restore(flags); | 214 | local_irq_restore(flags); |
| 234 | smp_call_function(flush_tlb_all_ipi, NULL, 1, 1); | 215 | smp_call_function(flush_tlb_all_ipi, NULL, 1); |
| 235 | preempt_enable(); | 216 | preempt_enable(); |
| 236 | } | 217 | } |
| 237 | 218 | ||
| @@ -524,7 +505,7 @@ void smp_invalidate_interrupt(void) | |||
| 524 | *==========================================================================*/ | 505 | *==========================================================================*/ |
| 525 | void smp_send_stop(void) | 506 | void smp_send_stop(void) |
| 526 | { | 507 | { |
| 527 | smp_call_function(stop_this_cpu, NULL, 1, 0); | 508 | smp_call_function(stop_this_cpu, NULL, 0); |
| 528 | } | 509 | } |
| 529 | 510 | ||
| 530 | /*==========================================================================* | 511 | /*==========================================================================* |
| @@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy) | |||
| 565 | for ( ; ; ); | 546 | for ( ; ; ); |
| 566 | } | 547 | } |
| 567 | 548 | ||
| 568 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 549 | void arch_send_call_function_ipi(cpumask_t mask) |
| 569 | /* Call function Routines */ | ||
| 570 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
| 571 | |||
| 572 | /*==========================================================================* | ||
| 573 | * Name: smp_call_function | ||
| 574 | * | ||
| 575 | * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs | ||
| 576 | * in the system. | ||
| 577 | * | ||
| 578 | * Born on Date: 2002.02.05 | ||
| 579 | * | ||
| 580 | * Arguments: *func - The function to run. This must be fast and | ||
| 581 | * non-blocking. | ||
| 582 | * *info - An arbitrary pointer to pass to the function. | ||
| 583 | * nonatomic - currently unused. | ||
| 584 | * wait - If true, wait (atomically) until function has | ||
| 585 | * completed on other CPUs. | ||
| 586 | * | ||
| 587 | * Returns: 0 on success, else a negative status code. Does not return | ||
| 588 | * until remote CPUs are nearly ready to execute <<func>> or | ||
| 589 | * are or have executed. | ||
| 590 | * | ||
| 591 | * Cautions: You must not call this function with disabled interrupts or | ||
| 592 | * from a hardware interrupt handler, you may call it from a | ||
| 593 | * bottom half handler. | ||
| 594 | * | ||
| 595 | * Modification log: | ||
| 596 | * Date Who Description | ||
| 597 | * ---------- --- -------------------------------------------------------- | ||
| 598 | * | ||
| 599 | *==========================================================================*/ | ||
| 600 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
| 601 | int wait) | ||
| 602 | { | 550 | { |
| 603 | struct call_data_struct data; | 551 | send_IPI_mask(mask, CALL_FUNCTION_IPI, 0); |
| 604 | int cpus; | 552 | } |
| 605 | |||
| 606 | #ifdef DEBUG_SMP | ||
| 607 | unsigned long flags; | ||
| 608 | __save_flags(flags); | ||
| 609 | if (!(flags & 0x0040)) /* Interrupt Disable NONONO */ | ||
| 610 | BUG(); | ||
| 611 | #endif /* DEBUG_SMP */ | ||
| 612 | |||
| 613 | /* Holding any lock stops cpus from going down. */ | ||
| 614 | spin_lock(&call_lock); | ||
| 615 | cpus = num_online_cpus() - 1; | ||
| 616 | |||
| 617 | if (!cpus) { | ||
| 618 | spin_unlock(&call_lock); | ||
| 619 | return 0; | ||
| 620 | } | ||
| 621 | |||
| 622 | /* Can deadlock when called with interrupts disabled */ | ||
| 623 | WARN_ON(irqs_disabled()); | ||
| 624 | |||
| 625 | data.func = func; | ||
| 626 | data.info = info; | ||
| 627 | atomic_set(&data.started, 0); | ||
| 628 | data.wait = wait; | ||
| 629 | if (wait) | ||
| 630 | atomic_set(&data.finished, 0); | ||
| 631 | |||
| 632 | call_data = &data; | ||
| 633 | mb(); | ||
| 634 | |||
| 635 | /* Send a message to all other CPUs and wait for them to respond */ | ||
| 636 | send_IPI_allbutself(CALL_FUNCTION_IPI, 0); | ||
| 637 | |||
| 638 | /* Wait for response */ | ||
| 639 | while (atomic_read(&data.started) != cpus) | ||
| 640 | barrier(); | ||
| 641 | |||
| 642 | if (wait) | ||
| 643 | while (atomic_read(&data.finished) != cpus) | ||
| 644 | barrier(); | ||
| 645 | spin_unlock(&call_lock); | ||
| 646 | 553 | ||
| 647 | return 0; | 554 | void arch_send_call_function_single_ipi(int cpu) |
| 555 | { | ||
| 556 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0); | ||
| 648 | } | 557 | } |
| 649 | 558 | ||
| 650 | /*==========================================================================* | 559 | /*==========================================================================* |
| @@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | |||
| 666 | *==========================================================================*/ | 575 | *==========================================================================*/ |
| 667 | void smp_call_function_interrupt(void) | 576 | void smp_call_function_interrupt(void) |
| 668 | { | 577 | { |
| 669 | void (*func) (void *info) = call_data->func; | ||
| 670 | void *info = call_data->info; | ||
| 671 | int wait = call_data->wait; | ||
| 672 | |||
| 673 | /* | ||
| 674 | * Notify initiating CPU that I've grabbed the data and am | ||
| 675 | * about to execute the function | ||
| 676 | */ | ||
| 677 | mb(); | ||
| 678 | atomic_inc(&call_data->started); | ||
| 679 | /* | ||
| 680 | * At this point the info structure may be out of scope unless wait==1 | ||
| 681 | */ | ||
| 682 | irq_enter(); | 578 | irq_enter(); |
| 683 | (*func)(info); | 579 | generic_smp_call_function_interrupt(); |
| 684 | irq_exit(); | 580 | irq_exit(); |
| 581 | } | ||
| 685 | 582 | ||
| 686 | if (wait) { | 583 | void smp_call_function_single_interrupt(void) |
| 687 | mb(); | 584 | { |
| 688 | atomic_inc(&call_data->finished); | 585 | irq_enter(); |
| 689 | } | 586 | generic_smp_call_function_single_interrupt(); |
| 587 | irq_exit(); | ||
| 690 | } | 588 | } |
| 691 | 589 | ||
| 692 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 590 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
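The m32r conversion above shows the whole pattern an architecture needs once it selects USE_GENERIC_SMP_HELPERS: two small hooks that send the IPIs, and two interrupt handlers that hand off to the generic dispatchers in kernel/smp.c, which now own all the call_data bookkeeping the deleted code used to duplicate per architecture. The sketch below is a minimal, hypothetical glue layer in that shape; my_send_ipi_mask() and the two vector constants are stand-ins, not symbols from this patch.

```c
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>

/* Hypothetical platform primitive that raises an IPI vector on a set of CPUs. */
extern void my_send_ipi_mask(cpumask_t mask, int vector);

#define MY_CALL_FUNCTION_IPI	0	/* stand-in vector numbers */
#define MY_CALL_FUNC_SINGLE_IPI	1

/* Hooks invoked by the generic helpers in kernel/smp.c. */
void arch_send_call_function_ipi(cpumask_t mask)
{
	my_send_ipi_mask(mask, MY_CALL_FUNCTION_IPI);
}

void arch_send_call_function_single_ipi(int cpu)
{
	my_send_ipi_mask(cpumask_of_cpu(cpu), MY_CALL_FUNC_SINGLE_IPI);
}

/* Vector handlers: all queue walking and completion signalling is generic. */
void my_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void my_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}
```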
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 89ba4a0b5d51..46159a4e644b 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c | |||
| @@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void); | |||
| 40 | extern void smp_call_function_interrupt(void); | 40 | extern void smp_call_function_interrupt(void); |
| 41 | extern void smp_ipi_timer_interrupt(void); | 41 | extern void smp_ipi_timer_interrupt(void); |
| 42 | extern void smp_flush_cache_all_interrupt(void); | 42 | extern void smp_flush_cache_all_interrupt(void); |
| 43 | extern void smp_call_function_single_interrupt(void); | ||
| 43 | 44 | ||
| 44 | /* | 45 | /* |
| 45 | * for Boot AP function | 46 | * for Boot AP function |
| @@ -103,7 +104,7 @@ void set_eit_vector_entries(void) | |||
| 103 | eit_vector[186] = (unsigned long)smp_call_function_interrupt; | 104 | eit_vector[186] = (unsigned long)smp_call_function_interrupt; |
| 104 | eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; | 105 | eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; |
| 105 | eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; | 106 | eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; |
| 106 | eit_vector[189] = 0; | 107 | eit_vector[189] = (unsigned long)smp_call_function_single_interrupt; |
| 107 | eit_vector[190] = 0; | 108 | eit_vector[190] = 0; |
| 108 | eit_vector[191] = 0; | 109 | eit_vector[191] = 0; |
| 109 | #endif | 110 | #endif |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 24c5dee91768..d2be3ffca280 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
| @@ -1763,6 +1763,7 @@ config SMP | |||
| 1763 | bool "Multi-Processing support" | 1763 | bool "Multi-Processing support" |
| 1764 | depends on SYS_SUPPORTS_SMP | 1764 | depends on SYS_SUPPORTS_SMP |
| 1765 | select IRQ_PER_CPU | 1765 | select IRQ_PER_CPU |
| 1766 | select USE_GENERIC_SMP_HELPERS | ||
| 1766 | help | 1767 | help |
| 1767 | This enables support for systems with more than one CPU. If you have | 1768 | This enables support for systems with more than one CPU. If you have |
| 1768 | a system with only one CPU, like most personal computers, say N. If | 1769 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index ed9febe63d72..b47e4615ec12 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
| @@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args) | |||
| 49 | 49 | ||
| 50 | static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) | 50 | static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) |
| 51 | { | 51 | { |
| 52 | on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1); | 52 | on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); |
| 53 | 53 | ||
| 54 | return 0; | 54 | return 0; |
| 55 | } | 55 | } |
| @@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args) | |||
| 66 | 66 | ||
| 67 | static void rm9k_perfcounter_irq_shutdown(unsigned int irq) | 67 | static void rm9k_perfcounter_irq_shutdown(unsigned int irq) |
| 68 | { | 68 | { |
| 69 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); | 69 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static struct irq_chip rm9k_irq_controller = { | 72 | static struct irq_chip rm9k_irq_controller = { |
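The rm9000 hunk above is typical of the long tail of this merge: every caller loses one argument. The old third parameter of smp_call_function(), smp_call_function_single() and on_each_cpu() (named retry or nonatomic, and in most implementations documented as unused) is gone, leaving only the function, its argument and the wait flag. A hedged before/after sketch with a made-up local_tick_setup() callback:

```c
#include <linux/smp.h>

/* Hypothetical per-CPU callback: must be fast and must not block or sleep. */
static void local_tick_setup(void *info)
{
	/* configure something on the CPU this runs on */
}

static void configure_all_cpus(void)
{
	/*
	 * Before the merge:
	 *   on_each_cpu(local_tick_setup, NULL, 0, 1);
	 *   smp_call_function(local_tick_setup, NULL, 0, 1);
	 * The 0 (retry/nonatomic) argument carried no meaning and is dropped.
	 */
	on_each_cpu(local_tick_setup, NULL, 1);		/* all CPUs, wait */
	smp_call_function(local_tick_setup, NULL, 0);	/* other CPUs, no wait */
}
```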
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index cdf87a9dd4ba..4410f172b8ab 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
| @@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void) | |||
| 131 | cpu_idle(); | 131 | cpu_idle(); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | DEFINE_SPINLOCK(smp_call_lock); | 134 | void arch_send_call_function_ipi(cpumask_t mask) |
| 135 | |||
| 136 | struct call_data_struct *call_data; | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Run a function on all other CPUs. | ||
| 140 | * | ||
| 141 | * <mask> cpuset_t of all processors to run the function on. | ||
| 142 | * <func> The function to run. This must be fast and non-blocking. | ||
| 143 | * <info> An arbitrary pointer to pass to the function. | ||
| 144 | * <retry> If true, keep retrying until ready. | ||
| 145 | * <wait> If true, wait until function has completed on other CPUs. | ||
| 146 | * [RETURNS] 0 on success, else a negative status code. | ||
| 147 | * | ||
| 148 | * Does not return until remote CPUs are nearly ready to execute <func> | ||
| 149 | * or are or have executed. | ||
| 150 | * | ||
| 151 | * You must not call this function with disabled interrupts or from a | ||
| 152 | * hardware interrupt handler or from a bottom half handler: | ||
| 153 | * | ||
| 154 | * CPU A CPU B | ||
| 155 | * Disable interrupts | ||
| 156 | * smp_call_function() | ||
| 157 | * Take call_lock | ||
| 158 | * Send IPIs | ||
| 159 | * Wait for all cpus to acknowledge IPI | ||
| 160 | * CPU A has not responded, spin waiting | ||
| 161 | * for cpu A to respond, holding call_lock | ||
| 162 | * smp_call_function() | ||
| 163 | * Spin waiting for call_lock | ||
| 164 | * Deadlock Deadlock | ||
| 165 | */ | ||
| 166 | int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), | ||
| 167 | void *info, int retry, int wait) | ||
| 168 | { | 135 | { |
| 169 | struct call_data_struct data; | ||
| 170 | int cpu = smp_processor_id(); | ||
| 171 | int cpus; | ||
| 172 | |||
| 173 | /* | ||
| 174 | * Can die spectacularly if this CPU isn't yet marked online | ||
| 175 | */ | ||
| 176 | BUG_ON(!cpu_online(cpu)); | ||
| 177 | |||
| 178 | cpu_clear(cpu, mask); | ||
| 179 | cpus = cpus_weight(mask); | ||
| 180 | if (!cpus) | ||
| 181 | return 0; | ||
| 182 | |||
| 183 | /* Can deadlock when called with interrupts disabled */ | ||
| 184 | WARN_ON(irqs_disabled()); | ||
| 185 | |||
| 186 | data.func = func; | ||
| 187 | data.info = info; | ||
| 188 | atomic_set(&data.started, 0); | ||
| 189 | data.wait = wait; | ||
| 190 | if (wait) | ||
| 191 | atomic_set(&data.finished, 0); | ||
| 192 | |||
| 193 | spin_lock(&smp_call_lock); | ||
| 194 | call_data = &data; | ||
| 195 | smp_mb(); | ||
| 196 | |||
| 197 | /* Send a message to all other CPUs and wait for them to respond */ | ||
| 198 | mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); | 136 | mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); |
| 199 | |||
| 200 | /* Wait for response */ | ||
| 201 | /* FIXME: lock-up detection, backtrace on lock-up */ | ||
| 202 | while (atomic_read(&data.started) != cpus) | ||
| 203 | barrier(); | ||
| 204 | |||
| 205 | if (wait) | ||
| 206 | while (atomic_read(&data.finished) != cpus) | ||
| 207 | barrier(); | ||
| 208 | call_data = NULL; | ||
| 209 | spin_unlock(&smp_call_lock); | ||
| 210 | |||
| 211 | return 0; | ||
| 212 | } | 137 | } |
| 213 | 138 | ||
| 214 | int smp_call_function(void (*func) (void *info), void *info, int retry, | 139 | /* |
| 215 | int wait) | 140 | * We reuse the same vector for the single IPI |
| 141 | */ | ||
| 142 | void arch_send_call_function_single_ipi(int cpu) | ||
| 216 | { | 143 | { |
| 217 | return smp_call_function_mask(cpu_online_map, func, info, retry, wait); | 144 | mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION); |
| 218 | } | 145 | } |
| 219 | EXPORT_SYMBOL(smp_call_function); | ||
| 220 | 146 | ||
| 147 | /* | ||
| 148 | * Call into both interrupt handlers, as we share the IPI for them | ||
| 149 | */ | ||
| 221 | void smp_call_function_interrupt(void) | 150 | void smp_call_function_interrupt(void) |
| 222 | { | 151 | { |
| 223 | void (*func) (void *info) = call_data->func; | ||
| 224 | void *info = call_data->info; | ||
| 225 | int wait = call_data->wait; | ||
| 226 | |||
| 227 | /* | ||
| 228 | * Notify initiating CPU that I've grabbed the data and am | ||
| 229 | * about to execute the function. | ||
| 230 | */ | ||
| 231 | smp_mb(); | ||
| 232 | atomic_inc(&call_data->started); | ||
| 233 | |||
| 234 | /* | ||
| 235 | * At this point the info structure may be out of scope unless wait==1. | ||
| 236 | */ | ||
| 237 | irq_enter(); | 152 | irq_enter(); |
| 238 | (*func)(info); | 153 | generic_smp_call_function_single_interrupt(); |
| 154 | generic_smp_call_function_interrupt(); | ||
| 239 | irq_exit(); | 155 | irq_exit(); |
| 240 | |||
| 241 | if (wait) { | ||
| 242 | smp_mb(); | ||
| 243 | atomic_inc(&call_data->finished); | ||
| 244 | } | ||
| 245 | } | ||
| 246 | |||
| 247 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 248 | int retry, int wait) | ||
| 249 | { | ||
| 250 | int ret, me; | ||
| 251 | |||
| 252 | /* | ||
| 253 | * Can die spectacularly if this CPU isn't yet marked online | ||
| 254 | */ | ||
| 255 | if (!cpu_online(cpu)) | ||
| 256 | return 0; | ||
| 257 | |||
| 258 | me = get_cpu(); | ||
| 259 | BUG_ON(!cpu_online(me)); | ||
| 260 | |||
| 261 | if (cpu == me) { | ||
| 262 | local_irq_disable(); | ||
| 263 | func(info); | ||
| 264 | local_irq_enable(); | ||
| 265 | put_cpu(); | ||
| 266 | return 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry, | ||
| 270 | wait); | ||
| 271 | |||
| 272 | put_cpu(); | ||
| 273 | return 0; | ||
| 274 | } | 156 | } |
| 275 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 276 | 157 | ||
| 277 | static void stop_this_cpu(void *dummy) | 158 | static void stop_this_cpu(void *dummy) |
| 278 | { | 159 | { |
| @@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy) | |||
| 286 | 167 | ||
| 287 | void smp_send_stop(void) | 168 | void smp_send_stop(void) |
| 288 | { | 169 | { |
| 289 | smp_call_function(stop_this_cpu, NULL, 1, 0); | 170 | smp_call_function(stop_this_cpu, NULL, 0); |
| 290 | } | 171 | } |
| 291 | 172 | ||
| 292 | void __init smp_cpus_done(unsigned int max_cpus) | 173 | void __init smp_cpus_done(unsigned int max_cpus) |
| @@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info) | |||
| 365 | 246 | ||
| 366 | void flush_tlb_all(void) | 247 | void flush_tlb_all(void) |
| 367 | { | 248 | { |
| 368 | on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1); | 249 | on_each_cpu(flush_tlb_all_ipi, NULL, 1); |
| 369 | } | 250 | } |
| 370 | 251 | ||
| 371 | static void flush_tlb_mm_ipi(void *mm) | 252 | static void flush_tlb_mm_ipi(void *mm) |
| @@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm) | |||
| 385 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) | 266 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) |
| 386 | { | 267 | { |
| 387 | #ifndef CONFIG_MIPS_MT_SMTC | 268 | #ifndef CONFIG_MIPS_MT_SMTC |
| 388 | smp_call_function(func, info, 1, 1); | 269 | smp_call_function(func, info, 1); |
| 389 | #endif | 270 | #endif |
| 390 | } | 271 | } |
| 391 | 272 | ||
| @@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 485 | .addr2 = end, | 366 | .addr2 = end, |
| 486 | }; | 367 | }; |
| 487 | 368 | ||
| 488 | on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1); | 369 | on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); |
| 489 | } | 370 | } |
| 490 | 371 | ||
| 491 | static void flush_tlb_page_ipi(void *info) | 372 | static void flush_tlb_page_ipi(void *info) |
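The MIPS-private smp_call_function_mask() and smp_call_function_single() removed above, together with the deadlock scenario spelled out in their comment block, are replaced by the generic versions in kernel/smp.c, which keep the same restriction: do not call them with interrupts disabled or from hardware IRQ context. A hedged usage sketch of the new four-argument smp_call_function_single(); read_remote_counter() and its value are invented for illustration:

```c
#include <linux/smp.h>

struct counter_req {
	unsigned long value;	/* filled in on the target CPU */
};

/* Runs on the target CPU, in IPI context: fast and non-blocking only. */
static void read_remote_counter(void *info)
{
	struct counter_req *req = info;

	req->value = 42;	/* stand-in for reading a per-CPU register */
}

static unsigned long read_counter_on(int cpu)
{
	struct counter_req req = { .value = 0 };

	/*
	 * New signature: (cpu, func, info, wait).  With wait=1 the call does
	 * not return until func has finished, so req.value is valid here.
	 */
	smp_call_function_single(cpu, read_remote_counter, &req, 1);
	return req.value;
}
```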
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 3e863186cd22..a516286532ab 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
| @@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void) | |||
| 877 | /* Return from interrupt should be enough to cause scheduler check */ | 877 | /* Return from interrupt should be enough to cause scheduler check */ |
| 878 | } | 878 | } |
| 879 | 879 | ||
| 880 | |||
| 881 | static void ipi_call_interrupt(void) | 880 | static void ipi_call_interrupt(void) |
| 882 | { | 881 | { |
| 883 | /* Invoke generic function invocation code in smp.c */ | 882 | /* Invoke generic function invocation code in smp.c */ |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 27096751ddce..71df3390c07b 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
| @@ -43,12 +43,12 @@ | |||
| 43 | * primary cache. | 43 | * primary cache. |
| 44 | */ | 44 | */ |
| 45 | static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, | 45 | static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, |
| 46 | int retry, int wait) | 46 | int wait) |
| 47 | { | 47 | { |
| 48 | preempt_disable(); | 48 | preempt_disable(); |
| 49 | 49 | ||
| 50 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | 50 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) |
| 51 | smp_call_function(func, info, retry, wait); | 51 | smp_call_function(func, info, wait); |
| 52 | #endif | 52 | #endif |
| 53 | func(info); | 53 | func(info); |
| 54 | preempt_enable(); | 54 | preempt_enable(); |
| @@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args) | |||
| 350 | 350 | ||
| 351 | static void r4k___flush_cache_all(void) | 351 | static void r4k___flush_cache_all(void) |
| 352 | { | 352 | { |
| 353 | r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); | 353 | r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1); |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | static inline int has_valid_asid(const struct mm_struct *mm) | 356 | static inline int has_valid_asid(const struct mm_struct *mm) |
| @@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma, | |||
| 397 | int exec = vma->vm_flags & VM_EXEC; | 397 | int exec = vma->vm_flags & VM_EXEC; |
| 398 | 398 | ||
| 399 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) | 399 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) |
| 400 | r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); | 400 | r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1); |
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | static inline void local_r4k_flush_cache_mm(void * args) | 403 | static inline void local_r4k_flush_cache_mm(void * args) |
| @@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm) | |||
| 429 | if (!cpu_has_dc_aliases) | 429 | if (!cpu_has_dc_aliases) |
| 430 | return; | 430 | return; |
| 431 | 431 | ||
| 432 | r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); | 432 | r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1); |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | struct flush_cache_page_args { | 435 | struct flush_cache_page_args { |
| @@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma, | |||
| 521 | args.addr = addr; | 521 | args.addr = addr; |
| 522 | args.pfn = pfn; | 522 | args.pfn = pfn; |
| 523 | 523 | ||
| 524 | r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); | 524 | r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1); |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | static inline void local_r4k_flush_data_cache_page(void * addr) | 527 | static inline void local_r4k_flush_data_cache_page(void * addr) |
| @@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr) | |||
| 535 | local_r4k_flush_data_cache_page((void *)addr); | 535 | local_r4k_flush_data_cache_page((void *)addr); |
| 536 | else | 536 | else |
| 537 | r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, | 537 | r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, |
| 538 | 1, 1); | 538 | 1); |
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | struct flush_icache_range_args { | 541 | struct flush_icache_range_args { |
| @@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end) | |||
| 571 | args.start = start; | 571 | args.start = start; |
| 572 | args.end = end; | 572 | args.end = end; |
| 573 | 573 | ||
| 574 | r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); | 574 | r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); |
| 575 | instruction_hazard(); | 575 | instruction_hazard(); |
| 576 | } | 576 | } |
| 577 | 577 | ||
| @@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) | |||
| 672 | 672 | ||
| 673 | static void r4k_flush_cache_sigtramp(unsigned long addr) | 673 | static void r4k_flush_cache_sigtramp(unsigned long addr) |
| 674 | { | 674 | { |
| 675 | r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); | 675 | r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1); |
| 676 | } | 676 | } |
| 677 | 677 | ||
| 678 | static void r4k_flush_icache_all(void) | 678 | static void r4k_flush_icache_all(void) |
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index b5f6f71b27bc..dd2fbd6645c1 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c | |||
| @@ -27,7 +27,7 @@ static int op_mips_setup(void) | |||
| 27 | model->reg_setup(ctr); | 27 | model->reg_setup(ctr); |
| 28 | 28 | ||
| 29 | /* Configure the registers on all cpus. */ | 29 | /* Configure the registers on all cpus. */ |
| 30 | on_each_cpu(model->cpu_setup, NULL, 0, 1); | 30 | on_each_cpu(model->cpu_setup, NULL, 1); |
| 31 | 31 | ||
| 32 | return 0; | 32 | return 0; |
| 33 | } | 33 | } |
| @@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root) | |||
| 58 | 58 | ||
| 59 | static int op_mips_start(void) | 59 | static int op_mips_start(void) |
| 60 | { | 60 | { |
| 61 | on_each_cpu(model->cpu_start, NULL, 0, 1); | 61 | on_each_cpu(model->cpu_start, NULL, 1); |
| 62 | 62 | ||
| 63 | return 0; | 63 | return 0; |
| 64 | } | 64 | } |
| @@ -66,7 +66,7 @@ static int op_mips_start(void) | |||
| 66 | static void op_mips_stop(void) | 66 | static void op_mips_stop(void) |
| 67 | { | 67 | { |
| 68 | /* Disable performance monitoring for all counters. */ | 68 | /* Disable performance monitoring for all counters. */ |
| 69 | on_each_cpu(model->cpu_stop, NULL, 0, 1); | 69 | on_each_cpu(model->cpu_stop, NULL, 1); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 72 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c index 35dc435846a6..cf4c868715ac 100644 --- a/arch/mips/pmc-sierra/yosemite/prom.c +++ b/arch/mips/pmc-sierra/yosemite/prom.c | |||
| @@ -64,7 +64,7 @@ static void prom_exit(void) | |||
| 64 | #ifdef CONFIG_SMP | 64 | #ifdef CONFIG_SMP |
| 65 | if (smp_processor_id()) | 65 | if (smp_processor_id()) |
| 66 | /* CPU 1 */ | 66 | /* CPU 1 */ |
| 67 | smp_call_function(prom_cpu0_exit, NULL, 1, 1); | 67 | smp_call_function(prom_cpu0_exit, NULL, 1); |
| 68 | #endif | 68 | #endif |
| 69 | prom_cpu0_exit(NULL); | 69 | prom_cpu0_exit(NULL); |
| 70 | } | 70 | } |
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c index 33fce826f8bf..fd9604d5555a 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/cfe/setup.c | |||
| @@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg) | |||
| 74 | if (!reboot_smp) { | 74 | if (!reboot_smp) { |
| 75 | /* Get CPU 0 to do the cfe_exit */ | 75 | /* Get CPU 0 to do the cfe_exit */ |
| 76 | reboot_smp = 1; | 76 | reboot_smp = 1; |
| 77 | smp_call_function(cfe_linux_exit, arg, 1, 0); | 77 | smp_call_function(cfe_linux_exit, arg, 0); |
| 78 | } | 78 | } |
| 79 | } else { | 79 | } else { |
| 80 | printk("Passing control back to CFE...\n"); | 80 | printk("Passing control back to CFE...\n"); |
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c index cf8f6b3de86c..65b1af66b674 100644 --- a/arch/mips/sibyte/sb1250/prom.c +++ b/arch/mips/sibyte/sb1250/prom.c | |||
| @@ -66,7 +66,7 @@ static void prom_linux_exit(void) | |||
| 66 | { | 66 | { |
| 67 | #ifdef CONFIG_SMP | 67 | #ifdef CONFIG_SMP |
| 68 | if (smp_processor_id()) { | 68 | if (smp_processor_id()) { |
| 69 | smp_call_function(prom_cpu0_exit, NULL, 1, 1); | 69 | smp_call_function(prom_cpu0_exit, NULL, 1); |
| 70 | } | 70 | } |
| 71 | #endif | 71 | #endif |
| 72 | while(1); | 72 | while(1); |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bc7a19da6245..a7d4fd353c2b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -199,6 +199,7 @@ endchoice | |||
| 199 | 199 | ||
| 200 | config SMP | 200 | config SMP |
| 201 | bool "Symmetric multi-processing support" | 201 | bool "Symmetric multi-processing support" |
| 202 | select USE_GENERIC_SMP_HELPERS | ||
| 202 | ---help--- | 203 | ---help--- |
| 203 | This enables support for systems with more than one CPU. If you have | 204 | This enables support for systems with more than one CPU. If you have |
| 204 | a system with only one CPU, like most personal computers, say N. If | 205 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index e10d25d2d9c9..5259d8c20676 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
| @@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly; | |||
| 51 | void | 51 | void |
| 52 | flush_data_cache(void) | 52 | flush_data_cache(void) |
| 53 | { | 53 | { |
| 54 | on_each_cpu(flush_data_cache_local, NULL, 1, 1); | 54 | on_each_cpu(flush_data_cache_local, NULL, 1); |
| 55 | } | 55 | } |
| 56 | void | 56 | void |
| 57 | flush_instruction_cache(void) | 57 | flush_instruction_cache(void) |
| 58 | { | 58 | { |
| 59 | on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); | 59 | on_each_cpu(flush_instruction_cache_local, NULL, 1); |
| 60 | } | 60 | } |
| 61 | #endif | 61 | #endif |
| 62 | 62 | ||
| @@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy) | |||
| 515 | 515 | ||
| 516 | void flush_cache_all(void) | 516 | void flush_cache_all(void) |
| 517 | { | 517 | { |
| 518 | on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1); | 518 | on_each_cpu(cacheflush_h_tmp_function, NULL, 1); |
| 519 | } | 519 | } |
| 520 | 520 | ||
| 521 | void flush_cache_mm(struct mm_struct *mm) | 521 | void flush_cache_mm(struct mm_struct *mm) |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 85fc7754ec25..d47f3975c9c6 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
| @@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map); | |||
| 84 | 84 | ||
| 85 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 85 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; |
| 86 | 86 | ||
| 87 | struct smp_call_struct { | ||
| 88 | void (*func) (void *info); | ||
| 89 | void *info; | ||
| 90 | long wait; | ||
| 91 | atomic_t unstarted_count; | ||
| 92 | atomic_t unfinished_count; | ||
| 93 | }; | ||
| 94 | static volatile struct smp_call_struct *smp_call_function_data; | ||
| 95 | |||
| 96 | enum ipi_message_type { | 87 | enum ipi_message_type { |
| 97 | IPI_NOP=0, | 88 | IPI_NOP=0, |
| 98 | IPI_RESCHEDULE=1, | 89 | IPI_RESCHEDULE=1, |
| 99 | IPI_CALL_FUNC, | 90 | IPI_CALL_FUNC, |
| 91 | IPI_CALL_FUNC_SINGLE, | ||
| 100 | IPI_CPU_START, | 92 | IPI_CPU_START, |
| 101 | IPI_CPU_STOP, | 93 | IPI_CPU_STOP, |
| 102 | IPI_CPU_TEST | 94 | IPI_CPU_TEST |
| @@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id) | |||
| 187 | 179 | ||
| 188 | case IPI_CALL_FUNC: | 180 | case IPI_CALL_FUNC: |
| 189 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); | 181 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); |
| 190 | { | 182 | generic_smp_call_function_interrupt(); |
| 191 | volatile struct smp_call_struct *data; | 183 | break; |
| 192 | void (*func)(void *info); | 184 | |
| 193 | void *info; | 185 | case IPI_CALL_FUNC_SINGLE: |
| 194 | int wait; | 186 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu); |
| 195 | 187 | generic_smp_call_function_single_interrupt(); | |
| 196 | data = smp_call_function_data; | ||
| 197 | func = data->func; | ||
| 198 | info = data->info; | ||
| 199 | wait = data->wait; | ||
| 200 | |||
| 201 | mb(); | ||
| 202 | atomic_dec ((atomic_t *)&data->unstarted_count); | ||
| 203 | |||
| 204 | /* At this point, *data can't | ||
| 205 | * be relied upon. | ||
| 206 | */ | ||
| 207 | |||
| 208 | (*func)(info); | ||
| 209 | |||
| 210 | /* Notify the sending CPU that the | ||
| 211 | * task is done. | ||
| 212 | */ | ||
| 213 | mb(); | ||
| 214 | if (wait) | ||
| 215 | atomic_dec ((atomic_t *)&data->unfinished_count); | ||
| 216 | } | ||
| 217 | break; | 188 | break; |
| 218 | 189 | ||
| 219 | case IPI_CPU_START: | 190 | case IPI_CPU_START: |
| @@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op) | |||
| 256 | spin_unlock_irqrestore(lock, flags); | 227 | spin_unlock_irqrestore(lock, flags); |
| 257 | } | 228 | } |
| 258 | 229 | ||
| 230 | static void | ||
| 231 | send_IPI_mask(cpumask_t mask, enum ipi_message_type op) | ||
| 232 | { | ||
| 233 | int cpu; | ||
| 234 | |||
| 235 | for_each_cpu_mask(cpu, mask) | ||
| 236 | ipi_send(cpu, op); | ||
| 237 | } | ||
| 259 | 238 | ||
| 260 | static inline void | 239 | static inline void |
| 261 | send_IPI_single(int dest_cpu, enum ipi_message_type op) | 240 | send_IPI_single(int dest_cpu, enum ipi_message_type op) |
| @@ -295,86 +274,15 @@ smp_send_all_nop(void) | |||
| 295 | send_IPI_allbutself(IPI_NOP); | 274 | send_IPI_allbutself(IPI_NOP); |
| 296 | } | 275 | } |
| 297 | 276 | ||
| 298 | 277 | void arch_send_call_function_ipi(cpumask_t mask) | |
| 299 | /** | ||
| 300 | * Run a function on all other CPUs. | ||
| 301 | * <func> The function to run. This must be fast and non-blocking. | ||
| 302 | * <info> An arbitrary pointer to pass to the function. | ||
| 303 | * <retry> If true, keep retrying until ready. | ||
| 304 | * <wait> If true, wait until function has completed on other CPUs. | ||
| 305 | * [RETURNS] 0 on success, else a negative status code. | ||
| 306 | * | ||
| 307 | * Does not return until remote CPUs are nearly ready to execute <func> | ||
| 308 | * or have executed. | ||
| 309 | */ | ||
| 310 | |||
| 311 | int | ||
| 312 | smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | ||
| 313 | { | 278 | { |
| 314 | struct smp_call_struct data; | 279 | send_IPI_mask(mask, IPI_CALL_FUNC); |
| 315 | unsigned long timeout; | ||
| 316 | static DEFINE_SPINLOCK(lock); | ||
| 317 | int retries = 0; | ||
| 318 | |||
| 319 | if (num_online_cpus() < 2) | ||
| 320 | return 0; | ||
| 321 | |||
| 322 | /* Can deadlock when called with interrupts disabled */ | ||
| 323 | WARN_ON(irqs_disabled()); | ||
| 324 | |||
| 325 | /* can also deadlock if IPIs are disabled */ | ||
| 326 | WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0); | ||
| 327 | |||
| 328 | |||
| 329 | data.func = func; | ||
| 330 | data.info = info; | ||
| 331 | data.wait = wait; | ||
| 332 | atomic_set(&data.unstarted_count, num_online_cpus() - 1); | ||
| 333 | atomic_set(&data.unfinished_count, num_online_cpus() - 1); | ||
| 334 | |||
| 335 | if (retry) { | ||
| 336 | spin_lock (&lock); | ||
| 337 | while (smp_call_function_data != 0) | ||
| 338 | barrier(); | ||
| 339 | } | ||
| 340 | else { | ||
| 341 | spin_lock (&lock); | ||
| 342 | if (smp_call_function_data) { | ||
| 343 | spin_unlock (&lock); | ||
| 344 | return -EBUSY; | ||
| 345 | } | ||
| 346 | } | ||
| 347 | |||
| 348 | smp_call_function_data = &data; | ||
| 349 | spin_unlock (&lock); | ||
| 350 | |||
| 351 | /* Send a message to all other CPUs and wait for them to respond */ | ||
| 352 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
| 353 | |||
| 354 | retry: | ||
| 355 | /* Wait for response */ | ||
| 356 | timeout = jiffies + HZ; | ||
| 357 | while ( (atomic_read (&data.unstarted_count) > 0) && | ||
| 358 | time_before (jiffies, timeout) ) | ||
| 359 | barrier (); | ||
| 360 | |||
| 361 | if (atomic_read (&data.unstarted_count) > 0) { | ||
| 362 | printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n", | ||
| 363 | smp_processor_id(), ++retries); | ||
| 364 | goto retry; | ||
| 365 | } | ||
| 366 | /* We either got one or timed out. Release the lock */ | ||
| 367 | |||
| 368 | mb(); | ||
| 369 | smp_call_function_data = NULL; | ||
| 370 | |||
| 371 | while (wait && atomic_read (&data.unfinished_count) > 0) | ||
| 372 | barrier (); | ||
| 373 | |||
| 374 | return 0; | ||
| 375 | } | 280 | } |
| 376 | 281 | ||
| 377 | EXPORT_SYMBOL(smp_call_function); | 282 | void arch_send_call_function_single_ipi(int cpu) |
| 283 | { | ||
| 284 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); | ||
| 285 | } | ||
| 378 | 286 | ||
| 379 | /* | 287 | /* |
| 380 | * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() | 288 | * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() |
| @@ -384,7 +292,7 @@ EXPORT_SYMBOL(smp_call_function); | |||
| 384 | void | 292 | void |
| 385 | smp_flush_tlb_all(void) | 293 | smp_flush_tlb_all(void) |
| 386 | { | 294 | { |
| 387 | on_each_cpu(flush_tlb_all_local, NULL, 1, 1); | 295 | on_each_cpu(flush_tlb_all_local, NULL, 1); |
| 388 | } | 296 | } |
| 389 | 297 | ||
| 390 | /* | 298 | /* |
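parisc, whose low-level ipi_send() targets one CPU at a time, gains a small send_IPI_mask() that simply fans the request out, plus a new IPI_CALL_FUNC_SINGLE message next to IPI_CALL_FUNC; both handler cases reduce to calls into the generic dispatchers. The same fan-out idiom fits any architecture without a true multicast IPI; in this sketch my_ipi_send() and the message names are placeholders, not this patch's code:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

enum my_ipi_message_type {
	MY_IPI_CALL_FUNC,
	MY_IPI_CALL_FUNC_SINGLE,
};

/* Hypothetical primitive that can only interrupt a single CPU. */
extern void my_ipi_send(int cpu, enum my_ipi_message_type op);

static void my_send_ipi_mask(cpumask_t mask, enum my_ipi_message_type op)
{
	int cpu;

	/* Fan a mask-wide request out as one IPI per target CPU. */
	for_each_cpu_mask(cpu, mask)
		my_ipi_send(cpu, op);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	my_send_ipi_mask(mask, MY_IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	my_ipi_send(cpu, MY_IPI_CALL_FUNC_SINGLE);
}
```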
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ce0da689a89d..b4d6c8777ed0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -1053,7 +1053,7 @@ void flush_tlb_all(void) | |||
| 1053 | do_recycle++; | 1053 | do_recycle++; |
| 1054 | } | 1054 | } |
| 1055 | spin_unlock(&sid_lock); | 1055 | spin_unlock(&sid_lock); |
| 1056 | on_each_cpu(flush_tlb_all_local, NULL, 1, 1); | 1056 | on_each_cpu(flush_tlb_all_local, NULL, 1); |
| 1057 | if (do_recycle) { | 1057 | if (do_recycle) { |
| 1058 | spin_lock(&sid_lock); | 1058 | spin_lock(&sid_lock); |
| 1059 | recycle_sids(recycle_ndirty,recycle_dirty_array); | 1059 | recycle_sids(recycle_ndirty,recycle_dirty_array); |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a5e9912e2d37..20eacf2a8424 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
| @@ -111,6 +111,7 @@ config PPC | |||
| 111 | select HAVE_KPROBES | 111 | select HAVE_KPROBES |
| 112 | select HAVE_KRETPROBES | 112 | select HAVE_KRETPROBES |
| 113 | select HAVE_LMB | 113 | select HAVE_LMB |
| 114 | select USE_GENERIC_SMP_HELPERS if SMP | ||
| 114 | select HAVE_OPROFILE | 115 | select HAVE_OPROFILE |
| 115 | 116 | ||
| 116 | config EARLY_PRINTK | 117 | config EARLY_PRINTK |
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 704375bda73a..b732b5f8e356 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
| @@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void) | |||
| 172 | { | 172 | { |
| 173 | int my_cpu, i, notified=-1; | 173 | int my_cpu, i, notified=-1; |
| 174 | 174 | ||
| 175 | smp_call_function(kexec_smp_down, NULL, 0, /* wait */0); | 175 | smp_call_function(kexec_smp_down, NULL, /* wait */0); |
| 176 | my_cpu = get_cpu(); | 176 | my_cpu = get_cpu(); |
| 177 | 177 | ||
| 178 | /* check the others cpus are now down (via paca hw cpu id == -1) */ | 178 | /* check the others cpus are now down (via paca hw cpu id == -1) */ |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 34843c318419..647f3e8677dc 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
| @@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
| 747 | /* Call function on all CPUs. One of us will make the | 747 | /* Call function on all CPUs. One of us will make the |
| 748 | * rtas call | 748 | * rtas call |
| 749 | */ | 749 | */ |
| 750 | if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0)) | 750 | if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) |
| 751 | data.error = -EINVAL; | 751 | data.error = -EINVAL; |
| 752 | 752 | ||
| 753 | wait_for_completion(&done); | 753 | wait_for_completion(&done); |
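The rtas_ibm_suspend_me() hunk above keeps its own synchronization: it fires on_each_cpu() with wait=0 and then blocks on a completion that the per-CPU callback signals. That pattern is useful whenever the initiator must not spin inside the IPI path; the sketch below reconstructs it with invented names (suspend_ctx, my_percpu_step) and is not code from this patch:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <asm/atomic.h>

struct suspend_ctx {
	atomic_t pending;		/* CPUs that still have to run the step */
	struct completion done;		/* signalled by the last CPU to finish */
};

/* Runs on every online CPU, including the caller's. */
static void my_percpu_step(void *info)
{
	struct suspend_ctx *ctx = info;

	/* ... per-CPU work goes here ... */

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static int run_step_on_all_cpus(void)
{
	struct suspend_ctx ctx;

	atomic_set(&ctx.pending, num_online_cpus());
	init_completion(&ctx.done);

	/* wait=0: do not spin in on_each_cpu(); synchronize explicitly below. */
	if (on_each_cpu(my_percpu_step, &ctx, 0))
		return -EINVAL;

	wait_for_completion(&ctx.done);
	return 0;
}
```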
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1457aa0a08f1..5191b46a611e 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
| @@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops; | |||
| 72 | 72 | ||
| 73 | static volatile unsigned int cpu_callin_map[NR_CPUS]; | 73 | static volatile unsigned int cpu_callin_map[NR_CPUS]; |
| 74 | 74 | ||
| 75 | void smp_call_function_interrupt(void); | ||
| 76 | |||
| 77 | int smt_enabled_at_boot = 1; | 75 | int smt_enabled_at_boot = 1; |
| 78 | 76 | ||
| 79 | static int ipi_fail_ok; | ||
| 80 | |||
| 81 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; | 77 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; |
| 82 | 78 | ||
| 83 | #ifdef CONFIG_PPC64 | 79 | #ifdef CONFIG_PPC64 |
| @@ -99,12 +95,15 @@ void smp_message_recv(int msg) | |||
| 99 | { | 95 | { |
| 100 | switch(msg) { | 96 | switch(msg) { |
| 101 | case PPC_MSG_CALL_FUNCTION: | 97 | case PPC_MSG_CALL_FUNCTION: |
| 102 | smp_call_function_interrupt(); | 98 | generic_smp_call_function_interrupt(); |
| 103 | break; | 99 | break; |
| 104 | case PPC_MSG_RESCHEDULE: | 100 | case PPC_MSG_RESCHEDULE: |
| 105 | /* XXX Do we have to do this? */ | 101 | /* XXX Do we have to do this? */ |
| 106 | set_need_resched(); | 102 | set_need_resched(); |
| 107 | break; | 103 | break; |
| 104 | case PPC_MSG_CALL_FUNC_SINGLE: | ||
| 105 | generic_smp_call_function_single_interrupt(); | ||
| 106 | break; | ||
| 108 | case PPC_MSG_DEBUGGER_BREAK: | 107 | case PPC_MSG_DEBUGGER_BREAK: |
| 109 | if (crash_ipi_function_ptr) { | 108 | if (crash_ipi_function_ptr) { |
| 110 | crash_ipi_function_ptr(get_irq_regs()); | 109 | crash_ipi_function_ptr(get_irq_regs()); |
| @@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu) | |||
| 128 | smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); | 127 | smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); |
| 129 | } | 128 | } |
| 130 | 129 | ||
| 130 | void arch_send_call_function_single_ipi(int cpu) | ||
| 131 | { | ||
| 132 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); | ||
| 133 | } | ||
| 134 | |||
| 135 | void arch_send_call_function_ipi(cpumask_t mask) | ||
| 136 | { | ||
| 137 | unsigned int cpu; | ||
| 138 | |||
| 139 | for_each_cpu_mask(cpu, mask) | ||
| 140 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | ||
| 141 | } | ||
| 142 | |||
| 131 | #ifdef CONFIG_DEBUGGER | 143 | #ifdef CONFIG_DEBUGGER |
| 132 | void smp_send_debugger_break(int cpu) | 144 | void smp_send_debugger_break(int cpu) |
| 133 | { | 145 | { |
| @@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy) | |||
| 154 | ; | 166 | ; |
| 155 | } | 167 | } |
| 156 | 168 | ||
| 157 | /* | ||
| 158 | * Structure and data for smp_call_function(). This is designed to minimise | ||
| 159 | * static memory requirements. It also looks cleaner. | ||
| 160 | * Stolen from the i386 version. | ||
| 161 | */ | ||
| 162 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
| 163 | |||
| 164 | static struct call_data_struct { | ||
| 165 | void (*func) (void *info); | ||
| 166 | void *info; | ||
| 167 | atomic_t started; | ||
| 168 | atomic_t finished; | ||
| 169 | int wait; | ||
| 170 | } *call_data; | ||
| 171 | |||
| 172 | /* delay of at least 8 seconds */ | ||
| 173 | #define SMP_CALL_TIMEOUT 8 | ||
| 174 | |||
| 175 | /* | ||
| 176 | * These functions send a 'generic call function' IPI to other online | ||
| 177 | * CPUS in the system. | ||
| 178 | * | ||
| 179 | * [SUMMARY] Run a function on other CPUs. | ||
| 180 | * <func> The function to run. This must be fast and non-blocking. | ||
| 181 | * <info> An arbitrary pointer to pass to the function. | ||
| 182 | * <nonatomic> currently unused. | ||
| 183 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
| 184 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
| 185 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
| 186 | * <map> is a cpu map of the cpus to send IPI to. | ||
| 187 | * | ||
| 188 | * You must not call this function with disabled interrupts or from a | ||
| 189 | * hardware interrupt handler or from a bottom half handler. | ||
| 190 | */ | ||
| 191 | static int __smp_call_function_map(void (*func) (void *info), void *info, | ||
| 192 | int nonatomic, int wait, cpumask_t map) | ||
| 193 | { | ||
| 194 | struct call_data_struct data; | ||
| 195 | int ret = -1, num_cpus; | ||
| 196 | int cpu; | ||
| 197 | u64 timeout; | ||
| 198 | |||
| 199 | if (unlikely(smp_ops == NULL)) | ||
| 200 | return ret; | ||
| 201 | |||
| 202 | data.func = func; | ||
| 203 | data.info = info; | ||
| 204 | atomic_set(&data.started, 0); | ||
| 205 | data.wait = wait; | ||
| 206 | if (wait) | ||
| 207 | atomic_set(&data.finished, 0); | ||
| 208 | |||
| 209 | /* remove 'self' from the map */ | ||
| 210 | if (cpu_isset(smp_processor_id(), map)) | ||
| 211 | cpu_clear(smp_processor_id(), map); | ||
| 212 | |||
| 213 | /* sanity check the map, remove any non-online processors. */ | ||
| 214 | cpus_and(map, map, cpu_online_map); | ||
| 215 | |||
| 216 | num_cpus = cpus_weight(map); | ||
| 217 | if (!num_cpus) | ||
| 218 | goto done; | ||
| 219 | |||
| 220 | call_data = &data; | ||
| 221 | smp_wmb(); | ||
| 222 | /* Send a message to all CPUs in the map */ | ||
| 223 | for_each_cpu_mask(cpu, map) | ||
| 224 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | ||
| 225 | |||
| 226 | timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec; | ||
| 227 | |||
| 228 | /* Wait for indication that they have received the message */ | ||
| 229 | while (atomic_read(&data.started) != num_cpus) { | ||
| 230 | HMT_low(); | ||
| 231 | if (get_tb() >= timeout) { | ||
| 232 | printk("smp_call_function on cpu %d: other cpus not " | ||
| 233 | "responding (%d)\n", smp_processor_id(), | ||
| 234 | atomic_read(&data.started)); | ||
| 235 | if (!ipi_fail_ok) | ||
| 236 | debugger(NULL); | ||
| 237 | goto out; | ||
| 238 | } | ||
| 239 | } | ||
| 240 | |||
| 241 | /* optionally wait for the CPUs to complete */ | ||
| 242 | if (wait) { | ||
| 243 | while (atomic_read(&data.finished) != num_cpus) { | ||
| 244 | HMT_low(); | ||
| 245 | if (get_tb() >= timeout) { | ||
| 246 | printk("smp_call_function on cpu %d: other " | ||
| 247 | "cpus not finishing (%d/%d)\n", | ||
| 248 | smp_processor_id(), | ||
| 249 | atomic_read(&data.finished), | ||
| 250 | atomic_read(&data.started)); | ||
| 251 | debugger(NULL); | ||
| 252 | goto out; | ||
| 253 | } | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | done: | ||
| 258 | ret = 0; | ||
| 259 | |||
| 260 | out: | ||
| 261 | call_data = NULL; | ||
| 262 | HMT_medium(); | ||
| 263 | return ret; | ||
| 264 | } | ||
| 265 | |||
| 266 | static int __smp_call_function(void (*func)(void *info), void *info, | ||
| 267 | int nonatomic, int wait) | ||
| 268 | { | ||
| 269 | int ret; | ||
| 270 | spin_lock(&call_lock); | ||
| 271 | ret =__smp_call_function_map(func, info, nonatomic, wait, | ||
| 272 | cpu_online_map); | ||
| 273 | spin_unlock(&call_lock); | ||
| 274 | return ret; | ||
| 275 | } | ||
| 276 | |||
| 277 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
| 278 | int wait) | ||
| 279 | { | ||
| 280 | /* Can deadlock when called with interrupts disabled */ | ||
| 281 | WARN_ON(irqs_disabled()); | ||
| 282 | |||
| 283 | return __smp_call_function(func, info, nonatomic, wait); | ||
| 284 | } | ||
| 285 | EXPORT_SYMBOL(smp_call_function); | ||
| 286 | |||
| 287 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 288 | int nonatomic, int wait) | ||
| 289 | { | ||
| 290 | cpumask_t map = CPU_MASK_NONE; | ||
| 291 | int ret = 0; | ||
| 292 | |||
| 293 | /* Can deadlock when called with interrupts disabled */ | ||
| 294 | WARN_ON(irqs_disabled()); | ||
| 295 | |||
| 296 | if (!cpu_online(cpu)) | ||
| 297 | return -EINVAL; | ||
| 298 | |||
| 299 | cpu_set(cpu, map); | ||
| 300 | if (cpu != get_cpu()) { | ||
| 301 | spin_lock(&call_lock); | ||
| 302 | ret = __smp_call_function_map(func, info, nonatomic, wait, map); | ||
| 303 | spin_unlock(&call_lock); | ||
| 304 | } else { | ||
| 305 | local_irq_disable(); | ||
| 306 | func(info); | ||
| 307 | local_irq_enable(); | ||
| 308 | } | ||
| 309 | put_cpu(); | ||
| 310 | return ret; | ||
| 311 | } | ||
| 312 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 313 | |||
| 314 | void smp_send_stop(void) | 169 | void smp_send_stop(void) |
| 315 | { | 170 | { |
| 316 | int nolock; | 171 | smp_call_function(stop_this_cpu, NULL, 0); |
| 317 | |||
| 318 | /* It's OK to fail sending the IPI, since the alternative is to | ||
| 319 | * be stuck forever waiting on the other CPU to take the interrupt. | ||
| 320 | * | ||
| 321 | * It's better to at least continue and go through reboot, since this | ||
| 322 | * function is usually called at panic or reboot time in the first | ||
| 323 | * place. | ||
| 324 | */ | ||
| 325 | ipi_fail_ok = 1; | ||
| 326 | |||
| 327 | /* Don't deadlock in case we got called through panic */ | ||
| 328 | nolock = !spin_trylock(&call_lock); | ||
| 329 | __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map); | ||
| 330 | if (!nolock) | ||
| 331 | spin_unlock(&call_lock); | ||
| 332 | } | ||
| 333 | |||
| 334 | void smp_call_function_interrupt(void) | ||
| 335 | { | ||
| 336 | void (*func) (void *info); | ||
| 337 | void *info; | ||
| 338 | int wait; | ||
| 339 | |||
| 340 | /* call_data will be NULL if the sender timed out while | ||
| 341 | * waiting on us to receive the call. | ||
| 342 | */ | ||
| 343 | if (!call_data) | ||
| 344 | return; | ||
| 345 | |||
| 346 | func = call_data->func; | ||
| 347 | info = call_data->info; | ||
| 348 | wait = call_data->wait; | ||
| 349 | |||
| 350 | if (!wait) | ||
| 351 | smp_mb__before_atomic_inc(); | ||
| 352 | |||
| 353 | /* | ||
| 354 | * Notify initiating CPU that I've grabbed the data and am | ||
| 355 | * about to execute the function | ||
| 356 | */ | ||
| 357 | atomic_inc(&call_data->started); | ||
| 358 | /* | ||
| 359 | * At this point the info structure may be out of scope unless wait==1 | ||
| 360 | */ | ||
| 361 | (*func)(info); | ||
| 362 | if (wait) { | ||
| 363 | smp_mb__before_atomic_inc(); | ||
| 364 | atomic_inc(&call_data->finished); | ||
| 365 | } | ||
| 366 | } | 172 | } |
| 367 | 173 | ||
| 368 | extern struct gettimeofday_struct do_gtod; | 174 | extern struct gettimeofday_struct do_gtod; |
| @@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused) | |||
| 596 | 402 | ||
| 597 | secondary_cpu_time_init(); | 403 | secondary_cpu_time_init(); |
| 598 | 404 | ||
| 599 | spin_lock(&call_lock); | 405 | ipi_call_lock(); |
| 600 | cpu_set(cpu, cpu_online_map); | 406 | cpu_set(cpu, cpu_online_map); |
| 601 | spin_unlock(&call_lock); | 407 | ipi_call_unlock(); |
| 602 | 408 | ||
| 603 | local_irq_enable(); | 409 | local_irq_enable(); |
| 604 | 410 | ||
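In start_secondary() above, powerpc used to take its private call_lock while marking the new CPU online; with the call-function machinery now generic, the same serialization is expressed through ipi_call_lock()/ipi_call_unlock(), so a mask-wide function call cannot be assembled while the online map changes under it. A minimal sketch of that bringup tail, with my_start_secondary_tail() as an invented wrapper:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>

/* Hypothetical tail of a secondary-CPU bringup path. */
static void my_start_secondary_tail(unsigned int cpu)
{
	/*
	 * Hold the generic call-function lock while publishing this CPU in
	 * cpu_online_map, so concurrent smp_call_function() callers see a
	 * stable set of CPUs that will all acknowledge their IPIs.
	 */
	ipi_call_lock();
	cpu_set(cpu, cpu_online_map);
	ipi_call_unlock();

	local_irq_enable();
}
```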
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 368a4934f7ee..c3a56d65c5a9 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c | |||
| @@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused) | |||
| 192 | 192 | ||
| 193 | /* schedule ourselves to be run again */ | 193 | /* schedule ourselves to be run again */ |
| 194 | mod_timer(&tau_timer, jiffies + shrink_timer) ; | 194 | mod_timer(&tau_timer, jiffies + shrink_timer) ; |
| 195 | on_each_cpu(tau_timeout, NULL, 1, 0); | 195 | on_each_cpu(tau_timeout, NULL, 0); |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | /* | 198 | /* |
| @@ -234,7 +234,7 @@ int __init TAU_init(void) | |||
| 234 | tau_timer.expires = jiffies + shrink_timer; | 234 | tau_timer.expires = jiffies + shrink_timer; |
| 235 | add_timer(&tau_timer); | 235 | add_timer(&tau_timer); |
| 236 | 236 | ||
| 237 | on_each_cpu(TAU_init_smp, NULL, 1, 0); | 237 | on_each_cpu(TAU_init_smp, NULL, 0); |
| 238 | 238 | ||
| 239 | printk("Thermal assist unit "); | 239 | printk("Thermal assist unit "); |
| 240 | #ifdef CONFIG_TAU_INT | 240 | #ifdef CONFIG_TAU_INT |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 73401e83739a..f1a38a6c1e2d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -322,7 +322,7 @@ void snapshot_timebases(void) | |||
| 322 | { | 322 | { |
| 323 | if (!cpu_has_feature(CPU_FTR_PURR)) | 323 | if (!cpu_has_feature(CPU_FTR_PURR)) |
| 324 | return; | 324 | return; |
| 325 | on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); | 325 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); |
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | /* | 328 | /* |
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index ad928edafb0a..2bd12d965db1 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c | |||
| @@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz | |||
| 218 | mb(); | 218 | mb(); |
| 219 | 219 | ||
| 220 | /* XXX this is sub-optimal but will do for now */ | 220 | /* XXX this is sub-optimal but will do for now */ |
| 221 | on_each_cpu(slice_flush_segments, mm, 0, 1); | 221 | on_each_cpu(slice_flush_segments, mm, 1); |
| 222 | #ifdef CONFIG_SPU_BASE | 222 | #ifdef CONFIG_SPU_BASE |
| 223 | spu_flush_all_slbs(mm); | 223 | spu_flush_all_slbs(mm); |
| 224 | #endif | 224 | #endif |
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c index e2d867ce1c7e..69ad829a7fa3 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_64.c | |||
| @@ -66,7 +66,7 @@ static void pgtable_free_now(pgtable_free_t pgf) | |||
| 66 | { | 66 | { |
| 67 | pte_freelist_forced_free++; | 67 | pte_freelist_forced_free++; |
| 68 | 68 | ||
| 69 | smp_call_function(pte_free_smp_sync, NULL, 0, 1); | 69 | smp_call_function(pte_free_smp_sync, NULL, 1); |
| 70 | 70 | ||
| 71 | pgtable_free(pgf); | 71 | pgtable_free(pgf); |
| 72 | } | 72 | } |
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 4908dc98f9ca..17807acb05d9 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c | |||
| @@ -65,7 +65,7 @@ static int op_powerpc_setup(void) | |||
| 65 | 65 | ||
| 66 | /* Configure the registers on all cpus. If an error occurs on one | 66 | /* Configure the registers on all cpus. If an error occurs on one |
| 67 | * of the cpus, op_per_cpu_rc will be set to the error */ | 67 | * of the cpus, op_per_cpu_rc will be set to the error */ |
| 68 | on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1); | 68 | on_each_cpu(op_powerpc_cpu_setup, NULL, 1); |
| 69 | 69 | ||
| 70 | out: if (op_per_cpu_rc) { | 70 | out: if (op_per_cpu_rc) { |
| 71 | /* error on setup release the performance counter hardware */ | 71 | /* error on setup release the performance counter hardware */ |
| @@ -100,7 +100,7 @@ static int op_powerpc_start(void) | |||
| 100 | if (model->global_start) | 100 | if (model->global_start) |
| 101 | return model->global_start(ctr); | 101 | return model->global_start(ctr); |
| 102 | if (model->start) { | 102 | if (model->start) { |
| 103 | on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1); | 103 | on_each_cpu(op_powerpc_cpu_start, NULL, 1); |
| 104 | return op_per_cpu_rc; | 104 | return op_per_cpu_rc; |
| 105 | } | 105 | } |
| 106 | return -EIO; /* No start function is defined for this | 106 | return -EIO; /* No start function is defined for this |
| @@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy) | |||
| 115 | static void op_powerpc_stop(void) | 115 | static void op_powerpc_stop(void) |
| 116 | { | 116 | { |
| 117 | if (model->stop) | 117 | if (model->stop) |
| 118 | on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1); | 118 | on_each_cpu(op_powerpc_cpu_stop, NULL, 1); |
| 119 | if (model->global_stop) | 119 | if (model->global_stop) |
| 120 | model->global_stop(); | 120 | model->global_stop(); |
| 121 | } | 121 | } |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 5bf7df146022..2d5bb22d6c09 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
| @@ -218,6 +218,7 @@ void iic_request_IPIs(void) | |||
| 218 | { | 218 | { |
| 219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | 219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); |
| 220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | 220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); |
| 221 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); | ||
| 221 | #ifdef CONFIG_DEBUGGER | 222 | #ifdef CONFIG_DEBUGGER |
| 222 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | 223 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); |
| 223 | #endif /* CONFIG_DEBUGGER */ | 224 | #endif /* CONFIG_DEBUGGER */ |
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f0b12f212363..a0927a3bacb7 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c | |||
| @@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu) | |||
| 105 | * to index needs to be setup. | 105 | * to index needs to be setup. |
| 106 | */ | 106 | */ |
| 107 | 107 | ||
| 108 | BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); | 108 | BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); |
| 109 | BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); | 109 | BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); |
| 110 | BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); | 110 | BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); |
| 111 | BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); | ||
| 111 | 112 | ||
| 112 | for (i = 0; i < MSG_COUNT; i++) { | 113 | for (i = 0; i < MSG_COUNT; i++) { |
| 113 | result = ps3_event_receive_port_setup(cpu, &virqs[i]); | 114 | result = ps3_event_receive_port_setup(cpu, &virqs[i]); |
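The PS3 change above extends the existing BUILD_BUG_ON() checks because its platform code indexes the per-message virq array by the numeric PPC_MSG_* values; the new call-function-single message must therefore occupy the spare slot 2 and nothing may be renumbered silently. A small illustration of the idiom with stand-in names:

```c
#include <linux/kernel.h>

enum my_smp_msg {
	MY_MSG_CALL_FUNCTION	= 0,
	MY_MSG_RESCHEDULE	= 1,
	MY_MSG_CALL_FUNC_SINGLE	= 2,	/* takes the formerly unused slot */
	MY_MSG_DEBUGGER_BREAK	= 3,
	MY_MSG_COUNT
};

static void my_check_msg_layout(void)
{
	/* Break the build if anyone renumbers the messages this table assumes. */
	BUILD_BUG_ON(MY_MSG_CALL_FUNCTION != 0);
	BUILD_BUG_ON(MY_MSG_RESCHEDULE != 1);
	BUILD_BUG_ON(MY_MSG_CALL_FUNC_SINGLE != 2);
	BUILD_BUG_ON(MY_MSG_DEBUGGER_BREAK != 3);
}
```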
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index ebebc28fe895..0fc830f576f5 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
| @@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu) | |||
| 383 | mb(); | 383 | mb(); |
| 384 | smp_message_recv(PPC_MSG_RESCHEDULE); | 384 | smp_message_recv(PPC_MSG_RESCHEDULE); |
| 385 | } | 385 | } |
| 386 | #if 0 | 386 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, |
| 387 | if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, | ||
| 388 | &xics_ipi_message[cpu].value)) { | 387 | &xics_ipi_message[cpu].value)) { |
| 389 | mb(); | 388 | mb(); |
| 390 | smp_message_recv(PPC_MSG_MIGRATE_TASK); | 389 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); |
| 391 | } | 390 | } |
| 392 | #endif | ||
| 393 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 391 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
| 394 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, | 392 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, |
| 395 | &xics_ipi_message[cpu].value)) { | 393 | &xics_ipi_message[cpu].value)) { |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 7680001676a6..6c90c95b454e 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -1494,7 +1494,7 @@ void mpic_request_ipis(void) | |||
| 1494 | static char *ipi_names[] = { | 1494 | static char *ipi_names[] = { |
| 1495 | "IPI0 (call function)", | 1495 | "IPI0 (call function)", |
| 1496 | "IPI1 (reschedule)", | 1496 | "IPI1 (reschedule)", |
| 1497 | "IPI2 (unused)", | 1497 | "IPI2 (call function single)", |
| 1498 | "IPI3 (debugger break)", | 1498 | "IPI3 (debugger break)", |
| 1499 | }; | 1499 | }; |
| 1500 | BUG_ON(mpic == NULL); | 1500 | BUG_ON(mpic == NULL); |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 9cb3d92447a3..a7f8979fb925 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
| @@ -203,7 +203,7 @@ __appldata_vtimer_setup(int cmd) | |||
| 203 | per_cpu(appldata_timer, i).expires = per_cpu_interval; | 203 | per_cpu(appldata_timer, i).expires = per_cpu_interval; |
| 204 | smp_call_function_single(i, add_virt_timer_periodic, | 204 | smp_call_function_single(i, add_virt_timer_periodic, |
| 205 | &per_cpu(appldata_timer, i), | 205 | &per_cpu(appldata_timer, i), |
| 206 | 0, 1); | 206 | 1); |
| 207 | } | 207 | } |
| 208 | appldata_timer_active = 1; | 208 | appldata_timer_active = 1; |
| 209 | break; | 209 | break; |
| @@ -228,7 +228,7 @@ __appldata_vtimer_setup(int cmd) | |||
| 228 | args.timer = &per_cpu(appldata_timer, i); | 228 | args.timer = &per_cpu(appldata_timer, i); |
| 229 | args.expires = per_cpu_interval; | 229 | args.expires = per_cpu_interval; |
| 230 | smp_call_function_single(i, __appldata_mod_vtimer_wrap, | 230 | smp_call_function_single(i, __appldata_mod_vtimer_wrap, |
| 231 | &args, 0, 1); | 231 | &args, 1); |
| 232 | } | 232 | } |
| 233 | } | 233 | } |
| 234 | } | 234 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 5d4fa4b1c74c..b6781030cfbd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -109,7 +109,7 @@ static void do_call_function(void) | |||
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | static void __smp_call_function_map(void (*func) (void *info), void *info, | 111 | static void __smp_call_function_map(void (*func) (void *info), void *info, |
| 112 | int nonatomic, int wait, cpumask_t map) | 112 | int wait, cpumask_t map) |
| 113 | { | 113 | { |
| 114 | struct call_data_struct data; | 114 | struct call_data_struct data; |
| 115 | int cpu, local = 0; | 115 | int cpu, local = 0; |
| @@ -162,7 +162,6 @@ out: | |||
| 162 | * smp_call_function: | 162 | * smp_call_function: |
| 163 | * @func: the function to run; this must be fast and non-blocking | 163 | * @func: the function to run; this must be fast and non-blocking |
| 164 | * @info: an arbitrary pointer to pass to the function | 164 | * @info: an arbitrary pointer to pass to the function |
| 165 | * @nonatomic: unused | ||
| 166 | * @wait: if true, wait (atomically) until function has completed on other CPUs | 165 | * @wait: if true, wait (atomically) until function has completed on other CPUs |
| 167 | * | 166 | * |
| 168 | * Run a function on all other CPUs. | 167 | * Run a function on all other CPUs. |
| @@ -170,15 +169,14 @@ out: | |||
| 170 | * You must not call this function with disabled interrupts, from a | 169 | * You must not call this function with disabled interrupts, from a |
| 171 | * hardware interrupt handler or from a bottom half. | 170 | * hardware interrupt handler or from a bottom half. |
| 172 | */ | 171 | */ |
| 173 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | 172 | int smp_call_function(void (*func) (void *info), void *info, int wait) |
| 174 | int wait) | ||
| 175 | { | 173 | { |
| 176 | cpumask_t map; | 174 | cpumask_t map; |
| 177 | 175 | ||
| 178 | spin_lock(&call_lock); | 176 | spin_lock(&call_lock); |
| 179 | map = cpu_online_map; | 177 | map = cpu_online_map; |
| 180 | cpu_clear(smp_processor_id(), map); | 178 | cpu_clear(smp_processor_id(), map); |
| 181 | __smp_call_function_map(func, info, nonatomic, wait, map); | 179 | __smp_call_function_map(func, info, wait, map); |
| 182 | spin_unlock(&call_lock); | 180 | spin_unlock(&call_lock); |
| 183 | return 0; | 181 | return 0; |
| 184 | } | 182 | } |
| @@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function); | |||
| 189 | * @cpu: the CPU where func should run | 187 | * @cpu: the CPU where func should run |
| 190 | * @func: the function to run; this must be fast and non-blocking | 188 | * @func: the function to run; this must be fast and non-blocking |
| 191 | * @info: an arbitrary pointer to pass to the function | 189 | * @info: an arbitrary pointer to pass to the function |
| 192 | * @nonatomic: unused | ||
| 193 | * @wait: if true, wait (atomically) until function has completed on other CPUs | 190 | * @wait: if true, wait (atomically) until function has completed on other CPUs |
| 194 | * | 191 | * |
| 195 | * Run a function on one processor. | 192 | * Run a function on one processor. |
| @@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function); | |||
| 198 | * hardware interrupt handler or from a bottom half. | 195 | * hardware interrupt handler or from a bottom half. |
| 199 | */ | 196 | */ |
| 200 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 197 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
| 201 | int nonatomic, int wait) | 198 | int wait) |
| 202 | { | 199 | { |
| 203 | spin_lock(&call_lock); | 200 | spin_lock(&call_lock); |
| 204 | __smp_call_function_map(func, info, nonatomic, wait, | 201 | __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); |
| 205 | cpumask_of_cpu(cpu)); | ||
| 206 | spin_unlock(&call_lock); | 202 | spin_unlock(&call_lock); |
| 207 | return 0; | 203 | return 0; |
| 208 | } | 204 | } |
| @@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
| 228 | { | 224 | { |
| 229 | spin_lock(&call_lock); | 225 | spin_lock(&call_lock); |
| 230 | cpu_clear(smp_processor_id(), mask); | 226 | cpu_clear(smp_processor_id(), mask); |
| 231 | __smp_call_function_map(func, info, 0, wait, mask); | 227 | __smp_call_function_map(func, info, wait, mask); |
| 232 | spin_unlock(&call_lock); | 228 | spin_unlock(&call_lock); |
| 233 | return 0; | 229 | return 0; |
| 234 | } | 230 | } |
| @@ -303,7 +299,7 @@ static void smp_ptlb_callback(void *info) | |||
| 303 | 299 | ||
| 304 | void smp_ptlb_all(void) | 300 | void smp_ptlb_all(void) |
| 305 | { | 301 | { |
| 306 | on_each_cpu(smp_ptlb_callback, NULL, 0, 1); | 302 | on_each_cpu(smp_ptlb_callback, NULL, 1); |
| 307 | } | 303 | } |
| 308 | EXPORT_SYMBOL(smp_ptlb_all); | 304 | EXPORT_SYMBOL(smp_ptlb_all); |
| 309 | #endif /* ! CONFIG_64BIT */ | 305 | #endif /* ! CONFIG_64BIT */ |
| @@ -351,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit) | |||
| 351 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 347 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
| 352 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 348 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
| 353 | parms.orvals[cr] = 1 << bit; | 349 | parms.orvals[cr] = 1 << bit; |
| 354 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); | 350 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
| 355 | } | 351 | } |
| 356 | EXPORT_SYMBOL(smp_ctl_set_bit); | 352 | EXPORT_SYMBOL(smp_ctl_set_bit); |
| 357 | 353 | ||
| @@ -365,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
| 365 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 361 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
| 366 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 362 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
| 367 | parms.andvals[cr] = ~(1L << bit); | 363 | parms.andvals[cr] = ~(1L << bit); |
| 368 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); | 364 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
| 369 | } | 365 | } |
| 370 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 366 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
| 371 | 367 | ||
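The s390 hunk above shows the signature change carried through the rest of this merge: the unused nonatomic argument is dropped, leaving smp_call_function(func, info, wait) and smp_call_function_single(cpu, func, info, wait). A minimal caller sketch; flush_thing() is a made-up fast, non-blocking handler used only for illustration:

#include <linux/smp.h>

static void flush_thing(void *info)
{
        /* runs on the targeted CPU(s); must not sleep or block */
}

static void flush_everywhere(void)
{
        /* before: smp_call_function(flush_thing, NULL, 0, 1); */
        smp_call_function(flush_thing, NULL, 1);              /* wait == 1 */

        /* before: smp_call_function_single(0, flush_thing, NULL, 0, 1); */
        smp_call_function_single(0, flush_thing, NULL, 1);
}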
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7418bebb547f..8051e9326dfc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
| @@ -707,7 +707,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
| 707 | */ | 707 | */ |
| 708 | memset(&etr_sync, 0, sizeof(etr_sync)); | 708 | memset(&etr_sync, 0, sizeof(etr_sync)); |
| 709 | preempt_disable(); | 709 | preempt_disable(); |
| 710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0); | 710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0); |
| 711 | local_irq_disable(); | 711 | local_irq_disable(); |
| 712 | enable_sync_clock(); | 712 | enable_sync_clock(); |
| 713 | 713 | ||
| @@ -746,7 +746,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
| 746 | rc = -EAGAIN; | 746 | rc = -EAGAIN; |
| 747 | } | 747 | } |
| 748 | local_irq_enable(); | 748 | local_irq_enable(); |
| 749 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); | 749 | smp_call_function(clock_sync_cpu_end, NULL, 0); |
| 750 | preempt_enable(); | 750 | preempt_enable(); |
| 751 | return rc; | 751 | return rc; |
| 752 | } | 752 | } |
| @@ -926,7 +926,7 @@ static void etr_work_fn(struct work_struct *work) | |||
| 926 | if (!eacr.ea) { | 926 | if (!eacr.ea) { |
| 927 | /* Both ports offline. Reset everything. */ | 927 | /* Both ports offline. Reset everything. */ |
| 928 | eacr.dp = eacr.es = eacr.sl = 0; | 928 | eacr.dp = eacr.es = eacr.sl = 0; |
| 929 | on_each_cpu(disable_sync_clock, NULL, 0, 1); | 929 | on_each_cpu(disable_sync_clock, NULL, 1); |
| 930 | del_timer_sync(&etr_timer); | 930 | del_timer_sync(&etr_timer); |
| 931 | etr_update_eacr(eacr); | 931 | etr_update_eacr(eacr); |
| 932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9a854c8e5274..3e7384f4619c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
| @@ -688,6 +688,7 @@ config CRASH_DUMP | |||
| 688 | config SMP | 688 | config SMP |
| 689 | bool "Symmetric multi-processing support" | 689 | bool "Symmetric multi-processing support" |
| 690 | depends on SYS_SUPPORTS_SMP | 690 | depends on SYS_SUPPORTS_SMP |
| 691 | select USE_GENERIC_SMP_HELPERS | ||
| 691 | ---help--- | 692 | ---help--- |
| 692 | This enables support for systems with more than one CPU. If you have | 693 | This enables support for systems with more than one CPU. If you have |
| 693 | a system with only one CPU, like most personal computers, say N. If | 694 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 5d039d168f57..60c50841143e 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
| @@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map); | |||
| 36 | cpumask_t cpu_online_map; | 36 | cpumask_t cpu_online_map; |
| 37 | EXPORT_SYMBOL(cpu_online_map); | 37 | EXPORT_SYMBOL(cpu_online_map); |
| 38 | 38 | ||
| 39 | static atomic_t cpus_booted = ATOMIC_INIT(0); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Run specified function on a particular processor. | ||
| 43 | */ | ||
| 44 | void __smp_call_function(unsigned int cpu); | ||
| 45 | |||
| 46 | static inline void __init smp_store_cpu_info(unsigned int cpu) | 39 | static inline void __init smp_store_cpu_info(unsigned int cpu) |
| 47 | { | 40 | { |
| 48 | struct sh_cpuinfo *c = cpu_data + cpu; | 41 | struct sh_cpuinfo *c = cpu_data + cpu; |
| @@ -175,45 +168,20 @@ static void stop_this_cpu(void *unused) | |||
| 175 | 168 | ||
| 176 | void smp_send_stop(void) | 169 | void smp_send_stop(void) |
| 177 | { | 170 | { |
| 178 | smp_call_function(stop_this_cpu, 0, 1, 0); | 171 | smp_call_function(stop_this_cpu, 0, 0); |
| 179 | } | 172 | } |
| 180 | 173 | ||
| 181 | struct smp_fn_call_struct smp_fn_call = { | 174 | void arch_send_call_function_ipi(cpumask_t mask) |
| 182 | .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock), | ||
| 183 | .finished = ATOMIC_INIT(0), | ||
| 184 | }; | ||
| 185 | |||
| 186 | /* | ||
| 187 | * The caller of this wants the passed function to run on every cpu. If wait | ||
| 188 | * is set, wait until all cpus have finished the function before returning. | ||
| 189 | * The lock is here to protect the call structure. | ||
| 190 | * You must not call this function with disabled interrupts or from a | ||
| 191 | * hardware interrupt handler or from a bottom half handler. | ||
| 192 | */ | ||
| 193 | int smp_call_function(void (*func)(void *info), void *info, int retry, int wait) | ||
| 194 | { | 175 | { |
| 195 | unsigned int nr_cpus = atomic_read(&cpus_booted); | 176 | int cpu; |
| 196 | int i; | ||
| 197 | |||
| 198 | /* Can deadlock when called with interrupts disabled */ | ||
| 199 | WARN_ON(irqs_disabled()); | ||
| 200 | |||
| 201 | spin_lock(&smp_fn_call.lock); | ||
| 202 | |||
| 203 | atomic_set(&smp_fn_call.finished, 0); | ||
| 204 | smp_fn_call.fn = func; | ||
| 205 | smp_fn_call.data = info; | ||
| 206 | |||
| 207 | for (i = 0; i < nr_cpus; i++) | ||
| 208 | if (i != smp_processor_id()) | ||
| 209 | plat_send_ipi(i, SMP_MSG_FUNCTION); | ||
| 210 | |||
| 211 | if (wait) | ||
| 212 | while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1)); | ||
| 213 | 177 | ||
| 214 | spin_unlock(&smp_fn_call.lock); | 178 | for_each_cpu_mask(cpu, mask) |
| 179 | plat_send_ipi(cpu, SMP_MSG_FUNCTION); | ||
| 180 | } | ||
| 215 | 181 | ||
| 216 | return 0; | 182 | void arch_send_call_function_single_ipi(int cpu) |
| 183 | { | ||
| 184 | plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); | ||
| 217 | } | 185 | } |
| 218 | 186 | ||
| 219 | /* Not really SMP stuff ... */ | 187 | /* Not really SMP stuff ... */ |
| @@ -229,7 +197,7 @@ static void flush_tlb_all_ipi(void *info) | |||
| 229 | 197 | ||
| 230 | void flush_tlb_all(void) | 198 | void flush_tlb_all(void) |
| 231 | { | 199 | { |
| 232 | on_each_cpu(flush_tlb_all_ipi, 0, 1, 1); | 200 | on_each_cpu(flush_tlb_all_ipi, 0, 1); |
| 233 | } | 201 | } |
| 234 | 202 | ||
| 235 | static void flush_tlb_mm_ipi(void *mm) | 203 | static void flush_tlb_mm_ipi(void *mm) |
| @@ -255,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
| 255 | preempt_disable(); | 223 | preempt_disable(); |
| 256 | 224 | ||
| 257 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | 225 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { |
| 258 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); | 226 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); |
| 259 | } else { | 227 | } else { |
| 260 | int i; | 228 | int i; |
| 261 | for (i = 0; i < num_online_cpus(); i++) | 229 | for (i = 0; i < num_online_cpus(); i++) |
| @@ -292,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma, | |||
| 292 | fd.vma = vma; | 260 | fd.vma = vma; |
| 293 | fd.addr1 = start; | 261 | fd.addr1 = start; |
| 294 | fd.addr2 = end; | 262 | fd.addr2 = end; |
| 295 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); | 263 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); |
| 296 | } else { | 264 | } else { |
| 297 | int i; | 265 | int i; |
| 298 | for (i = 0; i < num_online_cpus(); i++) | 266 | for (i = 0; i < num_online_cpus(); i++) |
| @@ -316,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 316 | 284 | ||
| 317 | fd.addr1 = start; | 285 | fd.addr1 = start; |
| 318 | fd.addr2 = end; | 286 | fd.addr2 = end; |
| 319 | on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1); | 287 | on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); |
| 320 | } | 288 | } |
| 321 | 289 | ||
| 322 | static void flush_tlb_page_ipi(void *info) | 290 | static void flush_tlb_page_ipi(void *info) |
| @@ -335,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 335 | 303 | ||
| 336 | fd.vma = vma; | 304 | fd.vma = vma; |
| 337 | fd.addr1 = page; | 305 | fd.addr1 = page; |
| 338 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); | 306 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); |
| 339 | } else { | 307 | } else { |
| 340 | int i; | 308 | int i; |
| 341 | for (i = 0; i < num_online_cpus(); i++) | 309 | for (i = 0; i < num_online_cpus(); i++) |
| @@ -359,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr) | |||
| 359 | fd.addr1 = asid; | 327 | fd.addr1 = asid; |
| 360 | fd.addr2 = vaddr; | 328 | fd.addr2 = vaddr; |
| 361 | 329 | ||
| 362 | smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1); | 330 | smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); |
| 363 | local_flush_tlb_one(asid, vaddr); | 331 | local_flush_tlb_one(asid, vaddr); |
| 364 | } | 332 | } |
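The sh conversion makes the new split visible: once an architecture selects USE_GENERIC_SMP_HELPERS it no longer implements smp_call_function() itself and only supplies the two IPI senders shown above. The receiving side (not in this hunk) dispatches back into the generic code; a hedged sketch of that half, where handle_ipi_message() is an illustrative name rather than the real sh entry point:

static void handle_ipi_message(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                /* drain the queued (func, info) entries for broadcast calls */
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                /* drain this CPU's private single-call queue */
                generic_smp_call_function_single_interrupt();
                break;
        }
}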
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index fa63c68a1819..c099d96f1239 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
| @@ -807,7 +807,6 @@ extern unsigned long xcall_call_function; | |||
| 807 | * smp_call_function(): Run a function on all other CPUs. | 807 | * smp_call_function(): Run a function on all other CPUs. |
| 808 | * @func: The function to run. This must be fast and non-blocking. | 808 | * @func: The function to run. This must be fast and non-blocking. |
| 809 | * @info: An arbitrary pointer to pass to the function. | 809 | * @info: An arbitrary pointer to pass to the function. |
| 810 | * @nonatomic: currently unused. | ||
| 811 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | 810 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
| 812 | * | 811 | * |
| 813 | * Returns 0 on success, else a negative status code. Does not return until | 812 | * Returns 0 on success, else a negative status code. Does not return until |
| @@ -816,8 +815,8 @@ extern unsigned long xcall_call_function; | |||
| 816 | * You must not call this function with disabled interrupts or from a | 815 | * You must not call this function with disabled interrupts or from a |
| 817 | * hardware interrupt handler or from a bottom half handler. | 816 | * hardware interrupt handler or from a bottom half handler. |
| 818 | */ | 817 | */ |
| 819 | static int smp_call_function_mask(void (*func)(void *info), void *info, | 818 | static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, |
| 820 | int nonatomic, int wait, cpumask_t mask) | 819 | int wait, cpumask_t mask) |
| 821 | { | 820 | { |
| 822 | struct call_data_struct data; | 821 | struct call_data_struct data; |
| 823 | int cpus; | 822 | int cpus; |
| @@ -852,11 +851,9 @@ out_unlock: | |||
| 852 | return 0; | 851 | return 0; |
| 853 | } | 852 | } |
| 854 | 853 | ||
| 855 | int smp_call_function(void (*func)(void *info), void *info, | 854 | int smp_call_function(void (*func)(void *info), void *info, int wait) |
| 856 | int nonatomic, int wait) | ||
| 857 | { | 855 | { |
| 858 | return smp_call_function_mask(func, info, nonatomic, wait, | 856 | return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); |
| 859 | cpu_online_map); | ||
| 860 | } | 857 | } |
| 861 | 858 | ||
| 862 | void smp_call_function_client(int irq, struct pt_regs *regs) | 859 | void smp_call_function_client(int irq, struct pt_regs *regs) |
| @@ -893,7 +890,7 @@ static void tsb_sync(void *info) | |||
| 893 | 890 | ||
| 894 | void smp_tsb_sync(struct mm_struct *mm) | 891 | void smp_tsb_sync(struct mm_struct *mm) |
| 895 | { | 892 | { |
| 896 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | 893 | sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); |
| 897 | } | 894 | } |
| 898 | 895 | ||
| 899 | extern unsigned long xcall_flush_tlb_mm; | 896 | extern unsigned long xcall_flush_tlb_mm; |
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 6cfab2e4d340..ebefd2a14375 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c | |||
| @@ -344,7 +344,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm) | |||
| 344 | * also executing in this address space. | 344 | * also executing in this address space. |
| 345 | */ | 345 | */ |
| 346 | mm->context.sparc64_ctx_val = ctx; | 346 | mm->context.sparc64_ctx_val = ctx; |
| 347 | on_each_cpu(context_reload, mm, 0, 0); | 347 | on_each_cpu(context_reload, mm, 0); |
| 348 | } | 348 | } |
| 349 | spin_unlock(&ctx_alloc_lock); | 349 | spin_unlock(&ctx_alloc_lock); |
| 350 | } | 350 | } |
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index e1062ec36d40..be2d50c3aa95 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c | |||
| @@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu) | |||
| 214 | atomic_inc(&scf_finished); | 214 | atomic_inc(&scf_finished); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, | 217 | int smp_call_function(void (*_func)(void *info), void *_info, int wait) |
| 218 | int wait) | ||
| 219 | { | 218 | { |
| 220 | int cpus = num_online_cpus() - 1; | 219 | int cpus = num_online_cpus() - 1; |
| 221 | int i; | 220 | int i; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2642b4bf41b9..96e0c2ebc388 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -170,6 +170,7 @@ config GENERIC_PENDING_IRQ | |||
| 170 | config X86_SMP | 170 | config X86_SMP |
| 171 | bool | 171 | bool |
| 172 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) | 172 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) |
| 173 | select USE_GENERIC_SMP_HELPERS | ||
| 173 | default y | 174 | default y |
| 174 | 175 | ||
| 175 | config X86_32_SMP | 176 | config X86_32_SMP |
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 3e58b676d23b..a437d027f20b 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c | |||
| @@ -1340,6 +1340,10 @@ void __init smp_intr_init(void) | |||
| 1340 | 1340 | ||
| 1341 | /* IPI for generic function call */ | 1341 | /* IPI for generic function call */ |
| 1342 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 1342 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
| 1343 | |||
| 1344 | /* IPI for single call function */ | ||
| 1345 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
| 1346 | call_function_single_interrupt); | ||
| 1343 | } | 1347 | } |
| 1344 | #endif | 1348 | #endif |
| 1345 | 1349 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 987410745182..c4a7ec31394c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -364,7 +364,7 @@ static void mcheck_check_cpu(void *info) | |||
| 364 | 364 | ||
| 365 | static void mcheck_timer(struct work_struct *work) | 365 | static void mcheck_timer(struct work_struct *work) |
| 366 | { | 366 | { |
| 367 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); | 367 | on_each_cpu(mcheck_check_cpu, NULL, 1); |
| 368 | 368 | ||
| 369 | /* | 369 | /* |
| 370 | * Alert userspace if needed. If we logged an MCE, reduce the | 370 | * Alert userspace if needed. If we logged an MCE, reduce the |
| @@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
| 621 | * Collect entries that were still getting written before the | 621 | * Collect entries that were still getting written before the |
| 622 | * synchronize. | 622 | * synchronize. |
| 623 | */ | 623 | */ |
| 624 | on_each_cpu(collect_tscs, cpu_tsc, 1, 1); | 624 | on_each_cpu(collect_tscs, cpu_tsc, 1); |
| 625 | for (i = next; i < MCE_LOG_LEN; i++) { | 625 | for (i = next; i < MCE_LOG_LEN; i++) { |
| 626 | if (mcelog.entry[i].finished && | 626 | if (mcelog.entry[i].finished && |
| 627 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | 627 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { |
| @@ -746,7 +746,7 @@ static void mce_restart(void) | |||
| 746 | if (next_interval) | 746 | if (next_interval) |
| 747 | cancel_delayed_work(&mcheck_work); | 747 | cancel_delayed_work(&mcheck_work); |
| 748 | /* Timer race is harmless here */ | 748 | /* Timer race is harmless here */ |
| 749 | on_each_cpu(mce_init, NULL, 1, 1); | 749 | on_each_cpu(mce_init, NULL, 1); |
| 750 | next_interval = check_interval * HZ; | 750 | next_interval = check_interval * HZ; |
| 751 | if (next_interval) | 751 | if (next_interval) |
| 752 | schedule_delayed_work(&mcheck_work, | 752 | schedule_delayed_work(&mcheck_work, |
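on_each_cpu() loses the same argument and becomes on_each_cpu(func, info, wait), as in the machine-check hunks above and many call sites below. A small sketch, assuming a fast handler named poll_regs():

static void poll_regs(void *unused)
{
        /* must be fast and non-blocking; runs in IPI context on remote CPUs */
}

static void poll_all_cpus(void)
{
        /* before: on_each_cpu(poll_regs, NULL, 1, 1); */
        on_each_cpu(poll_regs, NULL, 1);        /* run everywhere, wait for completion */
}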
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index 00ccb6c14ec2..cc1fccdd31e0 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
| @@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); | |||
| 59 | 59 | ||
| 60 | static void mce_work_fn(struct work_struct *work) | 60 | static void mce_work_fn(struct work_struct *work) |
| 61 | { | 61 | { |
| 62 | on_each_cpu(mce_checkregs, NULL, 1, 1); | 62 | on_each_cpu(mce_checkregs, NULL, 1); |
| 63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); | 63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); |
| 64 | } | 64 | } |
| 65 | 65 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 105afe12beb0..6f23969c8faf 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
| @@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
| 223 | atomic_set(&data.gate,0); | 223 | atomic_set(&data.gate,0); |
| 224 | 224 | ||
| 225 | /* Start the ball rolling on other CPUs */ | 225 | /* Start the ball rolling on other CPUs */ |
| 226 | if (smp_call_function(ipi_handler, &data, 1, 0) != 0) | 226 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
| 227 | panic("mtrr: timed out waiting for other CPUs\n"); | 227 | panic("mtrr: timed out waiting for other CPUs\n"); |
| 228 | 228 | ||
| 229 | local_irq_save(flags); | 229 | local_irq_save(flags); |
| @@ -1682,7 +1682,7 @@ void mtrr_ap_init(void) | |||
| 1682 | */ | 1682 | */ |
| 1683 | void mtrr_save_state(void) | 1683 | void mtrr_save_state(void) |
| 1684 | { | 1684 | { |
| 1685 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); | 1685 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); |
| 1686 | } | 1686 | } |
| 1687 | 1687 | ||
| 1688 | static int __init mtrr_init_finialize(void) | 1688 | static int __init mtrr_init_finialize(void) |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 2e9bef6e3aa3..6d4bdc02388a 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void) | |||
| 189 | if (atomic_read(&nmi_active) <= 0) | 189 | if (atomic_read(&nmi_active) <= 0) |
| 190 | return; | 190 | return; |
| 191 | 191 | ||
| 192 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); | 192 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); |
| 193 | 193 | ||
| 194 | if (wd_ops) | 194 | if (wd_ops) |
| 195 | wd_ops->unreserve(); | 195 | wd_ops->unreserve(); |
| @@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void) | |||
| 213 | return; | 213 | return; |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); | 216 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); |
| 217 | touch_nmi_watchdog(); | 217 | touch_nmi_watchdog(); |
| 218 | } | 218 | } |
| 219 | 219 | ||
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 71f1c2654bec..2de5fa2bbf77 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
| @@ -96,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, | |||
| 96 | for (; count; count -= 16) { | 96 | for (; count; count -= 16) { |
| 97 | cmd.eax = pos; | 97 | cmd.eax = pos; |
| 98 | cmd.ecx = pos >> 32; | 98 | cmd.ecx = pos >> 32; |
| 99 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); | 99 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); |
| 100 | if (copy_to_user(tmp, &cmd, 16)) | 100 | if (copy_to_user(tmp, &cmd, 16)) |
| 101 | return -EFAULT; | 101 | return -EFAULT; |
| 102 | tmp += 16; | 102 | tmp += 16; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index ba41bf42748d..ae63e584c340 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
| @@ -816,6 +816,9 @@ END(invalidate_interrupt\num) | |||
| 816 | ENTRY(call_function_interrupt) | 816 | ENTRY(call_function_interrupt) |
| 817 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt | 817 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt |
| 818 | END(call_function_interrupt) | 818 | END(call_function_interrupt) |
| 819 | ENTRY(call_function_single_interrupt) | ||
| 820 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt | ||
| 821 | END(call_function_single_interrupt) | ||
| 819 | ENTRY(irq_move_cleanup_interrupt) | 822 | ENTRY(irq_move_cleanup_interrupt) |
| 820 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt | 823 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt |
| 821 | END(irq_move_cleanup_interrupt) | 824 | END(irq_move_cleanup_interrupt) |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 603261a5885c..558abf4c796a 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
| @@ -1569,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
| 1569 | 1569 | ||
| 1570 | void print_all_local_APICs(void) | 1570 | void print_all_local_APICs(void) |
| 1571 | { | 1571 | { |
| 1572 | on_each_cpu(print_local_APIC, NULL, 1, 1); | 1572 | on_each_cpu(print_local_APIC, NULL, 1); |
| 1573 | } | 1573 | } |
| 1574 | 1574 | ||
| 1575 | void /*__init*/ print_PIC(void) | 1575 | void /*__init*/ print_PIC(void) |
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index b16ef029cf88..6510cde36b35 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
| @@ -1160,7 +1160,7 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
| 1160 | 1160 | ||
| 1161 | void print_all_local_APICs (void) | 1161 | void print_all_local_APICs (void) |
| 1162 | { | 1162 | { |
| 1163 | on_each_cpu(print_local_APIC, NULL, 1, 1); | 1163 | on_each_cpu(print_local_APIC, NULL, 1); |
| 1164 | } | 1164 | } |
| 1165 | 1165 | ||
| 1166 | void __apicdebuginit print_PIC(void) | 1166 | void __apicdebuginit print_PIC(void) |
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 31f49e8f46a7..0373e88de95a 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c | |||
| @@ -199,6 +199,10 @@ void __init native_init_IRQ(void) | |||
| 199 | /* IPI for generic function call */ | 199 | /* IPI for generic function call */ |
| 200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
| 201 | 201 | ||
| 202 | /* IPI for generic single function call */ | ||
| 203 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
| 204 | call_function_single_interrupt); | ||
| 205 | |||
| 202 | /* Low priority IPI to cleanup after moving an irq */ | 206 | /* Low priority IPI to cleanup after moving an irq */ |
| 203 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 207 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
| 204 | #endif | 208 | #endif |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 21f2bae98c15..a8449571858a 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
| @@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) | |||
| 68 | load_LDT(pc); | 68 | load_LDT(pc); |
| 69 | mask = cpumask_of_cpu(smp_processor_id()); | 69 | mask = cpumask_of_cpu(smp_processor_id()); |
| 70 | if (!cpus_equal(current->mm->cpu_vm_mask, mask)) | 70 | if (!cpus_equal(current->mm->cpu_vm_mask, mask)) |
| 71 | smp_call_function(flush_ldt, current->mm, 1, 1); | 71 | smp_call_function(flush_ldt, current->mm, 1); |
| 72 | preempt_enable(); | 72 | preempt_enable(); |
| 73 | #else | 73 | #else |
| 74 | load_LDT(pc); | 74 | load_LDT(pc); |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 716b89284be0..ec024b3baad0 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
| @@ -130,7 +130,7 @@ int __init check_nmi_watchdog(void) | |||
| 130 | 130 | ||
| 131 | #ifdef CONFIG_SMP | 131 | #ifdef CONFIG_SMP |
| 132 | if (nmi_watchdog == NMI_LOCAL_APIC) | 132 | if (nmi_watchdog == NMI_LOCAL_APIC) |
| 133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); | 133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); |
| 134 | #endif | 134 | #endif |
| 135 | 135 | ||
| 136 | for_each_possible_cpu(cpu) | 136 | for_each_possible_cpu(cpu) |
| @@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused) | |||
| 272 | void acpi_nmi_enable(void) | 272 | void acpi_nmi_enable(void) |
| 273 | { | 273 | { |
| 274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
| 275 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | 275 | on_each_cpu(__acpi_nmi_enable, NULL, 1); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | static void __acpi_nmi_disable(void *__unused) | 278 | static void __acpi_nmi_disable(void *__unused) |
| @@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused) | |||
| 286 | void acpi_nmi_disable(void) | 286 | void acpi_nmi_disable(void) |
| 287 | { | 287 | { |
| 288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
| 289 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | 289 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | void setup_apic_nmi_watchdog(void *unused) | 292 | void setup_apic_nmi_watchdog(void *unused) |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4061d63aabe7..7dceea947232 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -132,7 +132,7 @@ void cpu_idle_wait(void) | |||
| 132 | { | 132 | { |
| 133 | smp_mb(); | 133 | smp_mb(); |
| 134 | /* kick all the CPUs so that they exit out of pm_idle */ | 134 | /* kick all the CPUs so that they exit out of pm_idle */ |
| 135 | smp_call_function(do_nothing, NULL, 0, 1); | 135 | smp_call_function(do_nothing, NULL, 1); |
| 136 | } | 136 | } |
| 137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
| 138 | 138 | ||
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0cb7aadc87cd..361b7a4c640c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
| @@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu) | |||
| 121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | /* | 124 | void native_send_call_func_single_ipi(int cpu) |
| 125 | * Structure and data for smp_call_function(). This is designed to minimise | ||
| 126 | * static memory requirements. It also looks cleaner. | ||
| 127 | */ | ||
| 128 | static DEFINE_SPINLOCK(call_lock); | ||
| 129 | |||
| 130 | struct call_data_struct { | ||
| 131 | void (*func) (void *info); | ||
| 132 | void *info; | ||
| 133 | atomic_t started; | ||
| 134 | atomic_t finished; | ||
| 135 | int wait; | ||
| 136 | }; | ||
| 137 | |||
| 138 | void lock_ipi_call_lock(void) | ||
| 139 | { | 125 | { |
| 140 | spin_lock_irq(&call_lock); | 126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
| 141 | } | ||
| 142 | |||
| 143 | void unlock_ipi_call_lock(void) | ||
| 144 | { | ||
| 145 | spin_unlock_irq(&call_lock); | ||
| 146 | } | ||
| 147 | |||
| 148 | static struct call_data_struct *call_data; | ||
| 149 | |||
| 150 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
| 151 | int nonatomic, int wait) | ||
| 152 | { | ||
| 153 | struct call_data_struct data; | ||
| 154 | int cpus = num_online_cpus() - 1; | ||
| 155 | |||
| 156 | if (!cpus) | ||
| 157 | return; | ||
| 158 | |||
| 159 | data.func = func; | ||
| 160 | data.info = info; | ||
| 161 | atomic_set(&data.started, 0); | ||
| 162 | data.wait = wait; | ||
| 163 | if (wait) | ||
| 164 | atomic_set(&data.finished, 0); | ||
| 165 | |||
| 166 | call_data = &data; | ||
| 167 | mb(); | ||
| 168 | |||
| 169 | /* Send a message to all other CPUs and wait for them to respond */ | ||
| 170 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
| 171 | |||
| 172 | /* Wait for response */ | ||
| 173 | while (atomic_read(&data.started) != cpus) | ||
| 174 | cpu_relax(); | ||
| 175 | |||
| 176 | if (wait) | ||
| 177 | while (atomic_read(&data.finished) != cpus) | ||
| 178 | cpu_relax(); | ||
| 179 | } | 127 | } |
| 180 | 128 | ||
| 181 | 129 | void native_send_call_func_ipi(cpumask_t mask) | |
| 182 | /** | ||
| 183 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
| 184 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
| 185 | * @func: The function to run. This must be fast and non-blocking. | ||
| 186 | * @info: An arbitrary pointer to pass to the function. | ||
| 187 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
| 188 | * | ||
| 189 | * Returns 0 on success, else a negative status code. | ||
| 190 | * | ||
| 191 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 192 | * it returns just before the target cpu calls @func. | ||
| 193 | * | ||
| 194 | * You must not call this function with disabled interrupts or from a | ||
| 195 | * hardware interrupt handler or from a bottom half handler. | ||
| 196 | */ | ||
| 197 | static int | ||
| 198 | native_smp_call_function_mask(cpumask_t mask, | ||
| 199 | void (*func)(void *), void *info, | ||
| 200 | int wait) | ||
| 201 | { | 130 | { |
| 202 | struct call_data_struct data; | ||
| 203 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
| 204 | int cpus; | ||
| 205 | |||
| 206 | /* Can deadlock when called with interrupts disabled */ | ||
| 207 | WARN_ON(irqs_disabled()); | ||
| 208 | |||
| 209 | /* Holding any lock stops cpus from going down. */ | ||
| 210 | spin_lock(&call_lock); | ||
| 211 | 132 | ||
| 212 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
| 213 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
| 214 | 135 | ||
| 215 | cpus_and(mask, mask, allbutself); | ||
| 216 | cpus = cpus_weight(mask); | ||
| 217 | |||
| 218 | if (!cpus) { | ||
| 219 | spin_unlock(&call_lock); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | data.func = func; | ||
| 224 | data.info = info; | ||
| 225 | atomic_set(&data.started, 0); | ||
| 226 | data.wait = wait; | ||
| 227 | if (wait) | ||
| 228 | atomic_set(&data.finished, 0); | ||
| 229 | |||
| 230 | call_data = &data; | ||
| 231 | wmb(); | ||
| 232 | |||
| 233 | /* Send a message to other CPUs */ | ||
| 234 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(mask, allbutself) && |
| 235 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
| 236 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
| 237 | else | 139 | else |
| 238 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | 140 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); |
| 239 | |||
| 240 | /* Wait for response */ | ||
| 241 | while (atomic_read(&data.started) != cpus) | ||
| 242 | cpu_relax(); | ||
| 243 | |||
| 244 | if (wait) | ||
| 245 | while (atomic_read(&data.finished) != cpus) | ||
| 246 | cpu_relax(); | ||
| 247 | spin_unlock(&call_lock); | ||
| 248 | |||
| 249 | return 0; | ||
| 250 | } | 141 | } |
| 251 | 142 | ||
| 252 | static void stop_this_cpu(void *dummy) | 143 | static void stop_this_cpu(void *dummy) |
| @@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy) | |||
| 268 | 159 | ||
| 269 | static void native_smp_send_stop(void) | 160 | static void native_smp_send_stop(void) |
| 270 | { | 161 | { |
| 271 | int nolock; | ||
| 272 | unsigned long flags; | 162 | unsigned long flags; |
| 273 | 163 | ||
| 274 | if (reboot_force) | 164 | if (reboot_force) |
| 275 | return; | 165 | return; |
| 276 | 166 | ||
| 277 | /* Don't deadlock on the call lock in panic */ | 167 | smp_call_function(stop_this_cpu, NULL, 0); |
| 278 | nolock = !spin_trylock(&call_lock); | ||
| 279 | local_irq_save(flags); | 168 | local_irq_save(flags); |
| 280 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
| 281 | if (!nolock) | ||
| 282 | spin_unlock(&call_lock); | ||
| 283 | disable_local_APIC(); | 169 | disable_local_APIC(); |
| 284 | local_irq_restore(flags); | 170 | local_irq_restore(flags); |
| 285 | } | 171 | } |
| @@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs) | |||
| 301 | 187 | ||
| 302 | void smp_call_function_interrupt(struct pt_regs *regs) | 188 | void smp_call_function_interrupt(struct pt_regs *regs) |
| 303 | { | 189 | { |
| 304 | void (*func) (void *info) = call_data->func; | ||
| 305 | void *info = call_data->info; | ||
| 306 | int wait = call_data->wait; | ||
| 307 | |||
| 308 | ack_APIC_irq(); | 190 | ack_APIC_irq(); |
| 309 | /* | ||
| 310 | * Notify initiating CPU that I've grabbed the data and am | ||
| 311 | * about to execute the function | ||
| 312 | */ | ||
| 313 | mb(); | ||
| 314 | atomic_inc(&call_data->started); | ||
| 315 | /* | ||
| 316 | * At this point the info structure may be out of scope unless wait==1 | ||
| 317 | */ | ||
| 318 | irq_enter(); | 191 | irq_enter(); |
| 319 | (*func)(info); | 192 | generic_smp_call_function_interrupt(); |
| 320 | #ifdef CONFIG_X86_32 | 193 | #ifdef CONFIG_X86_32 |
| 321 | __get_cpu_var(irq_stat).irq_call_count++; | 194 | __get_cpu_var(irq_stat).irq_call_count++; |
| 322 | #else | 195 | #else |
| 323 | add_pda(irq_call_count, 1); | 196 | add_pda(irq_call_count, 1); |
| 324 | #endif | 197 | #endif |
| 325 | irq_exit(); | 198 | irq_exit(); |
| 199 | } | ||
| 326 | 200 | ||
| 327 | if (wait) { | 201 | void smp_call_function_single_interrupt(struct pt_regs *regs) |
| 328 | mb(); | 202 | { |
| 329 | atomic_inc(&call_data->finished); | 203 | ack_APIC_irq(); |
| 330 | } | 204 | irq_enter(); |
| 205 | generic_smp_call_function_single_interrupt(); | ||
| 206 | #ifdef CONFIG_X86_32 | ||
| 207 | __get_cpu_var(irq_stat).irq_call_count++; | ||
| 208 | #else | ||
| 209 | add_pda(irq_call_count, 1); | ||
| 210 | #endif | ||
| 211 | irq_exit(); | ||
| 331 | } | 212 | } |
| 332 | 213 | ||
| 333 | struct smp_ops smp_ops = { | 214 | struct smp_ops smp_ops = { |
| @@ -338,7 +219,8 @@ struct smp_ops smp_ops = { | |||
| 338 | 219 | ||
| 339 | .smp_send_stop = native_smp_send_stop, | 220 | .smp_send_stop = native_smp_send_stop, |
| 340 | .smp_send_reschedule = native_smp_send_reschedule, | 221 | .smp_send_reschedule = native_smp_send_reschedule, |
| 341 | .smp_call_function_mask = native_smp_call_function_mask, | 222 | |
| 223 | .send_call_func_ipi = native_send_call_func_ipi, | ||
| 224 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
| 342 | }; | 225 | }; |
| 343 | EXPORT_SYMBOL_GPL(smp_ops); | 226 | EXPORT_SYMBOL_GPL(smp_ops); |
| 344 | |||
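After this rewrite x86 no longer carries its own call_data protocol: the interrupt handlers acknowledge the APIC, bump irq_call_count and defer to the generic dispatchers, and smp_ops keeps only the two senders. Presumably the arch header then routes the generic hooks through those ops, roughly as sketched below (the wrapper names mirror the hunk, but the header itself is outside this section); paravirt backends can install their own senders through the same ops table:

static inline void arch_send_call_function_single_ipi(int cpu)
{
        smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
{
        smp_ops.send_call_func_ipi(mask);
}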
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f35c2d8016ac..687376ab07e8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -327,12 +327,12 @@ static void __cpuinit start_secondary(void *unused) | |||
| 327 | * lock helps us to not include this cpu in a currently in progress | 327 | * lock helps us to not include this cpu in a currently in progress |
| 328 | * smp_call_function(). | 328 | * smp_call_function(). |
| 329 | */ | 329 | */ |
| 330 | lock_ipi_call_lock(); | 330 | ipi_call_lock_irq(); |
| 331 | #ifdef CONFIG_X86_IO_APIC | 331 | #ifdef CONFIG_X86_IO_APIC |
| 332 | setup_vector_irq(smp_processor_id()); | 332 | setup_vector_irq(smp_processor_id()); |
| 333 | #endif | 333 | #endif |
| 334 | cpu_set(smp_processor_id(), cpu_online_map); | 334 | cpu_set(smp_processor_id(), cpu_online_map); |
| 335 | unlock_ipi_call_lock(); | 335 | ipi_call_unlock_irq(); |
| 336 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | 336 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
| 337 | 337 | ||
| 338 | setup_secondary_clock(); | 338 | setup_secondary_clock(); |
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 3449064d141a..99941b37eca0 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c | |||
| @@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu) | |||
| 25 | per_cpu(cpu_number, cpu) = cpu; | 25 | per_cpu(cpu_number, cpu) = cpu; |
| 26 | } | 26 | } |
| 27 | #endif | 27 | #endif |
| 28 | |||
| 29 | /** | ||
| 30 | * smp_call_function(): Run a function on all other CPUs. | ||
| 31 | * @func: The function to run. This must be fast and non-blocking. | ||
| 32 | * @info: An arbitrary pointer to pass to the function. | ||
| 33 | * @nonatomic: Unused. | ||
| 34 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
| 35 | * | ||
| 36 | * Returns 0 on success, else a negative status code. | ||
| 37 | * | ||
| 38 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 39 | * it returns just before the target cpu calls @func. | ||
| 40 | * | ||
| 41 | * You must not call this function with disabled interrupts or from a | ||
| 42 | * hardware interrupt handler or from a bottom half handler. | ||
| 43 | */ | ||
| 44 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
| 45 | int wait) | ||
| 46 | { | ||
| 47 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
| 48 | } | ||
| 49 | EXPORT_SYMBOL(smp_call_function); | ||
| 50 | |||
| 51 | /** | ||
| 52 | * smp_call_function_single - Run a function on a specific CPU | ||
| 53 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
| 54 | * @func: The function to run. This must be fast and non-blocking. | ||
| 55 | * @info: An arbitrary pointer to pass to the function. | ||
| 56 | * @nonatomic: Unused. | ||
| 57 | * @wait: If true, wait until function has completed on other CPUs. | ||
| 58 | * | ||
| 59 | * Returns 0 on success, else a negative status code. | ||
| 60 | * | ||
| 61 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 62 | * it returns just before the target cpu calls @func. | ||
| 63 | */ | ||
| 64 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 65 | int nonatomic, int wait) | ||
| 66 | { | ||
| 67 | /* prevent preemption and reschedule on another processor */ | ||
| 68 | int ret; | ||
| 69 | int me = get_cpu(); | ||
| 70 | if (cpu == me) { | ||
| 71 | local_irq_disable(); | ||
| 72 | func(info); | ||
| 73 | local_irq_enable(); | ||
| 74 | put_cpu(); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
| 79 | |||
| 80 | put_cpu(); | ||
| 81 | return ret; | ||
| 82 | } | ||
| 83 | EXPORT_SYMBOL(smp_call_function_single); | ||
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 9bb2363851af..fec1ecedc9b7 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c | |||
| @@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info) | |||
| 238 | 238 | ||
| 239 | void flush_tlb_all(void) | 239 | void flush_tlb_all(void) |
| 240 | { | 240 | { |
| 241 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 241 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
| 242 | } | 242 | } |
| 243 | 243 | ||
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 5039d0f097a2..dcbf7a1159ea 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
| @@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info) | |||
| 275 | 275 | ||
| 276 | void flush_tlb_all(void) | 276 | void flush_tlb_all(void) |
| 277 | { | 277 | { |
| 278 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 278 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
| 279 | } | 279 | } |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index e50740d32314..0b8b6690a86d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
| @@ -279,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | |||
| 279 | { | 279 | { |
| 280 | long cpu = (long)arg; | 280 | long cpu = (long)arg; |
| 281 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | 281 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) |
| 282 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); | 282 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); |
| 283 | return NOTIFY_DONE; | 283 | return NOTIFY_DONE; |
| 284 | } | 284 | } |
| 285 | 285 | ||
| @@ -302,7 +302,7 @@ static int __init vsyscall_init(void) | |||
| 302 | #ifdef CONFIG_SYSCTL | 302 | #ifdef CONFIG_SYSCTL |
| 303 | register_sysctl_table(kernel_root_table2); | 303 | register_sysctl_table(kernel_root_table2); |
| 304 | #endif | 304 | #endif |
| 305 | on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); | 305 | on_each_cpu(cpu_vsyscall_init, NULL, 1); |
| 306 | hotcpu_notifier(cpu_vsyscall_notifier, 0); | 306 | hotcpu_notifier(cpu_vsyscall_notifier, 0); |
| 307 | return 0; | 307 | return 0; |
| 308 | } | 308 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 540e95179074..10ce6ee4c491 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx) | |||
| 335 | { | 335 | { |
| 336 | if (vmx->vcpu.cpu == -1) | 336 | if (vmx->vcpu.cpu == -1) |
| 337 | return; | 337 | return; |
| 338 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); | 338 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); |
| 339 | vmx->launched = 0; | 339 | vmx->launched = 0; |
| 340 | } | 340 | } |
| 341 | 341 | ||
| @@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | |||
| 2968 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2968 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2969 | 2969 | ||
| 2970 | if (vmx->vmcs) { | 2970 | if (vmx->vmcs) { |
| 2971 | on_each_cpu(__vcpu_clear, vmx, 0, 1); | 2971 | on_each_cpu(__vcpu_clear, vmx, 1); |
| 2972 | free_vmcs(vmx->vmcs); | 2972 | free_vmcs(vmx->vmcs); |
| 2973 | vmx->vmcs = NULL; | 2973 | vmx->vmcs = NULL; |
| 2974 | } | 2974 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63a77caa59f1..0faa2546b1cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | |||
| 4044 | * So need not to call smp_call_function_single() in that case. | 4044 | * So need not to call smp_call_function_single() in that case. |
| 4045 | */ | 4045 | */ |
| 4046 | if (vcpu->guest_mode && vcpu->cpu != cpu) | 4046 | if (vcpu->guest_mode && vcpu->cpu != cpu) |
| 4047 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); | 4047 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); |
| 4048 | put_cpu(); | 4048 | put_cpu(); |
| 4049 | } | 4049 | } |
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 57d043fa893e..d5a2b39f882b 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
| @@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | |||
| 30 | 30 | ||
| 31 | rv.msr_no = msr_no; | 31 | rv.msr_no = msr_no; |
| 32 | if (safe) { | 32 | if (safe) { |
| 33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); | 33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); |
| 34 | err = rv.err; | 34 | err = rv.err; |
| 35 | } else { | 35 | } else { |
| 36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); | 36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); |
| 37 | } | 37 | } |
| 38 | *l = rv.l; | 38 | *l = rv.l; |
| 39 | *h = rv.h; | 39 | *h = rv.h; |
| @@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | |||
| 64 | rv.l = l; | 64 | rv.l = l; |
| 65 | rv.h = h; | 65 | rv.h = h; |
| 66 | if (safe) { | 66 | if (safe) { |
| 67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); | 67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); |
| 68 | err = rv.err; | 68 | err = rv.err; |
| 69 | } else { | 69 | } else { |
| 70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); | 70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | return err; | 73 | return err; |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 8dedd01e909f..ee0fba092157 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
| @@ -950,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy) | |||
| 950 | halt(); | 950 | halt(); |
| 951 | } | 951 | } |
| 952 | 952 | ||
| 953 | static DEFINE_SPINLOCK(call_lock); | ||
| 954 | |||
| 955 | struct call_data_struct { | ||
| 956 | void (*func) (void *info); | ||
| 957 | void *info; | ||
| 958 | volatile unsigned long started; | ||
| 959 | volatile unsigned long finished; | ||
| 960 | int wait; | ||
| 961 | }; | ||
| 962 | |||
| 963 | static struct call_data_struct *call_data; | ||
| 964 | |||
| 965 | /* execute a thread on a new CPU. The function to be called must be | 953 | /* execute a thread on a new CPU. The function to be called must be |
| 966 | * previously set up. This is used to schedule a function for | 954 | * previously set up. This is used to schedule a function for |
| 967 | * execution on all CPUs - set up the function then broadcast a | 955 | * execution on all CPUs - set up the function then broadcast a |
| 968 | * function_interrupt CPI to come here on each CPU */ | 956 | * function_interrupt CPI to come here on each CPU */ |
| 969 | static void smp_call_function_interrupt(void) | 957 | static void smp_call_function_interrupt(void) |
| 970 | { | 958 | { |
| 971 | void (*func) (void *info) = call_data->func; | ||
| 972 | void *info = call_data->info; | ||
| 973 | /* must take copy of wait because call_data may be replaced | ||
| 974 | * unless the function is waiting for us to finish */ | ||
| 975 | int wait = call_data->wait; | ||
| 976 | __u8 cpu = smp_processor_id(); | ||
| 977 | |||
| 978 | /* | ||
| 979 | * Notify initiating CPU that I've grabbed the data and am | ||
| 980 | * about to execute the function | ||
| 981 | */ | ||
| 982 | mb(); | ||
| 983 | if (!test_and_clear_bit(cpu, &call_data->started)) { | ||
| 984 | /* If the bit wasn't set, this could be a replay */ | ||
| 985 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion" | ||
| 986 | " with no call pending\n", cpu); | ||
| 987 | return; | ||
| 988 | } | ||
| 989 | /* | ||
| 990 | * At this point the info structure may be out of scope unless wait==1 | ||
| 991 | */ | ||
| 992 | irq_enter(); | 959 | irq_enter(); |
| 993 | (*func) (info); | 960 | generic_smp_call_function_interrupt(); |
| 994 | __get_cpu_var(irq_stat).irq_call_count++; | 961 | __get_cpu_var(irq_stat).irq_call_count++; |
| 995 | irq_exit(); | 962 | irq_exit(); |
| 996 | if (wait) { | ||
| 997 | mb(); | ||
| 998 | clear_bit(cpu, &call_data->finished); | ||
| 999 | } | ||
| 1000 | } | 963 | } |
| 1001 | 964 | ||
| 1002 | static int | 965 | static void smp_call_function_single_interrupt(void) |
| 1003 | voyager_smp_call_function_mask(cpumask_t cpumask, | ||
| 1004 | void (*func) (void *info), void *info, int wait) | ||
| 1005 | { | 966 | { |
| 1006 | struct call_data_struct data; | 967 | irq_enter(); |
| 1007 | u32 mask = cpus_addr(cpumask)[0]; | 968 | generic_smp_call_function_single_interrupt(); |
| 1008 | 969 | __get_cpu_var(irq_stat).irq_call_count++; | |
| 1009 | mask &= ~(1 << smp_processor_id()); | 970 | irq_exit(); |
| 1010 | |||
| 1011 | if (!mask) | ||
| 1012 | return 0; | ||
| 1013 | |||
| 1014 | /* Can deadlock when called with interrupts disabled */ | ||
| 1015 | WARN_ON(irqs_disabled()); | ||
| 1016 | |||
| 1017 | data.func = func; | ||
| 1018 | data.info = info; | ||
| 1019 | data.started = mask; | ||
| 1020 | data.wait = wait; | ||
| 1021 | if (wait) | ||
| 1022 | data.finished = mask; | ||
| 1023 | |||
| 1024 | spin_lock(&call_lock); | ||
| 1025 | call_data = &data; | ||
| 1026 | wmb(); | ||
| 1027 | /* Send a message to all other CPUs and wait for them to respond */ | ||
| 1028 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | ||
| 1029 | |||
| 1030 | /* Wait for response */ | ||
| 1031 | while (data.started) | ||
| 1032 | barrier(); | ||
| 1033 | |||
| 1034 | if (wait) | ||
| 1035 | while (data.finished) | ||
| 1036 | barrier(); | ||
| 1037 | |||
| 1038 | spin_unlock(&call_lock); | ||
| 1039 | |||
| 1040 | return 0; | ||
| 1041 | } | 971 | } |
| 1042 | 972 | ||
| 1043 | /* Sorry about the name. In an APIC based system, the APICs | 973 | /* Sorry about the name. In an APIC based system, the APICs |
| @@ -1094,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs) | |||
| 1094 | smp_call_function_interrupt(); | 1024 | smp_call_function_interrupt(); |
| 1095 | } | 1025 | } |
| 1096 | 1026 | ||
| 1027 | void smp_qic_call_function_single_interrupt(struct pt_regs *regs) | ||
| 1028 | { | ||
| 1029 | ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); | ||
| 1030 | smp_call_function_single_interrupt(); | ||
| 1031 | } | ||
| 1032 | |||
| 1097 | void smp_vic_cpi_interrupt(struct pt_regs *regs) | 1033 | void smp_vic_cpi_interrupt(struct pt_regs *regs) |
| 1098 | { | 1034 | { |
| 1099 | struct pt_regs *old_regs = set_irq_regs(regs); | 1035 | struct pt_regs *old_regs = set_irq_regs(regs); |
| @@ -1114,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs) | |||
| 1114 | smp_enable_irq_interrupt(); | 1050 | smp_enable_irq_interrupt(); |
| 1115 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | 1051 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) |
| 1116 | smp_call_function_interrupt(); | 1052 | smp_call_function_interrupt(); |
| 1053 | if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) | ||
| 1054 | smp_call_function_single_interrupt(); | ||
| 1117 | set_irq_regs(old_regs); | 1055 | set_irq_regs(old_regs); |
| 1118 | } | 1056 | } |
| 1119 | 1057 | ||
| @@ -1129,7 +1067,7 @@ static void do_flush_tlb_all(void *info) | |||
| 1129 | /* flush the TLB of every active CPU in the system */ | 1067 | /* flush the TLB of every active CPU in the system */ |
| 1130 | void flush_tlb_all(void) | 1068 | void flush_tlb_all(void) |
| 1131 | { | 1069 | { |
| 1132 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | 1070 | on_each_cpu(do_flush_tlb_all, 0, 1); |
| 1133 | } | 1071 | } |
| 1134 | 1072 | ||
| 1135 | /* send a reschedule CPI to one CPU by physical CPU number*/ | 1073 | /* send a reschedule CPI to one CPU by physical CPU number*/ |
| @@ -1161,7 +1099,7 @@ int safe_smp_processor_id(void) | |||
| 1161 | /* broadcast a halt to all other CPUs */ | 1099 | /* broadcast a halt to all other CPUs */ |
| 1162 | static void voyager_smp_send_stop(void) | 1100 | static void voyager_smp_send_stop(void) |
| 1163 | { | 1101 | { |
| 1164 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | 1102 | smp_call_function(smp_stop_cpu_function, NULL, 1); |
| 1165 | } | 1103 | } |
| 1166 | 1104 | ||
| 1167 | /* this function is triggered in time.c when a clock tick fires | 1105 | /* this function is triggered in time.c when a clock tick fires |
| @@ -1848,5 +1786,7 @@ struct smp_ops smp_ops = { | |||
| 1848 | 1786 | ||
| 1849 | .smp_send_stop = voyager_smp_send_stop, | 1787 | .smp_send_stop = voyager_smp_send_stop, |
| 1850 | .smp_send_reschedule = voyager_smp_send_reschedule, | 1788 | .smp_send_reschedule = voyager_smp_send_reschedule, |
| 1851 | .smp_call_function_mask = voyager_smp_call_function_mask, | 1789 | |
| 1790 | .send_call_func_ipi = native_send_call_func_ipi, | ||
| 1791 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
| 1852 | }; | 1792 | }; |
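
The Voyager conversion above is the template for every architecture in this series: an arch that switches to the generic helpers drops its private call_data bookkeeping and only has to send two IPIs and forward them to the generic handlers. A minimal sketch of that arch contract follows; the arch_* prototypes are the ones the generic code expects, while my_arch_send_ipi_mask() and the MY_* vector names are placeholders, not real kernel symbols.

    /* Sketch of the per-arch glue assumed by the generic helpers. */
    #include <linux/smp.h>
    #include <linux/hardirq.h>

    /* Send side: kick the CPUs that have call data queued for them. */
    void arch_send_call_function_ipi(cpumask_t mask)
    {
            my_arch_send_ipi_mask(mask, MY_CALL_FUNCTION_IPI);        /* placeholder */
    }

    void arch_send_call_function_single_ipi(int cpu)
    {
            my_arch_send_ipi_mask(cpumask_of_cpu(cpu), MY_CALL_FUNC_SINGLE_IPI);
    }

    /* Receive side: the IPI handlers run the generic queues with
     * interrupts disabled, as the Voyager handlers above (and the Xen
     * handlers further down) now do. */
    static void my_call_function_ipi(void)
    {
            irq_enter();
            generic_smp_call_function_interrupt();
            irq_exit();
    }

    static void my_call_function_single_ipi(void)
    {
            irq_enter();
            generic_smp_call_function_single_interrupt();
            irq_exit();
    }

The irq_call_count increment seen in the x86 handlers is per-arch interrupt accounting, not part of this contract.
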
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 47f4e2e4a096..65c6e46bf059 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
| @@ -141,7 +141,7 @@ static void cpa_flush_all(unsigned long cache) | |||
| 141 | { | 141 | { |
| 142 | BUG_ON(irqs_disabled()); | 142 | BUG_ON(irqs_disabled()); |
| 143 | 143 | ||
| 144 | on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); | 144 | on_each_cpu(__cpa_flush_all, (void *) cache, 1); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | static void __cpa_flush_range(void *arg) | 147 | static void __cpa_flush_range(void *arg) |
| @@ -162,7 +162,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) | |||
| 162 | BUG_ON(irqs_disabled()); | 162 | BUG_ON(irqs_disabled()); |
| 163 | WARN_ON(PAGE_ALIGN(start) != start); | 163 | WARN_ON(PAGE_ALIGN(start) != start); |
| 164 | 164 | ||
| 165 | on_each_cpu(__cpa_flush_range, NULL, 1, 1); | 165 | on_each_cpu(__cpa_flush_range, NULL, 1); |
| 166 | 166 | ||
| 167 | if (!cache) | 167 | if (!cache) |
| 168 | return; | 168 | return; |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2b6ad5b9f9d5..7f3329b55d2e 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
| @@ -218,8 +218,8 @@ static int nmi_setup(void) | |||
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | } | 220 | } |
| 221 | on_each_cpu(nmi_save_registers, NULL, 0, 1); | 221 | on_each_cpu(nmi_save_registers, NULL, 1); |
| 222 | on_each_cpu(nmi_cpu_setup, NULL, 0, 1); | 222 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
| 223 | nmi_enabled = 1; | 223 | nmi_enabled = 1; |
| 224 | return 0; | 224 | return 0; |
| 225 | } | 225 | } |
| @@ -271,7 +271,7 @@ static void nmi_shutdown(void) | |||
| 271 | { | 271 | { |
| 272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); | 272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); |
| 273 | nmi_enabled = 0; | 273 | nmi_enabled = 0; |
| 274 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); | 274 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
| 275 | unregister_die_notifier(&profile_exceptions_nb); | 275 | unregister_die_notifier(&profile_exceptions_nb); |
| 276 | model->shutdown(msrs); | 276 | model->shutdown(msrs); |
| 277 | free_msrs(); | 277 | free_msrs(); |
| @@ -286,7 +286,7 @@ static void nmi_cpu_start(void *dummy) | |||
| 286 | 286 | ||
| 287 | static int nmi_start(void) | 287 | static int nmi_start(void) |
| 288 | { | 288 | { |
| 289 | on_each_cpu(nmi_cpu_start, NULL, 0, 1); | 289 | on_each_cpu(nmi_cpu_start, NULL, 1); |
| 290 | return 0; | 290 | return 0; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| @@ -298,7 +298,7 @@ static void nmi_cpu_stop(void *dummy) | |||
| 298 | 298 | ||
| 299 | static void nmi_stop(void) | 299 | static void nmi_stop(void) |
| 300 | { | 300 | { |
| 301 | on_each_cpu(nmi_cpu_stop, NULL, 0, 1); | 301 | on_each_cpu(nmi_cpu_stop, NULL, 1); |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | 304 | struct op_counter_config counter_config[OP_MAX_COUNTER]; |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index dcd4e51f2f16..bb508456ef52 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -1214,7 +1214,9 @@ static const struct smp_ops xen_smp_ops __initdata = { | |||
| 1214 | 1214 | ||
| 1215 | .smp_send_stop = xen_smp_send_stop, | 1215 | .smp_send_stop = xen_smp_send_stop, |
| 1216 | .smp_send_reschedule = xen_smp_send_reschedule, | 1216 | .smp_send_reschedule = xen_smp_send_reschedule, |
| 1217 | .smp_call_function_mask = xen_smp_call_function_mask, | 1217 | |
| 1218 | .send_call_func_ipi = xen_smp_send_call_function_ipi, | ||
| 1219 | .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, | ||
| 1218 | }; | 1220 | }; |
| 1219 | #endif /* CONFIG_SMP */ | 1221 | #endif /* CONFIG_SMP */ |
| 1220 | 1222 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42b3b9ed641d..ff0aa74afaa1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -796,7 +796,7 @@ static void drop_mm_ref(struct mm_struct *mm) | |||
| 796 | } | 796 | } |
| 797 | 797 | ||
| 798 | if (!cpus_empty(mask)) | 798 | if (!cpus_empty(mask)) |
| 799 | xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); | 799 | smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); |
| 800 | } | 800 | } |
| 801 | #else | 801 | #else |
| 802 | static void drop_mm_ref(struct mm_struct *mm) | 802 | static void drop_mm_ref(struct mm_struct *mm) |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d2e3c20127d7..233156f39b7f 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
| @@ -36,27 +36,14 @@ | |||
| 36 | #include "mmu.h" | 36 | #include "mmu.h" |
| 37 | 37 | ||
| 38 | cpumask_t xen_cpu_initialized_map; | 38 | cpumask_t xen_cpu_initialized_map; |
| 39 | static DEFINE_PER_CPU(int, resched_irq) = -1; | ||
| 40 | static DEFINE_PER_CPU(int, callfunc_irq) = -1; | ||
| 41 | static DEFINE_PER_CPU(int, debug_irq) = -1; | ||
| 42 | |||
| 43 | /* | ||
| 44 | * Structure and data for smp_call_function(). This is designed to minimise | ||
| 45 | * static memory requirements. It also looks cleaner. | ||
| 46 | */ | ||
| 47 | static DEFINE_SPINLOCK(call_lock); | ||
| 48 | 39 | ||
| 49 | struct call_data_struct { | 40 | static DEFINE_PER_CPU(int, resched_irq); |
| 50 | void (*func) (void *info); | 41 | static DEFINE_PER_CPU(int, callfunc_irq); |
| 51 | void *info; | 42 | static DEFINE_PER_CPU(int, callfuncsingle_irq); |
| 52 | atomic_t started; | 43 | static DEFINE_PER_CPU(int, debug_irq) = -1; |
| 53 | atomic_t finished; | ||
| 54 | int wait; | ||
| 55 | }; | ||
| 56 | 44 | ||
| 57 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | 45 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); |
| 58 | 46 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | |
| 59 | static struct call_data_struct *call_data; | ||
| 60 | 47 | ||
| 61 | /* | 48 | /* |
| 62 | * Reschedule call back. Nothing to do, | 49 | * Reschedule call back. Nothing to do, |
| @@ -128,6 +115,17 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 128 | goto fail; | 115 | goto fail; |
| 129 | per_cpu(debug_irq, cpu) = rc; | 116 | per_cpu(debug_irq, cpu) = rc; |
| 130 | 117 | ||
| 118 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); | ||
| 119 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, | ||
| 120 | cpu, | ||
| 121 | xen_call_function_single_interrupt, | ||
| 122 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | ||
| 123 | callfunc_name, | ||
| 124 | NULL); | ||
| 125 | if (rc < 0) | ||
| 126 | goto fail; | ||
| 127 | per_cpu(callfuncsingle_irq, cpu) = rc; | ||
| 128 | |||
| 131 | return 0; | 129 | return 0; |
| 132 | 130 | ||
| 133 | fail: | 131 | fail: |
| @@ -137,6 +135,9 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 137 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 135 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); |
| 138 | if (per_cpu(debug_irq, cpu) >= 0) | 136 | if (per_cpu(debug_irq, cpu) >= 0) |
| 139 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 137 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); |
| 138 | if (per_cpu(callfuncsingle_irq, cpu) >= 0) | ||
| 139 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | ||
| 140 | |||
| 140 | return rc; | 141 | return rc; |
| 141 | } | 142 | } |
| 142 | 143 | ||
| @@ -336,7 +337,7 @@ static void stop_self(void *v) | |||
| 336 | 337 | ||
| 337 | void xen_smp_send_stop(void) | 338 | void xen_smp_send_stop(void) |
| 338 | { | 339 | { |
| 339 | smp_call_function(stop_self, NULL, 0, 0); | 340 | smp_call_function(stop_self, NULL, 0); |
| 340 | } | 341 | } |
| 341 | 342 | ||
| 342 | void xen_smp_send_reschedule(int cpu) | 343 | void xen_smp_send_reschedule(int cpu) |
| @@ -344,7 +345,6 @@ void xen_smp_send_reschedule(int cpu) | |||
| 344 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | 345 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); |
| 345 | } | 346 | } |
| 346 | 347 | ||
| 347 | |||
| 348 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | 348 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) |
| 349 | { | 349 | { |
| 350 | unsigned cpu; | 350 | unsigned cpu; |
| @@ -355,83 +355,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | |||
| 355 | xen_send_IPI_one(cpu, vector); | 355 | xen_send_IPI_one(cpu, vector); |
| 356 | } | 356 | } |
| 357 | 357 | ||
| 358 | void xen_smp_send_call_function_ipi(cpumask_t mask) | ||
| 359 | { | ||
| 360 | int cpu; | ||
| 361 | |||
| 362 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | ||
| 363 | |||
| 364 | /* Make sure other vcpus get a chance to run if they need to. */ | ||
| 365 | for_each_cpu_mask(cpu, mask) { | ||
| 366 | if (xen_vcpu_stolen(cpu)) { | ||
| 367 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | ||
| 368 | break; | ||
| 369 | } | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | void xen_smp_send_call_function_single_ipi(int cpu) | ||
| 374 | { | ||
| 375 | xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); | ||
| 376 | } | ||
| 377 | |||
| 358 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 378 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
| 359 | { | 379 | { |
| 360 | void (*func) (void *info) = call_data->func; | ||
| 361 | void *info = call_data->info; | ||
| 362 | int wait = call_data->wait; | ||
| 363 | |||
| 364 | /* | ||
| 365 | * Notify initiating CPU that I've grabbed the data and am | ||
| 366 | * about to execute the function | ||
| 367 | */ | ||
| 368 | mb(); | ||
| 369 | atomic_inc(&call_data->started); | ||
| 370 | /* | ||
| 371 | * At this point the info structure may be out of scope unless wait==1 | ||
| 372 | */ | ||
| 373 | irq_enter(); | 380 | irq_enter(); |
| 374 | (*func)(info); | 381 | generic_smp_call_function_interrupt(); |
| 375 | __get_cpu_var(irq_stat).irq_call_count++; | 382 | __get_cpu_var(irq_stat).irq_call_count++; |
| 376 | irq_exit(); | 383 | irq_exit(); |
| 377 | 384 | ||
| 378 | if (wait) { | ||
| 379 | mb(); /* commit everything before setting finished */ | ||
| 380 | atomic_inc(&call_data->finished); | ||
| 381 | } | ||
| 382 | |||
| 383 | return IRQ_HANDLED; | 385 | return IRQ_HANDLED; |
| 384 | } | 386 | } |
| 385 | 387 | ||
| 386 | int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 388 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) |
| 387 | void *info, int wait) | ||
| 388 | { | 389 | { |
| 389 | struct call_data_struct data; | 390 | irq_enter(); |
| 390 | int cpus, cpu; | 391 | generic_smp_call_function_single_interrupt(); |
| 391 | bool yield; | 392 | __get_cpu_var(irq_stat).irq_call_count++; |
| 392 | 393 | irq_exit(); | |
| 393 | /* Holding any lock stops cpus from going down. */ | ||
| 394 | spin_lock(&call_lock); | ||
| 395 | |||
| 396 | cpu_clear(smp_processor_id(), mask); | ||
| 397 | |||
| 398 | cpus = cpus_weight(mask); | ||
| 399 | if (!cpus) { | ||
| 400 | spin_unlock(&call_lock); | ||
| 401 | return 0; | ||
| 402 | } | ||
| 403 | |||
| 404 | /* Can deadlock when called with interrupts disabled */ | ||
| 405 | WARN_ON(irqs_disabled()); | ||
| 406 | |||
| 407 | data.func = func; | ||
| 408 | data.info = info; | ||
| 409 | atomic_set(&data.started, 0); | ||
| 410 | data.wait = wait; | ||
| 411 | if (wait) | ||
| 412 | atomic_set(&data.finished, 0); | ||
| 413 | |||
| 414 | call_data = &data; | ||
| 415 | mb(); /* write everything before IPI */ | ||
| 416 | |||
| 417 | /* Send a message to other CPUs and wait for them to respond */ | ||
| 418 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | ||
| 419 | |||
| 420 | /* Make sure other vcpus get a chance to run if they need to. */ | ||
| 421 | yield = false; | ||
| 422 | for_each_cpu_mask(cpu, mask) | ||
| 423 | if (xen_vcpu_stolen(cpu)) | ||
| 424 | yield = true; | ||
| 425 | |||
| 426 | if (yield) | ||
| 427 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | ||
| 428 | |||
| 429 | /* Wait for response */ | ||
| 430 | while (atomic_read(&data.started) != cpus || | ||
| 431 | (wait && atomic_read(&data.finished) != cpus)) | ||
| 432 | cpu_relax(); | ||
| 433 | |||
| 434 | spin_unlock(&call_lock); | ||
| 435 | 394 | ||
| 436 | return 0; | 395 | return IRQ_HANDLED; |
| 437 | } | 396 | } |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d852ddbb3448..6f4b1045c1c2 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
| @@ -55,13 +55,8 @@ void xen_smp_cpus_done(unsigned int max_cpus); | |||
| 55 | 55 | ||
| 56 | void xen_smp_send_stop(void); | 56 | void xen_smp_send_stop(void); |
| 57 | void xen_smp_send_reschedule(int cpu); | 57 | void xen_smp_send_reschedule(int cpu); |
| 58 | int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic, | 58 | void xen_smp_send_call_function_ipi(cpumask_t mask); |
| 59 | int wait); | 59 | void xen_smp_send_call_function_single_ipi(int cpu); |
| 60 | int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 61 | int nonatomic, int wait); | ||
| 62 | |||
| 63 | int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
| 64 | void *info, int wait); | ||
| 65 | 60 | ||
| 66 | extern cpumask_t xen_cpu_initialized_map; | 61 | extern cpumask_t xen_cpu_initialized_map; |
| 67 | 62 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 556ee1585192..4976e5db2b3f 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -1339,7 +1339,7 @@ static void smp_callback(void *v) | |||
| 1339 | static int acpi_processor_latency_notify(struct notifier_block *b, | 1339 | static int acpi_processor_latency_notify(struct notifier_block *b, |
| 1340 | unsigned long l, void *v) | 1340 | unsigned long l, void *v) |
| 1341 | { | 1341 | { |
| 1342 | smp_call_function(smp_callback, NULL, 0, 1); | 1342 | smp_call_function(smp_callback, NULL, 1); |
| 1343 | return NOTIFY_OK; | 1343 | return NOTIFY_OK; |
| 1344 | } | 1344 | } |
| 1345 | 1345 | ||
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 564daaa6c7d0..eaa1a355bb32 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
| @@ -1249,7 +1249,7 @@ static void ipi_handler(void *null) | |||
| 1249 | 1249 | ||
| 1250 | void global_cache_flush(void) | 1250 | void global_cache_flush(void) |
| 1251 | { | 1251 | { |
| 1252 | if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0) | 1252 | if (on_each_cpu(ipi_handler, NULL, 1) != 0) |
| 1253 | panic(PFX "timed out waiting for the other CPUs!\n"); | 1253 | panic(PFX "timed out waiting for the other CPUs!\n"); |
| 1254 | } | 1254 | } |
| 1255 | EXPORT_SYMBOL(global_cache_flush); | 1255 | EXPORT_SYMBOL(global_cache_flush); |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index dbce1263bdff..8fdfe9c871e3 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
| @@ -215,7 +215,7 @@ static void showacpu(void *dummy) | |||
| 215 | 215 | ||
| 216 | static void sysrq_showregs_othercpus(struct work_struct *dummy) | 216 | static void sysrq_showregs_othercpus(struct work_struct *dummy) |
| 217 | { | 217 | { |
| 218 | smp_call_function(showacpu, NULL, 0, 0); | 218 | smp_call_function(showacpu, NULL, 0); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); | 221 | static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 23554b676d6e..5405769020a1 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -340,7 +340,7 @@ static void smp_callback(void *v) | |||
| 340 | static int cpuidle_latency_notify(struct notifier_block *b, | 340 | static int cpuidle_latency_notify(struct notifier_block *b, |
| 341 | unsigned long l, void *v) | 341 | unsigned long l, void *v) |
| 342 | { | 342 | { |
| 343 | smp_call_function(smp_callback, NULL, 0, 1); | 343 | smp_call_function(smp_callback, NULL, 1); |
| 344 | return NOTIFY_OK; | 344 | return NOTIFY_OK; |
| 345 | } | 345 | } |
| 346 | 346 | ||
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 2e554a4ab337..95dfda52b4f9 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
| @@ -478,7 +478,7 @@ void __init lguest_arch_host_init(void) | |||
| 478 | cpu_had_pge = 1; | 478 | cpu_had_pge = 1; |
| 479 | /* adjust_pge is a helper function which sets or unsets the PGE | 479 | /* adjust_pge is a helper function which sets or unsets the PGE |
| 480 | * bit on its CPU, depending on the argument (0 == unset). */ | 480 | * bit on its CPU, depending on the argument (0 == unset). */ |
| 481 | on_each_cpu(adjust_pge, (void *)0, 0, 1); | 481 | on_each_cpu(adjust_pge, (void *)0, 1); |
| 482 | /* Turn off the feature in the global feature set. */ | 482 | /* Turn off the feature in the global feature set. */ |
| 483 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | 483 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); |
| 484 | } | 484 | } |
| @@ -493,7 +493,7 @@ void __exit lguest_arch_host_fini(void) | |||
| 493 | if (cpu_had_pge) { | 493 | if (cpu_had_pge) { |
| 494 | set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | 494 | set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); |
| 495 | /* adjust_pge's argument "1" means set PGE. */ | 495 | /* adjust_pge's argument "1" means set PGE. */ |
| 496 | on_each_cpu(adjust_pge, (void *)1, 0, 1); | 496 | on_each_cpu(adjust_pge, (void *)1, 1); |
| 497 | } | 497 | } |
| 498 | put_online_cpus(); | 498 | put_online_cpus(); |
| 499 | } | 499 | } |
diff --git a/fs/buffer.c b/fs/buffer.c index 5fa1512cd9a2..d48caee12e2a 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
| @@ -1464,7 +1464,7 @@ static void invalidate_bh_lru(void *arg) | |||
| 1464 | 1464 | ||
| 1465 | void invalidate_bh_lrus(void) | 1465 | void invalidate_bh_lrus(void) |
| 1466 | { | 1466 | { |
| 1467 | on_each_cpu(invalidate_bh_lru, NULL, 1, 1); | 1467 | on_each_cpu(invalidate_bh_lru, NULL, 1); |
| 1468 | } | 1468 | } |
| 1469 | EXPORT_SYMBOL_GPL(invalidate_bh_lrus); | 1469 | EXPORT_SYMBOL_GPL(invalidate_bh_lrus); |
| 1470 | 1470 | ||
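
The driver and core updates above (ACPI, AGP, sysrq, cpuidle, lguest, fs/buffer and the x86 callers earlier) are all the same mechanical conversion: the unused nonatomic/retry argument is gone, so smp_call_function(), smp_call_function_single() and on_each_cpu() now take only the function, its argument and the wait flag. A before/after sketch with an illustrative callback:

    #include <linux/smp.h>

    static void drain_local_state(void *unused)
    {
            /* runs on every CPU, in IPI context on remote ones */
    }

    static void drain_all_cpus(int cpu)
    {
            /* old: on_each_cpu(drain_local_state, NULL, 0, 1); */
            on_each_cpu(drain_local_state, NULL, 1);

            /* old: smp_call_function_single(cpu, drain_local_state, NULL, 0, 1); */
            smp_call_function_single(cpu, drain_local_state, NULL, 1);
    }
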
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index 286e1d844f63..544c69af8168 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h | |||
| @@ -47,12 +47,13 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS]; | |||
| 47 | extern int smp_num_cpus; | 47 | extern int smp_num_cpus; |
| 48 | #define cpu_possible_map cpu_present_map | 48 | #define cpu_possible_map cpu_present_map |
| 49 | 49 | ||
| 50 | int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu); | 50 | extern void arch_send_call_function_single_ipi(int cpu); |
| 51 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 51 | 52 | ||
| 52 | #else /* CONFIG_SMP */ | 53 | #else /* CONFIG_SMP */ |
| 53 | 54 | ||
| 54 | #define hard_smp_processor_id() 0 | 55 | #define hard_smp_processor_id() 0 |
| 55 | #define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; }) | 56 | #define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) |
| 56 | 57 | ||
| 57 | #endif /* CONFIG_SMP */ | 58 | #endif /* CONFIG_SMP */ |
| 58 | 59 | ||
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h index af99636db400..7fffa2404b8e 100644 --- a/include/asm-arm/smp.h +++ b/include/asm-arm/smp.h | |||
| @@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu); | |||
| 101 | extern int platform_cpu_kill(unsigned int cpu); | 101 | extern int platform_cpu_kill(unsigned int cpu); |
| 102 | extern void platform_cpu_enable(unsigned int cpu); | 102 | extern void platform_cpu_enable(unsigned int cpu); |
| 103 | 103 | ||
| 104 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 105 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 106 | |||
| 104 | /* | 107 | /* |
| 105 | * Local timer interrupt handling function (can be IPI'ed). | 108 | * Local timer interrupt handling function (can be IPI'ed). |
| 106 | */ | 109 | */ |
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index ec5f355fb7e3..27731e032ee9 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h | |||
| @@ -38,9 +38,6 @@ ia64_get_lid (void) | |||
| 38 | return lid.f.id << 8 | lid.f.eid; | 38 | return lid.f.id << 8 | lid.f.eid; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
| 42 | void *info, int wait); | ||
| 43 | |||
| 44 | #define hard_smp_processor_id() ia64_get_lid() | 41 | #define hard_smp_processor_id() ia64_get_lid() |
| 45 | 42 | ||
| 46 | #ifdef CONFIG_SMP | 43 | #ifdef CONFIG_SMP |
| @@ -124,11 +121,12 @@ extern void __init init_smp_config (void); | |||
| 124 | extern void smp_do_timer (struct pt_regs *regs); | 121 | extern void smp_do_timer (struct pt_regs *regs); |
| 125 | 122 | ||
| 126 | extern void smp_send_reschedule (int cpu); | 123 | extern void smp_send_reschedule (int cpu); |
| 127 | extern void lock_ipi_calllock(void); | ||
| 128 | extern void unlock_ipi_calllock(void); | ||
| 129 | extern void identify_siblings (struct cpuinfo_ia64 *); | 124 | extern void identify_siblings (struct cpuinfo_ia64 *); |
| 130 | extern int is_multithreading_enabled(void); | 125 | extern int is_multithreading_enabled(void); |
| 131 | 126 | ||
| 127 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 128 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 129 | |||
| 132 | #else /* CONFIG_SMP */ | 130 | #else /* CONFIG_SMP */ |
| 133 | 131 | ||
| 134 | #define cpu_logical_id(i) 0 | 132 | #define cpu_logical_id(i) 0 |
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index 078e1a51a042..c5dd66916692 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h | |||
| @@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void) | |||
| 89 | extern void smp_send_timer(void); | 89 | extern void smp_send_timer(void); |
| 90 | extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); | 90 | extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); |
| 91 | 91 | ||
| 92 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 93 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 94 | |||
| 92 | #endif /* not __ASSEMBLY__ */ | 95 | #endif /* not __ASSEMBLY__ */ |
| 93 | 96 | ||
| 94 | #define NO_PROC_ID (0xff) /* No processor magic marker */ | 97 | #define NO_PROC_ID (0xff) /* No processor magic marker */ |
| @@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); | |||
| 104 | #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0) | 107 | #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0) |
| 105 | #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0) | 108 | #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0) |
| 106 | #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0) | 109 | #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0) |
| 110 | #define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0) | ||
| 107 | 111 | ||
| 108 | #define IPI_SHIFT (0) | 112 | #define IPI_SHIFT (0) |
| 109 | #define NR_IPIS (8) | 113 | #define NR_IPIS (8) |
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index 84fef1aeec0c..0ff5b523ea77 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h | |||
| @@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS]; | |||
| 35 | 35 | ||
| 36 | #define NO_PROC_ID (-1) | 36 | #define NO_PROC_ID (-1) |
| 37 | 37 | ||
| 38 | struct call_data_struct { | ||
| 39 | void (*func)(void *); | ||
| 40 | void *info; | ||
| 41 | atomic_t started; | ||
| 42 | atomic_t finished; | ||
| 43 | int wait; | ||
| 44 | }; | ||
| 45 | |||
| 46 | extern struct call_data_struct *call_data; | ||
| 47 | |||
| 48 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ | 38 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ |
| 49 | #define SMP_CALL_FUNCTION 0x2 | 39 | #define SMP_CALL_FUNCTION 0x2 |
| 50 | 40 | ||
| @@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu) | |||
| 67 | 57 | ||
| 68 | extern asmlinkage void smp_call_function_interrupt(void); | 58 | extern asmlinkage void smp_call_function_interrupt(void); |
| 69 | 59 | ||
| 60 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 61 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 62 | |||
| 70 | #endif /* __ASM_SMP_H */ | 63 | #endif /* __ASM_SMP_H */ |
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h index 306f4950e32e..398cdbaf4e54 100644 --- a/include/asm-parisc/smp.h +++ b/include/asm-parisc/smp.h | |||
| @@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map; | |||
| 30 | extern void smp_send_reschedule(int cpu); | 30 | extern void smp_send_reschedule(int cpu); |
| 31 | extern void smp_send_all_nop(void); | 31 | extern void smp_send_all_nop(void); |
| 32 | 32 | ||
| 33 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 34 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 35 | |||
| 33 | #endif /* !ASSEMBLY */ | 36 | #endif /* !ASSEMBLY */ |
| 34 | 37 | ||
| 35 | /* | 38 | /* |
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 505f35bacaa9..c663a1fa77c5 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h | |||
| @@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | |||
| 67 | * in /proc/interrupts will be wrong!!! --Troy */ | 67 | * in /proc/interrupts will be wrong!!! --Troy */ |
| 68 | #define PPC_MSG_CALL_FUNCTION 0 | 68 | #define PPC_MSG_CALL_FUNCTION 0 |
| 69 | #define PPC_MSG_RESCHEDULE 1 | 69 | #define PPC_MSG_RESCHEDULE 1 |
| 70 | /* This is unused now */ | 70 | #define PPC_MSG_CALL_FUNC_SINGLE 2 |
| 71 | #if 0 | ||
| 72 | #define PPC_MSG_MIGRATE_TASK 2 | ||
| 73 | #endif | ||
| 74 | #define PPC_MSG_DEBUGGER_BREAK 3 | 71 | #define PPC_MSG_DEBUGGER_BREAK 3 |
| 75 | 72 | ||
| 76 | void smp_init_iSeries(void); | 73 | void smp_init_iSeries(void); |
| @@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void); | |||
| 117 | 114 | ||
| 118 | extern struct smp_ops_t *smp_ops; | 115 | extern struct smp_ops_t *smp_ops; |
| 119 | 116 | ||
| 117 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 118 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 119 | |||
| 120 | #endif /* __ASSEMBLY__ */ | 120 | #endif /* __ASSEMBLY__ */ |
| 121 | 121 | ||
| 122 | #endif /* __KERNEL__ */ | 122 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h index 9c8d34b07ebf..593343cd26ee 100644 --- a/include/asm-sh/smp.h +++ b/include/asm-sh/smp.h | |||
| @@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS]; | |||
| 26 | 26 | ||
| 27 | #define NO_PROC_ID (-1) | 27 | #define NO_PROC_ID (-1) |
| 28 | 28 | ||
| 29 | struct smp_fn_call_struct { | ||
| 30 | spinlock_t lock; | ||
| 31 | atomic_t finished; | ||
| 32 | void (*fn)(void *); | ||
| 33 | void *data; | ||
| 34 | }; | ||
| 35 | |||
| 36 | extern struct smp_fn_call_struct smp_fn_call; | ||
| 37 | |||
| 38 | #define SMP_MSG_FUNCTION 0 | 29 | #define SMP_MSG_FUNCTION 0 |
| 39 | #define SMP_MSG_RESCHEDULE 1 | 30 | #define SMP_MSG_RESCHEDULE 1 |
| 40 | #define SMP_MSG_NR 2 | 31 | #define SMP_MSG_FUNCTION_SINGLE 2 |
| 32 | #define SMP_MSG_NR 3 | ||
| 41 | 33 | ||
| 42 | void plat_smp_setup(void); | 34 | void plat_smp_setup(void); |
| 43 | void plat_prepare_cpus(unsigned int max_cpus); | 35 | void plat_prepare_cpus(unsigned int max_cpus); |
| @@ -46,6 +38,8 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point); | |||
| 46 | void plat_send_ipi(unsigned int cpu, unsigned int message); | 38 | void plat_send_ipi(unsigned int cpu, unsigned int message); |
| 47 | int plat_register_ipi_handler(unsigned int message, | 39 | int plat_register_ipi_handler(unsigned int message, |
| 48 | void (*handler)(void *), void *arg); | 40 | void (*handler)(void *), void *arg); |
| 41 | extern void arch_send_call_function_single_ipi(int cpu); | ||
| 42 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
| 49 | 43 | ||
| 50 | #else | 44 | #else |
| 51 | 45 | ||
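
For platforms such as SuperH that already route IPIs through a message number, the new single-target IPI is just one more message: the hunk above adds SMP_MSG_FUNCTION_SINGLE and bumps SMP_MSG_NR. A sketch of how the two arch hooks can sit on top of the plat_send_ipi() interface declared above; this is illustrative, not necessarily the exact arch/sh implementation in this series:

    void arch_send_call_function_ipi(cpumask_t mask)
    {
            int cpu;

            for_each_cpu_mask(cpu, mask)
                    plat_send_ipi(cpu, SMP_MSG_FUNCTION);
    }

    void arch_send_call_function_single_ipi(int cpu)
    {
            plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
    }
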
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index e6d561599726..b61e74bea06a 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h | |||
| @@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, | |||
| 72 | unsigned long arg3, unsigned long arg4, unsigned long arg5) | 72 | unsigned long arg3, unsigned long arg4, unsigned long arg5) |
| 73 | { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } | 73 | { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } |
| 74 | 74 | ||
| 75 | static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) | 75 | static inline int smp_call_function(void (*func)(void *info), void *info, int wait) |
| 76 | { | 76 | { |
| 77 | xc1((smpfunc_t)func, (unsigned long)info); | 77 | xc1((smpfunc_t)func, (unsigned long)info); |
| 78 | return 0; | 78 | return 0; |
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h index 18f067c310f7..77ba51df5668 100644 --- a/include/asm-x86/hw_irq.h +++ b/include/asm-x86/hw_irq.h | |||
| @@ -48,6 +48,7 @@ extern void irq_move_cleanup_interrupt(void); | |||
| 48 | extern void threshold_interrupt(void); | 48 | extern void threshold_interrupt(void); |
| 49 | 49 | ||
| 50 | extern void call_function_interrupt(void); | 50 | extern void call_function_interrupt(void); |
| 51 | extern void call_function_single_interrupt(void); | ||
| 51 | 52 | ||
| 52 | /* PIC specific functions */ | 53 | /* PIC specific functions */ |
| 53 | extern void disable_8259A_irq(unsigned int irq); | 54 | extern void disable_8259A_irq(unsigned int irq); |
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h index 0ac864ef3cd4..90b1d1f12f08 100644 --- a/include/asm-x86/irq_vectors.h +++ b/include/asm-x86/irq_vectors.h | |||
| @@ -64,6 +64,7 @@ | |||
| 64 | # define INVALIDATE_TLB_VECTOR 0xfd | 64 | # define INVALIDATE_TLB_VECTOR 0xfd |
| 65 | # define RESCHEDULE_VECTOR 0xfc | 65 | # define RESCHEDULE_VECTOR 0xfc |
| 66 | # define CALL_FUNCTION_VECTOR 0xfb | 66 | # define CALL_FUNCTION_VECTOR 0xfb |
| 67 | # define CALL_FUNCTION_SINGLE_VECTOR 0xfa | ||
| 67 | # define THERMAL_APIC_VECTOR 0xf0 | 68 | # define THERMAL_APIC_VECTOR 0xf0 |
| 68 | 69 | ||
| 69 | #else | 70 | #else |
| @@ -72,6 +73,7 @@ | |||
| 72 | #define ERROR_APIC_VECTOR 0xfe | 73 | #define ERROR_APIC_VECTOR 0xfe |
| 73 | #define RESCHEDULE_VECTOR 0xfd | 74 | #define RESCHEDULE_VECTOR 0xfd |
| 74 | #define CALL_FUNCTION_VECTOR 0xfc | 75 | #define CALL_FUNCTION_VECTOR 0xfc |
| 76 | #define CALL_FUNCTION_SINGLE_VECTOR 0xfb | ||
| 75 | #define THERMAL_APIC_VECTOR 0xfa | 77 | #define THERMAL_APIC_VECTOR 0xfa |
| 76 | #define THRESHOLD_APIC_VECTOR 0xf9 | 78 | #define THRESHOLD_APIC_VECTOR 0xf9 |
| 77 | #define INVALIDATE_TLB_VECTOR_END 0xf7 | 79 | #define INVALIDATE_TLB_VECTOR_END 0xf7 |
| @@ -143,6 +145,7 @@ | |||
| 143 | #define VIC_RESCHEDULE_CPI 4 | 145 | #define VIC_RESCHEDULE_CPI 4 |
| 144 | #define VIC_ENABLE_IRQ_CPI 5 | 146 | #define VIC_ENABLE_IRQ_CPI 5 |
| 145 | #define VIC_CALL_FUNCTION_CPI 6 | 147 | #define VIC_CALL_FUNCTION_CPI 6 |
| 148 | #define VIC_CALL_FUNCTION_SINGLE_CPI 7 | ||
| 146 | 149 | ||
| 147 | /* Now the QIC CPIs: Since we don't need the two initial levels, | 150 | /* Now the QIC CPIs: Since we don't need the two initial levels, |
| 148 | * these are 2 less than the VIC CPIs */ | 151 | * these are 2 less than the VIC CPIs */ |
| @@ -152,9 +155,10 @@ | |||
| 152 | #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) | 155 | #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) |
| 153 | #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) | 156 | #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) |
| 154 | #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) | 157 | #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) |
| 158 | #define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET) | ||
| 155 | 159 | ||
| 156 | #define VIC_START_FAKE_CPI VIC_TIMER_CPI | 160 | #define VIC_START_FAKE_CPI VIC_TIMER_CPI |
| 157 | #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI | 161 | #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI |
| 158 | 162 | ||
| 159 | /* this is the SYS_INT CPI. */ | 163 | /* this is the SYS_INT CPI. */ |
| 160 | #define VIC_SYS_INT 8 | 164 | #define VIC_SYS_INT 8 |
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h index bc861469bdba..9283b60a1dd2 100644 --- a/include/asm-x86/mach-default/entry_arch.h +++ b/include/asm-x86/mach-default/entry_arch.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | 13 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) |
| 14 | BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) | 14 | BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) |
| 15 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | 15 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) |
| 16 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) | ||
| 16 | #endif | 17 | #endif |
| 17 | 18 | ||
| 18 | /* | 19 | /* |
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h index b183fa6d83d9..86be554342d4 100644 --- a/include/asm-x86/mach-visws/entry_arch.h +++ b/include/asm-x86/mach-visws/entry_arch.h | |||
| @@ -1,23 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * The following vectors are part of the Linux architecture, there | 2 | * VISWS uses the standard Linux entry points: |
| 3 | * is no hardware IRQ pin equivalent for them, they are triggered | ||
| 4 | * through the ICC by us (IPIs) | ||
| 5 | */ | 3 | */ |
| 6 | #ifdef CONFIG_X86_SMP | ||
| 7 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | ||
| 8 | BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) | ||
| 9 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | ||
| 10 | #endif | ||
| 11 | 4 | ||
| 12 | /* | 5 | #include "../mach-default/entry_arch.h" |
| 13 | * every pentium local APIC has two 'local interrupts', with a | ||
| 14 | * soft-definable vector attached to both interrupts, one of | ||
| 15 | * which is a timer interrupt, the other one is error counter | ||
| 16 | * overflow. Linux uses the local APIC timer interrupt to get | ||
| 17 | * a much simpler SMP time architecture: | ||
| 18 | */ | ||
| 19 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 20 | BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) | ||
| 21 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) | ||
| 22 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | ||
| 23 | #endif | ||
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h index 4a1e1e8c10b6..ae52624b5937 100644 --- a/include/asm-x86/mach-voyager/entry_arch.h +++ b/include/asm-x86/mach-voyager/entry_arch.h | |||
| @@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); | |||
| 23 | BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); | 23 | BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); |
| 24 | BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); | 24 | BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); |
| 25 | BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); | 25 | BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); |
| 26 | 26 | BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI); | |
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 2e221f1ce0b2..c2784b3e0b77 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
| @@ -50,9 +50,9 @@ struct smp_ops { | |||
| 50 | 50 | ||
| 51 | void (*smp_send_stop)(void); | 51 | void (*smp_send_stop)(void); |
| 52 | void (*smp_send_reschedule)(int cpu); | 52 | void (*smp_send_reschedule)(int cpu); |
| 53 | int (*smp_call_function_mask)(cpumask_t mask, | 53 | |
| 54 | void (*func)(void *info), void *info, | 54 | void (*send_call_func_ipi)(cpumask_t mask); |
| 55 | int wait); | 55 | void (*send_call_func_single_ipi)(int cpu); |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | /* Globals due to paravirt */ | 58 | /* Globals due to paravirt */ |
| @@ -94,17 +94,22 @@ static inline void smp_send_reschedule(int cpu) | |||
| 94 | smp_ops.smp_send_reschedule(cpu); | 94 | smp_ops.smp_send_reschedule(cpu); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | static inline int smp_call_function_mask(cpumask_t mask, | 97 | static inline void arch_send_call_function_single_ipi(int cpu) |
| 98 | void (*func) (void *info), void *info, | 98 | { |
| 99 | int wait) | 99 | smp_ops.send_call_func_single_ipi(cpu); |
| 100 | } | ||
| 101 | |||
| 102 | static inline void arch_send_call_function_ipi(cpumask_t mask) | ||
| 100 | { | 103 | { |
| 101 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | 104 | smp_ops.send_call_func_ipi(mask); |
| 102 | } | 105 | } |
| 103 | 106 | ||
| 104 | void native_smp_prepare_boot_cpu(void); | 107 | void native_smp_prepare_boot_cpu(void); |
| 105 | void native_smp_prepare_cpus(unsigned int max_cpus); | 108 | void native_smp_prepare_cpus(unsigned int max_cpus); |
| 106 | void native_smp_cpus_done(unsigned int max_cpus); | 109 | void native_smp_cpus_done(unsigned int max_cpus); |
| 107 | int native_cpu_up(unsigned int cpunum); | 110 | int native_cpu_up(unsigned int cpunum); |
| 111 | void native_send_call_func_ipi(cpumask_t mask); | ||
| 112 | void native_send_call_func_single_ipi(int cpu); | ||
| 108 | 113 | ||
| 109 | extern int __cpu_disable(void); | 114 | extern int __cpu_disable(void); |
| 110 | extern void __cpu_die(unsigned int cpu); | 115 | extern void __cpu_die(unsigned int cpu); |
| @@ -197,7 +202,5 @@ static inline int hard_smp_processor_id(void) | |||
| 197 | extern void cpu_uninit(void); | 202 | extern void cpu_uninit(void); |
| 198 | #endif | 203 | #endif |
| 199 | 204 | ||
| 200 | extern void lock_ipi_call_lock(void); | ||
| 201 | extern void unlock_ipi_call_lock(void); | ||
| 202 | #endif /* __ASSEMBLY__ */ | 205 | #endif /* __ASSEMBLY__ */ |
| 203 | #endif | 206 | #endif |
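
struct smp_ops now carries the two IPI senders instead of a whole smp_call_function_mask() implementation, and arch_send_call_function_ipi()/arch_send_call_function_single_ipi() become thin dispatchers through that table, as the inline functions above show. The native hooks declared here are implemented in arch/x86/kernel/smp.c, outside this hunk; as a simplified sketch of what they amount to, with send_IPI_mask() being the usual sub-arch IPI primitive and ignoring any broadcast optimisation the real code may apply to the all-but-self case:

    void native_send_call_func_single_ipi(int cpu)
    {
            send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
    }

    void native_send_call_func_ipi(cpumask_t mask)
    {
            send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
    }
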
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h index 596312a7bfc9..f8d57ea1f05f 100644 --- a/include/asm-x86/xen/events.h +++ b/include/asm-x86/xen/events.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | enum ipi_vector { | 4 | enum ipi_vector { |
| 5 | XEN_RESCHEDULE_VECTOR, | 5 | XEN_RESCHEDULE_VECTOR, |
| 6 | XEN_CALL_FUNCTION_VECTOR, | 6 | XEN_CALL_FUNCTION_VECTOR, |
| 7 | XEN_CALL_FUNCTION_SINGLE_VECTOR, | ||
| 7 | 8 | ||
| 8 | XEN_NR_IPIS, | 9 | XEN_NR_IPIS, |
| 9 | }; | 10 | }; |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 55232ccf9cfd..48262f86c969 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -7,9 +7,18 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
| 10 | #include <linux/list.h> | ||
| 11 | #include <linux/cpumask.h> | ||
| 10 | 12 | ||
| 11 | extern void cpu_idle(void); | 13 | extern void cpu_idle(void); |
| 12 | 14 | ||
| 15 | struct call_single_data { | ||
| 16 | struct list_head list; | ||
| 17 | void (*func) (void *info); | ||
| 18 | void *info; | ||
| 19 | unsigned int flags; | ||
| 20 | }; | ||
| 21 | |||
| 13 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
| 14 | 23 | ||
| 15 | #include <linux/preempt.h> | 24 | #include <linux/preempt.h> |
| @@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus); | |||
| 52 | /* | 61 | /* |
| 53 | * Call a function on all other processors | 62 | * Call a function on all other processors |
| 54 | */ | 63 | */ |
| 55 | int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); | 64 | int smp_call_function(void(*func)(void *info), void *info, int wait); |
| 56 | 65 | int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, | |
| 66 | int wait); | ||
| 57 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | 67 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, |
| 58 | int retry, int wait); | 68 | int wait); |
| 69 | void __smp_call_function_single(int cpuid, struct call_single_data *data); | ||
| 70 | |||
| 71 | /* | ||
| 72 | * Generic and arch helpers | ||
| 73 | */ | ||
| 74 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | ||
| 75 | void generic_smp_call_function_single_interrupt(void); | ||
| 76 | void generic_smp_call_function_interrupt(void); | ||
| 77 | void init_call_single_data(void); | ||
| 78 | void ipi_call_lock(void); | ||
| 79 | void ipi_call_unlock(void); | ||
| 80 | void ipi_call_lock_irq(void); | ||
| 81 | void ipi_call_unlock_irq(void); | ||
| 82 | #else | ||
| 83 | static inline void init_call_single_data(void) | ||
| 84 | { | ||
| 85 | } | ||
| 86 | #endif | ||
| 59 | 87 | ||
| 60 | /* | 88 | /* |
| 61 | * Call a function on all processors | 89 | * Call a function on all processors |
| 62 | */ | 90 | */ |
| 63 | int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); | 91 | int on_each_cpu(void (*func) (void *info), void *info, int wait); |
| 64 | 92 | ||
| 65 | #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ | 93 | #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ |
| 66 | #define MSG_ALL 0x8001 | 94 | #define MSG_ALL 0x8001 |
| @@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) | |||
| 90 | { | 118 | { |
| 91 | return 0; | 119 | return 0; |
| 92 | } | 120 | } |
| 93 | #define smp_call_function(func, info, retry, wait) \ | 121 | #define smp_call_function(func, info, wait) \ |
| 94 | (up_smp_call_function(func, info)) | 122 | (up_smp_call_function(func, info)) |
| 95 | #define on_each_cpu(func,info,retry,wait) \ | 123 | #define on_each_cpu(func,info,wait) \ |
| 96 | ({ \ | 124 | ({ \ |
| 97 | local_irq_disable(); \ | 125 | local_irq_disable(); \ |
| 98 | func(info); \ | 126 | func(info); \ |
| @@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) | |||
| 102 | static inline void smp_send_reschedule(int cpu) { } | 130 | static inline void smp_send_reschedule(int cpu) { } |
| 103 | #define num_booting_cpus() 1 | 131 | #define num_booting_cpus() 1 |
| 104 | #define smp_prepare_boot_cpu() do {} while (0) | 132 | #define smp_prepare_boot_cpu() do {} while (0) |
| 105 | #define smp_call_function_single(cpuid, func, info, retry, wait) \ | 133 | #define smp_call_function_single(cpuid, func, info, wait) \ |
| 106 | ({ \ | 134 | ({ \ |
| 107 | WARN_ON(cpuid != 0); \ | 135 | WARN_ON(cpuid != 0); \ |
| 108 | local_irq_disable(); \ | 136 | local_irq_disable(); \ |
| @@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { } | |||
| 112 | }) | 140 | }) |
| 113 | #define smp_call_function_mask(mask, func, info, wait) \ | 141 | #define smp_call_function_mask(mask, func, info, wait) \ |
| 114 | (up_smp_call_function(func, info)) | 142 | (up_smp_call_function(func, info)) |
| 115 | 143 | static inline void init_call_single_data(void) | |
| 144 | { | ||
| 145 | } | ||
| 116 | #endif /* !SMP */ | 146 | #endif /* !SMP */ |
| 117 | 147 | ||
| 118 | /* | 148 | /* |
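
With the header change above, callers see one signature on SMP and UP builds alike: on UP the macros at the end of the hunk simply run the function locally with interrupts disabled, while on SMP smp_call_function_single() either runs the function directly when the target is the current CPU or queues a call_single_data entry and sends the single-function IPI. A purely illustrative caller, using the synchronous wait=1 form so the on-stack argument stays valid:

    #include <linux/smp.h>
    #include <linux/jiffies.h>

    static void snapshot_jiffies(void *arg)
    {
            *(unsigned long *)arg = jiffies;
    }

    static unsigned long jiffies_seen_by(int cpu)
    {
            unsigned long snap = 0;

            /* wait=1: do not return until the target CPU has run the
             * callback, since snap lives on this stack frame. */
            smp_call_function_single(cpu, snapshot_jiffies, &snap, 1);
            return snap;
    }
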
diff --git a/init/main.c b/init/main.c index f7fb20021d48..1efcccff1bdb 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/kernel_stat.h> | 31 | #include <linux/kernel_stat.h> |
| 32 | #include <linux/start_kernel.h> | 32 | #include <linux/start_kernel.h> |
| 33 | #include <linux/security.h> | 33 | #include <linux/security.h> |
| 34 | #include <linux/smp.h> | ||
| 34 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
| 35 | #include <linux/profile.h> | 36 | #include <linux/profile.h> |
| 36 | #include <linux/rcupdate.h> | 37 | #include <linux/rcupdate.h> |
| @@ -779,6 +780,7 @@ static void __init do_pre_smp_initcalls(void) | |||
| 779 | { | 780 | { |
| 780 | extern int spawn_ksoftirqd(void); | 781 | extern int spawn_ksoftirqd(void); |
| 781 | 782 | ||
| 783 | init_call_single_data(); | ||
| 782 | migration_init(); | 784 | migration_init(); |
| 783 | spawn_ksoftirqd(); | 785 | spawn_ksoftirqd(); |
| 784 | if (!nosoftlockup) | 786 | if (!nosoftlockup) |
diff --git a/kernel/Makefile b/kernel/Makefile index f6328e16dfdd..0a7ed838984b 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -39,6 +39,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | |||
| 39 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | 39 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o |
| 40 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o | 40 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o |
| 41 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o | 41 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
| 42 | obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o | ||
| 42 | obj-$(CONFIG_SMP) += spinlock.o | 43 | obj-$(CONFIG_SMP) += spinlock.o |
| 43 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o | 44 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o |
| 44 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o | 45 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2913a8bff612..b8e4dce80a74 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -622,7 +622,7 @@ static void retrigger_next_event(void *arg) | |||
| 622 | void clock_was_set(void) | 622 | void clock_was_set(void) |
| 623 | { | 623 | { |
| 624 | /* Retrigger the CPU local events everywhere */ | 624 | /* Retrigger the CPU local events everywhere */ |
| 625 | on_each_cpu(retrigger_next_event, NULL, 0, 1); | 625 | on_each_cpu(retrigger_next_event, NULL, 1); |
| 626 | } | 626 | } |
| 627 | 627 | ||
| 628 | /* | 628 | /* |
diff --git a/kernel/profile.c b/kernel/profile.c index ae7ead82cbc9..58926411eb2a 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -252,7 +252,7 @@ static void profile_flip_buffers(void) | |||
| 252 | mutex_lock(&profile_flip_mutex); | 252 | mutex_lock(&profile_flip_mutex); |
| 253 | j = per_cpu(cpu_profile_flip, get_cpu()); | 253 | j = per_cpu(cpu_profile_flip, get_cpu()); |
| 254 | put_cpu(); | 254 | put_cpu(); |
| 255 | on_each_cpu(__profile_flip_buffers, NULL, 0, 1); | 255 | on_each_cpu(__profile_flip_buffers, NULL, 1); |
| 256 | for_each_online_cpu(cpu) { | 256 | for_each_online_cpu(cpu) { |
| 257 | struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; | 257 | struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; |
| 258 | for (i = 0; i < NR_PROFILE_HIT; ++i) { | 258 | for (i = 0; i < NR_PROFILE_HIT; ++i) { |
| @@ -275,7 +275,7 @@ static void profile_discard_flip_buffers(void) | |||
| 275 | mutex_lock(&profile_flip_mutex); | 275 | mutex_lock(&profile_flip_mutex); |
| 276 | i = per_cpu(cpu_profile_flip, get_cpu()); | 276 | i = per_cpu(cpu_profile_flip, get_cpu()); |
| 277 | put_cpu(); | 277 | put_cpu(); |
| 278 | on_each_cpu(__profile_flip_buffers, NULL, 0, 1); | 278 | on_each_cpu(__profile_flip_buffers, NULL, 1); |
| 279 | for_each_online_cpu(cpu) { | 279 | for_each_online_cpu(cpu) { |
| 280 | struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; | 280 | struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; |
| 281 | memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); | 281 | memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); |
| @@ -558,7 +558,7 @@ static int __init create_hash_tables(void) | |||
| 558 | out_cleanup: | 558 | out_cleanup: |
| 559 | prof_on = 0; | 559 | prof_on = 0; |
| 560 | smp_mb(); | 560 | smp_mb(); |
| 561 | on_each_cpu(profile_nop, NULL, 0, 1); | 561 | on_each_cpu(profile_nop, NULL, 1); |
| 562 | for_each_online_cpu(cpu) { | 562 | for_each_online_cpu(cpu) { |
| 563 | struct page *page; | 563 | struct page *page; |
| 564 | 564 | ||
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index c09605f8d16c..6addab5e6d88 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -127,7 +127,7 @@ void rcu_barrier(void) | |||
| 127 | * until all the callbacks are queued. | 127 | * until all the callbacks are queued. |
| 128 | */ | 128 | */ |
| 129 | rcu_read_lock(); | 129 | rcu_read_lock(); |
| 130 | on_each_cpu(rcu_barrier_func, NULL, 0, 1); | 130 | on_each_cpu(rcu_barrier_func, NULL, 1); |
| 131 | rcu_read_unlock(); | 131 | rcu_read_unlock(); |
| 132 | wait_for_completion(&rcu_barrier_completion); | 132 | wait_for_completion(&rcu_barrier_completion); |
| 133 | mutex_unlock(&rcu_barrier_mutex); | 133 | mutex_unlock(&rcu_barrier_mutex); |
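
kernel/smp.c below is the new generic core. Besides the allocating smp_call_function_single() path, it exposes __smp_call_function_single() for callers that embed a struct call_single_data in their own object and want an asynchronous, allocation-free cross-CPU call (the embedding use case its kerneldoc further down calls out). A sketch under that assumption; the surrounding structure and the handle_payload() helper are illustrative only:

    #include <linux/smp.h>

    struct remote_work {
            struct call_single_data csd;
            int payload;
    };

    static void run_remote_work(void *info)
    {
            struct remote_work *rw = info;

            /* runs in IPI context on the target CPU */
            handle_payload(rw->payload);        /* illustrative helper */
    }

    static void kick_remote_work(int cpu, struct remote_work *rw)
    {
            rw->csd.func = run_remote_work;
            rw->csd.info = rw;
            rw->csd.flags = 0;        /* async: no wait, nothing to kfree */

            /* rw must stay live until the callback has run on the target CPU */
            __smp_call_function_single(cpu, &rw->csd);
    }
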
diff --git a/kernel/smp.c b/kernel/smp.c new file mode 100644 index 000000000000..4f582b257eba --- /dev/null +++ b/kernel/smp.c | |||
| @@ -0,0 +1,381 @@ | |||
| 1 | /* | ||
| 2 | * Generic helpers for smp ipi calls | ||
| 3 | * | ||
| 4 | * (C) Jens Axboe <jens.axboe@oracle.com> 2008 | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | #include <linux/init.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/percpu.h> | ||
| 10 | #include <linux/rcupdate.h> | ||
| 11 | #include <linux/smp.h> | ||
| 12 | |||
| 13 | static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); | ||
| 14 | static LIST_HEAD(call_function_queue); | ||
| 15 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); | ||
| 16 | |||
| 17 | enum { | ||
| 18 | CSD_FLAG_WAIT = 0x01, | ||
| 19 | CSD_FLAG_ALLOC = 0x02, | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct call_function_data { | ||
| 23 | struct call_single_data csd; | ||
| 24 | spinlock_t lock; | ||
| 25 | unsigned int refs; | ||
| 26 | cpumask_t cpumask; | ||
| 27 | struct rcu_head rcu_head; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct call_single_queue { | ||
| 31 | struct list_head list; | ||
| 32 | spinlock_t lock; | ||
| 33 | }; | ||
| 34 | |||
| 35 | void __cpuinit init_call_single_data(void) | ||
| 36 | { | ||
| 37 | int i; | ||
| 38 | |||
| 39 | for_each_possible_cpu(i) { | ||
| 40 | struct call_single_queue *q = &per_cpu(call_single_queue, i); | ||
| 41 | |||
| 42 | spin_lock_init(&q->lock); | ||
| 43 | INIT_LIST_HEAD(&q->list); | ||
| 44 | } | ||
| 45 | } | ||
| 46 | |||
| 47 | static void csd_flag_wait(struct call_single_data *data) | ||
| 48 | { | ||
| 49 | /* Wait for response */ | ||
| 50 | do { | ||
| 51 | /* | ||
| 52 | * We need to see the flags store in the IPI handler | ||
| 53 | */ | ||
| 54 | smp_mb(); | ||
| 55 | if (!(data->flags & CSD_FLAG_WAIT)) | ||
| 56 | break; | ||
| 57 | cpu_relax(); | ||
| 58 | } while (1); | ||
| 59 | } | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Insert a previously allocated call_single_data element for execution | ||
| 63 | * on the given CPU. data must already have ->func, ->info, and ->flags set. | ||
| 64 | */ | ||
| 65 | static void generic_exec_single(int cpu, struct call_single_data *data) | ||
| 66 | { | ||
| 67 | struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); | ||
| 68 | int wait = data->flags & CSD_FLAG_WAIT, ipi; | ||
| 69 | unsigned long flags; | ||
| 70 | |||
| 71 | spin_lock_irqsave(&dst->lock, flags); | ||
| 72 | ipi = list_empty(&dst->list); | ||
| 73 | list_add_tail(&data->list, &dst->list); | ||
| 74 | spin_unlock_irqrestore(&dst->lock, flags); | ||
| 75 | |||
| 76 | if (ipi) | ||
| 77 | arch_send_call_function_single_ipi(cpu); | ||
| 78 | |||
| 79 | if (wait) | ||
| 80 | csd_flag_wait(data); | ||
| 81 | } | ||
| 82 | |||
| 83 | static void rcu_free_call_data(struct rcu_head *head) | ||
| 84 | { | ||
| 85 | struct call_function_data *data; | ||
| 86 | |||
| 87 | data = container_of(head, struct call_function_data, rcu_head); | ||
| 88 | |||
| 89 | kfree(data); | ||
| 90 | } | ||
| 91 | |||
| 92 | /* | ||
| 93 | * Invoked by arch to handle an IPI for call function. Must be called with | ||
| 94 | * interrupts disabled. | ||
| 95 | */ | ||
| 96 | void generic_smp_call_function_interrupt(void) | ||
| 97 | { | ||
| 98 | struct call_function_data *data; | ||
| 99 | int cpu = get_cpu(); | ||
| 100 | |||
| 101 | /* | ||
| 102 | * It's ok to use list_for_each_rcu() here even though we may delete | ||
| 103 | * 'pos', since list_del_rcu() doesn't clear ->next | ||
| 104 | */ | ||
| 105 | rcu_read_lock(); | ||
| 106 | list_for_each_entry_rcu(data, &call_function_queue, csd.list) { | ||
| 107 | int refs; | ||
| 108 | |||
| 109 | if (!cpu_isset(cpu, data->cpumask)) | ||
| 110 | continue; | ||
| 111 | |||
| 112 | data->csd.func(data->csd.info); | ||
| 113 | |||
| 114 | spin_lock(&data->lock); | ||
| 115 | cpu_clear(cpu, data->cpumask); | ||
| 116 | WARN_ON(data->refs == 0); | ||
| 117 | data->refs--; | ||
| 118 | refs = data->refs; | ||
| 119 | spin_unlock(&data->lock); | ||
| 120 | |||
| 121 | if (refs) | ||
| 122 | continue; | ||
| 123 | |||
| 124 | spin_lock(&call_function_lock); | ||
| 125 | list_del_rcu(&data->csd.list); | ||
| 126 | spin_unlock(&call_function_lock); | ||
| 127 | |||
| 128 | if (data->csd.flags & CSD_FLAG_WAIT) { | ||
| 129 | /* | ||
| 130 | * serialize stores to data with the flag clear | ||
| 131 | * and wakeup | ||
| 132 | */ | ||
| 133 | smp_wmb(); | ||
| 134 | data->csd.flags &= ~CSD_FLAG_WAIT; | ||
| 135 | } else | ||
| 136 | call_rcu(&data->rcu_head, rcu_free_call_data); | ||
| 137 | } | ||
| 138 | rcu_read_unlock(); | ||
| 139 | |||
| 140 | put_cpu(); | ||
| 141 | } | ||
| 142 | |||
| 143 | /* | ||
| 144 | * Invoked by arch to handle an IPI for call function single. Must be called | ||
| 145 | * from the arch with interrupts disabled. | ||
| 146 | */ | ||
| 147 | void generic_smp_call_function_single_interrupt(void) | ||
| 148 | { | ||
| 149 | struct call_single_queue *q = &__get_cpu_var(call_single_queue); | ||
| 150 | LIST_HEAD(list); | ||
| 151 | |||
| 152 | /* | ||
| 153 | * Need to see other stores to list head for checking whether | ||
| 154 | * list is empty without holding q->lock | ||
| 155 | */ | ||
| 156 | smp_mb(); | ||
| 157 | while (!list_empty(&q->list)) { | ||
| 158 | unsigned int data_flags; | ||
| 159 | |||
| 160 | spin_lock(&q->lock); | ||
| 161 | list_replace_init(&q->list, &list); | ||
| 162 | spin_unlock(&q->lock); | ||
| 163 | |||
| 164 | while (!list_empty(&list)) { | ||
| 165 | struct call_single_data *data; | ||
| 166 | |||
| 167 | data = list_entry(list.next, struct call_single_data, | ||
| 168 | list); | ||
| 169 | list_del(&data->list); | ||
| 170 | |||
| 171 | /* | ||
| 172 | * 'data' can be invalid after this call if | ||
| 173 | * flags == 0 (when called through | ||
| 174 | * generic_exec_single()), so save them away before | ||
| 175 | * making the call. | ||
| 176 | */ | ||
| 177 | data_flags = data->flags; | ||
| 178 | |||
| 179 | data->func(data->info); | ||
| 180 | |||
| 181 | if (data_flags & CSD_FLAG_WAIT) { | ||
| 182 | smp_wmb(); | ||
| 183 | data->flags &= ~CSD_FLAG_WAIT; | ||
| 184 | } else if (data_flags & CSD_FLAG_ALLOC) | ||
| 185 | kfree(data); | ||
| 186 | } | ||
| 187 | /* | ||
| 188 | * See comment on outer loop | ||
| 189 | */ | ||
| 190 | smp_mb(); | ||
| 191 | } | ||
| 192 | } | ||
| 193 | |||
| 194 | /* | ||
| 195 | * smp_call_function_single - Run a function on a specific CPU | ||
| 196 | * @func: The function to run. This must be fast and non-blocking. | ||
| 197 | * @info: An arbitrary pointer to pass to the function. | ||
| 198 | * @wait: If true, wait until the function has completed on the target CPU. | ||
| 199 | * | ||
| 200 | * Returns 0 on success, else a negative status code. Note that @wait | ||
| 201 | * will be implicitly turned on in case of allocation failures, since | ||
| 202 | * we fall back to on-stack allocation. | ||
| 203 | */ | ||
| 204 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 205 | int wait) | ||
| 206 | { | ||
| 207 | struct call_single_data d; | ||
| 208 | unsigned long flags; | ||
| 209 | /* prevent preemption and migration to another processor */ | ||
| 210 | int me = get_cpu(); | ||
| 211 | |||
| 212 | /* Can deadlock when called with interrupts disabled */ | ||
| 213 | WARN_ON(irqs_disabled()); | ||
| 214 | |||
| 215 | if (cpu == me) { | ||
| 216 | local_irq_save(flags); | ||
| 217 | func(info); | ||
| 218 | local_irq_restore(flags); | ||
| 219 | } else { | ||
| 220 | struct call_single_data *data = NULL; | ||
| 221 | |||
| 222 | if (!wait) { | ||
| 223 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | ||
| 224 | if (data) | ||
| 225 | data->flags = CSD_FLAG_ALLOC; | ||
| 226 | } | ||
| 227 | if (!data) { | ||
| 228 | data = &d; | ||
| 229 | data->flags = CSD_FLAG_WAIT; | ||
| 230 | } | ||
| 231 | |||
| 232 | data->func = func; | ||
| 233 | data->info = info; | ||
| 234 | generic_exec_single(cpu, data); | ||
| 235 | } | ||
| 236 | |||
| 237 | put_cpu(); | ||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 241 | |||
| 242 | /** | ||
| 243 | * __smp_call_function_single(): Run a function on another CPU | ||
| 244 | * @cpu: The CPU to run on. | ||
| 245 | * @data: Pre-allocated and setup data structure | ||
| 246 | * | ||
| 247 | * Like smp_call_function_single(), but allows the caller to pass in a | ||
| 248 | * pre-allocated data structure. Useful for embedding @data inside other | ||
| 249 | * structures, for instance. | ||
| 250 | * | ||
| 251 | */ | ||
| 252 | void __smp_call_function_single(int cpu, struct call_single_data *data) | ||
| 253 | { | ||
| 254 | /* Can deadlock when called with interrupts disabled */ | ||
| 255 | WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled()); | ||
| 256 | |||
| 257 | generic_exec_single(cpu, data); | ||
| 258 | } | ||
| 259 | |||
| 260 | /** | ||
| 261 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
| 262 | * @mask: The set of cpus to run on. | ||
| 263 | * @func: The function to run. This must be fast and non-blocking. | ||
| 264 | * @info: An arbitrary pointer to pass to the function. | ||
| 265 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
| 266 | * | ||
| 267 | * Returns 0 on success, else a negative status code. | ||
| 268 | * | ||
| 269 | * If @wait is true, then returns once @func has returned. Note that @wait | ||
| 270 | * will be implicitly turned on in case of allocation failures, since | ||
| 271 | * we fall back to on-stack allocation. | ||
| 272 | * | ||
| 273 | * You must not call this function with disabled interrupts or from a | ||
| 274 | * hardware interrupt handler or from a bottom half handler. Preemption | ||
| 275 | * must be disabled when calling this function. | ||
| 276 | */ | ||
| 277 | int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | ||
| 278 | int wait) | ||
| 279 | { | ||
| 280 | struct call_function_data d; | ||
| 281 | struct call_function_data *data = NULL; | ||
| 282 | cpumask_t allbutself; | ||
| 283 | unsigned long flags; | ||
| 284 | int cpu, num_cpus; | ||
| 285 | |||
| 286 | /* Can deadlock when called with interrupts disabled */ | ||
| 287 | WARN_ON(irqs_disabled()); | ||
| 288 | |||
| 289 | cpu = smp_processor_id(); | ||
| 290 | allbutself = cpu_online_map; | ||
| 291 | cpu_clear(cpu, allbutself); | ||
| 292 | cpus_and(mask, mask, allbutself); | ||
| 293 | num_cpus = cpus_weight(mask); | ||
| 294 | |||
| 295 | /* | ||
| 296 | * If zero CPUs, return. If just a single CPU, turn this request | ||
| 297 | * into a targeted single call instead since it's faster. | ||
| 298 | */ | ||
| 299 | if (!num_cpus) | ||
| 300 | return 0; | ||
| 301 | else if (num_cpus == 1) { | ||
| 302 | cpu = first_cpu(mask); | ||
| 303 | return smp_call_function_single(cpu, func, info, wait); | ||
| 304 | } | ||
| 305 | |||
| 306 | if (!wait) { | ||
| 307 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | ||
| 308 | if (data) | ||
| 309 | data->csd.flags = CSD_FLAG_ALLOC; | ||
| 310 | } | ||
| 311 | if (!data) { | ||
| 312 | data = &d; | ||
| 313 | data->csd.flags = CSD_FLAG_WAIT; | ||
| 314 | } | ||
| 315 | |||
| 316 | spin_lock_init(&data->lock); | ||
| 317 | data->csd.func = func; | ||
| 318 | data->csd.info = info; | ||
| 319 | data->refs = num_cpus; | ||
| 320 | data->cpumask = mask; | ||
| 321 | |||
| 322 | spin_lock_irqsave(&call_function_lock, flags); | ||
| 323 | list_add_tail_rcu(&data->csd.list, &call_function_queue); | ||
| 324 | spin_unlock_irqrestore(&call_function_lock, flags); | ||
| 325 | |||
| 326 | /* Send a message to all CPUs in the map */ | ||
| 327 | arch_send_call_function_ipi(mask); | ||
| 328 | |||
| 329 | /* optionally wait for the CPUs to complete */ | ||
| 330 | if (wait) | ||
| 331 | csd_flag_wait(&data->csd); | ||
| 332 | |||
| 333 | return 0; | ||
| 334 | } | ||
| 335 | EXPORT_SYMBOL(smp_call_function_mask); | ||
| 336 | |||
| 337 | /** | ||
| 338 | * smp_call_function(): Run a function on all other CPUs. | ||
| 339 | * @func: The function to run. This must be fast and non-blocking. | ||
| 340 | * @info: An arbitrary pointer to pass to the function. | ||
| 341 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
| 342 | * | ||
| 343 | * Returns 0 on success, else a negative status code. | ||
| 344 | * | ||
| 345 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 346 | * it returns just before the target cpu calls @func. In case of allocation | ||
| 347 | * failure, @wait will be implicitly turned on. | ||
| 348 | * | ||
| 349 | * You must not call this function with disabled interrupts or from a | ||
| 350 | * hardware interrupt handler or from a bottom half handler. | ||
| 351 | */ | ||
| 352 | int smp_call_function(void (*func)(void *), void *info, int wait) | ||
| 353 | { | ||
| 354 | int ret; | ||
| 355 | |||
| 356 | preempt_disable(); | ||
| 357 | ret = smp_call_function_mask(cpu_online_map, func, info, wait); | ||
| 358 | preempt_enable(); | ||
| 359 | return ret; | ||
| 360 | } | ||
| 361 | EXPORT_SYMBOL(smp_call_function); | ||
| 362 | |||
| 363 | void ipi_call_lock(void) | ||
| 364 | { | ||
| 365 | spin_lock(&call_function_lock); | ||
| 366 | } | ||
| 367 | |||
| 368 | void ipi_call_unlock(void) | ||
| 369 | { | ||
| 370 | spin_unlock(&call_function_lock); | ||
| 371 | } | ||
| 372 | |||
| 373 | void ipi_call_lock_irq(void) | ||
| 374 | { | ||
| 375 | spin_lock_irq(&call_function_lock); | ||
| 376 | } | ||
| 377 | |||
| 378 | void ipi_call_unlock_irq(void) | ||
| 379 | { | ||
| 380 | spin_unlock_irq(&call_function_lock); | ||
| 381 | } | ||
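The file above is the whole of the new generic helper layer. As a minimal, hypothetical sketch of how the exported entry points are meant to be called (remote_report_cpu() and example_generic_ipi_usage() are illustrative names, not part of this merge):

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Runs on the target CPU, in IPI context, with interrupts disabled. */
static void remote_report_cpu(void *info)
{
	*(int *)info = smp_processor_id();
}

static void example_generic_ipi_usage(void)
{
	cpumask_t others;
	int answer = -1;
	int cpu = get_cpu();	/* disable preemption while picking targets */

	others = cpu_online_map;
	cpu_clear(cpu, others);

	if (!cpus_empty(others)) {
		/* New four-argument form: the old 'retry' flag is gone. */
		smp_call_function_single(first_cpu(others), remote_report_cpu,
					 &answer, 1);
		printk(KERN_INFO "callback ran on CPU%d\n", answer);

		/* Run on every other online CPU and wait for completion. */
		smp_call_function_mask(others, remote_report_cpu, &answer, 1);
	}

	put_cpu();
}

Both calls must be made from process context with interrupts enabled, and smp_call_function_mask() additionally expects preemption to be disabled, which get_cpu() takes care of here.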
diff --git a/kernel/softirq.c b/kernel/softirq.c index 3e9e896fdc5b..81e2fe0f983a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -645,12 +645,12 @@ __init int spawn_ksoftirqd(void) | |||
| 645 | /* | 645 | /* |
| 646 | * Call a function on all processors | 646 | * Call a function on all processors |
| 647 | */ | 647 | */ |
| 648 | int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait) | 648 | int on_each_cpu(void (*func) (void *info), void *info, int wait) |
| 649 | { | 649 | { |
| 650 | int ret = 0; | 650 | int ret = 0; |
| 651 | 651 | ||
| 652 | preempt_disable(); | 652 | preempt_disable(); |
| 653 | ret = smp_call_function(func, info, retry, wait); | 653 | ret = smp_call_function(func, info, wait); |
| 654 | local_irq_disable(); | 654 | local_irq_disable(); |
| 655 | func(info); | 655 | func(info); |
| 656 | local_irq_enable(); | 656 | local_irq_enable(); |
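The on_each_cpu() change above is the template for every caller conversion in the remainder of this merge: the unused 'retry' argument disappears and only 'wait' remains. A hedged before/after sketch, with flush_local_state() as a made-up callback:

#include <linux/smp.h>

/* Hypothetical callback; runs on every online CPU with interrupts disabled. */
static void flush_local_state(void *unused)
{
	/* per-CPU work goes here */
}

static void flush_everywhere(void)
{
	/* old API: on_each_cpu(flush_local_state, NULL, 0, 1); */
	on_each_cpu(flush_local_state, NULL, 1);	/* wait for all CPUs */
}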
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 67f80c261709..f48d0f09d32f 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -268,7 +268,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu) | |||
| 268 | "offline CPU #%d\n", *oncpu); | 268 | "offline CPU #%d\n", *oncpu); |
| 269 | else | 269 | else |
| 270 | smp_call_function_single(*oncpu, tick_do_broadcast_on_off, | 270 | smp_call_function_single(*oncpu, tick_do_broadcast_on_off, |
| 271 | &reason, 1, 1); | 271 | &reason, 1); |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | /* | 274 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f024b9b3a2a6..79ac4afc908c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -918,7 +918,7 @@ void drain_local_pages(void *arg) | |||
| 918 | */ | 918 | */ |
| 919 | void drain_all_pages(void) | 919 | void drain_all_pages(void) |
| 920 | { | 920 | { |
| 921 | on_each_cpu(drain_local_pages, NULL, 0, 1); | 921 | on_each_cpu(drain_local_pages, NULL, 1); |
| 922 | } | 922 | } |
| 923 | 923 | ||
| 924 | #ifdef CONFIG_HIBERNATION | 924 | #ifdef CONFIG_HIBERNATION |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -2446,7 +2446,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
| 2446 | struct kmem_list3 *l3; | 2446 | struct kmem_list3 *l3; |
| 2447 | int node; | 2447 | int node; |
| 2448 | 2448 | ||
| 2449 | on_each_cpu(do_drain, cachep, 1, 1); | 2449 | on_each_cpu(do_drain, cachep, 1); |
| 2450 | check_irq_on(); | 2450 | check_irq_on(); |
| 2451 | for_each_online_node(node) { | 2451 | for_each_online_node(node) { |
| 2452 | l3 = cachep->nodelists[node]; | 2452 | l3 = cachep->nodelists[node]; |
| @@ -3931,7 +3931,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | |||
| 3931 | } | 3931 | } |
| 3932 | new->cachep = cachep; | 3932 | new->cachep = cachep; |
| 3933 | 3933 | ||
| 3934 | on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); | 3934 | on_each_cpu(do_ccupdate_local, (void *)new, 1); |
| 3935 | 3935 | ||
| 3936 | check_irq_on(); | 3936 | check_irq_on(); |
| 3937 | cachep->batchcount = batchcount; | 3937 | cachep->batchcount = batchcount; |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -1496,7 +1496,7 @@ static void flush_cpu_slab(void *d) | |||
| 1496 | static void flush_all(struct kmem_cache *s) | 1496 | static void flush_all(struct kmem_cache *s) |
| 1497 | { | 1497 | { |
| 1498 | #ifdef CONFIG_SMP | 1498 | #ifdef CONFIG_SMP |
| 1499 | on_each_cpu(flush_cpu_slab, s, 1, 1); | 1499 | on_each_cpu(flush_cpu_slab, s, 1); |
| 1500 | #else | 1500 | #else |
| 1501 | unsigned long flags; | 1501 | unsigned long flags; |
| 1502 | 1502 | ||
diff --git a/net/core/flow.c b/net/core/flow.c index 19991175fdeb..5cf81052d044 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
| @@ -298,7 +298,7 @@ void flow_cache_flush(void) | |||
| 298 | init_completion(&info.completion); | 298 | init_completion(&info.completion); |
| 299 | 299 | ||
| 300 | local_bh_disable(); | 300 | local_bh_disable(); |
| 301 | smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0); | 301 | smp_call_function(flow_cache_flush_per_cpu, &info, 0); |
| 302 | flow_cache_flush_tasklet((unsigned long)&info); | 302 | flow_cache_flush_tasklet((unsigned long)&info); |
| 303 | local_bh_enable(); | 303 | local_bh_enable(); |
| 304 | 304 | ||
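flow_cache_flush() above is a reminder that smp_call_function() targets only the other online CPUs: with wait == 0 it queues the work and returns, and the caller then handles the local CPU's share directly. A minimal sketch of the same pattern, with do_per_cpu_work() as an illustrative name (flow.c itself uses local_bh_disable(), which also disables preemption; plain preempt_disable() is shown here only to keep the sketch generic):

#include <linux/smp.h>
#include <linux/preempt.h>

static void do_per_cpu_work(void *info)
{
	/* runs in IPI context on every other online CPU */
}

static void run_everywhere_including_self(void)
{
	preempt_disable();				/* smp_call_function() requires this */
	smp_call_function(do_per_cpu_work, NULL, 0);	/* wait == 0: do not block */
	do_per_cpu_work(NULL);				/* the calling CPU is not in the broadcast */
	preempt_enable();
}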
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 7f82b7616212..cc34ac769a3c 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
| @@ -480,7 +480,7 @@ static void iucv_setmask_mp(void) | |||
| 480 | if (cpu_isset(cpu, iucv_buffer_cpumask) && | 480 | if (cpu_isset(cpu, iucv_buffer_cpumask) && |
| 481 | !cpu_isset(cpu, iucv_irq_cpumask)) | 481 | !cpu_isset(cpu, iucv_irq_cpumask)) |
| 482 | smp_call_function_single(cpu, iucv_allow_cpu, | 482 | smp_call_function_single(cpu, iucv_allow_cpu, |
| 483 | NULL, 0, 1); | 483 | NULL, 1); |
| 484 | preempt_enable(); | 484 | preempt_enable(); |
| 485 | } | 485 | } |
| 486 | 486 | ||
| @@ -498,7 +498,7 @@ static void iucv_setmask_up(void) | |||
| 498 | cpumask = iucv_irq_cpumask; | 498 | cpumask = iucv_irq_cpumask; |
| 499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); | 499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); |
| 500 | for_each_cpu_mask(cpu, cpumask) | 500 | for_each_cpu_mask(cpu, cpumask) |
| 501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); | 501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); |
| 502 | } | 502 | } |
| 503 | 503 | ||
| 504 | /** | 504 | /** |
| @@ -523,7 +523,7 @@ static int iucv_enable(void) | |||
| 523 | rc = -EIO; | 523 | rc = -EIO; |
| 524 | preempt_disable(); | 524 | preempt_disable(); |
| 525 | for_each_online_cpu(cpu) | 525 | for_each_online_cpu(cpu) |
| 526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); | 526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
| 527 | preempt_enable(); | 527 | preempt_enable(); |
| 528 | if (cpus_empty(iucv_buffer_cpumask)) | 528 | if (cpus_empty(iucv_buffer_cpumask)) |
| 529 | /* No cpu could declare an iucv buffer. */ | 529 | /* No cpu could declare an iucv buffer. */ |
| @@ -545,7 +545,7 @@ out: | |||
| 545 | */ | 545 | */ |
| 546 | static void iucv_disable(void) | 546 | static void iucv_disable(void) |
| 547 | { | 547 | { |
| 548 | on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1); | 548 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); |
| 549 | kfree(iucv_path_table); | 549 | kfree(iucv_path_table); |
| 550 | } | 550 | } |
| 551 | 551 | ||
| @@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
| 580 | case CPU_ONLINE_FROZEN: | 580 | case CPU_ONLINE_FROZEN: |
| 581 | case CPU_DOWN_FAILED: | 581 | case CPU_DOWN_FAILED: |
| 582 | case CPU_DOWN_FAILED_FROZEN: | 582 | case CPU_DOWN_FAILED_FROZEN: |
| 583 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); | 583 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
| 584 | break; | 584 | break; |
| 585 | case CPU_DOWN_PREPARE: | 585 | case CPU_DOWN_PREPARE: |
| 586 | case CPU_DOWN_PREPARE_FROZEN: | 586 | case CPU_DOWN_PREPARE_FROZEN: |
| @@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
| 589 | if (cpus_empty(cpumask)) | 589 | if (cpus_empty(cpumask)) |
| 590 | /* Can't offline last IUCV enabled cpu. */ | 590 | /* Can't offline last IUCV enabled cpu. */ |
| 591 | return NOTIFY_BAD; | 591 | return NOTIFY_BAD; |
| 592 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1); | 592 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); |
| 593 | if (cpus_empty(iucv_irq_cpumask)) | 593 | if (cpus_empty(iucv_irq_cpumask)) |
| 594 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), | 594 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), |
| 595 | iucv_allow_cpu, NULL, 0, 1); | 595 | iucv_allow_cpu, NULL, 1); |
| 596 | break; | 596 | break; |
| 597 | } | 597 | } |
| 598 | return NOTIFY_OK; | 598 | return NOTIFY_OK; |
| @@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void) | |||
| 652 | * pending interrupts force them to the work queue by calling | 652 | * pending interrupts force them to the work queue by calling |
| 653 | * an empty function on all cpus. | 653 | * an empty function on all cpus. |
| 654 | */ | 654 | */ |
| 655 | smp_call_function(__iucv_cleanup_queue, NULL, 0, 1); | 655 | smp_call_function(__iucv_cleanup_queue, NULL, 1); |
| 656 | spin_lock_irq(&iucv_queue_lock); | 656 | spin_lock_irq(&iucv_queue_lock); |
| 657 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { | 657 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { |
| 658 | /* Remove stale work items from the task queue. */ | 658 | /* Remove stale work items from the task queue. */ |
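The iucv.c hunks here, and the kvm ones below, repeat the same mechanical conversion for smp_call_function_single(): the unused 'nonatomic'/'retry' argument before the final wait flag is dropped. As a hedged illustration, with set_cpu_mode() as a made-up callback:

#include <linux/smp.h>

static void set_cpu_mode(void *info)
{
	/* runs on the chosen CPU (or is called directly if that is the current CPU) */
}

static void update_one_cpu(int cpu)
{
	/* before: smp_call_function_single(cpu, set_cpu_mode, NULL, 0, 1); */
	smp_call_function_single(cpu, set_cpu_mode, NULL, 1);	/* wait for completion */
}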
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2d29e260da3d..d4eae6af0738 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -1266,12 +1266,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
| 1266 | case CPU_UP_CANCELED: | 1266 | case CPU_UP_CANCELED: |
| 1267 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", | 1267 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", |
| 1268 | cpu); | 1268 | cpu); |
| 1269 | smp_call_function_single(cpu, hardware_disable, NULL, 0, 1); | 1269 | smp_call_function_single(cpu, hardware_disable, NULL, 1); |
| 1270 | break; | 1270 | break; |
| 1271 | case CPU_ONLINE: | 1271 | case CPU_ONLINE: |
| 1272 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | 1272 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", |
| 1273 | cpu); | 1273 | cpu); |
| 1274 | smp_call_function_single(cpu, hardware_enable, NULL, 0, 1); | 1274 | smp_call_function_single(cpu, hardware_enable, NULL, 1); |
| 1275 | break; | 1275 | break; |
| 1276 | } | 1276 | } |
| 1277 | return NOTIFY_OK; | 1277 | return NOTIFY_OK; |
| @@ -1286,7 +1286,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | |||
| 1286 | * in vmx root mode. | 1286 | * in vmx root mode. |
| 1287 | */ | 1287 | */ |
| 1288 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); | 1288 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); |
| 1289 | on_each_cpu(hardware_disable, NULL, 0, 1); | 1289 | on_each_cpu(hardware_disable, NULL, 1); |
| 1290 | } | 1290 | } |
| 1291 | return NOTIFY_OK; | 1291 | return NOTIFY_OK; |
| 1292 | } | 1292 | } |
| @@ -1474,12 +1474,12 @@ int kvm_init(void *opaque, unsigned int vcpu_size, | |||
| 1474 | for_each_online_cpu(cpu) { | 1474 | for_each_online_cpu(cpu) { |
| 1475 | smp_call_function_single(cpu, | 1475 | smp_call_function_single(cpu, |
| 1476 | kvm_arch_check_processor_compat, | 1476 | kvm_arch_check_processor_compat, |
| 1477 | &r, 0, 1); | 1477 | &r, 1); |
| 1478 | if (r < 0) | 1478 | if (r < 0) |
| 1479 | goto out_free_1; | 1479 | goto out_free_1; |
| 1480 | } | 1480 | } |
| 1481 | 1481 | ||
| 1482 | on_each_cpu(hardware_enable, NULL, 0, 1); | 1482 | on_each_cpu(hardware_enable, NULL, 1); |
| 1483 | r = register_cpu_notifier(&kvm_cpu_notifier); | 1483 | r = register_cpu_notifier(&kvm_cpu_notifier); |
| 1484 | if (r) | 1484 | if (r) |
| 1485 | goto out_free_2; | 1485 | goto out_free_2; |
| @@ -1525,7 +1525,7 @@ out_free_3: | |||
| 1525 | unregister_reboot_notifier(&kvm_reboot_notifier); | 1525 | unregister_reboot_notifier(&kvm_reboot_notifier); |
| 1526 | unregister_cpu_notifier(&kvm_cpu_notifier); | 1526 | unregister_cpu_notifier(&kvm_cpu_notifier); |
| 1527 | out_free_2: | 1527 | out_free_2: |
| 1528 | on_each_cpu(hardware_disable, NULL, 0, 1); | 1528 | on_each_cpu(hardware_disable, NULL, 1); |
| 1529 | out_free_1: | 1529 | out_free_1: |
| 1530 | kvm_arch_hardware_unsetup(); | 1530 | kvm_arch_hardware_unsetup(); |
| 1531 | out_free_0: | 1531 | out_free_0: |
| @@ -1547,7 +1547,7 @@ void kvm_exit(void) | |||
| 1547 | sysdev_class_unregister(&kvm_sysdev_class); | 1547 | sysdev_class_unregister(&kvm_sysdev_class); |
| 1548 | unregister_reboot_notifier(&kvm_reboot_notifier); | 1548 | unregister_reboot_notifier(&kvm_reboot_notifier); |
| 1549 | unregister_cpu_notifier(&kvm_cpu_notifier); | 1549 | unregister_cpu_notifier(&kvm_cpu_notifier); |
| 1550 | on_each_cpu(hardware_disable, NULL, 0, 1); | 1550 | on_each_cpu(hardware_disable, NULL, 1); |
| 1551 | kvm_arch_hardware_unsetup(); | 1551 | kvm_arch_hardware_unsetup(); |
| 1552 | kvm_arch_exit(); | 1552 | kvm_arch_exit(); |
| 1553 | kvm_exit_debug(); | 1553 | kvm_exit_debug(); |
