diff options
author | David S. Miller <davem@davemloft.net> | 2008-07-18 02:44:50 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-18 02:44:50 -0400 |
commit | d172ad18f9914f70c761a6cad470efc986d5e07e (patch) | |
tree | de393d1c2819fbf9f68539b3e29ff7424365f8f0 | |
parent | 4fe3ebec122f23a095cc1d17557c175caaa55ca1 (diff) |
sparc64: Convert to generic helpers for IPI function calls.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | arch/sparc64/Kconfig | 1 | ||||
-rw-r--r-- | arch/sparc64/kernel/smp.c | 87 | ||||
-rw-r--r-- | arch/sparc64/kernel/sparc64_ksyms.c | 2 | ||||
-rw-r--r-- | arch/sparc64/kernel/ttable.S | 7 | ||||
-rw-r--r-- | arch/sparc64/mm/ultra.S | 5 | ||||
-rw-r--r-- | include/asm-sparc/pil.h | 1 | ||||
-rw-r--r-- | include/asm-sparc/smp_64.h | 3 |
7 files changed, 33 insertions(+), 73 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 794d22fdf463..1aeb1da9829d 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig | |||
@@ -16,6 +16,7 @@ config SPARC64 | |||
16 | select HAVE_IDE | 16 | select HAVE_IDE |
17 | select HAVE_LMB | 17 | select HAVE_LMB |
18 | select HAVE_ARCH_KGDB | 18 | select HAVE_ARCH_KGDB |
19 | select USE_GENERIC_SMP_HELPERS if SMP | ||
19 | 20 | ||
20 | config GENERIC_TIME | 21 | config GENERIC_TIME |
21 | bool | 22 | bool |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index c099d96f1239..7cf72b4bb108 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -788,89 +788,36 @@ static void smp_start_sync_tick_client(int cpu) | |||
788 | 0, 0, 0, mask); | 788 | 0, 0, 0, mask); |
789 | } | 789 | } |
790 | 790 | ||
791 | /* Send cross call to all processors except self. */ | ||
792 | #define smp_cross_call(func, ctx, data1, data2) \ | ||
793 | smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map) | ||
794 | |||
795 | struct call_data_struct { | ||
796 | void (*func) (void *info); | ||
797 | void *info; | ||
798 | atomic_t finished; | ||
799 | int wait; | ||
800 | }; | ||
801 | |||
802 | static struct call_data_struct *call_data; | ||
803 | |||
804 | extern unsigned long xcall_call_function; | 791 | extern unsigned long xcall_call_function; |
805 | 792 | ||
806 | /** | 793 | void arch_send_call_function_ipi(cpumask_t mask) |
807 | * smp_call_function(): Run a function on all other CPUs. | ||
808 | * @func: The function to run. This must be fast and non-blocking. | ||
809 | * @info: An arbitrary pointer to pass to the function. | ||
810 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
811 | * | ||
812 | * Returns 0 on success, else a negative status code. Does not return until | ||
813 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
814 | * | ||
815 | * You must not call this function with disabled interrupts or from a | ||
816 | * hardware interrupt handler or from a bottom half handler. | ||
817 | */ | ||
818 | static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, | ||
819 | int wait, cpumask_t mask) | ||
820 | { | 794 | { |
821 | struct call_data_struct data; | ||
822 | int cpus; | ||
823 | |||
824 | /* Can deadlock when called with interrupts disabled */ | ||
825 | WARN_ON(irqs_disabled()); | ||
826 | |||
827 | data.func = func; | ||
828 | data.info = info; | ||
829 | atomic_set(&data.finished, 0); | ||
830 | data.wait = wait; | ||
831 | |||
832 | spin_lock(&call_lock); | ||
833 | |||
834 | cpu_clear(smp_processor_id(), mask); | ||
835 | cpus = cpus_weight(mask); | ||
836 | if (!cpus) | ||
837 | goto out_unlock; | ||
838 | |||
839 | call_data = &data; | ||
840 | mb(); | ||
841 | |||
842 | smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); | 795 | smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); |
796 | } | ||
843 | 797 | ||
844 | /* Wait for response */ | 798 | extern unsigned long xcall_call_function_single; |
845 | while (atomic_read(&data.finished) != cpus) | ||
846 | cpu_relax(); | ||
847 | 799 | ||
848 | out_unlock: | 800 | void arch_send_call_function_single_ipi(int cpu) |
849 | spin_unlock(&call_lock); | 801 | { |
802 | cpumask_t mask = cpumask_of_cpu(cpu); | ||
850 | 803 | ||
851 | return 0; | 804 | smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask); |
852 | } | 805 | } |
853 | 806 | ||
854 | int smp_call_function(void (*func)(void *info), void *info, int wait) | 807 | /* Send cross call to all processors except self. */ |
855 | { | 808 | #define smp_cross_call(func, ctx, data1, data2) \ |
856 | return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); | 809 | smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map) |
857 | } | ||
858 | 810 | ||
859 | void smp_call_function_client(int irq, struct pt_regs *regs) | 811 | void smp_call_function_client(int irq, struct pt_regs *regs) |
860 | { | 812 | { |
861 | void (*func) (void *info) = call_data->func; | 813 | clear_softint(1 << irq); |
862 | void *info = call_data->info; | 814 | generic_smp_call_function_interrupt(); |
815 | } | ||
863 | 816 | ||
817 | void smp_call_function_single_client(int irq, struct pt_regs *regs) | ||
818 | { | ||
864 | clear_softint(1 << irq); | 819 | clear_softint(1 << irq); |
865 | if (call_data->wait) { | 820 | generic_smp_call_function_single_interrupt(); |
866 | /* let initiator proceed only after completion */ | ||
867 | func(info); | ||
868 | atomic_inc(&call_data->finished); | ||
869 | } else { | ||
870 | /* let initiator proceed after getting data */ | ||
871 | atomic_inc(&call_data->finished); | ||
872 | func(info); | ||
873 | } | ||
874 | } | 821 | } |
875 | 822 | ||
876 | static void tsb_sync(void *info) | 823 | static void tsb_sync(void *info) |
@@ -890,7 +837,7 @@ static void tsb_sync(void *info) | |||
890 | 837 | ||
891 | void smp_tsb_sync(struct mm_struct *mm) | 838 | void smp_tsb_sync(struct mm_struct *mm) |
892 | { | 839 | { |
893 | sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); | 840 | smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1); |
894 | } | 841 | } |
895 | 842 | ||
896 | extern unsigned long xcall_flush_tlb_mm; | 843 | extern unsigned long xcall_flush_tlb_mm; |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 49d3ea50c247..504e678ee128 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -108,8 +108,6 @@ EXPORT_SYMBOL(__read_unlock); | |||
108 | EXPORT_SYMBOL(__write_lock); | 108 | EXPORT_SYMBOL(__write_lock); |
109 | EXPORT_SYMBOL(__write_unlock); | 109 | EXPORT_SYMBOL(__write_unlock); |
110 | EXPORT_SYMBOL(__write_trylock); | 110 | EXPORT_SYMBOL(__write_trylock); |
111 | |||
112 | EXPORT_SYMBOL(smp_call_function); | ||
113 | #endif /* CONFIG_SMP */ | 111 | #endif /* CONFIG_SMP */ |
114 | 112 | ||
115 | #ifdef CONFIG_MCOUNT | 113 | #ifdef CONFIG_MCOUNT |
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 450053af039e..1ade3d6fb7fc 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S | |||
@@ -58,7 +58,12 @@ tl0_irq3: BTRAP(0x43) | |||
58 | tl0_irq4: BTRAP(0x44) | 58 | tl0_irq4: BTRAP(0x44) |
59 | #endif | 59 | #endif |
60 | tl0_irq5: TRAP_IRQ(handler_irq, 5) | 60 | tl0_irq5: TRAP_IRQ(handler_irq, 5) |
61 | tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) | 61 | #ifdef CONFIG_SMP |
62 | tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6) | ||
63 | #else | ||
64 | tl0_irq6: BTRAP(0x46) | ||
65 | #endif | ||
66 | tl0_irq7: BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) | ||
62 | tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) | 67 | tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) |
63 | tl0_irq14: TRAP_IRQ(timer_interrupt, 14) | 68 | tl0_irq14: TRAP_IRQ(timer_interrupt, 14) |
64 | tl0_irq15: TRAP_IRQ(handler_irq, 15) | 69 | tl0_irq15: TRAP_IRQ(handler_irq, 15) |
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 9bb2d90a9df6..4c8ca131ffaf 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S | |||
@@ -688,6 +688,11 @@ xcall_call_function: | |||
688 | wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint | 688 | wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint |
689 | retry | 689 | retry |
690 | 690 | ||
691 | .globl xcall_call_function_single | ||
692 | xcall_call_function_single: | ||
693 | wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint | ||
694 | retry | ||
695 | |||
691 | .globl xcall_receive_signal | 696 | .globl xcall_receive_signal |
692 | xcall_receive_signal: | 697 | xcall_receive_signal: |
693 | wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint | 698 | wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint |
diff --git a/include/asm-sparc/pil.h b/include/asm-sparc/pil.h index eaac842d88c3..71819bb943fc 100644 --- a/include/asm-sparc/pil.h +++ b/include/asm-sparc/pil.h | |||
@@ -17,5 +17,6 @@ | |||
17 | #define PIL_SMP_CAPTURE 3 | 17 | #define PIL_SMP_CAPTURE 3 |
18 | #define PIL_SMP_CTX_NEW_VERSION 4 | 18 | #define PIL_SMP_CTX_NEW_VERSION 4 |
19 | #define PIL_DEVICE_IRQ 5 | 19 | #define PIL_DEVICE_IRQ 5 |
20 | #define PIL_SMP_CALL_FUNC_SNGL 6 | ||
20 | 21 | ||
21 | #endif /* !(_SPARC64_PIL_H) */ | 22 | #endif /* !(_SPARC64_PIL_H) */ |
diff --git a/include/asm-sparc/smp_64.h b/include/asm-sparc/smp_64.h index 4cfe09c51f1f..57224dd37b3a 100644 --- a/include/asm-sparc/smp_64.h +++ b/include/asm-sparc/smp_64.h | |||
@@ -34,6 +34,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | |||
34 | extern cpumask_t cpu_core_map[NR_CPUS]; | 34 | extern cpumask_t cpu_core_map[NR_CPUS]; |
35 | extern int sparc64_multi_core; | 35 | extern int sparc64_multi_core; |
36 | 36 | ||
37 | extern void arch_send_call_function_single_ipi(int cpu); | ||
38 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
39 | |||
37 | /* | 40 | /* |
38 | * General functions that each host system must provide. | 41 | * General functions that each host system must provide. |
39 | */ | 42 | */ |