author		David S. Miller <davem@davemloft.net>	2008-07-18 02:44:50 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-18 02:44:50 -0400
commit		d172ad18f9914f70c761a6cad470efc986d5e07e (patch)
tree		de393d1c2819fbf9f68539b3e29ff7424365f8f0 /arch
parent		4fe3ebec122f23a095cc1d17557c175caaa55ca1 (diff)

sparc64: Convert to generic helpers for IPI function calls.

Signed-off-by: David S. Miller <davem@davemloft.net>
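For orientation, here is a hedged sketch of the caller-side API this conversion plugs sparc64 into. Once USE_GENERIC_SMP_HELPERS is selected, cross-CPU function calls go through kernel/smp.c, and the architecture only supplies the IPI-send hooks and softint handlers added in the diff below. The callback and function names in the sketch are made up for illustration; the signatures assume the 2.6.27-era API that the rest of this diff uses (smp_call_function() with three arguments).

#include <linux/smp.h>

/* Hypothetical callback: runs on each targeted CPU in IPI context,
 * so it must be fast and must not sleep. */
static void drain_local_counters(void *info)
{
	/* per-CPU work goes here */
}

static void example_callers(void)
{
	/* Run on every other online CPU and wait for completion.
	 * The generic code queues the request and then invokes
	 * arch_send_call_function_ipi(), which this patch routes
	 * through xcall_call_function. */
	smp_call_function(drain_local_counters, NULL, 1);

	/* Single-CPU variant: ends up in
	 * arch_send_call_function_single_ipi() and the new
	 * xcall_call_function_single vector. */
	smp_call_function_single(0, drain_local_counters, NULL, 1);
}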
Diffstat (limited to 'arch')

-rw-r--r--	arch/sparc64/Kconfig			 1
-rw-r--r--	arch/sparc64/kernel/smp.c		87
-rw-r--r--	arch/sparc64/kernel/sparc64_ksyms.c	 2
-rw-r--r--	arch/sparc64/kernel/ttable.S		 7
-rw-r--r--	arch/sparc64/mm/ultra.S			 5

5 files changed, 29 insertions, 73 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 794d22fdf463..1aeb1da9829d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -16,6 +16,7 @@ config SPARC64
 	select HAVE_IDE
 	select HAVE_LMB
 	select HAVE_ARCH_KGDB
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config GENERIC_TIME
 	bool
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c099d96f1239..7cf72b4bb108 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -788,89 +788,36 @@ static void smp_start_sync_tick_client(int cpu)
 			      0, 0, 0, mask);
 }
 
-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 extern unsigned long xcall_call_function;
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-					  int wait, cpumask_t mask)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.finished, 0);
-	data.wait = wait;
-
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		goto out_unlock;
-
-	call_data = &data;
-	mb();
-
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+}
 
-	/* Wait for response */
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+extern unsigned long xcall_call_function_single;
 
-out_unlock:
-	spin_unlock(&call_lock);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	return 0;
-}
-
-int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
-	return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
+	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
 }
 
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
+
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
+	clear_softint(1 << irq);
+	generic_smp_call_function_interrupt();
+}
 
+void smp_call_function_single_client(int irq, struct pt_regs *regs)
+{
 	clear_softint(1 << irq);
-	if (call_data->wait) {
-		/* let initiator proceed only after completion */
-		func(info);
-		atomic_inc(&call_data->finished);
-	} else {
-		/* let initiator proceed after getting data */
-		atomic_inc(&call_data->finished);
-		func(info);
-	}
+	generic_smp_call_function_single_interrupt();
 }
 
 static void tsb_sync(void *info)
@@ -890,7 +837,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
+	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
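With the private sparc64_smp_call_function_mask() gone, mask-targeted cross calls go through the generic smp_call_function_mask(), exactly as the smp_tsb_sync() hunk above now does. An illustrative, hedged sketch of such a caller follows; the callback and wrapper names are hypothetical, and it assumes the by-value cpumask_t API used elsewhere in this file.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical callback: invalidate some per-CPU cached state.
 * Runs on every CPU in the mask, in IPI context. */
static void invalidate_local_cache(void *info)
{
	/* per-CPU invalidation goes here */
}

static void invalidate_on_mask(cpumask_t mask)
{
	/* wait == 1: do not return until every targeted CPU has run the
	 * callback.  The generic helpers now do the queueing and completion
	 * tracking that the removed call_data_struct code used to do;
	 * sparc64 only delivers the IPI via arch_send_call_function_ipi(). */
	smp_call_function_mask(mask, invalidate_local_cache, NULL, 1);
}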
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 49d3ea50c247..504e678ee128 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -108,8 +108,6 @@ EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-
-EXPORT_SYMBOL(smp_call_function);
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_MCOUNT
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 450053af039e..1ade3d6fb7fc 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -58,7 +58,12 @@ tl0_irq3: BTRAP(0x43)
 tl0_irq4:	BTRAP(0x44)
 #endif
 tl0_irq5:	TRAP_IRQ(handler_irq, 5)
-tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+#ifdef CONFIG_SMP
+tl0_irq6:	TRAP_IRQ(smp_call_function_single_client, 6)
+#else
+tl0_irq6:	BTRAP(0x46)
+#endif
+tl0_irq7:	BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_IRQ(handler_irq, 15)
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 9bb2d90a9df6..4c8ca131ffaf 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -688,6 +688,11 @@ xcall_call_function:
 	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
 	retry
 
+	.globl		xcall_call_function_single
+xcall_call_function_single:
+	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
+	retry
+
 	.globl		xcall_receive_signal
 xcall_receive_signal:
 	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint