author     David S. Miller <davem@sunset.davemloft.net>    2006-04-06 19:54:33 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2006-04-10 01:56:44 -0400
commit     aa1d1a0af6022f02fb601508d3feaabafd405299 (patch)
tree       ef472a0c4df262b26366eb0d25fa2d03c87a444b /arch
parent     731bbe431f7dbbcbdc5293cfb187a916c375e83b (diff)
[SPARC64]: smp_call_function() fixups...
1) Take doc-book function comment from i386 implementation.
2) Cacheline align call_lock, taken from powerpc.
3) Need memory barrier after setting call_data.
4) Remove timeout.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--   arch/sparc64/kernel/smp.c   35
1 files changed, 15 insertions, 20 deletions
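
Item 3 of the commit message is the classic publish-then-signal ordering problem: the store to call_data must be visible to the other CPUs before the cross call that tells them to dereference it, hence the mb() added after "call_data = &data;". Below is a minimal userspace sketch of that ordering concern, not kernel code: C11 fences stand in for mb(), a flag store stands in for smp_cross_call_masked(), and all names (remote_cpu, kicked, struct call_data's fields) are invented for illustration.

/*
 * Userspace analogy only: publish a pointer, issue a fence standing in
 * for mb(), then "signal" the other CPU.  The consumer spins on the
 * flag, fences, and only then dereferences the pointer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct call_data {
	int value;                      /* stands in for func/info/wait */
};

static struct call_data *call_data;     /* published pointer */
static atomic_int kicked;               /* stands in for the cross-call IPI */

static void *remote_cpu(void *unused)
{
	/* Wait for the "cross call". */
	while (!atomic_load_explicit(&kicked, memory_order_relaxed))
		;
	atomic_thread_fence(memory_order_acquire);

	/* Safe: the publisher's fence ordered the pointer store first. */
	printf("remote cpu sees value=%d\n", call_data->value);
	return NULL;
}

int main(void)
{
	static struct call_data data = { .value = 42 };
	pthread_t t;

	pthread_create(&t, NULL, remote_cpu, NULL);

	call_data = &data;                              /* publish */
	atomic_thread_fence(memory_order_release);      /* analogue of mb() */
	atomic_store_explicit(&kicked, 1, memory_order_relaxed);  /* "IPI" */

	pthread_join(t, NULL);
	return 0;
}

Without the fence pair, the consumer could observe the flag before the pointer store and dereference a stale call_data; the kernel change guards against the same reordering between the call_data assignment and the cross call.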
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8175a6968c6b..eb36f7988ff7 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -745,12 +745,21 @@ struct call_data_struct {
 	int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
@@ -759,7 +768,6 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 {
 	struct call_data_struct data;
 	int cpus;
-	long timeout;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -777,31 +785,18 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 		goto out_unlock;
 
 	call_data = &data;
+	mb();
 
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-	/*
-	 * Wait for other cpus to complete function or at
-	 * least snap the call data.
-	 */
-	timeout = 1000000;
-	while (atomic_read(&data.finished) != cpus) {
-		if (--timeout <= 0)
-			goto out_timeout;
-		barrier();
-		udelay(1);
-	}
+	/* Wait for response */
+	while (atomic_read(&data.finished) != cpus)
+		cpu_relax();
 
 out_unlock:
 	spin_unlock(&call_lock);
 
 	return 0;
-
-out_timeout:
-	spin_unlock(&call_lock);
-	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
-	       cpus, atomic_read(&data.finished));
-	return 0;
 }
 
 int smp_call_function(void (*func)(void *info), void *info,
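
For reference, a hedged caller-side sketch of using smp_call_function() under the constraints spelled out in the new kernel-doc comment (process context, interrupts enabled, fast non-blocking callback). It assumes the four-argument signature of that era, matching the @func/@info/@nonatomic/@wait lines above; the module and function names are hypothetical and not part of this commit.

/*
 * Hypothetical caller (not part of this commit): exercises
 * smp_call_function() as described by the new kernel-doc comment.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <asm/atomic.h>

/* Runs on every other CPU: must be fast and must not block or sleep. */
static void poke_cpu(void *info)
{
	atomic_t *count = info;

	atomic_inc(count);
}

static int __init xcall_demo_init(void)
{
	atomic_t count = ATOMIC_INIT(0);

	/*
	 * Process context, interrupts enabled.  nonatomic is unused;
	 * wait=1 spins until every other CPU has run poke_cpu().
	 */
	smp_call_function(poke_cpu, &count, 0, 1);

	printk(KERN_INFO "poked %d other cpus\n", atomic_read(&count));
	return 0;
}

static void __exit xcall_demo_exit(void)
{
}

module_init(xcall_demo_init);
module_exit(xcall_demo_exit);
MODULE_LICENSE("GPL");

Passing wait=1 relies on the busy-wait loop in the patched smp_call_function_mask(), which after this commit spins on data.finished with cpu_relax() instead of bailing out on the old one-second timeout.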