Diffstat (limited to 'arch/sparc64/kernel/smp.c'):

 arch/sparc64/kernel/smp.c | 41 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 23 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8175a6968c6b..90eaca3ec9a6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -745,12 +745,21 @@ struct call_data_struct {
 	int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until the function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
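The new kerneldoc above documents the public entry point. For reference, a minimal caller sketch against that documented signature of this era (the do_flush() callback and flush_request payload are invented for illustration):

#include <linux/smp.h>
#include <linux/kernel.h>

/* Hypothetical payload; any pointer may be passed via @info. */
struct flush_request {
	unsigned long addr;
};

/* Must be fast and non-blocking: it runs in cross-call context on each CPU. */
static void do_flush(void *info)
{
	struct flush_request *req = info;

	printk(KERN_DEBUG "flushing %lx on cpu %d\n",
	       req->addr, smp_processor_id());
}

static void flush_everywhere(struct flush_request *req)
{
	/* wait=1: spin until every other CPU has finished do_flush(). */
	if (smp_call_function(do_flush, req, 0, 1))
		printk(KERN_WARNING "cross call failed\n");

	/* smp_call_function() runs @func on all *other* CPUs, so the
	 * caller still has to cover its own CPU. */
	do_flush(req);
}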
@@ -759,7 +768,6 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 {
 	struct call_data_struct data;
 	int cpus;
-	long timeout;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -777,31 +785,18 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 		goto out_unlock;
 
 	call_data = &data;
+	mb();
 
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-	/*
-	 * Wait for other cpus to complete function or at
-	 * least snap the call data.
-	 */
-	timeout = 1000000;
-	while (atomic_read(&data.finished) != cpus) {
-		if (--timeout <= 0)
-			goto out_timeout;
-		barrier();
-		udelay(1);
-	}
+	/* Wait for response */
+	while (atomic_read(&data.finished) != cpus)
+		cpu_relax();
 
 out_unlock:
 	spin_unlock(&call_lock);
 
 	return 0;
-
-out_timeout:
-	spin_unlock(&call_lock);
-	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
-	       cpus, atomic_read(&data.finished));
-	return 0;
 }
 
 int smp_call_function(void (*func)(void *info), void *info,
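The ordering is the interesting part of this hunk: the new mb() publishes the fully initialized call_data before the cross call is sent, and waiters now spin on cpu_relax() instead of a bounded udelay() loop (the removed out_timeout path printed a warning but returned 0 anyway, so the timeout added complexity without actually reporting failure). A userspace sketch of the same publish-then-spin pattern, with the kernel's mb()/cpu_relax() modeled by C11 seq_cst atomics and a plain busy-wait; all names here are illustrative, not from the patch:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct call_data {
	void (*func)(void *);
	void *info;
	atomic_int finished;
};

static struct call_data *_Atomic call_data;

static void *worker(void *unused)
{
	struct call_data *d;

	(void)unused;
	/* Remote side: wait until the pointer is published... */
	while ((d = atomic_load(&call_data)) == NULL)
		;
	d->func(d->info);                  /* ...run the function... */
	atomic_fetch_add(&d->finished, 1); /* ...and signal completion. */
	return NULL;
}

static void hello(void *info)
{
	printf("hello from worker: %s\n", (const char *)info);
}

int main(void)
{
	struct call_data data = { hello, "payload", 0 };
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* Like "call_data = &data; mb();" — the seq_cst store orders the
	 * initialization of 'data' before the pointer becomes visible. */
	atomic_store(&call_data, &data);

	/* Like the new wait loop: spin until the (single) worker is done. */
	while (atomic_load(&data.finished) != 1)
		;                          /* cpu_relax() stand-in */

	pthread_join(t, NULL);
	return 0;
}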
@@ -1285,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
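On the iterator rename: the bare for_each_cpu() of this era walked cpu_possible_map, and for_each_possible_cpu() simply makes that mask explicit. A toy userspace model (NR_CPUS, the mask values, and for_each_cpu_in() are all invented here) of why per-CPU setup such as prof_multiplier() covers every possible CPU rather than only the online ones:

#include <stdio.h>

#define NR_CPUS 8

/* Iterate over every bit set in 'mask', in the spirit of for_each_*_cpu(). */
#define for_each_cpu_in(cpu, mask) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if ((mask) & (1u << (cpu)))

int main(void)
{
	unsigned int cpu_possible_map = 0xff; /* CPUs that could ever exist */
	unsigned int cpu_online_map   = 0x0f; /* CPUs currently running */
	int cpu;

	/* Per-CPU state must be initialized for every possible CPU,
	 * not just the online ones, so a CPU brought up later does
	 * not find stale values. */
	for_each_cpu_in(cpu, cpu_possible_map)
		printf("init per-cpu state for cpu %d\n", cpu);

	for_each_cpu_in(cpu, cpu_online_map)
		printf("cpu %d is online\n", cpu);

	return 0;
}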
@@ -1313,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;
 
 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}
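The sibling mask construction groups CPU ids by (id >> 2), i.e. blocks of four consecutive ids, which matches sun4v Niagara's four hardware strands per core; the XXX comment concedes this mapping should really come from the machine description rather than being hard-coded. A quick standalone check of that grouping rule (ncpus chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	int i, j, ncpus = 8;

	/* Ids that agree in all bits above the low two are siblings. */
	for (i = 0; i < ncpus; i++) {
		printf("cpu %d siblings:", i);
		for (j = 0; j < ncpus; j++)
			if ((j >> 2) == (i >> 2))
				printf(" %d", j);
		printf("\n"); /* e.g. "cpu 5 siblings: 4 5 6 7" */
	}
	return 0;
}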