Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c | 39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c4548a88953c..cf56128097c8 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -760,12 +760,9 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 				  int nonatomic, int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
-	int cpus = cpus_weight(mask) - 1;
+	int cpus;
 	long timeout;
 
-	if (!cpus)
-		return 0;
-
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
@@ -776,6 +773,11 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
 	spin_lock(&call_lock);
 
+	cpu_clear(smp_processor_id(), mask);
+	cpus = cpus_weight(mask);
+	if (!cpus)
+		goto out_unlock;
+
 	call_data = &data;
 
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
@@ -792,6 +794,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 		udelay(1);
 	}
 
+out_unlock:
 	spin_unlock(&call_lock);
 
 	return 0;
@@ -845,6 +848,7 @@ extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -974,7 +978,13 @@ void smp_receive_signal(int cpu)
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
+	clear_softint(1 << irq);
+}
+
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
 	struct mm_struct *mm;
+	unsigned long flags;
 
 	clear_softint(1 << irq);
 
@@ -982,25 +992,24 @@ void smp_receive_signal_client(int irq, struct pt_regs *regs)
 	 * the version of the one we are using is now out of date.
 	 */
 	mm = current->active_mm;
-	if (likely(mm)) {
-		unsigned long flags;
+	if (unlikely(!mm || (mm == &init_mm)))
+		return;
 
-		spin_lock_irqsave(&mm->context.lock, flags);
+	spin_lock_irqsave(&mm->context.lock, flags);
 
-		if (unlikely(!CTX_VALID(mm->context)))
-			get_new_mmu_context(mm);
+	if (unlikely(!CTX_VALID(mm->context)))
+		get_new_mmu_context(mm);
 
-		load_secondary_context(mm);
-		__flush_tlb_mm(CTX_HWBITS(mm->context),
-			       SECONDARY_CONTEXT);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
-		spin_unlock_irqrestore(&mm->context.lock, flags);
-	}
+	load_secondary_context(mm);
+	__flush_tlb_mm(CTX_HWBITS(mm->context),
+		       SECONDARY_CONTEXT);
 }
 
 void smp_new_mmu_context_version(void)
 {
-	__smp_receive_signal_mask(cpu_online_map);
+	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 }
 
 void smp_report_regs(void)