aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	David S. Miller <davem@davemloft.net>	2008-04-25 06:11:37 -0400
committer	David S. Miller <davem@davemloft.net>	2008-04-25 06:11:37 -0400
commit2664ef44cf5053d2b7dff01cecac70bc601a5f68 (patch)
treec2f5d5533647885f3580c565870d2959dbb204d2
parent020cfb05f2c594c778537159bd45ea5efb0c5e0d (diff)
[SPARC64]: Wrap SMP IPIs with irq_enter()/irq_exit().
Otherwise all sorts of bad things can happen, including spurious softlockup reports.

Other platforms have this same bug, in one form or another, just don't see the issue because they don't sleep as long as sparc64 can in NOHZ.

Thanks to some brilliant debugging by Peter Zijlstra.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc64/kernel/smp.c	27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 524b88920947..409dd71f2738 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -866,14 +866,21 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 	void *info = call_data->info;
 
 	clear_softint(1 << irq);
+
+	irq_enter();
+
+	if (!call_data->wait) {
+		/* let initiator proceed after getting data */
+		atomic_inc(&call_data->finished);
+	}
+
+	func(info);
+
+	irq_exit();
+
 	if (call_data->wait) {
 		/* let initiator proceed only after completion */
-		func(info);
 		atomic_inc(&call_data->finished);
-	} else {
-		/* let initiator proceed after getting data */
-		atomic_inc(&call_data->finished);
-		func(info);
 	}
 }
 
@@ -1032,7 +1039,9 @@ void smp_receive_signal(int cpu)
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
+	irq_enter();
 	clear_softint(1 << irq);
+	irq_exit();
 }
 
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
@@ -1040,6 +1049,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 	struct mm_struct *mm;
 	unsigned long flags;
 
+	irq_enter();
+
 	clear_softint(1 << irq);
 
 	/* See if we need to allocate a new TLB context because
@@ -1059,6 +1070,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context),
 		       SECONDARY_CONTEXT);
+
+	irq_exit();
 }
 
 void smp_new_mmu_context_version(void)
@@ -1217,6 +1230,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 
+	irq_enter();
+
 	preempt_disable();
 
 	__asm__ __volatile__("flushw");
@@ -1229,6 +1244,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	prom_world(0);
 
 	preempt_enable();
+
+	irq_exit();
 }
 
 /* /proc/profile writes can call this, don't __init it please. */