author     Ralf Baechle <ralf@linux-mips.org>  2007-03-29 18:42:42 -0400
committer  Ralf Baechle <ralf@linux-mips.org>  2007-03-29 18:46:36 -0400
commit     8a1e97ee2e025f116765c92409a3cf8f6cb07ad6 (patch)
tree       c3df538fce3f32b35b1b8355a9a411cb668e724e
parent     6c9fde4bfff11b2fd93b4e518ae7ecb25a9244e4 (diff)
[MIPS] SMTC: Fix recursion in instant IPI replay code.
local_irq_restore -> raw_local_irq_restore -> irq_restore_epilog ->
smtc_ipi_replay -> smtc_ipi_dq -> spin_unlock_irqrestore ->
_spin_unlock_irqrestore -> local_irq_restore
The recursion does abort when there is no more IPI queued for a CPU, so
this isn't usually fatal, which is why we got away with it for so long
until the problem was discovered by code inspection.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
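
To make the cycle concrete, here is a stand-alone C sketch of the pre-patch
behaviour. Everything in it is a simplified stand-in, not kernel code:
restore_irqs() plays the role of raw_local_irq_restore(), replay_ipis() of
smtc_ipi_replay(), dequeue_ipi() of smtc_ipi_dq(), and the depth counter of
IPIQ[cpu].depth.

/*
 * Stand-alone sketch of the recursion this patch removes.  All names
 * are simplified stand-ins, not the real kernel code.
 */
#include <stdio.h>

static int ipi_queue_depth = 3;		/* pretend three IPIs are pending */

static void restore_irqs(unsigned long flags);

static int dequeue_ipi(void)
{
	unsigned long flags = 0;	/* spin_lock_irqsave(): IRQs were on */
	int got = 0;

	if (ipi_queue_depth > 0) {
		ipi_queue_depth--;
		got = 1;
	}
	restore_irqs(flags);		/* spin_unlock_irqrestore() re-enters */
	return got;
}

static void replay_ipis(void)
{
	if (ipi_queue_depth == 0)	/* this test is all that bounds */
		return;			/* the recursion */
	while (dequeue_ipi())
		;			/* self_ipi() would run here */
}

static void restore_irqs(unsigned long flags)
{
	/* irq_restore_epilog(): replay IPIs if IRQs are being re-enabled */
	if (!(flags & 0x0400))
		replay_ipis();
}

int main(void)
{
	restore_irqs(0);
	printf("drained, depth=%d\n", ipi_queue_depth);	/* prints 0 */
	return 0;
}

Each pending IPI adds another restore_irqs() -> replay_ipis() ->
dequeue_ipi() round to the call stack, so the recursion is bounded only by
the queue depth; that is the sense in which it "does abort" yet was still
worth fixing.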
-rw-r--r--  arch/mips/kernel/smtc.c      | 40
-rw-r--r--  include/asm-mips/irqflags.h  | 55
-rw-r--r--  include/asm-mips/smtc_ipi.h  | 16
3 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index e50fe20571f0..5dcfab6b288e 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -999,10 +999,17 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled.  We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
  */
 
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
@@ -1017,17 +1024,30 @@ void smtc_ipi_replay(void)
 	 * is clear, and we'll handle it as a real pseudo-interrupt
 	 * and not a pseudo-pseudo interrupt.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
+	if (IPIQ[cpu].depth > 0) {
+		while (1) {
+			struct smtc_ipi_q *q = &IPIQ[cpu];
+			struct smtc_ipi *pipi;
+			extern void self_ipi(struct smtc_ipi *);
+
+			spin_lock(&q->lock);
+			pipi = __smtc_ipi_dq(q);
+			spin_unlock(&q->lock);
+			if (!pipi)
+				break;
 
-		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
 			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
+			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }
 
+void smtc_ipi_replay(void)
+{
+	raw_local_irq_disable();
+	__smtc_ipi_replay();
+}
+
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1132,7 +1152,13 @@ void smtc_idle_loop_hook(void)
 	 * is in use, there should never be any.
 	 */
 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	smtc_ipi_replay();
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__smtc_ipi_replay();
+		local_irq_restore(flags);
+	}
 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
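For contrast with the sketch above, here is a stand-alone sketch of the
post-patch shape (again illustrative user-space code, with a pthread
spinlock standing in for the IPI queue lock): interrupts are conceptually
disabled across the whole drain, the dequeue runs under a bare
spin_lock()/spin_unlock() pair that never touches the interrupt state, and
the loop is flat rather than recursive.

/* Stand-alone sketch of the fixed, iterative replay (illustrative). */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t q_lock;
static int ipi_queue_depth = 3;

static int __dequeue_ipi(void)		/* caller must hold q_lock */
{
	if (ipi_queue_depth == 0)
		return 0;
	ipi_queue_depth--;
	return 1;
}

static void __replay_ipis(void)		/* caller must have "IRQs" off */
{
	for (;;) {
		int got;

		pthread_spin_lock(&q_lock);	/* bare lock, no irqrestore */
		got = __dequeue_ipi();
		pthread_spin_unlock(&q_lock);
		if (!got)
			break;
		/* self_ipi(pipi) would run here */
	}
}

int main(void)
{
	pthread_spin_init(&q_lock, PTHREAD_PROCESS_PRIVATE);
	__replay_ipis();
	printf("drained, depth=%d\n", ipi_queue_depth);	/* prints 0 */
	return 0;
}

The real __smtc_ipi_replay() relies on its callers for the interrupts-off
guarantee: raw_local_irq_disable() in smtc_ipi_replay(), local_irq_save()
in the idle-loop hook.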
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index af3b07dfad4b..e459fa05db83 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -13,29 +13,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
 #include <asm/hazards.h>
 
-/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
- * at the cost of branch and call overhead on each local_irq_restore()
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-
-extern void smtc_ipi_replay(void);
-
-#define irq_restore_epilog(flags)		\
-	do {					\
-		if (!(flags & 0x0400))		\
-			smtc_ipi_replay();	\
-	} while (0)
-
-#else
-
-#define irq_restore_epilog(ignore) do { } while (0)
-
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-
 __asm__ (
 	"	.macro	raw_local_irq_enable	\n"
 	"	.set	push	\n"
@@ -205,17 +185,28 @@ __asm__ (
 	"	.set	pop	\n"
 	"	.endm	\n");
 
-#define raw_local_irq_restore(flags)			\
-do {							\
-	unsigned long __tmp1;				\
-							\
-	__asm__ __volatile__(				\
-		"raw_local_irq_restore\t%0"		\
-		: "=r" (__tmp1)				\
-		: "0" (flags)				\
-		: "memory");				\
-	irq_restore_epilog(flags);			\
-} while(0)
+extern void smtc_ipi_replay(void);
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	/*
+	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
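One note on the magic number that survives the rewrite: neither the old
macro nor the new inline names the 0x0400 it tests. My reading, and it is
only a reading since the patch never says so, is that this is the IXMT
(interrupt-exempt) bit, bit 10 of the MIPS MT TCStatus register that SMTC
saves into flags:

/* Assumed meaning of the 0x0400 test (my reading, not from the patch):
 * TCSTATUS_IXMT is bit 10 of TCStatus, the per-TC interrupt-exempt bit
 * that SMTC's raw_local_irq_save() records in flags. */
#define TCSTATUS_IXMT	(1 << 10)	/* 0x0400 */

/* Replay deferred IPIs only when the restore is re-enabling interrupts,
 * i.e. the saved TCStatus was not interrupt-exempt: */
static inline int restoring_irqs_on(unsigned long flags)
{
	return !(flags & TCSTATUS_IXMT);
}

If that reading is right, the unlikely() branch fires exactly when a
restore brings interrupts back on, which is the only time deferred IPIs
can be replayed.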
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index 360ea6d250c7..a52a4a7a36e0 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -65,12 +65,10 @@ static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
 {
 	struct smtc_ipi *p;
-	long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->head == NULL)
 		p = NULL;
 	else {
@@ -81,7 +79,19 @@ static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
 		if (q->head == NULL)
 			q->tail = NULL;
 	}
+
+	return p;
+}
+
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+	unsigned long flags;
+	struct smtc_ipi *p;
+
+	spin_lock_irqsave(&q->lock, flags);
+	p = __smtc_ipi_dq(q);
 	spin_unlock_irqrestore(&q->lock, flags);
+
 	return p;
 }
 