| | | |
|---|---|---|
| author | Ralf Baechle <ralf@linux-mips.org> | 2007-03-29 18:42:42 -0400 |
| committer | Ralf Baechle <ralf@linux-mips.org> | 2007-03-29 18:46:36 -0400 |
| commit | 8a1e97ee2e025f116765c92409a3cf8f6cb07ad6 (patch) | |
| tree | c3df538fce3f32b35b1b8355a9a411cb668e724e /include/asm-mips | |
| parent | 6c9fde4bfff11b2fd93b4e518ae7ecb25a9244e4 (diff) | |
[MIPS] SMTC: Fix recursion in instant IPI replay code.
local_irq_restore -> raw_local_irq_restore -> irq_restore_epilog ->
smtc_ipi_replay -> smtc_ipi_dq -> spin_unlock_irqrestore ->
_spin_unlock_irqrestore -> local_irq_restore
The recursion does abort when there are no more IPIs queued for a CPU, so it
isn't usually fatal, which is why we got away with it for so long until it was
discovered by code inspection.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
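
To make the cycle concrete, here is a minimal stand-alone C model of the old code path. The names mirror the kernel's, but every body below is a stub written purely for illustration, not the real MIPS/SMTC implementation; it only shows why the recursion bottoms out once the per-CPU IPI queue is empty.

```c
/*
 * Stand-in model of the recursion (illustration only, not kernel code):
 * local_irq_restore() replays deferred IPIs, but the dequeue path releases
 * its lock via spin_unlock_irqrestore(), which calls local_irq_restore()
 * again.  The recursion stops only once the simulated IPI queue is empty.
 */
#include <stdio.h>

static int pending_ipis = 3;		/* pretend three IPIs were deferred */

static void local_irq_restore(unsigned long flags);

static void spin_unlock_irqrestore(unsigned long flags)
{
	local_irq_restore(flags);	/* re-enters the restore path */
}

static int smtc_ipi_dq(void)
{
	int ipi = pending_ipis > 0 ? pending_ipis-- : 0;

	spin_unlock_irqrestore(0);	/* old code: unlock restores flags */
	return ipi;
}

static void smtc_ipi_replay(void)
{
	while (pending_ipis > 0)	/* guard that lets the recursion abort */
		if (smtc_ipi_dq())
			printf("replayed one deferred IPI\n");
}

static void local_irq_restore(unsigned long flags)
{
	if (!(flags & 0x0400))		/* bit test taken from the old macro */
		smtc_ipi_replay();	/* old irq_restore_epilog() behaviour */
}

int main(void)
{
	local_irq_restore(0);		/* each dequeue re-enters restore */
	return 0;
}
```

Each dequeue re-enters the restore path exactly once, so the recursion depth tracks the number of deferred IPIs rather than growing without bound, which matches the observation above that the bug was latent rather than immediately fatal.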
Diffstat (limited to 'include/asm-mips')

| | | |
|---|---|---|
| -rw-r--r-- | include/asm-mips/irqflags.h | 55 |
| -rw-r--r-- | include/asm-mips/smtc_ipi.h | 16 |

2 files changed, 36 insertions, 35 deletions
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index af3b07dfad4b..e459fa05db83 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -13,29 +13,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
 #include <asm/hazards.h>
 
-/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
- * at the cost of branch and call overhead on each local_irq_restore()
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-
-extern void smtc_ipi_replay(void);
-
-#define irq_restore_epilog(flags)		\
-	do {					\
-		if (!(flags & 0x0400))		\
-			smtc_ipi_replay();	\
-	} while (0)
-
-#else
-
-#define irq_restore_epilog(ignore) do { } while (0)
-
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-
 __asm__ (
 	"	.macro	raw_local_irq_enable	\n"
 	"	.set	push			\n"
@@ -205,17 +185,28 @@ __asm__ (
 	"	.set	pop			\n"
 	"	.endm				\n");
 
-#define raw_local_irq_restore(flags)			\
-do {							\
-	unsigned long __tmp1;				\
-							\
-	__asm__ __volatile__(				\
-		"raw_local_irq_restore\t%0"		\
-		: "=r" (__tmp1)				\
-		: "0" (flags)				\
-		: "memory");				\
-	irq_restore_epilog(flags);			\
-} while(0)
+extern void smtc_ipi_replay(void);
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	/*
+	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index 360ea6d250c7..a52a4a7a36e0 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -65,12 +65,10 @@ static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
 {
 	struct smtc_ipi *p;
-	long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->head == NULL)
 		p = NULL;
 	else {
@@ -81,7 +79,19 @@ static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
 		if (q->head == NULL)
 			q->tail = NULL;
 	}
+
+	return p;
+}
+
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+	unsigned long flags;
+	struct smtc_ipi *p;
+
+	spin_lock_irqsave(&q->lock, flags);
+	p = __smtc_ipi_dq(q);
 	spin_unlock_irqrestore(&q->lock, flags);
+
 	return p;
 }
 
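
The point of the split: __smtc_ipi_dq() only manipulates the list, so a caller that already holds q->lock can drain the queue without ever going through spin_unlock_irqrestore() and therefore without re-entering local_irq_restore(). The reworked replay loop itself lives outside this diffstat (which is limited to include/asm-mips), so the following is only a sketch of how such a caller could use the new helper, not the actual implementation.

```c
/*
 * Sketch only (assumed caller, not part of this diff): drain a per-CPU
 * IPI queue using the lock-free dequeue helper.  Plain spin_lock()/
 * spin_unlock() never touch the interrupt flags, so there is no path
 * back into raw_local_irq_restore() from here.
 */
#include <asm/smtc_ipi.h>

static void replay_queue_sketch(struct smtc_ipi_q *q)
{
	struct smtc_ipi *pipi;

	for (;;) {
		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);

		if (pipi == NULL)
			break;

		/* deliver/dispatch the deferred IPI here */
	}
}
```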