author     James Bottomley <jejb@parisc-linux.org>   2005-11-17 16:27:02 -0500
committer  Kyle McMartin <kyle@parisc-linux.org>     2005-11-17 16:27:02 -0500
commit     d911aed8adf74e1fae88d082b8474b2175b7f1da (patch)
tree       dc3271e33b2951a8fd43824300b790610c7cd221
parent     3f902886a81c6d4e6c399760936b645b5c7a7342 (diff)
[PARISC] Fix our interrupts not to use smp_call_function
On K and D class SMP machines, the generic IRQ code calls this path under an irq spinlock, which triggers the WARN_ON() in smp_call_function() (and is also illegal, because it could deadlock).
The fix is a new scheme based on IPI_NOP: update the shared cpu_eiem mask locally, and let the other CPUs reapply it at the end of their next interrupt.
Signed-off-by: James Bottomley <jejb@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
 arch/parisc/kernel/irq.c | 26 +++++++++++++++++++++---------
 arch/parisc/kernel/smp.c | 20 ++++++++++++++------
 include/asm-parisc/smp.h |  1 +
 3 files changed, 32 insertions(+), 15 deletions(-)
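Why the old scheme was broken: smp_call_function() (which on_each_cpu() wraps) spins until every other CPU has taken an IPI_CALL_FUNC interrupt, so it must never run with interrupts disabled. Below is a hedged sketch of the failing call chain, not the exact kernel source; desc->lock stands in for whichever irq spinlock the generic layer holds:

        /* Sketch: the enable/disable path as called from the generic
         * IRQ layer.  Interrupts are off while desc->lock is held, but
         * on_each_cpu() must wait for every other CPU to take an
         * interrupt -- if one of them is itself spinning on desc->lock
         * with interrupts off, both CPUs wait forever. */
        spin_lock_irqsave(&desc->lock, flags);
        cpu_eiem &= ~EIEM_MASK(irq);
        on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); /* may deadlock */
        spin_unlock_irqrestore(&desc->lock, flags);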
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 21a9c5ad580b..3998c0cb925b 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -43,26 +43,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
  */
 static volatile unsigned long cpu_eiem = 0;
 
-static void cpu_set_eiem(void *info)
-{
-        set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
 {
         unsigned long eirr_bit = EIEM_MASK(irq);
 
         cpu_eiem &= ~eirr_bit;
-        on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+        /* Do nothing on the other CPUs.  If they get this interrupt,
+         * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
+         * handle it, and the set_eiem() at the bottom will ensure it
+         * then gets disabled */
 }
 
 static void cpu_enable_irq(unsigned int irq)
 {
         unsigned long eirr_bit = EIEM_MASK(irq);
 
-        mtctl(eirr_bit, 23);    /* clear EIRR bit before unmasking */
         cpu_eiem |= eirr_bit;
+
+        /* FIXME: while our interrupts aren't nested, we cannot reset
+         * the eiem mask if we're already in an interrupt.  Once we
+         * implement nested interrupts, this can go away
+         */
+        if (!in_interrupt())
+                set_eiem(cpu_eiem);
-        on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+
+        /* This is just a simple NOP IPI.  But what it does is cause
+         * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+         * of the interrupt handler */
+        smp_send_all_nop();
 }
 
 static unsigned int cpu_startup_irq(unsigned int irq)
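The new cpu_disable_irq() can get away with touching only cpu_eiem because the receive path filters against it. Here is a hedged sketch of that filter, assuming do_cpu_irq_mask() reads pending bits from control register 23 (the EIRR); dispatch_irqs() is a hypothetical helper, not the real dispatch code:

        /* Sketch of the receive-side filter the patch's comment relies
         * on; the real do_cpu_irq_mask() carries more bookkeeping. */
        void do_cpu_irq_mask(struct pt_regs *regs)
        {
                /* pending AND still enabled: a bit cleared from
                 * cpu_eiem after the hardware latched it is simply
                 * ignored here... */
                unsigned long eirr_val = mfctl(23) & cpu_eiem;

                if (eirr_val)
                        dispatch_irqs(eirr_val, regs);  /* hypothetical */

                /* ...and this reapplies the (possibly shrunken) mask,
                 * so the level stays disabled in hardware from now on. */
                set_eiem(cpu_eiem);
        }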
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 268b0f2a328e..ce89da0f654d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         while (ops) {
                 unsigned long which = ffz(~ops);
 
+                ops &= ~(1 << which);
+
                 switch (which) {
+                case IPI_NOP:
+#if (kDEBUG>=100)
+                        printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
+#endif /* kDEBUG */
+                        break;
+
                 case IPI_RESCHEDULE:
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_RESCHEDULE);
                         /*
                          * Reschedule callback.  Everything to be
                          * done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CALL_FUNC);
                         {
                                 volatile struct smp_call_struct *data;
                                 void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_START);
 #ifdef ENTRY_SYS_CPUS
                         p->state = STATE_RUNNING;
 #endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_STOP);
 #ifdef ENTRY_SYS_CPUS
 #else
                         halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_TEST);
                         break;
 
                 default:
                         printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
                                 this_cpu, which);
-                        ops &= ~(1 << which);
                         return IRQ_NONE;
                 } /* Switch */
         } /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
 void
 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
+void
+smp_send_all_nop(void)
+{
+        send_IPI_allbutself(IPI_NOP);
+}
+
 
 /**
  * Run a function on all other CPUs.
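Hoisting ops &= ~(1 << which) above the switch means every pending bit, recognised or not, is cleared exactly once instead of once per case label. A small standalone demo of the same bit walk follows; __builtin_ctzl() stands in for the kernel's ffz(~ops), and the enum values are illustrative:

        #include <stdio.h>

        enum ipi_message_type { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC };

        int main(void)
        {
                /* pretend three IPIs are pending, one of them bogus */
                unsigned long ops = (1UL << IPI_NOP) |
                                    (1UL << IPI_CALL_FUNC) | (1UL << 9);

                while (ops) {
                        /* lowest set bit, like ffz(~ops) */
                        unsigned long which = __builtin_ctzl(ops);

                        /* clear before dispatch, as in the patch */
                        ops &= ~(1UL << which);

                        switch (which) {
                        case IPI_NOP:
                                /* nothing to do; the interrupt exit
                                 * path refreshes the eiem mask */
                                printf("IPI_NOP\n");
                                break;
                        case IPI_CALL_FUNC:
                                printf("IPI_CALL_FUNC\n");
                                break;
                        default:
                                printf("Unknown IPI num: %lu\n", which);
                                break;
                        }
                }
                return 0;
        }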
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..a5191950ce00 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 #define cpu_logical_map(cpu) (cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
 
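Callers treat smp_send_all_nop() as fire-and-forget: unlike smp_call_function(), it neither sleeps nor spins waiting for the other CPUs, which is what makes it legal under an irq spinlock. A hedged usage sketch mirroring the new cpu_enable_irq():

        cpu_eiem |= EIEM_MASK(irq);     /* publish the new mask */
        smp_send_all_nop();             /* other CPUs reapply it at interrupt exit */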