| author | Scott Wood <scottwood@freescale.com> | 2006-12-06 16:16:24 -0500 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2006-12-08 01:10:22 -0500 |
| commit | 2c1d2f34a03ef0a89ff57da18b52fda9e6f09a10 (patch) | |
| tree | 049e2b4250fb2fec7c239778afce3991454d2ef2 /arch/powerpc/sysdev | |
| parent | d0e70341c05f6c31375530e0ae29b319153004a7 (diff) | |
[POWERPC] qe_ic: Do a sync when masking interrupts
This patch causes a sync to be done after masking a QE interrupt, to
ensure that the masking has completed before interrupts are enabled.
This allows the masking of the cascade IRQ to be removed without
causing spurious interrupts.
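As a reading aid, here is a minimal sketch of the pattern the patch
establishes: mask the source under the lock, then force the posted MMIO
write out before the unlock path re-enables interrupts. The function
name, lock, and arguments below are hypothetical stand-ins, not the
actual driver code (which uses qe_ic_read()/qe_ic_write() on the QE IC
mask register under qe_ic_lock).

#include <linux/spinlock.h>
#include <asm/io.h>

/* Hypothetical lock, standing in for qe_ic_lock. */
static DEFINE_SPINLOCK(example_lock);

/* Hypothetical helper: clears one enable bit in a mask register. */
static void example_mask_source(u32 __iomem *mask_reg, u32 bit)
{
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&example_lock, flags);

	/* Clear the source's enable bit; this MMIO store is posted. */
	temp = in_be32(mask_reg);
	out_be32(mask_reg, temp & ~bit);

	/* mb() is a sync on powerpc: it keeps the store from lingering
	 * in the processor when spin_unlock_irqrestore() re-enables
	 * interrupts.  Only reading the register back would guarantee
	 * the write has reached the device, but per the commit message
	 * the sync alone reduced the observed spurious interrupts to
	 * zero. */
	mb();

	spin_unlock_irqrestore(&example_lock, flags);
}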
The mask_and_ack function is also removed, and the mask_ack callback is
set to the mask function, as the two were identical.
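Concretely, in the 2.6.19-era irq_chip API this reduces to pointing
.mask_ack at the existing mask routine; a sketch of the resulting
wiring, with the callback bodies elided as forward declarations:

#include <linux/irq.h>

static void qe_ic_unmask_irq(unsigned int virq);
static void qe_ic_mask_irq(unsigned int virq);

static struct irq_chip qe_ic_irq_chip = {
	.typename = " QEIC ",
	.unmask = qe_ic_unmask_irq,
	.mask = qe_ic_mask_irq,
	/* ack is handled in the ISR, so plain masking suffices here */
	.mask_ack = qe_ic_mask_irq,
};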
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 40
1 file changed, 10 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 6995f51b9488..74e48d94f27c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -223,23 +223,15 @@ static void qe_ic_mask_irq(unsigned int virq)
 	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
 		    temp & ~qe_ic_info[src].mask);
 
-	spin_unlock_irqrestore(&qe_ic_lock, flags);
-}
-
-static void qe_ic_mask_irq_and_ack(unsigned int virq)
-{
-	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-	unsigned int src = virq_to_hw(virq);
-	unsigned long flags;
-	u32 temp;
-
-	spin_lock_irqsave(&qe_ic_lock, flags);
-
-	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-		    temp & ~qe_ic_info[src].mask);
-
-	/* There is nothing to do for ack here, ack is handled in ISR */
+	/* Flush the above write before enabling interrupts; otherwise,
+	 * spurious interrupts will sometimes happen.  To be 100% sure
+	 * that the write has reached the device before interrupts are
+	 * enabled, the mask register would have to be read back; however,
+	 * this is not required for correctness, only to avoid wasting
+	 * time on a large number of spurious interrupts.  In testing,
+	 * a sync reduced the observed spurious interrupts to zero.
+	 */
+	mb();
 
 	spin_unlock_irqrestore(&qe_ic_lock, flags);
 }
@@ -248,7 +240,7 @@ static struct irq_chip qe_ic_irq_chip = {
 	.typename = " QEIC ",
 	.unmask = qe_ic_unmask_irq,
 	.mask = qe_ic_mask_irq,
-	.mask_ack = qe_ic_mask_irq_and_ack,
+	.mask_ack = qe_ic_mask_irq,
 };
 
 static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
@@ -331,34 +323,22 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
 	return irq_linear_revmap(qe_ic->irqhost, irq);
 }
 
-/* FIXME: We mask all the QE Low interrupts while handling. We should
- * let other interrupt come in, but BAD interrupts are generated */
 void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = desc->handler_data;
-	struct irq_chip *chip = irq_desc[irq].chip;
-
 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
 
-	chip->mask_ack(irq);
 	if (cascade_irq != NO_IRQ)
 		generic_handle_irq(cascade_irq);
-	chip->unmask(irq);
 }
 
-/* FIXME: We mask all the QE High interrupts while handling. We should
- * let other interrupt come in, but BAD interrupts are generated */
 void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = desc->handler_data;
-	struct irq_chip *chip = irq_desc[irq].chip;
-
 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
 
-	chip->mask_ack(irq);
 	if (cascade_irq != NO_IRQ)
 		generic_handle_irq(cascade_irq);
-	chip->unmask(irq);
 }
 
 void __init qe_ic_init(struct device_node *node, unsigned int flags)