path: root/kernel
author     Thomas Gleixner <tglx@linutronix.de>    2010-03-09 13:45:54 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2010-03-10 11:45:14 -0500
commit     0b1adaa031a55e44f5dd942f234bf09d28e8a0d6 (patch)
tree       354aa6cbfbcd856226c543b9f263f87864245065 /kernel
parent     522dba7134d6b2e5821d3457f7941ec34f668e6d (diff)
genirq: Prevent oneshot irq thread race
Lars-Peter pointed out that the oneshot threaded interrupt handler code
has the following race:

  CPU0                               CPU1
  handle_level_irq(irq X)
   mask_ack_irq(irq X)
   handle_IRQ_event(irq X)
    wake_up(thread_handler)
                                     thread handler(irq X) runs
                                     finalize_oneshot(irq X)
                                       does not unmask due to
                                       !(desc->status & IRQ_MASKED)
  return from irq
   does not unmask due to
   (desc->status & IRQ_ONESHOT)

This leaves the interrupt line masked forever.

The reason for this is the inconsistent handling of the IRQ_MASKED
flag. Instead of setting it in the mask function, the oneshot support
sets the flag after waking up the irq thread.

The solution for this is to set/clear the IRQ_MASKED status whenever
we mask/unmask an interrupt line. That's the easy part, but that
cleanup opens another race:

  CPU0                               CPU1
  handle_level_irq(irq)
   mask_ack_irq(irq)
   handle_IRQ_event(irq)
    wake_up(thread_handler)
                                     thread handler(irq) runs
                                     finalize_oneshot_irq(irq)
                                      unmask(irq)
  irq triggers again
  handle_level_irq(irq)
   mask_ack_irq(irq)
   return from irq due to IRQ_INPROGRESS
                                     return from irq
                                      does not unmask due to
                                      (desc->status & IRQ_ONESHOT)

This requires that we synchronize finalize_oneshot_irq() with the
primary handler. If IRQ_INPROGRESS is set, we wait until the primary
handler on the other CPU has returned before unmasking the interrupt
line again.

We have probably never seen this problem because it does not happen on
UP, and on SMP the irqbalancer protects us by pinning the primary
handler and the thread to the same CPU.

Reported-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@kernel.org
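For context, a minimal sketch of the kind of setup this race affects is shown
below: a oneshot threaded handler registered with request_threaded_irq() and
IRQF_ONESHOT, where the line stays masked from the hard interrupt until the
thread handler has finished and irq_finalize_oneshot() unmasks it. The foo_*
names are hypothetical and only illustrate the usage pattern; they are not
part of this patch.

/*
 * Hypothetical driver sketch (not part of this patch): a oneshot
 * threaded interrupt handler of the kind affected by the race above.
 */
#include <linux/interrupt.h>

/* Runs in hard interrupt context; defers the real work to the thread. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Runs in the irq thread. With IRQF_ONESHOT the line is still masked
 * here and is only unmasked by irq_finalize_oneshot() afterwards.
 */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        /* slow device handling would go here */
        return IRQ_HANDLED;
}

static int foo_setup(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
                                    IRQF_ONESHOT, "foo", dev);
}

In this configuration the window described above opens when foo_thread_fn()
completes on one CPU before the flow handler (e.g. handle_level_irq()) has
returned on the other.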
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/irq/chip.c     31
-rw-r--r--   kernel/irq/manage.c   18
2 files changed, 40 insertions, 9 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d70394f12ee9..71eba24a39a2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
                 if (desc->chip->ack)
                         desc->chip->ack(irq);
         }
+        desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+        if (desc->chip->mask) {
+                desc->chip->mask(irq);
+                desc->status |= IRQ_MASKED;
+        }
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+        if (desc->chip->unmask) {
+                desc->chip->unmask(irq);
+                desc->status &= ~IRQ_MASKED;
+        }
 }
 
 /*
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         raw_spin_lock(&desc->lock);
         desc->status &= ~IRQ_INPROGRESS;
 
-        if (unlikely(desc->status & IRQ_ONESHOT))
-                desc->status |= IRQ_MASKED;
-        else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-                desc->chip->unmask(irq);
+        if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+                unmask_irq(desc, irq);
 out_unlock:
         raw_spin_unlock(&desc->lock);
 }
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
         action = desc->action;
         if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                 desc->status |= IRQ_PENDING;
-                if (desc->chip->mask)
-                        desc->chip->mask(irq);
+                mask_irq(desc, irq);
                 goto out;
         }
 
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                 irqreturn_t action_ret;
 
                 if (unlikely(!action)) {
-                        desc->chip->mask(irq);
+                        mask_irq(desc, irq);
                         goto out_unlock;
                 }
 
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                 if (unlikely((desc->status &
                                (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                               (IRQ_PENDING | IRQ_MASKED))) {
-                        desc->chip->unmask(irq);
-                        desc->status &= ~IRQ_MASKED;
+                        unmask_irq(desc, irq);
                 }
 
                 desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c7..69a3d7b9414c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -483,8 +483,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
         chip_bus_lock(irq, desc);
         raw_spin_lock_irq(&desc->lock);
+
+        /*
+         * Implausible though it may be we need to protect us against
+         * the following scenario:
+         *
+         * The thread is faster done than the hard interrupt handler
+         * on the other CPU. If we unmask the irq line then the
+         * interrupt can come in again and masks the line, leaves due
+         * to IRQ_INPROGRESS and the irq line is masked forever.
+         */
+        if (unlikely(desc->status & IRQ_INPROGRESS)) {
+                raw_spin_unlock_irq(&desc->lock);
+                chip_bus_sync_unlock(irq, desc);
+                cpu_relax();
+                goto again;
+        }
+
         if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
                 desc->status &= ~IRQ_MASKED;
                 desc->chip->unmask(irq);