Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c..69a3d7b9414 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -483,8 +483,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
        chip_bus_lock(irq, desc);
        raw_spin_lock_irq(&desc->lock);
+
+       /*
+        * Implausible though it may be we need to protect us against
+        * the following scenario:
+        *
+        * The thread is faster done than the hard interrupt handler
+        * on the other CPU. If we unmask the irq line then the
+        * interrupt can come in again and masks the line, leaves due
+        * to IRQ_INPROGRESS and the irq line is masked forever.
+        */
+       if (unlikely(desc->status & IRQ_INPROGRESS)) {
+               raw_spin_unlock_irq(&desc->lock);
+               chip_bus_sync_unlock(irq, desc);
+               cpu_relax();
+               goto again;
+       }
+
        if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
                desc->status &= ~IRQ_MASKED;
                desc->chip->unmask(irq);
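
A minimal stand-alone sketch of the retry pattern this hunk introduces, for illustration only: the names used here (fake_desc, FAKE_INPROGRESS, cpu_relax_hint, finalize_oneshot_sketch) are made up for the example, and C11 atomics stand in for desc->lock and chip_bus_lock(), so this is not the kernel implementation. It demonstrates the same idea: if the hard interrupt handler on another CPU is still marked in progress, back off, relax the CPU and retry before unmasking, instead of unmasking a line that the handler is about to leave masked.

#include <stdatomic.h>

#define FAKE_INPROGRESS 0x01u   /* hard handler still running elsewhere */
#define FAKE_MASKED     0x02u   /* line is currently masked */
#define FAKE_DISABLED   0x04u   /* line is administratively disabled */

struct fake_desc {
        atomic_uint status;     /* stand-in for desc->status, normally guarded by desc->lock */
};

static void cpu_relax_hint(void)
{
        /* placeholder for cpu_relax(); on real hardware a pause/yield hint */
}

void finalize_oneshot_sketch(struct fake_desc *desc)
{
        unsigned int status;

again:
        /* The real code takes chip_bus_lock() and raw_spin_lock_irq() here. */
        status = atomic_load(&desc->status);

        /*
         * If the hard handler on the other CPU is still in progress,
         * drop everything, relax and retry rather than unmasking now.
         */
        if (status & FAKE_INPROGRESS) {
                cpu_relax_hint();
                goto again;
        }

        /* Safe to unmask: not disabled, and currently masked. */
        if (!(status & FAKE_DISABLED) && (status & FAKE_MASKED))
                atomic_fetch_and(&desc->status, ~FAKE_MASKED);  /* then chip->unmask() */
}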