aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/irq/chip.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-02-07 04:34:30 -0500
committerThomas Gleixner <tglx@linutronix.de>2011-02-19 06:58:09 -0500
commitfe200ae48ef5c79bf7941fe8046ff9505c570ff6 (patch)
tree767d2cf011437a266a655ce2ec39360cb85f7f28 /kernel/irq/chip.c
parentd05c65fff0ef672be75429266751f0e015b54d94 (diff)
genirq: Mark polled irqs and defer the real handler
With the chip.end() function gone we might run into a situation where a poll call runs and the real interrupt comes in, sees IRQ_INPROGRESS and disables the line. That might be a perfectly working one, which will then be masked forever. So mark them polled while the poll runs. When the real handler sees IRQ_INPROGRESS it checks the poll flag and waits for the polling to complete. Add the necessary amount of sanity checks to it to avoid deadlocks. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--kernel/irq/chip.c26
1 file changed, 20 insertions, 6 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 622b55ac0e09..31258782742c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -448,6 +448,13 @@ out_unlock:
448} 448}
449EXPORT_SYMBOL_GPL(handle_nested_irq); 449EXPORT_SYMBOL_GPL(handle_nested_irq);
450 450
451static bool irq_check_poll(struct irq_desc *desc)
452{
453 if (!(desc->status & IRQ_POLL_INPROGRESS))
454 return false;
455 return irq_wait_for_poll(desc);
456}
457
451/** 458/**
452 * handle_simple_irq - Simple and software-decoded IRQs. 459 * handle_simple_irq - Simple and software-decoded IRQs.
453 * @irq: the interrupt number 460 * @irq: the interrupt number
@@ -469,7 +476,9 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
469 raw_spin_lock(&desc->lock); 476 raw_spin_lock(&desc->lock);
470 477
471 if (unlikely(desc->status & IRQ_INPROGRESS)) 478 if (unlikely(desc->status & IRQ_INPROGRESS))
472 goto out_unlock; 479 if (!irq_check_poll(desc))
480 goto out_unlock;
481
473 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 482 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
474 kstat_incr_irqs_this_cpu(irq, desc); 483 kstat_incr_irqs_this_cpu(irq, desc);
475 484
@@ -510,7 +519,9 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
510 mask_ack_irq(desc); 519 mask_ack_irq(desc);
511 520
512 if (unlikely(desc->status & IRQ_INPROGRESS)) 521 if (unlikely(desc->status & IRQ_INPROGRESS))
513 goto out_unlock; 522 if (!irq_check_poll(desc))
523 goto out_unlock;
524
514 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 525 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
515 kstat_incr_irqs_this_cpu(irq, desc); 526 kstat_incr_irqs_this_cpu(irq, desc);
516 527
@@ -558,7 +569,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
558 raw_spin_lock(&desc->lock); 569 raw_spin_lock(&desc->lock);
559 570
560 if (unlikely(desc->status & IRQ_INPROGRESS)) 571 if (unlikely(desc->status & IRQ_INPROGRESS))
561 goto out; 572 if (!irq_check_poll(desc))
573 goto out;
562 574
563 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 575 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
564 kstat_incr_irqs_this_cpu(irq, desc); 576 kstat_incr_irqs_this_cpu(irq, desc);
@@ -620,9 +632,11 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
620 */ 632 */
621 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 633 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
622 !desc->action)) { 634 !desc->action)) {
623 desc->status |= (IRQ_PENDING | IRQ_MASKED); 635 if (!irq_check_poll(desc)) {
624 mask_ack_irq(desc); 636 desc->status |= (IRQ_PENDING | IRQ_MASKED);
625 goto out_unlock; 637 mask_ack_irq(desc);
638 goto out_unlock;
639 }
626 } 640 }
627 kstat_incr_irqs_this_cpu(irq, desc); 641 kstat_incr_irqs_this_cpu(irq, desc);
628 642