author		Thomas Gleixner <tglx@linutronix.de>	2011-03-28 10:13:24 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-03-28 10:55:11 -0400
commit		0521c8fbb3da45c2a58cd551ca6e9644983f6028 (patch)
tree		d2ed3452a75f1d3ff516cd02c86f4371db81e06e /kernel/irq
parent		32f4125ebffee4f3c4dbc6a437fc656129eb9e60 (diff)
genirq: Provide edge_eoi flow handler
This is a replacement for the cell flow handler, which is in the way of
cleanups. Must be selected to avoid general bloat.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/Kconfig |  4 ++++
-rw-r--r--	kernel/irq/chip.c  | 45 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 00f2c037267a..72606ba10b14 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -51,6 +51,10 @@ config HARDIRQS_SW_RESEND
 config IRQ_PREFLOW_FASTEOI
 	bool
 
+# Edge style eoi based handler (cell)
+config IRQ_EDGE_EOI_HANDLER
+	bool
+
 # Support forced irq threading
 config IRQ_FORCED_THREADING
 	bool
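IRQ_EDGE_EOI_HANDLER is a hidden bool with no prompt, so it is only compiled in when some other Kconfig entry selects it; that is what "must be selected to avoid general bloat" means in the changelog. A minimal sketch of such a select, with MY_CELL_PLATFORM as a hypothetical platform option that is not part of this commit:

config MY_CELL_PLATFORM
	bool "Example platform needing the edge eoi flow handler"
	select IRQ_EDGE_EOI_HANDLER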
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e00bdc56269f..451d1e81c15c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -604,6 +604,51 @@ out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
 
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ *	handle_edge_eoi_irq - edge eoi type IRQ handler
+ *	@irq: the interrupt number
+ *	@desc: the interrupt description structure for this irq
+ *
+ * Similar as the above handle_edge_irq, but using eoi and w/o the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	raw_spin_lock(&desc->lock);
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+	/*
+	 * If we're currently running this IRQ, or its disabled,
+	 * we shouldn't process the IRQ. Mark it pending, handle
+	 * the necessary masking and go out
+	 */
+	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+		if (!irq_check_poll(desc)) {
+			desc->istate |= IRQS_PENDING;
+			goto out_eoi;
+		}
+	}
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	do {
+		if (unlikely(!desc->action))
+			goto out_eoi;
+
+		handle_irq_event(desc);
+
+	} while ((desc->istate & IRQS_PENDING) &&
+		 !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+	chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+#endif
+
 /**
  *	handle_percpu_irq - Per CPU local irq handler
  *	@irq: the interrupt number
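For context on how a handler like this gets wired up (not shown in this commit): handle_edge_eoi_irq only ever invokes chip->irq_eoi(), so the irqchip behind it needs a working .irq_eoi callback but no mask/unmask. A minimal sketch under that assumption, with my_chip_eoi, my_edge_eoi_chip and my_platform_init_irq as hypothetical names:

#include <linux/irq.h>

/* Hypothetical EOI callback; the actual hardware write is assumed. */
static void my_chip_eoi(struct irq_data *d)
{
	/* controller-specific EOI register write goes here (assumed) */
}

/*
 * An irq_chip for a controller that delivers edge interrupts and
 * wants an explicit eoi, but has no usable mask/unmask -- the case
 * this flow handler is built for.
 */
static struct irq_chip my_edge_eoi_chip = {
	.name		= "my-edge-eoi",
	.irq_eoi	= my_chip_eoi,
};

static void my_platform_init_irq(unsigned int irq)
{
	/*
	 * Install handle_edge_eoi_irq as the flow handler. It issues
	 * chip->irq_eoi() on every exit path, including the "mark
	 * IRQS_PENDING and bail out" case.
	 */
	irq_set_chip_and_handler(irq, &my_edge_eoi_chip,
				 handle_edge_eoi_irq);
}

The do/while loop is what lets the chip get away without mask/unmask: an edge that arrives while handle_irq_event() runs gets marked IRQS_PENDING by the in-progress branch, and the loop replays it before releasing desc->lock.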