diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2014-08-29 07:39:37 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2014-09-01 07:48:43 -0400 |
commit | c7bd3ec0531aa636ad57ed9f27e637cbd247e64a (patch) | |
tree | fa2397fb34888b9aff338d178982048cdae29aa1 | |
parent | c3d7acd0273edf0ee50ccf85167acd7ae0759eda (diff) |
genirq: Create helper for flow handler entry check
All flow handlers - except the per cpu ones - check for an interrupt
in progress and for a possible concurrent poll from another cpu.
Create a helper function for the repeated code pattern.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- | kernel/irq/chip.c | 48 |
1 file changed, 20 insertions, 28 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f10c2e58a786..6baf86085571 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -342,6 +342,13 @@ static bool irq_check_poll(struct irq_desc *desc) | |||
342 | return irq_wait_for_poll(desc); | 342 | return irq_wait_for_poll(desc); |
343 | } | 343 | } |
344 | 344 | ||
345 | static bool irq_may_run(struct irq_desc *desc) | ||
346 | { | ||
347 | if (!irqd_irq_inprogress(&desc->irq_data)) | ||
348 | return true; | ||
349 | return irq_check_poll(desc); | ||
350 | } | ||
351 | |||
345 | /** | 352 | /** |
346 | * handle_simple_irq - Simple and software-decoded IRQs. | 353 | * handle_simple_irq - Simple and software-decoded IRQs. |
347 | * @irq: the interrupt number | 354 | * @irq: the interrupt number |
@@ -359,9 +366,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
359 | { | 366 | { |
360 | raw_spin_lock(&desc->lock); | 367 | raw_spin_lock(&desc->lock); |
361 | 368 | ||
362 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 369 | if (!irq_may_run(desc)) |
363 | if (!irq_check_poll(desc)) | 370 | goto out_unlock; |
364 | goto out_unlock; | ||
365 | 371 | ||
366 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 372 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
367 | kstat_incr_irqs_this_cpu(irq, desc); | 373 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -412,9 +418,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
412 | raw_spin_lock(&desc->lock); | 418 | raw_spin_lock(&desc->lock); |
413 | mask_ack_irq(desc); | 419 | mask_ack_irq(desc); |
414 | 420 | ||
415 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 421 | if (!irq_may_run(desc)) |
416 | if (!irq_check_poll(desc)) | 422 | goto out_unlock; |
417 | goto out_unlock; | ||
418 | 423 | ||
419 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 424 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
420 | kstat_incr_irqs_this_cpu(irq, desc); | 425 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -485,9 +490,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
485 | 490 | ||
486 | raw_spin_lock(&desc->lock); | 491 | raw_spin_lock(&desc->lock); |
487 | 492 | ||
488 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 493 | if (!irq_may_run(desc)) |
489 | if (!irq_check_poll(desc)) | 494 | goto out; |
490 | goto out; | ||
491 | 495 | ||
492 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 496 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
493 | kstat_incr_irqs_this_cpu(irq, desc); | 497 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -541,16 +545,10 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
541 | 545 | ||
542 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 546 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
543 | 547 | ||
544 | /* | 548 | if (!irq_may_run(desc)) { |
545 | * If the handler is currently running, mark it pending, | 549 | desc->istate |= IRQS_PENDING; |
546 | * handle the necessary masking and go out | 550 | mask_ack_irq(desc); |
547 | */ | 551 | goto out_unlock; |
548 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { | ||
549 | if (!irq_check_poll(desc)) { | ||
550 | desc->istate |= IRQS_PENDING; | ||
551 | mask_ack_irq(desc); | ||
552 | goto out_unlock; | ||
553 | } | ||
554 | } | 552 | } |
555 | 553 | ||
556 | /* | 554 | /* |
@@ -612,15 +610,9 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | |||
612 | 610 | ||
613 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 611 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
614 | 612 | ||
615 | /* | 613 | if (!irq_may_run(desc)) { |
616 | * If the handler is currently running, mark it pending, | 614 | desc->istate |= IRQS_PENDING; |
617 | * handle the necessary masking and go out | 615 | goto out_eoi; |
618 | */ | ||
619 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { | ||
620 | if (!irq_check_poll(desc)) { | ||
621 | desc->istate |= IRQS_PENDING; | ||
622 | goto out_eoi; | ||
623 | } | ||
624 | } | 616 | } |
625 | 617 | ||
626 | /* | 618 | /* |