Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/chip.c      | 26
-rw-r--r--  kernel/irq/internals.h | 11
-rw-r--r--  kernel/irq/spurious.c  | 51
3 files changed, 60 insertions, 28 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 622b55ac0e09..31258782742c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -448,6 +448,13 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
+static bool irq_check_poll(struct irq_desc *desc)
+{
+        if (!(desc->status & IRQ_POLL_INPROGRESS))
+                return false;
+        return irq_wait_for_poll(desc);
+}
+
 /**
  * handle_simple_irq - Simple and software-decoded IRQs.
  * @irq: the interrupt number
@@ -469,7 +476,9 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
         raw_spin_lock(&desc->lock);
 
         if (unlikely(desc->status & IRQ_INPROGRESS))
-                goto out_unlock;
+                if (!irq_check_poll(desc))
+                        goto out_unlock;
+
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
         kstat_incr_irqs_this_cpu(irq, desc);
 
@@ -510,7 +519,9 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         mask_ack_irq(desc);
 
         if (unlikely(desc->status & IRQ_INPROGRESS))
-                goto out_unlock;
+                if (!irq_check_poll(desc))
+                        goto out_unlock;
+
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
         kstat_incr_irqs_this_cpu(irq, desc);
 
@@ -558,7 +569,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
         raw_spin_lock(&desc->lock);
 
         if (unlikely(desc->status & IRQ_INPROGRESS))
-                goto out;
+                if (!irq_check_poll(desc))
+                        goto out;
 
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
         kstat_incr_irqs_this_cpu(irq, desc);
@@ -620,9 +632,11 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
          */
         if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                      !desc->action)) {
-                desc->status |= (IRQ_PENDING | IRQ_MASKED);
-                mask_ack_irq(desc);
-                goto out_unlock;
+                if (!irq_check_poll(desc)) {
+                        desc->status |= (IRQ_PENDING | IRQ_MASKED);
+                        mask_ack_irq(desc);
+                        goto out_unlock;
+                }
         }
         kstat_incr_irqs_this_cpu(irq, desc);
 
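The chip.c hunks above all follow the same pattern: a flow handler that finds IRQ_INPROGRESS set no longer bails out unconditionally, but first asks the new irq_check_poll() whether the in-progress marker belongs to the spurious-IRQ poller and, if so, whether the line is still worth handling. The following standalone sketch only illustrates that branch structure; it is not kernel code, every demo_* name is invented, and the still_active field stands in for the real decision made by irq_wait_for_poll().

/* Minimal sketch of the "check the poller before bailing out" pattern. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_INPROGRESS      0x1u
#define DEMO_POLL_INPROGRESS 0x2u

struct demo_desc {
        unsigned int status;
        bool still_active;      /* result of waiting for the poller */
};

/* mirrors irq_check_poll(): only wait if the poller owns the line */
static bool demo_check_poll(struct demo_desc *d)
{
        if (!(d->status & DEMO_POLL_INPROGRESS))
                return false;
        return d->still_active; /* irq_wait_for_poll() stand-in */
}

static const char *demo_handle(struct demo_desc *d)
{
        if (d->status & DEMO_INPROGRESS)
                if (!demo_check_poll(d))
                        return "ignored (real handler already running)";

        return "handled";
}

int main(void)
{
        struct demo_desc in_hardirq = { .status = DEMO_INPROGRESS };
        struct demo_desc in_poll = {
                .status = DEMO_INPROGRESS | DEMO_POLL_INPROGRESS,
                .still_active = true,
        };

        printf("nested hard irq: %s\n", demo_handle(&in_hardirq));
        printf("poll in progress: %s\n", demo_handle(&in_poll));
        return 0;
}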
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b5bfa24aa6a6..0eff7e92b1a9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -28,6 +28,7 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+bool irq_wait_for_poll(struct irq_desc *desc);
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -47,16 +48,6 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static inline void irq_end(unsigned int irq, struct irq_desc *desc)
-{
-        if (desc->irq_data.chip && desc->irq_data.chip->end)
-                desc->irq_data.chip->end(irq);
-}
-#else
-static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
-#endif
-
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 56ff8fffb8b0..f749d29bfd81 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -25,12 +25,44 @@ static int irq_poll_cpu;
 static atomic_t irq_poll_active;
 
 /*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+                      "irq poll in progress on cpu %d for irq %d\n",
+                      smp_processor_id(), desc->irq_data.irq))
+                return false;
+
+#ifdef CONFIG_SMP
+        do {
+                raw_spin_unlock(&desc->lock);
+                while (desc->status & IRQ_INPROGRESS)
+                        cpu_relax();
+                raw_spin_lock(&desc->lock);
+        } while (desc->status & IRQ_INPROGRESS);
+        /* Might have been disabled in meantime */
+        return !(desc->status & IRQ_DISABLED) && desc->action;
+#else
+        return false;
+#endif
+}
+
+/*
  * Recovery handler for misrouted interrupts.
  */
 static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
         struct irqaction *action;
-        int ok = 0, work = 0;
+        int ok = 0;
 
         raw_spin_lock(&desc->lock);
 
@@ -64,10 +96,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
                 goto out;
         }
 
-        /* Honour the normal IRQ locking */
-        desc->status |= IRQ_INPROGRESS;
+        /* Honour the normal IRQ locking and mark it poll in progress */
+        desc->status |= IRQ_INPROGRESS | IRQ_POLL_INPROGRESS;
         do {
-                work++;
                 desc->status &= ~IRQ_PENDING;
                 raw_spin_unlock(&desc->lock);
                 if (handle_IRQ_event(irq, action) != IRQ_NONE)
@@ -76,14 +107,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
                 action = desc->action;
         } while ((desc->status & IRQ_PENDING) && action);
 
-        desc->status &= ~IRQ_INPROGRESS;
-        /*
-         * If we did actual work for the real IRQ line we must let the
-         * IRQ controller clean up too
-         */
-        if (work > 1)
-                irq_end(irq, desc);
-
+        desc->status &= ~(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS);
 out:
         raw_spin_unlock(&desc->lock);
         return ok;
@@ -238,6 +262,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
                     irqreturn_t action_ret)
 {
+        if (desc->status & IRQ_POLL_INPROGRESS)
+                return;
+
         if (unlikely(action_ret != IRQ_HANDLED)) {
                 /*
                  * If we are seeing only the odd spurious IRQ caused by
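For illustration only: the new irq_wait_for_poll() drops desc->lock, spins until the poller clears IRQ_INPROGRESS, re-takes the lock and rechecks whether the line is still enabled and still has an action. The standalone user-space sketch below mimics that handshake with a pthread mutex and a C11 atomic standing in for desc->lock and desc->status; all fake_* names are invented and the synchronization is only an approximation of the kernel's locking rules, not a copy of them.

/*
 * Sketch of the wait-for-poller handshake: the waiter calls the helper
 * with the "descriptor lock" held, the helper releases it while spinning
 * so the poller can make progress, then re-acquires it and rechecks.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_INPROGRESS      0x1u
#define FAKE_POLL_INPROGRESS 0x2u
#define FAKE_DISABLED        0x4u

struct fake_desc {
        pthread_mutex_t lock;   /* stands in for desc->lock */
        atomic_uint status;     /* stands in for desc->status */
        bool has_action;        /* stands in for desc->action != NULL */
};

/* Waiter side: mirrors the do { unlock; spin; lock; } while (...) loop. */
static bool fake_wait_for_poll(struct fake_desc *d)
{
        do {
                pthread_mutex_unlock(&d->lock);
                while (atomic_load(&d->status) & FAKE_INPROGRESS)
                        sched_yield();          /* cpu_relax() stand-in */
                pthread_mutex_lock(&d->lock);
        } while (atomic_load(&d->status) & FAKE_INPROGRESS);
        /* might have been disabled in the meantime */
        return !(atomic_load(&d->status) & FAKE_DISABLED) && d->has_action;
}

/* Poller side: mark the line, run the "handler", clear the marks. */
static void *poller(void *arg)
{
        struct fake_desc *d = arg;

        pthread_mutex_lock(&d->lock);
        atomic_fetch_or(&d->status, FAKE_INPROGRESS | FAKE_POLL_INPROGRESS);
        pthread_mutex_unlock(&d->lock);

        /* the polled handler would run here, outside the lock */

        pthread_mutex_lock(&d->lock);
        atomic_fetch_and(&d->status,
                         ~(FAKE_INPROGRESS | FAKE_POLL_INPROGRESS));
        pthread_mutex_unlock(&d->lock);
        return NULL;
}

int main(void)
{
        struct fake_desc d = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .status = FAKE_INPROGRESS | FAKE_POLL_INPROGRESS,
                .has_action = true,
        };
        pthread_t t;

        pthread_create(&t, NULL, poller, &d);

        /* like the flow handlers, call the wait helper with the lock held */
        pthread_mutex_lock(&d.lock);
        printf("handler may run: %s\n", fake_wait_for_poll(&d) ? "yes" : "no");
        pthread_mutex_unlock(&d.lock);

        pthread_join(t, NULL);
        return 0;
}

Build with cc -pthread; the waiter prints whether the handler may still run once the poller has finished, which is the same yes/no answer irq_wait_for_poll() gives the flow handlers above.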