Diffstat (limited to 'kernel/irq'):

 kernel/irq/handle.c | 76
 kernel/irq/manage.c | 54
 2 files changed, 111 insertions(+), 19 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index b110c835e070..517561fc7317 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -51,6 +51,68 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
                "but no thread function available.", irq, action->name);
 }
 
+static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+{
+        /*
+         * Wake up the handler thread for this action. In case the
+         * thread crashed and was killed we just pretend that we
+         * handled the interrupt. The hardirq handler has disabled the
+         * device interrupt, so no irq storm is lurking. If the
+         * RUNTHREAD bit is already set, nothing to do.
+         */
+        if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+            test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+                return;
+
+        /*
+         * It's safe to OR the mask lockless here. We have only two
+         * places which write to threads_oneshot: This code and the
+         * irq thread.
+         *
+         * This code is the hard irq context and can never run on two
+         * cpus in parallel. If it ever does we have more serious
+         * problems than this bitmask.
+         *
+         * The irq threads of this irq which clear their "running" bit
+         * in threads_oneshot are serialized via desc->lock against
+         * each other and they are serialized against this code by
+         * IRQS_INPROGRESS.
+         *
+         * Hard irq handler:
+         *
+         *        spin_lock(desc->lock);
+         *        desc->state |= IRQS_INPROGRESS;
+         *        spin_unlock(desc->lock);
+         *        set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+         *        desc->threads_oneshot |= mask;
+         *        spin_lock(desc->lock);
+         *        desc->state &= ~IRQS_INPROGRESS;
+         *        spin_unlock(desc->lock);
+         *
+         * irq thread:
+         *
+         * again:
+         *        spin_lock(desc->lock);
+         *        if (desc->state & IRQS_INPROGRESS) {
+         *                spin_unlock(desc->lock);
+         *                while (desc->state & IRQS_INPROGRESS)
+         *                        cpu_relax();
+         *                goto again;
+         *        }
+         *        if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+         *                desc->threads_oneshot &= ~mask;
+         *        spin_unlock(desc->lock);
+         *
+         * So either the thread waits for us to clear IRQS_INPROGRESS
+         * or we are waiting in the flow handler for desc->lock to be
+         * released before we reach this point. The thread also checks
+         * IRQTF_RUNTHREAD under desc->lock. If set it leaves
+         * threads_oneshot untouched and runs the thread another time.
+         */
+        desc->threads_oneshot |= action->thread_mask;
+        wake_up_process(action->thread);
+}
+
 irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
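The comment above is the heart of the patch. As a reading aid, here is a hypothetical user-space model of the same protocol: pthread mutexes stand in for desc->lock, an int flag for IRQS_INPROGRESS, and GCC __atomic builtins for the kernel's test_bit()/test_and_set_bit(). Every identifier below is invented for this sketch; none of it is kernel code.

/* Hypothetical user-space model of the threads_oneshot protocol. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define RUNTHREAD (1UL << 0)            /* models IRQTF_RUNTHREAD */

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static int inprogress;                  /* models IRQS_INPROGRESS */
static unsigned long thread_flags;      /* models action->thread_flags */
static unsigned long threads_oneshot;   /* models desc->threads_oneshot */

/* Hard irq side: mark the thread runnable, then OR the oneshot mask.
 * IRQS_INPROGRESS is set around both writes, so the thread side
 * cannot clear the mask concurrently. */
static void hardirq_side(unsigned long mask)
{
        unsigned long old;

        pthread_mutex_lock(&desc_lock);
        inprogress = 1;
        pthread_mutex_unlock(&desc_lock);

        old = __atomic_fetch_or(&thread_flags, RUNTHREAD, __ATOMIC_SEQ_CST);
        if (!(old & RUNTHREAD))
                threads_oneshot |= mask;        /* lockless OR, safe here */

        pthread_mutex_lock(&desc_lock);
        inprogress = 0;
        pthread_mutex_unlock(&desc_lock);
}

/* Thread side: wait for the hard irq to finish, then clear the mask
 * only if no new run was requested in the meantime. */
static void thread_side(unsigned long mask)
{
again:
        pthread_mutex_lock(&desc_lock);
        if (inprogress) {
                pthread_mutex_unlock(&desc_lock);
                while (__atomic_load_n(&inprogress, __ATOMIC_SEQ_CST))
                        sched_yield();          /* stands in for cpu_relax() */
                goto again;
        }
        if (!(__atomic_load_n(&thread_flags, __ATOMIC_SEQ_CST) & RUNTHREAD))
                threads_oneshot &= ~mask;
        pthread_mutex_unlock(&desc_lock);
}

int main(void)
{
        hardirq_side(0x1UL);            /* request a thread run */
        /* The thread clears RUNTHREAD before handling; model that: */
        __atomic_fetch_and(&thread_flags, ~RUNTHREAD, __ATOMIC_SEQ_CST);
        thread_side(0x1UL);             /* finalize: clears the mask */
        printf("threads_oneshot = %#lx\n", threads_oneshot);
        return 0;
}

The demo runs the two sides sequentially on one thread, which is enough to show the state transitions; the interesting property in the kernel is that the same sequence is safe when the two sides race.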
@@ -85,19 +147,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
                        break;
                }
 
-                /*
-                 * Wake up the handler thread for this
-                 * action. In case the thread crashed and was
-                 * killed we just pretend that we handled the
-                 * interrupt. The hardirq handler above has
-                 * disabled the device interrupt, so no irq
-                 * storm is lurking.
-                 */
-                if (likely(!test_bit(IRQTF_DIED,
-                                     &action->thread_flags))) {
-                        set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
-                        wake_up_process(action->thread);
-                }
+                irq_wake_thread(desc, action);
 
                /* Fall through to add to randomness */
        case IRQ_HANDLED:
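For orientation, the hunk above edits the return-value switch in handle_irq_event_percpu(). A condensed sketch of the post-patch flow follows; the local holding the handler's return value is called res here for illustration (its declaration is outside this diff), and elided parts are marked:

        switch (res) {
        case IRQ_WAKE_THREAD:
                /* ... sanity check that a thread handler exists elided
                 * (see warn_no_thread() above) ... */

                irq_wake_thread(desc, action);

                /* Fall through to add to randomness */
        case IRQ_HANDLED:
                /* ... accounting elided ... */
        }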
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 01f8a9519e63..2301de19ac7d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -617,8 +617,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+                                 struct irqaction *action, bool force)
 {
+        if (!(desc->istate & IRQS_ONESHOT))
+                return;
 again:
         chip_bus_lock(desc);
         raw_spin_lock_irq(&desc->lock);
@@ -631,6 +634,11 @@ again:
          * on the other CPU. If we unmask the irq line then the
          * interrupt can come in again and masks the line, leaves due
          * to IRQS_INPROGRESS and the irq line is masked forever.
+         *
+         * This also serializes the state of shared oneshot handlers
+         * versus "desc->threads_oneshot |= action->thread_mask;" in
+         * irq_wake_thread(). See the comment there which explains the
+         * serialization.
          */
         if (unlikely(desc->istate & IRQS_INPROGRESS)) {
                 raw_spin_unlock_irq(&desc->lock);
@@ -639,11 +647,23 @@ again:
                 goto again;
         }
 
-        if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
+        /*
+         * Now check again whether the thread should run. Otherwise
+         * we would clear the threads_oneshot bit of this thread
+         * which was just set.
+         */
+        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+                goto out_unlock;
+
+        desc->threads_oneshot &= ~action->thread_mask;
+
+        if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+            (desc->istate & IRQS_MASKED)) {
                 irq_compat_clr_masked(desc);
                 desc->istate &= ~IRQS_MASKED;
                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
         }
+out_unlock:
         raw_spin_unlock_irq(&desc->lock);
         chip_bus_sync_unlock(desc);
 }
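Since the changes to irq_finalize_oneshot() are spread across three hunks, here is the function as it reads with this patch applied, assembled from the diff above; lines the diff does not show are marked as elided rather than reconstructed:

static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /* ... explanatory comment elided, see the hunks above ... */
        if (unlikely(desc->istate & IRQS_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
                /* ... unlock/retry path elided ... */
                goto again;
        }

        /*
         * Now check again whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread
         * which was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
            (desc->istate & IRQS_MASKED)) {
                irq_compat_clr_masked(desc);
                desc->istate &= ~IRQS_MASKED;
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}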
@@ -691,7 +711,7 @@ static int irq_thread(void *data)
         };
         struct irqaction *action = data;
         struct irq_desc *desc = irq_to_desc(action->irq);
-        int wake, oneshot = desc->istate & IRQS_ONESHOT;
+        int wake;
 
         sched_setscheduler(current, SCHED_FIFO, &param);
         current->irqaction = action;
@@ -719,8 +739,7 @@ static int irq_thread(void *data)
 
                         action->thread_fn(action->irq, action->dev_id);
 
-                        if (oneshot)
-                                irq_finalize_oneshot(action->irq, desc);
+                        irq_finalize_oneshot(desc, action, false);
                 }
 
                 wake = atomic_dec_and_test(&desc->threads_active);
@@ -729,6 +748,9 @@ static int irq_thread(void *data)
                         wake_up(&desc->wait_for_threads);
         }
 
+        /* Prevent a stale desc->threads_oneshot */
+        irq_finalize_oneshot(desc, action, true);
+
         /*
          * Clear irqaction. Otherwise exit_irq_thread() would make
          * fuzz about an active irq thread going into nirvana.
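Taken together, the call sites pass force=false from the handler loop, where a freshly set IRQTF_RUNTHREAD must not be clobbered, and force=true on the exit paths (here and, below, in exit_irq_thread()). A skeleton of irq_thread() after the patch, assembled from the hunks above with all unchanged code elided:

static int irq_thread(void *data)
{
        /* ... setup elided; the old 'oneshot' local is gone ... */

        while (!irq_wait_for_interrupt(action)) {
                /* ... affinity check and accounting elided ... */

                        action->thread_fn(action->irq, action->dev_id);

                        /* Unconditional now; returns early unless ONESHOT */
                        irq_finalize_oneshot(desc, action, false);

                /* ... wakeup of waiters elided ... */
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /* ... irqaction teardown elided ... */
}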
@@ -743,6 +765,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
         struct task_struct *tsk = current;
+        struct irq_desc *desc;
 
         if (!tsk->irqaction)
                 return;
@@ -751,6 +774,14 @@ void exit_irq_thread(void)
                "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
                tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+        desc = irq_to_desc(tsk->irqaction->irq);
+
+        /*
+         * Prevent a stale desc->threads_oneshot. Must be called
+         * before setting the IRQTF_DIED flag.
+         */
+        irq_finalize_oneshot(desc, tsk->irqaction, true);
+
         /*
          * Set the THREAD DIED flag to prevent further wakeups of the
          * soon to be gone threaded handler.
@@ -767,7 +798,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
         struct irqaction *old, **old_ptr;
         const char *old_name = NULL;
-        unsigned long flags;
+        unsigned long flags, thread_mask = 0;
         int ret, nested, shared = 0;
         cpumask_var_t mask;
 
@@ -865,12 +896,23 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                 /* add new interrupt at end of irq queue */
                 do {
+                        thread_mask |= old->thread_mask;
                         old_ptr = &old->next;
                         old = *old_ptr;
                 } while (old);
                 shared = 1;
         }
 
+        /*
+         * Set up the thread mask for this irqaction. Unlikely to have
+         * 32 or 64 irqs sharing one line, but who knows.
+         */
+        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+                ret = -EBUSY;
+                goto out_mask;
+        }
+        new->thread_mask = 1 << ffz(thread_mask);
+
         if (!shared) {
                 irq_chip_set_defaults(desc->irq_data.chip);
 
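The allocation scheme above gives each irqaction on a shared line a distinct bit: OR together the masks already taken, then claim the first zero bit via ffz(). A hypothetical stand-alone demonstration of the same idiom; ffz() is emulated here with a GCC builtin, and all names are invented for this sketch:

/* Demo of the ffz()-based thread_mask allocation: find-first-zero is
 * count-trailing-zeros applied to the complement. */
#include <stdio.h>

static unsigned long ffz_demo(unsigned long x)
{
        return __builtin_ctzl(~x);      /* caller must ensure x != ~0UL */
}

int main(void)
{
        unsigned long thread_mask = 0;

        /* Simulate three oneshot actions registering on one line. */
        for (int i = 0; i < 3; i++) {
                unsigned long new_mask;

                if (thread_mask == ~0UL)
                        return 1;       /* the kernel returns -EBUSY here */
                new_mask = 1UL << ffz_demo(thread_mask);
                thread_mask |= new_mask;
                printf("action %d gets thread_mask %#lx\n", i, new_mask);
        }
        return 0;                       /* prints 0x1, 0x2, 0x4 */
}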