Diffstat (limited to 'kernel/irq/manage.c')

 kernel/irq/manage.c | 147 +++++++++++++++++++++++++++++++----------------
 1 file changed, 88 insertions(+), 59 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 32313c084442..89a3ea82569b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,7 +282,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
-	int ret;
+	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
@@ -301,6 +301,13 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
	}

	cpumask_and(mask, cpu_online_mask, set);
+	if (node != NUMA_NO_NODE) {
+		const struct cpumask *nodemask = cpumask_of_node(node);
+
+		/* make sure at least one of the cpus in nodemask is online */
+		if (cpumask_intersects(mask, nodemask))
+			cpumask_and(mask, mask, nodemask);
+	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
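The hunk above makes setup_affinity() prefer CPUs on the interrupt's home NUMA node, but only when that node still has at least one online CPU in the candidate mask; otherwise the wider online-and-default mask is kept unchanged. A minimal userspace sketch of the same mask logic, with cpumasks modeled as plain 64-bit words (pick_affinity() and its parameters are invented names for illustration, not kernel API):

#include <stdint.h>
#include <stdio.h>

static uint64_t pick_affinity(uint64_t online_mask, uint64_t set,
			      uint64_t node_mask, int has_node)
{
	uint64_t mask = online_mask & set;	/* cpumask_and() */

	/* Narrow to the home node only if an online CPU remains there. */
	if (has_node && (mask & node_mask))	/* cpumask_intersects() */
		mask &= node_mask;
	return mask;
}

int main(void)
{
	/* CPUs 0-3 online, all allowed, node owns CPUs 2-3: narrow to 0xc. */
	printf("%#llx\n", (unsigned long long)
	       pick_affinity(0xf, ~0ULL, 0xc, 1));
	/* Node owns CPUs 4-5, all offline: fall back to the full 0xf. */
	printf("%#llx\n", (unsigned long long)
	       pick_affinity(0xf, ~0ULL, 0x30, 1));
	return 0;
}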
@@ -645,7 +652,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * is marked MASKED.
  */
 static void irq_finalize_oneshot(struct irq_desc *desc,
-				 struct irqaction *action, bool force)
+				 struct irqaction *action)
 {
	if (!(desc->istate & IRQS_ONESHOT))
		return;
@@ -679,7 +686,7 @@ again:
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
-	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;
@@ -739,7 +746,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
 }
@@ -755,10 +762,17 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
	return ret;
 }

+static void wake_threads_waitq(struct irq_desc *desc)
+{
+	if (atomic_dec_and_test(&desc->threads_active) &&
+	    waitqueue_active(&desc->wait_for_threads))
+		wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
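The new wake_threads_waitq() centralizes the "last thread out wakes the waiters" step that irq_thread() previously open-coded, so the exit path further down can reuse it. A rough pthreads model of the same accounting, assuming a condition variable in place of the kernel waitqueue (desc_model and the function names are invented for this sketch; the kernel additionally checks waitqueue_active() to skip the wakeup when nobody waits):

#include <pthread.h>
#include <stdatomic.h>

struct desc_model {
	atomic_int threads_active;		/* like desc->threads_active */
	pthread_mutex_t lock;
	pthread_cond_t wait_for_threads;	/* like desc->wait_for_threads */
};

/* Called by each handler thread when it finishes one invocation. */
static void wake_threads_waitq_model(struct desc_model *d)
{
	/* atomic_dec_and_test(): only the last runner sees old value 1 */
	if (atomic_fetch_sub(&d->threads_active, 1) == 1) {
		pthread_mutex_lock(&d->lock);
		pthread_cond_broadcast(&d->wait_for_threads);
		pthread_mutex_unlock(&d->lock);
	}
}

/* Rough counterpart of synchronize_irq()'s wait for running threads. */
static void wait_for_threads_model(struct desc_model *d)
{
	pthread_mutex_lock(&d->lock);
	while (atomic_load(&d->threads_active) != 0)
		pthread_cond_wait(&d->wait_for_threads, &d->lock);
	pthread_mutex_unlock(&d->lock);
}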
@@ -771,57 +785,41 @@ static int irq_thread(void *data)
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);
-	int wake;

-	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irqaction = action;
+	current->irq_thread = 1;

	while (!irq_wait_for_interrupt(action)) {
+		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

-		atomic_inc(&desc->threads_active);
+		action_ret = handler_fn(desc, action);
+		if (!noirqdebug)
+			note_interrupt(action->irq, desc, action_ret);

-		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-			/*
-			 * CHECKME: We might need a dedicated
-			 * IRQ_THREAD_PENDING flag here, which
-			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQS_PENDING should be fine as it
-			 * retriggers the interrupt itself --- tglx
-			 */
-			desc->istate |= IRQS_PENDING;
-			raw_spin_unlock_irq(&desc->lock);
-		} else {
-			irqreturn_t action_ret;
-
-			raw_spin_unlock_irq(&desc->lock);
-			action_ret = handler_fn(desc, action);
-			if (!noirqdebug)
-				note_interrupt(action->irq, desc, action_ret);
-		}
-
-		wake = atomic_dec_and_test(&desc->threads_active);
-
-		if (wake && waitqueue_active(&desc->wait_for_threads))
-			wake_up(&desc->wait_for_threads);
+		wake_threads_waitq(desc);
	}

-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
-
	/*
-	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * This is the regular exit path. __free_irq() is stopping the
+	 * thread via kthread_stop() after calling
+	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+	 * oneshot mask bit can be set. We cannot verify that as we
+	 * cannot touch the oneshot mask at this point anymore as
+	 * __setup_irq() might have given out current's thread_mask
+	 * again.
+	 *
+	 * Clear irq_thread. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
-	current->irqaction = NULL;
+	current->irq_thread = 0;
	return 0;
 }

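Besides folding the handler invocation into one unconditional path, the hunk above also corrects `force_irqthreads & test_bit(...)` to the logical `&&`. Bitwise AND happens to give the same answer while both operands are canonical 0/1 values, but it silently breaks as soon as one side is truthy without having bit 0 set, as this standalone snippet shows (variable names are illustrative):

#include <stdio.h>

int main(void)
{
	int feature_enabled = 1;	/* bit 0 set */
	int flag_word = 0x4;		/* truthy, but bit 0 clear */

	printf("%d\n", feature_enabled & flag_word);	/* 0: wrongly false */
	printf("%d\n", feature_enabled && flag_word);	/* 1: correct */
	return 0;
}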
@@ -832,27 +830,28 @@ void exit_irq_thread(void)
 {
	struct task_struct *tsk = current;
	struct irq_desc *desc;
+	struct irqaction *action;

-	if (!tsk->irqaction)
+	if (!tsk->irq_thread)
		return;

+	action = kthread_data(tsk);
+
	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

-	desc = irq_to_desc(tsk->irqaction->irq);
+	desc = irq_to_desc(action->irq);

	/*
-	 * Prevent a stale desc->threads_oneshot. Must be called
-	 * before setting the IRQTF_DIED flag.
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
	 */
-	irq_finalize_oneshot(desc, tsk->irqaction, true);
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);

-	/*
-	 * Set the THREAD DIED flag to prevent further wakeups of the
-	 * soon to be gone threaded handler.
-	 */
-	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
 }

 static void irq_setup_forced_threading(struct irqaction *new)
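With the per-task irqaction pointer gone, exit_irq_thread() recovers the action via kthread_data(), which returns the data argument the thread was created with; task_struct only keeps the one-bit irq_thread marker. A toy pthreads illustration of that split between a cheap per-task flag and per-thread data stored elsewhere (every name below is invented for the sketch):

#include <pthread.h>
#include <stdio.h>

struct fake_action { int irq; };

/*
 * The kernel keeps the start argument in struct kthread and exposes it
 * through kthread_data(); this global slot plays that role here.
 */
struct thread_slot {
	void *data;	/* what kthread_data() would return */
	int irq_thread;	/* the new one-bit marker in task_struct */
};

static struct thread_slot slot;

static void *irq_thread_model(void *arg)
{
	slot.data = arg;	/* stash the create-time data */
	slot.irq_thread = 1;	/* current->irq_thread = 1; */
	return NULL;
}

int main(void)
{
	struct fake_action action = { .irq = 19 };
	pthread_t t;

	pthread_create(&t, NULL, irq_thread_model, &action);
	pthread_join(t, NULL);

	/* exit path: consult the stored data only if the marker is set */
	if (slot.irq_thread)
		printf("was an irq thread, irq %d\n",
		       ((struct fake_action *)slot.data)->irq);
	return 0;
}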
@@ -985,6 +984,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)

	/* add new interrupt at end of irq queue */
	do {
+		/*
+		 * OR all existing action->thread_mask bits,
+		 * so we can find the next zero bit for this
+		 * new action.
+		 */
		thread_mask |= old->thread_mask;
		old_ptr = &old->next;
		old = *old_ptr;
@@ -993,14 +997,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	}

	/*
-	 * Setup the thread mask for this irqaction. Unlikely to have
-	 * 32 resp 64 irqs sharing one line, but who knows.
+	 * Setup the thread mask for this irqaction for ONESHOT. For
+	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
+	 * conditional in irq_wake_thread().
	 */
-	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-		ret = -EBUSY;
-		goto out_mask;
+	if (new->flags & IRQF_ONESHOT) {
+		/*
+		 * Unlikely to have 32 resp 64 irqs sharing one line,
+		 * but who knows.
+		 */
+		if (thread_mask == ~0UL) {
+			ret = -EBUSY;
+			goto out_mask;
+		}
+		/*
+		 * The thread_mask for the action is or'ed to
+		 * desc->threads_oneshot to indicate that the
+		 * IRQF_ONESHOT thread handler has been woken, but not
+		 * yet finished. The bit is cleared when a thread
+		 * completes. When all threads of a shared interrupt
+		 * line have completed desc->threads_active becomes
+		 * zero and the interrupt line is unmasked. See
+		 * handle.c:irq_wake_thread() for further information.
+		 *
+		 * If no thread is woken by primary (hard irq context)
+		 * interrupt handlers, then desc->threads_active is
+		 * also checked for zero to unmask the irq line in the
+		 * affected hard irq flow handlers
+		 * (handle_[fasteoi|level]_irq).
+		 *
+		 * The new action gets the first zero bit of
+		 * thread_mask assigned. See the loop above which ORs
+		 * all existing action->thread_mask bits.
+		 */
+		new->thread_mask = 1 << ffz(thread_mask);
	}
-	new->thread_mask = 1 << ffz(thread_mask);

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);
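The allocation scheme in this hunk hands every ONESHOT action on a shared line its own bit: OR together the masks already taken, then grab the first zero bit, refusing registration with -EBUSY once all bits are used. A small userspace demo, with the kernel's ffz() modeled via GCC/Clang's __builtin_ctzl on the complement (ffz_model is an invented name):

#include <stdio.h>

/* ffz(x): index of the first 0 bit; undefined for an all-ones mask,
 * which the -EBUSY check above rules out before we get here. */
static unsigned long ffz_model(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(~x);
}

int main(void)
{
	unsigned long thread_mask = 0;	/* OR of existing action masks */
	int i;

	/* three actions registered on one shared line get bits 0, 1, 2 */
	for (i = 0; i < 3; i++) {
		unsigned long new_mask = 1UL << ffz_model(thread_mask);

		printf("action %d gets thread_mask %#lx\n", i, new_mask);
		thread_mask |= new_mask;
	}
	return 0;
}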
@@ -1103,8 +1134,7 @@ out_thread:
		struct task_struct *t = new->thread;

		new->thread = NULL;
-		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
-			kthread_stop(t);
+		kthread_stop(t);
		put_task_struct(t);
	}
 out_mput:
@@ -1214,8 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif

	if (action->thread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(action->thread);
+		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

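Both kthread_stop() call sites lose the IRQTF_DIED test because the thread no longer marks itself dead behind the caller's back, and stopping a thread whose loop has already finished is safe. A pthreads analogue of that unconditional stop-and-join pattern, assuming an atomic flag in place of kthread_should_stop() (all names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;	/* plays the kthread_should_stop() role */

static void *irq_thread_model(void *arg)
{
	(void)arg;
	while (!atomic_load(&should_stop))
		usleep(1000);	/* stand-in for irq_wait_for_interrupt() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_thread_model, NULL);
	atomic_store(&should_stop, true);	/* ask the thread to stop... */
	pthread_join(t, NULL);			/* ...and wait; safe even if it
						 * has already returned */
	puts("stopped");
	return 0;
}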