Diffstat (limited to 'kernel/irq/manage.c')

-rw-r--r--	kernel/irq/manage.c	| 54 ++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 48 insertions(+), 6 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 01f8a9519e63..2301de19ac7d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -617,8 +617,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+				 struct irqaction *action, bool force)
 {
+	if (!(desc->istate & IRQS_ONESHOT))
+		return;
 again:
 	chip_bus_lock(desc);
 	raw_spin_lock_irq(&desc->lock);
@@ -631,6 +634,11 @@ again:
 	 * on the other CPU. If we unmask the irq line then the
 	 * interrupt can come in again and masks the line, leaves due
 	 * to IRQS_INPROGRESS and the irq line is masked forever.
+	 *
+	 * This also serializes the state of shared oneshot handlers
+	 * versus "desc->threads_oneshot |= action->thread_mask;" in
+	 * irq_wake_thread(). See the comment there which explains the
+	 * serialization.
 	 */
 	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
@@ -639,11 +647,23 @@ again:
 		goto again;
 	}
 
-	if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
+	/*
+	 * Now check again, whether the thread should run. Otherwise
+	 * we would clear the threads_oneshot bit of this thread which
+	 * was just set.
+	 */
+	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		goto out_unlock;
+
+	desc->threads_oneshot &= ~action->thread_mask;
+
+	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+	    (desc->istate & IRQS_MASKED)) {
 		irq_compat_clr_masked(desc);
 		desc->istate &= ~IRQS_MASKED;
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	}
+out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
 }
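
[Editor's note] The bookkeeping this hunk introduces is easiest to see in isolation. Below is a minimal user-space sketch of the scheme, not the kernel code itself: the struct and function names (fake_desc, fake_action, finalize) are made up for illustration. Each threaded handler owns one bit in threads_oneshot, clears only its own bit when it finishes, and the line is unmasked only once the whole mask drops to zero; the IRQTF_RUNTHREAD recheck is modeled as a plain flag.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_desc {
		unsigned long threads_oneshot;	/* one bit per threaded handler */
		bool masked;			/* line currently masked */
	};

	struct fake_action {
		unsigned long thread_mask;	/* this handler's bit */
		bool runthread;			/* models IRQTF_RUNTHREAD */
	};

	static void finalize(struct fake_desc *desc, struct fake_action *act,
			     bool force)
	{
		/* a new wakeup is already pending: keep our bit, run again */
		if (!force && act->runthread)
			return;

		desc->threads_oneshot &= ~act->thread_mask;

		/* unmask only when every sibling handler has finished */
		if (!desc->threads_oneshot && desc->masked) {
			desc->masked = false;
			printf("line unmasked\n");
		}
	}

	int main(void)
	{
		struct fake_desc desc = { .masked = true };
		struct fake_action a = { .thread_mask = 1UL << 0 };
		struct fake_action b = { .thread_mask = 1UL << 1 };

		/* both handlers were woken for this oneshot interrupt */
		desc.threads_oneshot = a.thread_mask | b.thread_mask;

		finalize(&desc, &a, false);	/* a done: mask 0b10, stays masked */
		finalize(&desc, &b, false);	/* b done: mask 0, line unmasked */
		return 0;
	}
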
@@ -691,7 +711,7 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	int wake, oneshot = desc->istate & IRQS_ONESHOT;
+	int wake;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
@@ -719,8 +739,7 @@ static int irq_thread(void *data)
 
 			action->thread_fn(action->irq, action->dev_id);
 
-			if (oneshot)
-				irq_finalize_oneshot(action->irq, desc);
+			irq_finalize_oneshot(desc, action, false);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -729,6 +748,9 @@ static int irq_thread(void *data)
 			wake_up(&desc->wait_for_threads);
 	}
 
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
+
 	/*
 	 * Clear irqaction. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
@@ -743,6 +765,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
+	struct irq_desc *desc;
 
 	if (!tsk->irqaction)
 		return;
@@ -751,6 +774,14 @@ void exit_irq_thread(void)
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+	desc = irq_to_desc(tsk->irqaction->irq);
+
+	/*
+	 * Prevent a stale desc->threads_oneshot. Must be called
+	 * before setting the IRQTF_DIED flag.
+	 */
+	irq_finalize_oneshot(desc, tsk->irqaction, true);
+
 	/*
 	 * Set the THREAD DIED flag to prevent further wakeups of the
 	 * soon to be gone threaded handler.
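
[Editor's note] The ordering constraint the comment states is worth spelling out. Once IRQTF_DIED is set, the handler thread is never woken again, so any bit it still owns in desc->threads_oneshot would stay set forever and the unmask condition (threads_oneshot == 0) could never be met again on a shared line. A hedged sketch of the exit ordering, continuing the toy model from the earlier example (illustrative names, not kernel API):

	/* Exit-path ordering, reusing fake_desc/fake_action/finalize above. */
	static void thread_exit(struct fake_desc *desc, struct fake_action *act)
	{
		/*
		 * Clear our threads_oneshot bit first; force = true skips
		 * the IRQTF_RUNTHREAD recheck because this thread is going
		 * away regardless of pending wakeups.
		 */
		finalize(desc, act, true);

		/*
		 * Only now may the thread be marked dead (IRQTF_DIED in the
		 * real code). In the reverse order a still-set bit would
		 * keep threads_oneshot non-zero forever and the line would
		 * never be unmasked again.
		 */
	}
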
@@ -767,7 +798,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
-	unsigned long flags;
+	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
 	cpumask_var_t mask;
 
@@ -865,12 +896,23 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
+			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
 
+	/*
+	 * Setup the thread mask for this irqaction. Unlikely to have
+	 * 32 resp 64 irqs sharing one line, but who knows.
+	 */
+	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+		ret = -EBUSY;
+		goto out_mask;
+	}
+	new->thread_mask = 1 << ffz(thread_mask);
+
 	if (!shared) {
 		irq_chip_set_defaults(desc->irq_data.chip);
 
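
[Editor's note] The thread_mask allocation can be tried in isolation. A small sketch of the idea, with a user-space stand-in for the kernel's ffz() (which returns the index of the first zero bit in a word): OR together the masks already owned by the actions sharing the line, then hand the new action the lowest free bit, refusing with -EBUSY when every bit of the long is taken.

	#include <stdio.h>

	/* user-space stand-in for the kernel's ffz(); the caller must
	 * guarantee word != ~0UL, as the kernel version does */
	static unsigned long my_ffz(unsigned long word)
	{
		return __builtin_ctzl(~word);
	}

	int main(void)
	{
		/* bits already owned by three shared actions on this line */
		unsigned long thread_mask = (1UL << 0) | (1UL << 1) | (1UL << 2);

		if (thread_mask == ~0UL) {
			printf("-EBUSY: no free bit for another oneshot action\n");
			return 1;
		}

		unsigned long new_mask = 1UL << my_ffz(thread_mask);
		printf("new action gets bit %lu (mask 0x%lx)\n",
		       my_ffz(thread_mask), new_mask);
		/* prints: new action gets bit 3 (mask 0x8) */
		return 0;
	}
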