Diffstat (limited to 'kernel/irq/manage.c')
 kernel/irq/manage.c | 43 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c69326aa773..e49a288fa479 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -616,6 +616,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+	if (!desc)
+		return -EINVAL;
+
+	desc->parent_irq = parent_irq;
+
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+#endif
+
 /*
  * Default primary interrupt handler for threaded interrupts. Is
  * assigned as primary handler when request_threaded_irq is called
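
The hunk above adds irq_set_parent(), which records the parent line of a nested interrupt so the software-resend path (CONFIG_HARDIRQS_SW_RESEND) can retrigger the parent when a child interrupt has to be replayed. A minimal sketch of how a demultiplexing driver might wire this up; the chip, flow handler choice, and IRQ numbers are illustrative assumptions, not taken from this commit:

#include <linux/irq.h>

/* Hypothetical demux setup: each child IRQ records its parent so a
 * software resend of the child can retrigger parent_irq. */
static struct irq_chip demo_demux_chip = {
	.name = "demo-demux",
};

static int demo_setup_child_irqs(int parent_irq, int child_base, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		int child = child_base + i;

		irq_set_chip_and_handler(child, &demo_demux_chip,
					 handle_simple_irq);
		ret = irq_set_parent(child, parent_irq);
		if (ret)
			return ret;
	}
	return 0;
}
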
@@ -716,6 +732,7 @@ static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
+	bool valid = true;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
@@ -730,10 +747,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}
 
 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
 
-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
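
Two details in the hunk above are worth noting: desc->irq_data.affinity can only be NULL when CONFIG_CPUMASK_OFFSTACK=y, where cpumask_var_t is a real pointer (with =n it is an embedded array whose address is never NULL, so the compiler drops the branch), and the mask is snapshotted under the raw spinlock but applied afterwards, because set_cpus_allowed_ptr() may sleep. A generic sketch of that snapshot-then-apply pattern, with hypothetical names:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Illustrative pattern: copy shared state under the lock, act on the
 * copy after dropping it, since set_cpus_allowed_ptr() may sleep. */
static void demo_apply_affinity(raw_spinlock_t *lock, const struct cpumask *src)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	raw_spin_lock_irq(lock);
	if (src)		/* NULL only possible with CPUMASK_OFFSTACK=y */
		cpumask_copy(mask, src);
	else
		valid = false;
	raw_spin_unlock_irq(lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
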
@@ -793,7 +818,7 @@ static void irq_thread_dtor(struct callback_head *unused)
 	action = kthread_data(tsk);
 
 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+	       tsk->comm, tsk->pid, action->irq);
 
 
 	desc = irq_to_desc(action->irq);
@@ -833,6 +858,8 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
+	irq_thread_check_affinity(desc, action);
+
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
 
@@ -936,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
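
The last two hunks cooperate: __setup_irq() flags each new handler thread with IRQTF_AFFINITY, and irq_thread() now calls irq_thread_check_affinity() before its wait loop, so the thread adopts the line's affinity even for shared handlers where setup_affinity() is skipped. For context, a minimal sketch of registering a shared threaded handler; the device name and handler bodies are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
	/* Hard-IRQ context: identify the source, defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* Runs in the IRQ thread, which adopts the line's affinity at
	 * startup thanks to IRQTF_AFFINITY being set in __setup_irq(). */
	return IRQ_HANDLED;
}

static int demo_request(int irq, void *dev)
{
	/* With IRQF_SHARED, a later registration on the same line skips
	 * setup_affinity(); IRQTF_AFFINITY still moves its thread onto
	 * the CPU(s) the interrupt was placed on. */
	return request_threaded_irq(irq, demo_hard_handler, demo_thread_fn,
				    IRQF_SHARED, "demo-dev", dev);
}
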