Diffstat (limited to 'kernel/irq/manage.c')
 kernel/irq/manage.c | 46 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c69326aa773..fa17855ca65a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/task_work.h>
 
 #include "internals.h"
@@ -616,6 +617,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+	if (!desc)
+		return -EINVAL;
+
+	desc->parent_irq = parent_irq;
+
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+#endif
+
 /*
  * Default primary interrupt handler for threaded interrupts. Is
  * assigned as primary handler when request_threaded_irq is called
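The new irq_set_parent() helper simply records the parent line in the descriptor so the software-resend logic behind CONFIG_HARDIRQS_SW_RESEND can consult it for nested interrupts. A minimal sketch of a consumer follows, assuming a driver for a nested interrupt controller; foo_domain, FOO_HWIRQ and the probe function are illustrative names, not part of this patch:

	/* Hypothetical nested-controller probe: tell the core that
	 * child_irq is demultiplexed from parent_irq, so a software
	 * resend of the child can take the parent's state into account. */
	static int foo_probe(struct platform_device *pdev)
	{
		int parent_irq = platform_get_irq(pdev, 0);
		unsigned int child_irq = irq_create_mapping(foo_domain, FOO_HWIRQ);

		if (parent_irq < 0 || !child_irq)
			return -EINVAL;

		return irq_set_parent(child_irq, parent_irq);
	}

Note that irq_set_parent() takes the descriptor lock only to store parent_irq; it does not validate that parent_irq itself refers to a live interrupt.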
@@ -716,6 +733,7 @@ static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
+	bool valid = true;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
@@ -730,10 +748,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}
 
 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
 
-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
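The pointer test above is only live when CONFIG_CPUMASK_OFFSTACK=y: in that configuration cpumask_var_t is a heap pointer that stays NULL until alloc_cpumask_var() has run, while otherwise it is a one-element array whose decayed address can never be NULL, so the compiler folds the branch away. Paraphrased from <linux/cpumask.h> (a sketch, not the verbatim header):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;		/* heap pointer, may be NULL */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* array: "if (mask)" is always true */
	#endif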
@@ -793,7 +819,7 @@ static void irq_thread_dtor(struct callback_head *unused)
 	action = kthread_data(tsk);
 
 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+	       tsk->comm, tsk->pid, action->irq);
 
 
 	desc = irq_to_desc(action->irq);
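The dropped ternary was dead code rather than a behavioral change: comm is an embedded character array in struct task_struct, not a pointer, so tsk->comm can never evaluate to NULL. Roughly, from <linux/sched.h>:

	struct task_struct {
		/* ... */
		char comm[TASK_COMM_LEN];	/* executable name, excluding path */
		/* ... */
	};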
@@ -833,6 +859,8 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
+	irq_thread_check_affinity(desc, action);
+
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
 
@@ -936,6 +964,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
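Paired with the irq_thread_check_affinity() call added at thread startup above, this forms a small handshake: __setup_irq() publishes the request with set_bit(IRQTF_AFFINITY, ...) and the handler thread consumes it with test_and_clear_bit() before copying the line's affinity onto itself. The shared-handler case the comment describes would arise from a second request like the following sketch (handler and device names are illustrative):

	/* A second IRQF_SHARED request on an already configured line does
	 * not go through setup_affinity() again, so only the IRQTF_AFFINITY
	 * bit makes this new thread adopt the line's existing affinity. */
	int ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
				       IRQF_SHARED, "foo-dev", dev);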
@@ -1487,6 +1525,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 out:
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(enable_percpu_irq);
 
 void disable_percpu_irq(unsigned int irq)
 {
@@ -1500,6 +1539,7 @@ void disable_percpu_irq(unsigned int irq)
 	irq_percpu_disable(desc, cpu);
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
 /*
  * Internal function to unregister a percpu irqaction.
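Exporting enable_percpu_irq() and disable_percpu_irq() (GPL-only) lets modular drivers of per-CPU interrupt sources, such as per-CPU timers, manage their line; both act only on the calling CPU. A hedged sketch of module-side usage, with foo_irq and the hotplug hooks as illustrative names:

	/* Called on each CPU as it comes up or goes down (for instance
	 * from the driver's CPU hotplug callbacks); assumes the line was
	 * obtained earlier with request_percpu_irq(). */
	static void foo_cpu_up(void)
	{
		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);	/* this CPU only */
	}

	static void foo_cpu_down(void)
	{
		disable_percpu_irq(foo_irq);
	}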