 include/linux/irq.h     |  9 +++++++++
 include/linux/irqdesc.h |  3 +++
 kernel/irq/chip.c       |  1 +
 kernel/irq/manage.c     | 41 +++++++++++++++++++++++++++++++++++++++--
 kernel/irq/resend.c     |  8 ++++++++
 5 files changed, 60 insertions(+), 2 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 216b0ba109d7..526f10a637c1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -392,6 +392,15 @@ static inline void irq_move_masked_irq(struct irq_data *data) { }
 
 extern int no_irq_affinity;
 
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq);
+#else
+static inline int irq_set_parent(int irq, int parent_irq)
+{
+        return 0;
+}
+#endif
+
 /*
  * Built-in IRQ handlers for various IRQ types,
  * callable via desc->handle_irq()
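
Taken together, the patch lets a nested child interrupt record its parent line, so that check_irq_resend() (see kernel/irq/resend.c below) can retrigger the parent whose thread demultiplexes the child. Below is a minimal sketch, not part of the patch, of how a demux driver might use the new call when mapping a child interrupt. The foo_* names are hypothetical; irq_set_chip_data(), irq_set_chip_and_handler(), irq_set_nested_thread() and handle_simple_irq are existing genirq API, and irq_set_parent() is the call added here.

        #include <linux/irq.h>

        /*
         * Hypothetical call site: wire up a child irq of an i2c/spi irq
         * chip whose handlers run in the thread of parent_irq.
         */
        static int foo_map_child_irq(unsigned int child_irq,
                                     unsigned int parent_irq,
                                     struct foo_chip *chip)
        {
                irq_set_chip_data(child_irq, chip);
                irq_set_chip_and_handler(child_irq, &foo_irq_chip,
                                         handle_simple_irq);
                /* Child handlers run nested in the parent's irq thread. */
                irq_set_nested_thread(child_irq, 1);
                /* Record the parent so check_irq_resend() can retrigger it. */
                return irq_set_parent(child_irq, parent_irq); /* -EINVAL if no desc */
        }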
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 0ba014c55056..623325e2ff97 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -11,6 +11,8 @@
 struct irq_affinity_notify;
 struct proc_dir_entry;
 struct module;
+struct irq_desc;
+
 /**
  * struct irq_desc - interrupt descriptor
  * @irq_data:		per irq and chip data passed down to chip functions
@@ -65,6 +67,7 @@ struct irq_desc {
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
+	int			parent_irq;
 	struct module		*owner;
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 57d86d07221e..3aca9f29d30e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -272,6 +272,7 @@ void handle_nested_irq(unsigned int irq)
 
 	raw_spin_lock_irq(&desc->lock);
 
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c69326aa773..35c70c9e24d8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -616,6 +616,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+	if (!desc)
+		return -EINVAL;
+
+	desc->parent_irq = parent_irq;
+
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+#endif
+
 /*
  * Default primary interrupt handler for threaded interrupts. Is
  * assigned as primary handler when request_threaded_irq is called
@@ -716,6 +732,7 @@ static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
+	bool valid = true;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
@@ -730,10 +747,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}
 
 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
 
-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
@@ -833,6 +858,8 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
+	irq_thread_check_affinity(desc, action);
+
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
 
@@ -936,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
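
The comment added above is easiest to see with a shared line: setup_affinity() runs only when the first handler is installed, so a later threaded handler relies on IRQTF_AFFINITY to pull the line's affinity into its thread on first wakeup. A hedged sketch of that pattern, not part of the patch, with the foo_*/bar_* names purely illustrative and request_threaded_irq() being the existing API:

        /* Two drivers sharing one line, both with threaded handlers. */
        ret = request_threaded_irq(irq, NULL, foo_thread_fn,
                                   IRQF_SHARED | IRQF_ONESHOT, "foo", foo);
        if (ret)
                return ret;
        /*
         * The second request skips setup_affinity(); IRQTF_AFFINITY makes
         * bar's irq thread adopt the line's affinity when it starts up.
         */
        ret = request_threaded_irq(irq, NULL, bar_thread_fn,
                                   IRQF_SHARED | IRQF_ONESHOT, "bar", bar);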
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 6454db7b6a4d..9065107f083e 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -74,6 +74,14 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	if (!desc->irq_data.chip->irq_retrigger ||
 	    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
+		/*
+		 * If the interrupt has a parent irq and runs
+		 * in the thread context of the parent irq,
+		 * retrigger the parent.
+		 */
+		if (desc->parent_irq &&
+		    irq_settings_is_nested_thread(desc))
+			irq = desc->parent_irq;
 		/* Set it pending and activate the softirq: */
 		set_bit(irq, irqs_resend);
 		tasklet_schedule(&resend_tasklet);
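
For context, the parent_irq fallback above only applies to nested-thread children, i.e. interrupts whose handlers run inside a parent's irq thread. A hedged sketch of such a parent thread handler, not part of the patch: the foo_* names and the status-register read are hypothetical, while handle_nested_irq() is the real entry point patched in kernel/irq/chip.c above.

        /*
         * Parent's threaded handler: poll the device and run each pending
         * child's handler in this thread via handle_nested_irq(). If the
         * hardware cannot retrigger a child, check_irq_resend() now falls
         * back to software-resending this parent instead.
         */
        static irqreturn_t foo_demux_thread(int irq, void *data)
        {
                struct foo_chip *chip = data;
                unsigned long pending = foo_read_status(chip); /* hypothetical */
                int bit;

                for_each_set_bit(bit, &pending, chip->nr_children)
                        handle_nested_irq(chip->irq_base + bit);

                return IRQ_HANDLED;
        }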