diff options
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:12:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:12:06 -0500
commit    aefb058b0c27dafb15072406fbfd92d2ac2c8790 (patch)
tree      de24b50221cfdbd8ebedb2add38c6125de604c3d /kernel/irq
parent    37ea95a959d4a49846ecbf2dd45326b6b34bf049 (diff)
parent    04aa530ec04f61875b99c12721162e2964e3318c (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fixes from Ingo Molnar:
"Affinity fixes and a nested threaded IRQ handling fix."
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
genirq: Always force thread affinity
irq: Set CPU affinity right on thread creation
genirq: Provide means to retrigger parent
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/chip.c   |  1
-rw-r--r--  kernel/irq/manage.c | 41
-rw-r--r--  kernel/irq/resend.c |  8
3 files changed, 48 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 57d86d07221..3aca9f29d30 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -272,6 +272,7 @@ void handle_nested_irq(unsigned int irq)
 
 	raw_spin_lock_irq(&desc->lock);
 
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c69326aa77..35c70c9e24d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -616,6 +616,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+	if (!desc)
+		return -EINVAL;
+
+	desc->parent_irq = parent_irq;
+
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+#endif
+
 /*
  * Default primary interrupt handler for threaded interrupts. Is
  * assigned as primary handler when request_threaded_irq is called
@@ -716,6 +732,7 @@ static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
+	bool valid = true;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
@@ -730,10 +747,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}
 
 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
 
-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
@@ -833,6 +858,8 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
+	irq_thread_check_affinity(desc, action);
+
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
 
@@ -936,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 6454db7b6a4..9065107f083 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -74,6 +74,14 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	if (!desc->irq_data.chip->irq_retrigger ||
 	    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
+		/*
+		 * If the interrupt has a parent irq and runs
+		 * in the thread context of the parent irq,
+		 * retrigger the parent.
+		 */
+		if (desc->parent_irq &&
+		    irq_settings_is_nested_thread(desc))
+			irq = desc->parent_irq;
 		/* Set it pending and activate the softirq: */
 		set_bit(irq, irqs_resend);
 		tasklet_schedule(&resend_tasklet);
