 kernel/irq/handle.c    | 16 ++++++++++------
 kernel/irq/manage.c    | 19 +++++++++++++------
 kernel/irq/migration.c | 10 +++++++---
 3 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6ff84e6a954c..bdb180325551 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -54,14 +54,18 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
 static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 {
 	/*
-	 * Wake up the handler thread for this action. In case the
-	 * thread crashed and was killed we just pretend that we
-	 * handled the interrupt. The hardirq handler has disabled the
-	 * device interrupt, so no irq storm is lurking. If the
+	 * In case the thread crashed and was killed we just pretend that
+	 * we handled the interrupt. The hardirq handler has disabled the
+	 * device interrupt, so no irq storm is lurking.
+	 */
+	if (action->thread->flags & PF_EXITING)
+		return;
+
+	/*
+	 * Wake up the handler thread for this action. If the
 	 * RUNTHREAD bit is already set, nothing to do.
 	 */
-	if ((action->thread->flags & PF_EXITING) ||
-	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		return;
 
 	/*
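The reordered irq_wake_thread() bails out for an exiting thread before the RUNTHREAD bit is ever touched; for live threads, the wake-once behaviour comes from test_and_set_bit(): only the caller that flips IRQTF_RUNTHREAD from clear to set goes on to wake the handler thread. A minimal userspace sketch of that wake-once pattern, using C11 atomics in place of the kernel bitops (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag runthread = ATOMIC_FLAG_INIT;

/* Returns 1 only for the caller that actually gets to wake the thread. */
static int wake_once(void)
{
	/* Like test_and_set_bit(): only the first setter sees "was clear". */
	if (atomic_flag_test_and_set(&runthread))
		return 0;	/* wake already pending, nothing to do */
	return 1;		/* we set the flag; go wake the handler thread */
}

int main(void)
{
	printf("%d\n", wake_once());	/* 1: first caller wakes the thread */
	printf("%d\n", wake_once());	/* 0: wake already pending */
	return 0;
}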
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b0ccd1ac2d6a..89a3ea82569b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,7 +282,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret;
+	int ret, node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -301,6 +301,13 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 	}
 
 	cpumask_and(mask, cpu_online_mask, set);
+	if (node != NUMA_NO_NODE) {
+		const struct cpumask *nodemask = cpumask_of_node(node);
+
+		/* make sure at least one of the cpus in nodemask is online */
+		if (cpumask_intersects(mask, nodemask))
+			cpumask_and(mask, mask, nodemask);
+	}
 	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
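The new NUMA hunk narrows the computed affinity mask to the interrupt's home node, but only when that node still has at least one online CPU; otherwise the wider online/default mask is left intact. A hedged userspace sketch of that narrowing step, with a plain uint64_t standing in for struct cpumask (prefer_node() and both masks are illustrative names, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Narrow 'mask' to 'nodemask' only if the intersection is non-empty,
 * mirroring the cpumask_intersects()/cpumask_and() pair above. */
static uint64_t prefer_node(uint64_t mask, uint64_t nodemask, int node)
{
	if (node != NUMA_NO_NODE && (mask & nodemask))
		mask &= nodemask;
	return mask;
}

int main(void)
{
	/* CPUs 0-3 online, node owns CPUs 2-3: keep only CPUs 2-3. */
	printf("%#llx\n", (unsigned long long)prefer_node(0xf, 0xc, 1));
	/* Node owns CPUs 4-5 but they are offline: keep the full mask. */
	printf("%#llx\n", (unsigned long long)prefer_node(0xf, 0x30, 1));
	return 0;
}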
@@ -645,7 +652,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * is marked MASKED.
  */
 static void irq_finalize_oneshot(struct irq_desc *desc,
-				 struct irqaction *action, bool force)
+				 struct irqaction *action)
 {
 	if (!(desc->istate & IRQS_ONESHOT))
 		return;
@@ -679,7 +686,7 @@ again:
 	 * we would clear the threads_oneshot bit of this thread which
 	 * was just set.
 	 */
-	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		goto out_unlock;
 
 	desc->threads_oneshot &= ~action->thread_mask;
@@ -739,7 +746,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
 	local_bh_enable();
 	return ret;
 }
@@ -755,7 +762,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 	irqreturn_t ret;
 
 	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
 	return ret;
 }
 
@@ -844,7 +851,7 @@ void exit_irq_thread(void)
 	wake_threads_waitq(desc);
 
 	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
+	irq_finalize_oneshot(desc, action);
 }
 
 static void irq_setup_forced_threading(struct irqaction *new)
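Background for the irq_finalize_oneshot() hunks above: in the oneshot scheme each threaded handler owns one bit in desc->threads_oneshot, clears it when it finishes, and the line is unmasked only once the field drops to zero; the IRQTF_RUNTHREAD test skips finalization when the handler is about to run again anyway. A self-contained sketch of that bookkeeping under those assumptions (all names illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct oneshot_desc {
	unsigned long threads_oneshot;	/* one bit per running threaded handler */
	bool masked;
};

/* Clear this handler's bit; unmask the line once no handler is pending. */
static void finalize_oneshot(struct oneshot_desc *desc,
			     unsigned long thread_mask, bool must_rerun)
{
	if (must_rerun)		/* RUNTHREAD still set: finalize on the rerun */
		return;
	desc->threads_oneshot &= ~thread_mask;
	if (!desc->threads_oneshot && desc->masked) {
		desc->masked = false;
		printf("unmask irq line\n");
	}
}

int main(void)
{
	struct oneshot_desc d = { .threads_oneshot = 0x3, .masked = true };

	finalize_oneshot(&d, 0x1, false);	/* other handler still running */
	finalize_oneshot(&d, 0x2, false);	/* last one out unmasks */
	return 0;
}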
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 47420908fba0..c3c89751b327 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -43,12 +43,16 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * masking the irqs.
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids))
-		if (!chip->irq_set_affinity(&desc->irq_data,
-					    desc->pending_mask, false)) {
+		   < nr_cpu_ids)) {
+		int ret = chip->irq_set_affinity(&desc->irq_data,
+						 desc->pending_mask, false);
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
 			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
 		}
+	}
 
 	cpumask_clear(desc->pending_mask);
 }
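Note that the new switch relies on deliberate fall-through: when the chip returns IRQ_SET_MASK_OK, the core copies pending_mask into irq_data.affinity and then falls into the IRQ_SET_MASK_OK_NOCOPY case, while a chip that has already updated the affinity itself skips straight to the common irq_set_thread_affinity() call. A small compilable sketch of the same pattern (the enum values and handle_ret() are illustrative, not kernel code):

#include <stdio.h>

enum { IRQ_SET_MASK_OK, IRQ_SET_MASK_OK_NOCOPY };

static void handle_ret(int ret)
{
	switch (ret) {
	case IRQ_SET_MASK_OK:
		printf("core copies the affinity mask\n");
		/* deliberate fall-through: thread affinity update is common */
	case IRQ_SET_MASK_OK_NOCOPY:
		printf("update thread affinity\n");
		break;
	default:
		printf("error: leave masks untouched\n");
	}
}

int main(void)
{
	handle_ret(IRQ_SET_MASK_OK);		/* copy, then update threads */
	handle_ret(IRQ_SET_MASK_OK_NOCOPY);	/* update threads only */
	return 0;
}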
