Diffstat (limited to 'arch/arm64/kernel/irq.c')
-rw-r--r--  arch/arm64/kernel/irq.c | 39
1 file changed, 4 insertions, 35 deletions
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..071a6ec13bd8 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -40,33 +40,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-/*
- * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
- * not come via this function.  Instead, they should provide their
- * own 'handler'.  Used by platform code implementing C-based 1st
- * level decoding.
- */
-void handle_IRQ(unsigned int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	irq_enter();
-
-	/*
-	 * Some hardware gives randomly wrong interrupts.  Rather
-	 * than crashing, do something sensible.
-	 */
-	if (unlikely(irq >= nr_irqs)) {
-		pr_warn_ratelimited("Bad IRQ%u\n", irq);
-		ack_bad_irq(irq);
-	} else {
-		generic_handle_irq(irq);
-	}
-
-	irq_exit();
-	set_irq_regs(old_regs);
-}
-
 void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
 {
 	if (handle_arch_irq)
@@ -97,19 +70,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}
 
-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
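
Context for the first hunk, not part of this patch: with handle_IRQ() removed, a root interrupt controller driver is expected to install its own top-level handler through set_handle_irq() and resolve hardware IRQ numbers via its irqdomain, typically with handle_domain_irq(), which performs the irq_enter()/irq_exit() bookkeeping and the bad-IRQ fallback that handle_IRQ() did here. The sketch below is a minimal illustration; the foo_* names, the register offset, and the exact include set are assumptions, not code from this tree.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/irq.h>

#define FOO_INTACK	0x0c			/* hypothetical register offset */

static void __iomem *foo_base;			/* mapped at probe time (assumed) */
static struct irq_domain *foo_domain;		/* created at probe time (assumed) */

static void foo_handle_irq(struct pt_regs *regs)
{
	/* Hypothetical: read the pending hwirq number from the controller. */
	u32 hwirq = readl_relaxed(foo_base + FOO_INTACK);

	/* Enters/exits IRQ context and maps hwirq to its Linux IRQ number. */
	handle_domain_irq(foo_domain, hwirq, regs);
}

static int __init foo_irqchip_init(void)
{
	/* Install the C-based first-level decoder for this platform. */
	set_handle_irq(foo_handle_irq);
	return 0;
}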

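As a readability aid for the second hunk, here is how the migration path reads after this change, written out as a plain function. The body is reconstructed from the hunk; the declarations at the top and anything outside the shown lines are assumed from the surrounding code of this file, not taken from the diff. The two key points: the affinity mask is only replaced with cpu_online_mask when it no longer contains any online CPU, and irq_set_affinity() is now called with force == false, so the irqchip can itself restrict the target to an online CPU (the risk the removed comment described for the forced case).

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/* Per-CPU IRQs, or IRQs not targeting this CPU, are left alone. */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/*
	 * Fall back to cpu_online_mask only if no online CPU remains in the
	 * affinity mask; otherwise keep the original mask.
	 */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}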