Diffstat (limited to 'kernel/irq/cpuhotplug.c')
-rw-r--r--  kernel/irq/cpuhotplug.c | 150
1 file changed, 126 insertions, 24 deletions
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 011f8c4c63da..aee8f7ec40af 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -14,37 +14,99 @@
 
 #include "internals.h"
 
+/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
+static inline bool irq_needs_fixup(struct irq_data *d)
+{
+	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+
+	return cpumask_test_cpu(smp_processor_id(), m);
+}
+
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->common->affinity;
-	struct irq_chip *c;
-	bool ret = false;
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
+	const struct cpumask *affinity;
+	bool brokeaff = false;
+	int err;
 
 	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
+	 * IRQ chip might be already torn down, but the irq descriptor is
+	 * still in the radix tree. Also if the chip has no affinity setter,
+	 * nothing can be done here.
 	 */
-	if (irqd_is_per_cpu(d) ||
-	    !cpumask_test_cpu(smp_processor_id(), affinity))
+	if (!chip || !chip->irq_set_affinity) {
+		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
 		return false;
+	}
+
+	/*
+	 * No move required, if:
+	 *  - Interrupt is per cpu
+	 *  - Interrupt is not started
+	 *  - Affinity mask does not include this CPU.
+	 *
+	 * Note: Do not check desc->action as this might be a chained
+	 *	 interrupt.
+	 */
+	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
+		/*
+		 * If an irq move is pending, abort it if the dying CPU is
+		 * the sole target.
+		 */
+		irq_fixup_move_pending(desc, false);
+		return false;
+	}
+
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
+	/*
+	 * If there is a setaffinity pending, then try to reuse the pending
+	 * mask, so the last change of the affinity does not get lost. If
+	 * there is no move pending or the pending mask does not contain
+	 * any online CPU, use the current affinity mask.
+	 */
+	if (irq_fixup_move_pending(desc, true))
+		affinity = irq_desc_get_pending_mask(desc);
+	else
+		affinity = irq_data_get_affinity_mask(d);
+
+	/* Mask the chip for interrupts which cannot move in process context */
+	if (maskchip && chip->irq_mask)
+		chip->irq_mask(d);
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * If the interrupt is managed, then shut it down and leave
+		 * the affinity untouched.
+		 */
+		if (irqd_affinity_is_managed(d)) {
+			irqd_set_managed_shutdown(d);
+			irq_shutdown(desc);
+			return false;
+		}
 		affinity = cpu_online_mask;
-		ret = true;
+		brokeaff = true;
 	}
 
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity) {
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	} else {
-		int r = irq_do_set_affinity(d, affinity, false);
-		if (r)
-			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
-					    d->irq, r);
+	err = irq_do_set_affinity(d, affinity, true);
+	if (err) {
+		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+				    d->irq, err);
+		brokeaff = false;
 	}
 
-	return ret;
+	if (maskchip && chip->irq_unmask)
+		chip->irq_unmask(d);
+
+	return brokeaff;
 }
 
 /**
@@ -59,11 +121,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
  */
 void irq_migrate_all_off_this_cpu(void)
 {
-	unsigned int irq;
 	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
+	unsigned int irq;
 
 	for_each_active_irq(irq) {
 		bool affinity_broken;
@@ -73,10 +132,53 @@ void irq_migrate_all_off_this_cpu(void)
 		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
 
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+		if (affinity_broken) {
+			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					    irq, smp_processor_id());
+		}
+	}
+}
+
+static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = irq_data_get_affinity_mask(data);
+
+	if (!irqd_affinity_is_managed(data) || !desc->action ||
+	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
+		return;
+
+	if (irqd_is_managed_and_shutdown(data)) {
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		return;
+	}
+
+	/*
+	 * If the interrupt can only be directed to a single target
+	 * CPU then it is already assigned to a CPU in the affinity
+	 * mask. No point in trying to move it around.
+	 */
+	if (!irqd_is_single_target(data))
+		irq_set_affinity_locked(data, affinity, false);
+}
+
+/**
+ * irq_affinity_online_cpu - Restore affinity for managed interrupts
+ * @cpu:	Upcoming CPU for which interrupts should be restored
+ */
+int irq_affinity_online_cpu(unsigned int cpu)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+
+	irq_lock_sparse();
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		irq_restore_affinity_of_irq(desc, cpu);
+		raw_spin_unlock_irq(&desc->lock);
 	}
+	irq_unlock_sparse();
 
-	local_irq_restore(flags);
+	return 0;
 }
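
Note: irq_migrate_all_off_this_cpu() is driven from the architecture's CPU teardown path and, after this change, relies on the caller already running with interrupts disabled (e.g. from stop_machine()), which is why the local_irq_save()/local_irq_restore() pair is gone. A minimal sketch of such a caller follows; the __cpu_disable() shape is an illustrative assumption, not the upstream arch code.

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/smp.h>

/* Sketch only: a hypothetical arch CPU-offline hook using the helper above. */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Drop the dying CPU from the online mask first ... */
	set_cpu_online(cpu, false);

	/*
	 * ... then retarget every interrupt currently affine to this CPU,
	 * or shut down managed interrupts whose entire mask went offline.
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}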
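
Note: irq_affinity_online_cpu() is meant to be invoked by the CPU hotplug state machine on each upcoming CPU so that managed interrupts shut down during unplug are restarted. Upstream wires it up via a static hotplug state entry; the dynamic registration below is only a sketch of the same idea, with irq_affinity_cpuhp_init() being a hypothetical name, assuming the standard cpuhp_setup_state() API.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/irq.h>

/* Sketch only: equivalent dynamic hookup of the online callback. */
static int __init irq_affinity_cpuhp_init(void)
{
	int ret;

	/* Run irq_affinity_online_cpu() on every CPU that comes online. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irq/affinity:online",
				irq_affinity_online_cpu, NULL);
	return ret < 0 ? ret : 0;
}
core_initcall(irq_affinity_cpuhp_init);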