Diffstat (limited to 'kernel/irq/chip.c')
 kernel/irq/chip.c | 35 +++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5a2ef92c2782..043bfc35b353 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -207,20 +207,24 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 		 * Catch code which fiddles with enable_irq() on a managed
 		 * and potentially shutdown IRQ. Chained interrupt
 		 * installment or irq auto probing should not happen on
-		 * managed irqs either. Emit a warning, break the affinity
-		 * and start it up as a normal interrupt.
+		 * managed irqs either.
 		 */
 		if (WARN_ON_ONCE(force))
-			return IRQ_STARTUP_NORMAL;
+			return IRQ_STARTUP_ABORT;
 		/*
 		 * The interrupt was requested, but there is no online CPU
 		 * in it's affinity mask. Put it into managed shutdown
 		 * state and let the cpu hotplug mechanism start it up once
 		 * a CPU in the mask becomes available.
 		 */
-		irqd_set_managed_shutdown(d);
 		return IRQ_STARTUP_ABORT;
 	}
+	/*
+	 * Managed interrupts have reserved resources, so this should not
+	 * happen.
+	 */
+	if (WARN_ON(irq_domain_activate_irq(d, false)))
+		return IRQ_STARTUP_ABORT;
 	return IRQ_STARTUP_MANAGED;
 }
 #else
@@ -236,7 +240,9 @@ static int __irq_startup(struct irq_desc *desc)
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 	int ret = 0;
 
-	irq_domain_activate_irq(d);
+	/* Warn if this interrupt is not activated but try nevertheless */
+	WARN_ON_ONCE(!irqd_is_activated(d));
+
 	if (d->chip->irq_startup) {
 		ret = d->chip->irq_startup(d);
 		irq_state_clr_disabled(desc);
@@ -269,6 +275,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 			ret = __irq_startup(desc);
 			break;
 		case IRQ_STARTUP_ABORT:
+			irqd_set_managed_shutdown(d);
 			return 0;
 		}
 	}
@@ -278,6 +285,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 	return ret;
 }
 
+int irq_activate(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+	if (!irqd_affinity_is_managed(d))
+		return irq_domain_activate_irq(d, false);
+	return 0;
+}
+
+void irq_activate_and_startup(struct irq_desc *desc, bool resend)
+{
+	if (WARN_ON(irq_activate(desc)))
+		return;
+	irq_startup(desc, resend, IRQ_START_FORCE);
+}
+
 static void __irq_disable(struct irq_desc *desc, bool mask);
 
 void irq_shutdown(struct irq_desc *desc)
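The two helpers added in this hunk are the core of the split: irq_activate() only asks the irqdomain to program its resources (and skips managed interrupts, which are now activated inside __irq_startup_managed()), while irq_startup() expects activation to have happened already. As a minimal sketch, assuming only the declarations from kernel/irq/internals.h, a hypothetical in-tree caller that spells out the two steps explicitly could look like this (example_bringup() is illustrative and not part of the patch):

#include <linux/irqdesc.h>
#include "internals.h"	/* assumed: irq_activate(), irq_startup(), IRQ_RESEND, IRQ_START_FORCE */

/*
 * Hypothetical caller, for illustration only: the same two steps that
 * irq_activate_and_startup() above performs, with explicit error handling.
 */
static int example_bringup(struct irq_desc *desc)
{
	int ret;

	/* Step 1: activation, i.e. reserve and program irqdomain resources. */
	ret = irq_activate(desc);
	if (ret)
		return ret;

	/* Step 2: startup, optionally resending a pending interrupt. */
	return irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
}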
@@ -953,7 +976,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
 		desc->action = &chained_action;
-		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
+		irq_activate_and_startup(desc, IRQ_RESEND);
 	}
 }
 
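The last hunk adjusts the call site that brings up chained interrupts: installing a chained handler now activates the parent interrupt (irq_activate_and_startup()) before starting it, rather than force-starting an interrupt that was never activated. For context, this is the path taken by the usual driver-side setup helper; a hedged sketch follows, where the demux handler, IRQ number and data pointer are made up, and only irq_set_chained_handler_and_data() plus the chained_irq helpers are existing kernel APIs:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

/* Hypothetical demultiplex flow handler for the chained parent interrupt. */
static void example_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* ... decode child interrupts and call generic_handle_irq() here ... */
	chained_irq_exit(chip, desc);
}

static void example_setup(unsigned int parent_irq, void *data)
{
	/*
	 * This ends up in __irq_do_set_handler(), which after this patch
	 * calls irq_activate_and_startup(desc, IRQ_RESEND) for the parent.
	 */
	irq_set_chained_handler_and_data(parent_irq, example_demux_handler, data);
}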