Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--  kernel/irq/chip.c | 195 ++++++++++++++++++++++++++++++++--------
 1 file changed, 157 insertions(+), 38 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c94da688ee9b..2e30d925a40d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -185,47 +185,162 @@ static void irq_state_set_masked(struct irq_desc *desc)
         irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc, bool resend)
+static void irq_state_clr_started(struct irq_desc *desc)
 {
-        int ret = 0;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
+}
 
-        irq_state_clr_disabled(desc);
-        desc->depth = 0;
+static void irq_state_set_started(struct irq_desc *desc)
+{
+        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
+}
+
+enum {
+        IRQ_STARTUP_NORMAL,
+        IRQ_STARTUP_MANAGED,
+        IRQ_STARTUP_ABORT,
+};
+
+#ifdef CONFIG_SMP
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+        struct irq_data *d = irq_desc_get_irq_data(desc);
+
+        if (!irqd_affinity_is_managed(d))
+                return IRQ_STARTUP_NORMAL;
+
+        irqd_clr_managed_shutdown(d);
 
-        irq_domain_activate_irq(&desc->irq_data);
-        if (desc->irq_data.chip->irq_startup) {
-                ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+        if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
+                /*
+                 * Catch code which fiddles with enable_irq() on a managed
+                 * and potentially shutdown IRQ. Chained interrupt
+                 * installment or irq auto probing should not happen on
+                 * managed irqs either. Emit a warning, break the affinity
+                 * and start it up as a normal interrupt.
+                 */
+                if (WARN_ON_ONCE(force))
+                        return IRQ_STARTUP_NORMAL;
+                /*
+                 * The interrupt was requested, but there is no online CPU
+                 * in its affinity mask. Put it into managed shutdown
+                 * state and let the cpu hotplug mechanism start it up once
+                 * a CPU in the mask becomes available.
+                 */
+                irqd_set_managed_shutdown(d);
+                return IRQ_STARTUP_ABORT;
+        }
+        return IRQ_STARTUP_MANAGED;
+}
+#else
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+        return IRQ_STARTUP_NORMAL;
+}
+#endif
+
+static int __irq_startup(struct irq_desc *desc)
+{
+        struct irq_data *d = irq_desc_get_irq_data(desc);
+        int ret = 0;
+
+        irq_domain_activate_irq(d);
+        if (d->chip->irq_startup) {
+                ret = d->chip->irq_startup(d);
+                irq_state_clr_disabled(desc);
                 irq_state_clr_masked(desc);
         } else {
                 irq_enable(desc);
         }
+        irq_state_set_started(desc);
+        return ret;
+}
+
+int irq_startup(struct irq_desc *desc, bool resend, bool force)
+{
+        struct irq_data *d = irq_desc_get_irq_data(desc);
+        struct cpumask *aff = irq_data_get_affinity_mask(d);
+        int ret = 0;
+
+        desc->depth = 0;
+
+        if (irqd_is_started(d)) {
+                irq_enable(desc);
+        } else {
+                switch (__irq_startup_managed(desc, aff, force)) {
+                case IRQ_STARTUP_NORMAL:
+                        ret = __irq_startup(desc);
+                        irq_setup_affinity(desc);
+                        break;
+                case IRQ_STARTUP_MANAGED:
+                        ret = __irq_startup(desc);
+                        irq_set_affinity_locked(d, aff, false);
+                        break;
+                case IRQ_STARTUP_ABORT:
+                        return 0;
+                }
+        }
         if (resend)
                 check_irq_resend(desc);
+
         return ret;
 }
 
+static void __irq_disable(struct irq_desc *desc, bool mask);
+
 void irq_shutdown(struct irq_desc *desc)
 {
-        irq_state_set_disabled(desc);
-        desc->depth = 1;
-        if (desc->irq_data.chip->irq_shutdown)
-                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-        else if (desc->irq_data.chip->irq_disable)
-                desc->irq_data.chip->irq_disable(&desc->irq_data);
-        else
-                desc->irq_data.chip->irq_mask(&desc->irq_data);
+        if (irqd_is_started(&desc->irq_data)) {
+                desc->depth = 1;
+                if (desc->irq_data.chip->irq_shutdown) {
+                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+                        irq_state_set_disabled(desc);
+                        irq_state_set_masked(desc);
+                } else {
+                        __irq_disable(desc, true);
+                }
+                irq_state_clr_started(desc);
+        }
+        /*
+         * This must be called even if the interrupt was never started up,
+         * because the activation can happen before the interrupt is
+         * available for request/startup. It has its own state tracking so
+         * it's safe to call it unconditionally.
+         */
         irq_domain_deactivate_irq(&desc->irq_data);
-        irq_state_set_masked(desc);
 }
 
 void irq_enable(struct irq_desc *desc)
 {
-        irq_state_clr_disabled(desc);
-        if (desc->irq_data.chip->irq_enable)
-                desc->irq_data.chip->irq_enable(&desc->irq_data);
-        else
-                desc->irq_data.chip->irq_unmask(&desc->irq_data);
-        irq_state_clr_masked(desc);
+        if (!irqd_irq_disabled(&desc->irq_data)) {
+                unmask_irq(desc);
+        } else {
+                irq_state_clr_disabled(desc);
+                if (desc->irq_data.chip->irq_enable) {
+                        desc->irq_data.chip->irq_enable(&desc->irq_data);
+                        irq_state_clr_masked(desc);
+                } else {
+                        unmask_irq(desc);
+                }
+        }
+}
+
+static void __irq_disable(struct irq_desc *desc, bool mask)
+{
+        if (irqd_irq_disabled(&desc->irq_data)) {
+                if (mask)
+                        mask_irq(desc);
+        } else {
+                irq_state_set_disabled(desc);
+                if (desc->irq_data.chip->irq_disable) {
+                        desc->irq_data.chip->irq_disable(&desc->irq_data);
+                        irq_state_set_masked(desc);
+                } else if (mask) {
+                        mask_irq(desc);
+                }
+        }
 }
 
 /**
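The hunk above replaces the unconditional startup with a three-way dispatch on the managed-affinity state. As a minimal sketch of the new calling convention, assuming the IRQ_RESEND/IRQ_NORESEND and IRQ_START_FORCE/IRQ_START_COND boolean aliases that accompany this series (expected in kernel/irq/internals.h, not shown in this diff):

    /* Assumed aliases for the two bool parameters: */
    #define IRQ_NORESEND        false
    #define IRQ_RESEND          true
    #define IRQ_START_COND      false
    #define IRQ_START_FORCE     true

    /* Conditional start (request path): a managed irq whose affinity
     * mask has no online CPU stays in managed-shutdown state. */
    static int demo_start_cond(struct irq_desc *desc)
    {
            return irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
    }

    /* Forced start (enable_irq(), chained setup): the same situation
     * trips WARN_ON_ONCE(force), breaks the affinity and falls back
     * to a normal startup instead of aborting. */
    static int demo_start_force(struct irq_desc *desc)
    {
            return irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
    }
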
@@ -250,13 +365,7 @@ void irq_enable(struct irq_desc *desc)
  */
 void irq_disable(struct irq_desc *desc)
 {
-        irq_state_set_disabled(desc);
-        if (desc->irq_data.chip->irq_disable) {
-                desc->irq_data.chip->irq_disable(&desc->irq_data);
-                irq_state_set_masked(desc);
-        } else if (irq_settings_disable_unlazy(desc)) {
-                mask_irq(desc);
-        }
+        __irq_disable(desc, irq_settings_disable_unlazy(desc));
 }
 
 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
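irq_disable() now funnels through __irq_disable(), with the mask argument taken from the per-interrupt unlazy setting. A hedged driver-side sketch of the difference, using a placeholder irq number (illustration only; real code picks one flavor):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static void demo_quiesce(unsigned int irq)
    {
            /* Default lazy disable: only IRQD_IRQ_DISABLED is set; the
             * line is masked later, from the flow handler, if an
             * interrupt arrives anyway. */
            disable_irq(irq);

            /* Opting out of lazy mode makes disable_irq() reach
             * __irq_disable(desc, true), masking the line immediately. */
            irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
            disable_irq(irq);
    }
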
@@ -279,18 +388,21 @@ void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
-        if (desc->irq_data.chip->irq_mask_ack)
+        if (desc->irq_data.chip->irq_mask_ack) {
                 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
-        else {
-                desc->irq_data.chip->irq_mask(&desc->irq_data);
+                irq_state_set_masked(desc);
+        } else {
+                mask_irq(desc);
                 if (desc->irq_data.chip->irq_ack)
                         desc->irq_data.chip->irq_ack(&desc->irq_data);
         }
-        irq_state_set_masked(desc);
 }
 
 void mask_irq(struct irq_desc *desc)
 {
+        if (irqd_irq_masked(&desc->irq_data))
+                return;
+
         if (desc->irq_data.chip->irq_mask) {
                 desc->irq_data.chip->irq_mask(&desc->irq_data);
                 irq_state_set_masked(desc);
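mask_ack_irq() now routes the split case through mask_irq(), so the IRQD_IRQ_MASKED bookkeeping happens in exactly one place. A sketch of the two irq_chip shapes this distinguishes, with invented demo_* callbacks:

    static void demo_mask_ack(struct irq_data *d) { /* one write masks + acks */ }
    static void demo_mask(struct irq_data *d)     { /* set mask bit */ }
    static void demo_ack(struct irq_data *d)      { /* clear pending bit */ }
    static void demo_unmask(struct irq_data *d)   { /* clear mask bit */ }

    /* Combined callback: the core calls ->irq_mask_ack() directly and
     * sets IRQD_IRQ_MASKED itself. */
    static struct irq_chip demo_chip_combined = {
            .name           = "demo-combined",
            .irq_mask_ack   = demo_mask_ack,
            .irq_unmask     = demo_unmask,
    };

    /* Split callbacks: the core goes through mask_irq(), which tracks
     * the state, then the optional ->irq_ack(). */
    static struct irq_chip demo_chip_split = {
            .name           = "demo-split",
            .irq_mask       = demo_mask,
            .irq_ack        = demo_ack,
            .irq_unmask     = demo_unmask,
    };
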
@@ -299,6 +411,9 @@ void mask_irq(struct irq_desc *desc)
 
 void unmask_irq(struct irq_desc *desc)
 {
+        if (!irqd_irq_masked(&desc->irq_data))
+                return;
+
         if (desc->irq_data.chip->irq_unmask) {
                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
                 irq_state_clr_masked(desc);
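Together with the matching check in mask_irq() above, unmask_irq() is now idempotent against the tracked masked state, so redundant calls no longer reach the chip. Illustrative sketch using the names from this file:

    static void demo_double_mask(struct irq_desc *desc)
    {
            mask_irq(desc);         /* ->irq_mask() runs, state recorded */
            mask_irq(desc);         /* early return: already IRQD_IRQ_MASKED */
            unmask_irq(desc);       /* ->irq_unmask() runs, state cleared */
            unmask_irq(desc);       /* early return: not masked */
    }
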
@@ -312,10 +427,7 @@ void unmask_threaded_irq(struct irq_desc *desc)
         if (chip->flags & IRQCHIP_EOI_THREADED)
                 chip->irq_eoi(&desc->irq_data);
 
-        if (chip->irq_unmask) {
-                chip->irq_unmask(&desc->irq_data);
-                irq_state_clr_masked(desc);
-        }
+        unmask_irq(desc);
 }
 
 /*
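unmask_threaded_irq() drops its private unmask copy and reuses unmask_irq(), inheriting the masked-state check; for IRQCHIP_EOI_THREADED chips the EOI is still sent before the unmask, as the hunk shows. A hypothetical chip relying on that ordering (demo_* names are invented):

    static void demo_eoi(struct irq_data *d)    { /* write EOI register */ }
    static void demo_mask(struct irq_data *d)   { /* set mask bit */ }
    static void demo_unmask(struct irq_data *d) { /* clear mask bit */ }

    static struct irq_chip demo_eoi_chip = {
            .name           = "demo-eoi",
            .irq_mask       = demo_mask,
            .irq_unmask     = demo_unmask,
            .irq_eoi        = demo_eoi,
            .flags          = IRQCHIP_EOI_THREADED,
    };
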
@@ -851,7 +963,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                 irq_settings_set_norequest(desc);
                 irq_settings_set_nothread(desc);
                 desc->action = &chained_action;
-                irq_startup(desc, true);
+                irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
         }
 }
 
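Installing a chained handler is a legitimate forced start, hence IRQ_START_FORCE at this call site; on a managed interrupt that path warns once and starts the irq normally, as shown in the first hunk. A hedged sketch of the public API that leads here, with invented demo_* names:

    #include <linux/irq.h>

    static void demo_demux_handler(struct irq_desc *desc)
    {
            /* demultiplex and dispatch child interrupts here */
    }

    static void demo_install_chain(unsigned int parent_irq, void *chip_data)
    {
            /* ends up in __irq_do_set_handler() and thus in
             * irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE) */
            irq_set_chained_handler_and_data(parent_irq, demo_demux_handler,
                                             chip_data);
    }
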
@@ -903,6 +1015,13 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 
         if (!desc)
                 return;
+
+        /*
+         * Warn when a driver sets the no autoenable flag on an already
+         * active interrupt.
+         */
+        WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
+
         irq_settings_clr_and_set(desc, clr, set);
 
         irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
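The new WARN_ON_ONCE flags drivers that set IRQ_NOAUTOEN on an interrupt that is already active (depth == 0), which is always an ordering bug. A sketch of both orderings with invented demo_* names:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* Correct: set the flag before request_irq(), start explicitly later. */
    static int demo_setup(unsigned int irq, void *dev)
    {
            int ret;

            irq_set_status_flags(irq, IRQ_NOAUTOEN);
            ret = request_irq(irq, demo_handler, 0, "demo", dev);
            if (ret)
                    return ret;
            enable_irq(irq);        /* when the device is ready */
            return 0;
    }

    /* Buggy: request_irq() already auto-enabled the interrupt, so the
     * flag arrives too late and now triggers the warning above. */
    static int demo_setup_buggy(unsigned int irq, void *dev)
    {
            int ret = request_irq(irq, demo_handler, 0, "demo", dev);

            irq_set_status_flags(irq, IRQ_NOAUTOEN);
            return ret;
    }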