Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--	kernel/irq/chip.c	285
1 files changed, 134 insertions, 151 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c9c0601f0615..4af1e2b244cb 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,9 +34,14 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	irq_chip_set_defaults(chip);
 	desc->irq_data.chip = chip;
 	irq_put_desc_unlock(desc, flags);
+	/*
+	 * For !CONFIG_SPARSE_IRQ make the irq show up in
+	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
+	 * already marked, and this call is harmless.
+	 */
+	irq_reserve_irq(irq);
 	return 0;
 }
 EXPORT_SYMBOL(irq_set_chip);
@@ -134,26 +139,22 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-	desc->istate &= ~IRQS_DISABLED;
-	irq_compat_clr_disabled(desc);
+	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 }
 
 static void irq_state_set_disabled(struct irq_desc *desc)
 {
-	desc->istate |= IRQS_DISABLED;
-	irq_compat_set_disabled(desc);
+	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 }
 
 static void irq_state_clr_masked(struct irq_desc *desc)
 {
-	desc->istate &= ~IRQS_MASKED;
-	irq_compat_clr_masked(desc);
+	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
 static void irq_state_set_masked(struct irq_desc *desc)
 {
-	desc->istate |= IRQS_MASKED;
-	irq_compat_set_masked(desc);
+	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
 int irq_startup(struct irq_desc *desc)
@@ -203,126 +204,6 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-/* Temporary migration helpers */
-static void compat_irq_mask(struct irq_data *data)
-{
-	data->chip->mask(data->irq);
-}
-
-static void compat_irq_unmask(struct irq_data *data)
-{
-	data->chip->unmask(data->irq);
-}
-
-static void compat_irq_ack(struct irq_data *data)
-{
-	data->chip->ack(data->irq);
-}
-
-static void compat_irq_mask_ack(struct irq_data *data)
-{
-	data->chip->mask_ack(data->irq);
-}
-
-static void compat_irq_eoi(struct irq_data *data)
-{
-	data->chip->eoi(data->irq);
-}
-
-static void compat_irq_enable(struct irq_data *data)
-{
-	data->chip->enable(data->irq);
-}
-
-static void compat_irq_disable(struct irq_data *data)
-{
-	data->chip->disable(data->irq);
-}
-
-static void compat_irq_shutdown(struct irq_data *data)
-{
-	data->chip->shutdown(data->irq);
-}
-
-static unsigned int compat_irq_startup(struct irq_data *data)
-{
-	return data->chip->startup(data->irq);
-}
-
-static int compat_irq_set_affinity(struct irq_data *data,
-				   const struct cpumask *dest, bool force)
-{
-	return data->chip->set_affinity(data->irq, dest);
-}
-
-static int compat_irq_set_type(struct irq_data *data, unsigned int type)
-{
-	return data->chip->set_type(data->irq, type);
-}
-
-static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
-{
-	return data->chip->set_wake(data->irq, on);
-}
-
-static int compat_irq_retrigger(struct irq_data *data)
-{
-	return data->chip->retrigger(data->irq);
-}
-
-static void compat_bus_lock(struct irq_data *data)
-{
-	data->chip->bus_lock(data->irq);
-}
-
-static void compat_bus_sync_unlock(struct irq_data *data)
-{
-	data->chip->bus_sync_unlock(data->irq);
-}
-#endif
-
-/*
- * Fixup enable/disable function pointers
- */
-void irq_chip_set_defaults(struct irq_chip *chip)
-{
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	if (chip->enable)
-		chip->irq_enable = compat_irq_enable;
-	if (chip->disable)
-		chip->irq_disable = compat_irq_disable;
-	if (chip->shutdown)
-		chip->irq_shutdown = compat_irq_shutdown;
-	if (chip->startup)
-		chip->irq_startup = compat_irq_startup;
-	if (!chip->end)
-		chip->end = dummy_irq_chip.end;
-	if (chip->bus_lock)
-		chip->irq_bus_lock = compat_bus_lock;
-	if (chip->bus_sync_unlock)
-		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
-	if (chip->mask)
-		chip->irq_mask = compat_irq_mask;
-	if (chip->unmask)
-		chip->irq_unmask = compat_irq_unmask;
-	if (chip->ack)
-		chip->irq_ack = compat_irq_ack;
-	if (chip->mask_ack)
-		chip->irq_mask_ack = compat_irq_mask_ack;
-	if (chip->eoi)
-		chip->irq_eoi = compat_irq_eoi;
-	if (chip->set_affinity)
-		chip->irq_set_affinity = compat_irq_set_affinity;
-	if (chip->set_type)
-		chip->irq_set_type = compat_irq_set_type;
-	if (chip->set_wake)
-		chip->irq_set_wake = compat_irq_set_wake;
-	if (chip->retrigger)
-		chip->irq_retrigger = compat_irq_retrigger;
-#endif
-}
-
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)
@@ -372,11 +253,10 @@ void handle_nested_irq(unsigned int irq)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
-	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;
 
-	irq_compat_set_progress(desc);
-	desc->istate |= IRQS_INPROGRESS;
+	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);
 
 	action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -384,8 +264,7 @@ void handle_nested_irq(unsigned int irq)
 		note_interrupt(irq, desc, action_ret);
 
 	raw_spin_lock_irq(&desc->lock);
-	desc->istate &= ~IRQS_INPROGRESS;
-	irq_compat_clr_progress(desc);
+	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
@@ -416,14 +295,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;
 
 	handle_irq_event(desc);
@@ -448,7 +327,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out_unlock;
 
@@ -459,12 +338,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	 * If its disabled or no action available
 	 * keep it masked and get out of here
 	 */
-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;
 
 	handle_irq_event(desc);
 
-	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
+	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
 		unmask_irq(desc);
 out_unlock:
 	raw_spin_unlock(&desc->lock);
@@ -496,7 +375,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out;
 
@@ -507,8 +386,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	 * If its disabled or no action available
 	 * then mask it and get out of here:
 	 */
-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
-		irq_compat_set_pending(desc);
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 		desc->istate |= IRQS_PENDING;
 		mask_irq(desc);
 		goto out;
@@ -537,7 +415,7 @@ out:
  * @desc: the interrupt description structure for this irq
  *
  * Interrupt occures on the falling and/or rising edge of a hardware
- * signal. The occurence is latched into the irq controller hardware
+ * signal. The occurrence is latched into the irq controller hardware
  * and must be acked in order to be reenabled. After the ack another
  * interrupt can happen on the same source even before the first one
  * is handled by the associated event handler. If this happens it
@@ -558,10 +436,9 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	 * we shouldn't process the IRQ. Mark it pending, handle
 	 * the necessary masking and go out
 	 */
-	if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
-		      !desc->action))) {
+	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
 		if (!irq_check_poll(desc)) {
-			irq_compat_set_pending(desc);
 			desc->istate |= IRQS_PENDING;
 			mask_ack_irq(desc);
 			goto out_unlock;
@@ -584,20 +461,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		 * Renable it, if it was not disabled in meantime.
 		 */
 		if (unlikely(desc->istate & IRQS_PENDING)) {
-			if (!(desc->istate & IRQS_DISABLED) &&
-			    (desc->istate & IRQS_MASKED))
+			if (!irqd_irq_disabled(&desc->irq_data) &&
+			    irqd_irq_masked(&desc->irq_data))
 				unmask_irq(desc);
 		}
 
 		handle_irq_event(desc);
 
 	} while ((desc->istate & IRQS_PENDING) &&
-		 !(desc->istate & IRQS_DISABLED));
+		 !irqd_irq_disabled(&desc->irq_data));
 
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
 
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar as the above handle_edge_irq, but using eoi and w/o the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	raw_spin_lock(&desc->lock);
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+	/*
+	 * If we're currently running this IRQ, or its disabled,
+	 * we shouldn't process the IRQ. Mark it pending, handle
+	 * the necessary masking and go out
+	 */
+	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+		if (!irq_check_poll(desc)) {
+			desc->istate |= IRQS_PENDING;
+			goto out_eoi;
+		}
+	}
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	do {
+		if (unlikely(!desc->action))
+			goto out_eoi;
+
+		handle_irq_event(desc);
+
+	} while ((desc->istate & IRQS_PENDING) &&
+		 !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+	chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+#endif
+
 /**
  * handle_percpu_irq - Per CPU local irq handler
  * @irq: the interrupt number
@@ -642,8 +564,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	if (handle == handle_bad_irq) {
 		if (desc->irq_data.chip != &no_irq_chip)
 			mask_ack_irq(desc);
-		irq_compat_set_disabled(desc);
-		desc->istate |= IRQS_DISABLED;
+		irq_state_set_disabled(desc);
 		desc->depth = 1;
 	}
 	desc->handle_irq = handle;
@@ -684,8 +605,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
 	if (irq_settings_can_move_pcntxt(desc))
 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
+	if (irq_settings_is_level(desc))
+		irqd_set(&desc->irq_data, IRQD_LEVEL);
 
 	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 
 	irq_put_desc_unlock(desc, flags);
 }
+
+/**
+ * irq_cpu_online - Invoke all irq_cpu_online functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_online()
+ * for each.
+ */
+void irq_cpu_online(void)
+{
+	struct irq_desc *desc;
+	struct irq_chip *chip;
+	unsigned long flags;
+	unsigned int irq;
+
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		chip = irq_data_get_irq_chip(&desc->irq_data);
+		if (chip && chip->irq_cpu_online &&
+		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+		     !irqd_irq_disabled(&desc->irq_data)))
+			chip->irq_cpu_online(&desc->irq_data);
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}
+
+/**
+ * irq_cpu_offline - Invoke all irq_cpu_offline functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_offline()
+ * for each.
+ */
+void irq_cpu_offline(void)
+{
+	struct irq_desc *desc;
+	struct irq_chip *chip;
+	unsigned long flags;
+	unsigned int irq;
+
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		chip = irq_data_get_irq_chip(&desc->irq_data);
+		if (chip && chip->irq_cpu_offline &&
+		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+		     !irqd_irq_disabled(&desc->irq_data)))
+			chip->irq_cpu_offline(&desc->irq_data);
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}