Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--   kernel/irq/chip.c | 154
 1 file changed, 133 insertions, 21 deletions

diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c9c0601f0615..03099d521f5e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -37,6 +37,12 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
         irq_chip_set_defaults(chip);
         desc->irq_data.chip = chip;
         irq_put_desc_unlock(desc, flags);
+        /*
+         * For !CONFIG_SPARSE_IRQ make the irq show up in
+         * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
+         * already marked, and this call is harmless.
+         */
+        irq_reserve_irq(irq);
         return 0;
 }
 EXPORT_SYMBOL(irq_set_chip);
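With this hunk, irq_set_chip() also reserves the interrupt, so in the !CONFIG_SPARSE_IRQ case the irq becomes visible in allocated_irqs and is therefore picked up by for_each_active_irq() (which the irq_cpu_online()/irq_cpu_offline() helpers added further down rely on). As a rough sketch of the kind of call site this affects: the code below is illustrative only and not part of this patch; my_chip, my_mask_irq/my_unmask_irq and MY_IRQ_BASE are hypothetical names, and it assumes the irq_set_handler() helper from the same genirq rework.

/* Hypothetical interrupt controller setup (illustration only). */
#include <linux/irq.h>

#define MY_IRQ_BASE     64      /* made-up hardware irq number */

static void my_mask_irq(struct irq_data *d)
{
        /* would write to the hardware mask register here */
}

static void my_unmask_irq(struct irq_data *d)
{
        /* would write to the hardware unmask register here */
}

static struct irq_chip my_chip = {
        .name           = "MYCHIP",
        .irq_mask       = my_mask_irq,
        .irq_unmask     = my_unmask_irq,
};

static void __init my_init_irqs(void)
{
        unsigned int irq;

        for (irq = MY_IRQ_BASE; irq < MY_IRQ_BASE + 16; irq++) {
                /*
                 * irq_set_chip() now also reserves the irq, so it shows
                 * up in allocated_irqs even without SPARSE_IRQ.
                 */
                irq_set_chip(irq, &my_chip);
                irq_set_handler(irq, handle_level_irq);
        }
}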
@@ -134,25 +140,25 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-        desc->istate &= ~IRQS_DISABLED;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
         irq_compat_clr_disabled(desc);
 }
 
 static void irq_state_set_disabled(struct irq_desc *desc)
 {
-        desc->istate |= IRQS_DISABLED;
+        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
         irq_compat_set_disabled(desc);
 }
 
 static void irq_state_clr_masked(struct irq_desc *desc)
 {
-        desc->istate &= ~IRQS_MASKED;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
         irq_compat_clr_masked(desc);
 }
 
 static void irq_state_set_masked(struct irq_desc *desc)
 {
-        desc->istate |= IRQS_MASKED;
+        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
         irq_compat_set_masked(desc);
 }
 
@@ -372,11 +378,11 @@ void handle_nested_irq(unsigned int irq)
         kstat_incr_irqs_this_cpu(irq, desc);
 
         action = desc->action;
-        if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         irq_compat_set_progress(desc);
-        desc->istate |= IRQS_INPROGRESS;
+        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         raw_spin_unlock_irq(&desc->lock);
 
         action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -384,7 +390,7 @@ handle_nested_irq(unsigned int irq)
         note_interrupt(irq, desc, action_ret);
 
         raw_spin_lock_irq(&desc->lock);
-        desc->istate &= ~IRQS_INPROGRESS;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         irq_compat_clr_progress(desc);
 
 out_unlock:
@@ -416,14 +422,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
         raw_spin_lock(&desc->lock);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out_unlock;
 
         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
         kstat_incr_irqs_this_cpu(irq, desc);
 
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         handle_irq_event(desc);
@@ -448,7 +454,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         raw_spin_lock(&desc->lock);
         mask_ack_irq(desc);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out_unlock;
 
@@ -459,12 +465,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
          * If its disabled or no action available
          * keep it masked and get out of here
          */
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         handle_irq_event(desc);
 
-        if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
+        if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
                 unmask_irq(desc);
 out_unlock:
         raw_spin_unlock(&desc->lock);
@@ -496,7 +502,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
         raw_spin_lock(&desc->lock);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out;
 
@@ -507,7 +513,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
          * If its disabled or no action available
          * then mask it and get out of here:
          */
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                 irq_compat_set_pending(desc);
                 desc->istate |= IRQS_PENDING;
                 mask_irq(desc);
@@ -558,8 +564,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
          * we shouldn't process the IRQ. Mark it pending, handle
          * the necessary masking and go out
          */
-        if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
-                      !desc->action))) {
+        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                 if (!irq_check_poll(desc)) {
                         irq_compat_set_pending(desc);
                         desc->istate |= IRQS_PENDING;
@@ -584,20 +590,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                  * Renable it, if it was not disabled in meantime.
                  */
                 if (unlikely(desc->istate & IRQS_PENDING)) {
-                        if (!(desc->istate & IRQS_DISABLED) &&
-                            (desc->istate & IRQS_MASKED))
+                        if (!irqd_irq_disabled(&desc->irq_data) &&
+                            irqd_irq_masked(&desc->irq_data))
                                 unmask_irq(desc);
                 }
 
                 handle_irq_event(desc);
 
         } while ((desc->istate & IRQS_PENDING) &&
-                 !(desc->istate & IRQS_DISABLED));
+                 !irqd_irq_disabled(&desc->irq_data));
 
 out_unlock:
         raw_spin_unlock(&desc->lock);
 }
 
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar as the above handle_edge_irq, but using eoi and w/o the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+        struct irq_chip *chip = irq_desc_get_chip(desc);
+
+        raw_spin_lock(&desc->lock);
+
+        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+        /*
+         * If we're currently running this IRQ, or its disabled,
+         * we shouldn't process the IRQ. Mark it pending, handle
+         * the necessary masking and go out
+         */
+        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+                if (!irq_check_poll(desc)) {
+                        desc->istate |= IRQS_PENDING;
+                        goto out_eoi;
+                }
+        }
+        kstat_incr_irqs_this_cpu(irq, desc);
+
+        do {
+                if (unlikely(!desc->action))
+                        goto out_eoi;
+
+                handle_irq_event(desc);
+
+        } while ((desc->istate & IRQS_PENDING) &&
+                 !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+        chip->irq_eoi(&desc->irq_data);
+        raw_spin_unlock(&desc->lock);
+}
+#endif
+
 /**
  * handle_percpu_irq - Per CPU local irq handler
  * @irq: the interrupt number
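handle_edge_eoi_irq() is only built when the architecture selects CONFIG_IRQ_EDGE_EOI_HANDLER. It is meant for edge-type controllers that signal completion with an EOI but cannot mask individual sources, so the flow never masks and issues the EOI on every exit path instead. Below is a hedged sketch of how a platform might install it, assuming the irq_set_chip_and_handler() helper from the same genirq rework; the my_* names are made up for illustration and are not part of this patch.

/* Illustration only: an edge chip that can EOI but not mask per source. */
#include <linux/irq.h>

static void my_eoi(struct irq_data *d)
{
        /* hypothetical: tell the hardware this source has been handled */
}

static struct irq_chip my_eoi_only_chip = {
        .name           = "MY-EOI-PIC",
        .irq_eoi        = my_eoi,
};

static void my_map_source(unsigned int irq)
{
        /* edge semantics, completion via EOI, no mask/unmask available */
        irq_set_chip_and_handler(irq, &my_eoi_only_chip, handle_edge_eoi_irq);
}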
@@ -642,8 +693,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
         if (handle == handle_bad_irq) {
                 if (desc->irq_data.chip != &no_irq_chip)
                         mask_ack_irq(desc);
-                irq_compat_set_disabled(desc);
-                desc->istate |= IRQS_DISABLED;
+                irq_state_set_disabled(desc);
                 desc->depth = 1;
         }
         desc->handle_irq = handle;
@@ -684,8 +734,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
                 irqd_set(&desc->irq_data, IRQD_PER_CPU);
         if (irq_settings_can_move_pcntxt(desc))
                 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
+        if (irq_settings_is_level(desc))
+                irqd_set(&desc->irq_data, IRQD_LEVEL);
 
         irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 
         irq_put_desc_unlock(desc, flags);
 }
+
+/**
+ * irq_cpu_online - Invoke all irq_cpu_online functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_online()
+ * for each.
+ */
+void irq_cpu_online(void)
+{
+        struct irq_desc *desc;
+        struct irq_chip *chip;
+        unsigned long flags;
+        unsigned int irq;
+
+        for_each_active_irq(irq) {
+                desc = irq_to_desc(irq);
+                if (!desc)
+                        continue;
+
+                raw_spin_lock_irqsave(&desc->lock, flags);
+
+                chip = irq_data_get_irq_chip(&desc->irq_data);
+                if (chip && chip->irq_cpu_online &&
+                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+                     !irqd_irq_disabled(&desc->irq_data)))
+                        chip->irq_cpu_online(&desc->irq_data);
+
+                raw_spin_unlock_irqrestore(&desc->lock, flags);
+        }
+}
+
+/**
+ * irq_cpu_offline - Invoke all irq_cpu_offline functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_offline()
+ * for each.
+ */
+void irq_cpu_offline(void)
+{
+        struct irq_desc *desc;
+        struct irq_chip *chip;
+        unsigned long flags;
+        unsigned int irq;
+
+        for_each_active_irq(irq) {
+                desc = irq_to_desc(irq);
+                if (!desc)
+                        continue;
+
+                raw_spin_lock_irqsave(&desc->lock, flags);
+
+                chip = irq_data_get_irq_chip(&desc->irq_data);
+                if (chip && chip->irq_cpu_offline &&
+                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+                     !irqd_irq_disabled(&desc->irq_data)))
+                        chip->irq_cpu_offline(&desc->irq_data);
+
+                raw_spin_unlock_irqrestore(&desc->lock, flags);
+        }
+}
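irq_cpu_online() and irq_cpu_offline() are intended to be called from architecture CPU hotplug code: they walk every active irq under desc->lock and forward to the chip's irq_cpu_online()/irq_cpu_offline() hooks, and a chip that sets IRQCHIP_ONOFFLINE_ENABLED in its flags only sees the callbacks for interrupts that are not disabled. A minimal sketch of a chip and its hotplug call sites follows; every my_* name is hypothetical, and where exactly the architecture invokes the helpers is an assumption, not something defined by this patch.

/* Illustration only; my_* names are hypothetical. */
#include <linux/irq.h>

static void my_irq_cpu_online(struct irq_data *d)
{
        /* e.g. route this source to the CPU that is coming online */
}

static void my_irq_cpu_offline(struct irq_data *d)
{
        /* e.g. migrate this source away from the CPU going down */
}

static struct irq_chip my_chip = {
        .name                   = "MYCHIP",
        .irq_cpu_online         = my_irq_cpu_online,
        .irq_cpu_offline        = my_irq_cpu_offline,
        /* only invoke the hooks for interrupts that are not disabled */
        .flags                  = IRQCHIP_ONOFFLINE_ENABLED,
};

/* Called from the architecture's CPU hotplug paths (placement assumed). */
static void my_cpu_coming_up(void)
{
        irq_cpu_online();
}

static void my_cpu_going_down(void)
{
        irq_cpu_offline();
}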