Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/irq.c  617
1 file changed, 3 insertions, 614 deletions
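
Note: the two helpers kept by this patch now take the hardware interrupt number from the generic struct irq_data instead of the removed powerpc irq_map[] table. The snippet below is a minimal sketch that simply restates the post-patch bodies from the "+" side of the diff (assuming the usual linux/irq.h declarations); it is not part of the patch itself.

#include <linux/irq.h>
#include <linux/export.h>

/* hwirq is now stored in the generic irq_data, so no powerpc-specific
 * reverse map needs to be consulted. */
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	/* Unmapped virq: warn and report hwirq 0. */
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
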
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 01e2877e8e04..bdfb3eee3e6f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -490,409 +490,19 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-
-/*
- * IRQ controller and virtual interrupts
- */
-
-/* The main irq map itself is an array of NR_IRQ entries containing the
- * associate host and irq number. An entry with a host of NULL is free.
- * An entry can be allocated if it's free, the allocator always then sets
- * hwirq first to the host's invalid irq number and then fills ops.
- */
-struct irq_map_entry {
-	irq_hw_number_t hwirq;
-	struct irq_host *host;
-};
-
-static LIST_HEAD(irq_hosts);
-static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static DEFINE_MUTEX(revmap_trees_mutex);
-static struct irq_map_entry irq_map[NR_IRQS];
-static unsigned int irq_virq_count = NR_IRQS;
-static struct irq_host *irq_default_host;
-
 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
-	return irq_map[d->irq].hwirq;
+	return d->hwirq;
 }
 EXPORT_SYMBOL_GPL(irqd_to_hwirq);
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
-	return irq_map[virq].hwirq;
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
 
-bool virq_is_host(unsigned int virq, struct irq_host *host)
-{
-	return irq_map[virq].host == host;
-}
-EXPORT_SYMBOL_GPL(virq_is_host);
-
-static int default_irq_host_match(struct irq_host *h, struct device_node *np)
-{
-	return h->of_node != NULL && h->of_node == np;
-}
-
-struct irq_host *irq_alloc_host(struct device_node *of_node,
-				unsigned int revmap_type,
-				unsigned int revmap_arg,
-				struct irq_host_ops *ops,
-				irq_hw_number_t inval_irq)
-{
-	struct irq_host *host;
-	unsigned int size = sizeof(struct irq_host);
-	unsigned int i;
-	unsigned int *rmap;
-	unsigned long flags;
-
-	/* Allocate structure and revmap table if using linear mapping */
-	if (revmap_type == IRQ_HOST_MAP_LINEAR)
-		size += revmap_arg * sizeof(unsigned int);
-	host = kzalloc(size, GFP_KERNEL);
-	if (host == NULL)
-		return NULL;
-
-	/* Fill structure */
-	host->revmap_type = revmap_type;
-	host->inval_irq = inval_irq;
-	host->ops = ops;
-	host->of_node = of_node_get(of_node);
-
-	if (host->ops->match == NULL)
-		host->ops->match = default_irq_host_match;
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-	/* If it's a legacy controller, check for duplicates and
-	 * mark it as allocated (we use irq 0 host pointer for that
-	 */
-	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
-		if (irq_map[0].host != NULL) {
-			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-			of_node_put(host->of_node);
-			kfree(host);
-			return NULL;
-		}
-		irq_map[0].host = host;
-	}
-
-	list_add(&host->link, &irq_hosts);
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-
-	/* Additional setups per revmap type */
-	switch(revmap_type) {
-	case IRQ_HOST_MAP_LEGACY:
-		/* 0 is always the invalid number for legacy */
-		host->inval_irq = 0;
-		/* setup us as the host for all legacy interrupts */
-		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
-			irq_map[i].hwirq = i;
-			smp_wmb();
-			irq_map[i].host = host;
-			smp_wmb();
-
-			/* Legacy flags are left to default at this point,
-			 * one can then use irq_create_mapping() to
-			 * explicitly change them
-			 */
-			ops->map(host, i, i);
-
-			/* Clear norequest flags */
-			irq_clear_status_flags(i, IRQ_NOREQUEST);
-		}
-		break;
-	case IRQ_HOST_MAP_LINEAR:
-		rmap = (unsigned int *)(host + 1);
-		for (i = 0; i < revmap_arg; i++)
-			rmap[i] = NO_IRQ;
-		host->revmap_data.linear.size = revmap_arg;
-		smp_wmb();
-		host->revmap_data.linear.revmap = rmap;
-		break;
-	case IRQ_HOST_MAP_TREE:
-		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
-		break;
-	default:
-		break;
-	}
-
-	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
-
-	return host;
-}
-
-struct irq_host *irq_find_host(struct device_node *node)
-{
-	struct irq_host *h, *found = NULL;
-	unsigned long flags;
-
-	/* We might want to match the legacy controller last since
-	 * it might potentially be set to match all interrupts in
-	 * the absence of a device node. This isn't a problem so far
-	 * yet though...
-	 */
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-	list_for_each_entry(h, &irq_hosts, link)
-		if (h->ops->match(h, node)) {
-			found = h;
-			break;
-		}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-	return found;
-}
-EXPORT_SYMBOL_GPL(irq_find_host);
-
-void irq_set_default_host(struct irq_host *host)
-{
-	pr_debug("irq: Default host set to @0x%p\n", host);
-
-	irq_default_host = host;
-}
-
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NUM_ISA_INTERRUPTS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
-static int irq_setup_virq(struct irq_host *host, unsigned int virq,
-			    irq_hw_number_t hwirq)
-{
-	int res;
-
-	res = irq_alloc_desc_at(virq, 0);
-	if (res != virq) {
-		pr_debug("irq: -> allocating desc failed\n");
-		goto error;
-	}
-
-	/* map it */
-	smp_wmb();
-	irq_map[virq].hwirq = hwirq;
-	smp_mb();
-
-	if (host->ops->map(host, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
-		goto errdesc;
-	}
-
-	irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
-	return 0;
-
-errdesc:
-	irq_free_descs(virq, 1);
-error:
-	irq_free_virt(virq, 1);
-	return -1;
-}
-
-unsigned int irq_create_direct_mapping(struct irq_host *host)
-{
-	unsigned int virq;
-
-	if (host == NULL)
-		host = irq_default_host;
-
-	BUG_ON(host == NULL);
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
-
-	virq = irq_alloc_virt(host, 1, 0);
-	if (virq == NO_IRQ) {
-		pr_debug("irq: create_direct virq allocation failed\n");
-		return NO_IRQ;
-	}
-
-	pr_debug("irq: create_direct obtained virq %d\n", virq);
-
-	if (irq_setup_virq(host, virq, virq))
-		return NO_IRQ;
-
-	return virq;
-}
-
-unsigned int irq_create_mapping(struct irq_host *host,
-				irq_hw_number_t hwirq)
-{
-	unsigned int virq, hint;
-
-	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL) {
-		printk(KERN_WARNING "irq_create_mapping called for"
-		       " NULL host, hwirq=%lx\n", hwirq);
-		WARN_ON(1);
-		return NO_IRQ;
-	}
-	pr_debug("irq: -> using host @%p\n", host);
-
-	/* Check if mapping already exists */
-	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ) {
-		pr_debug("irq: -> existing mapping on virq %d\n", virq);
-		return virq;
-	}
-
-	/* Get a virtual interrupt number */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
-		/* Handle legacy */
-		virq = (unsigned int)hwirq;
-		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
-			return NO_IRQ;
-		return virq;
-	} else {
-		/* Allocate a virtual interrupt number */
-		hint = hwirq % irq_virq_count;
-		virq = irq_alloc_virt(host, 1, hint);
-		if (virq == NO_IRQ) {
-			pr_debug("irq: -> virq allocation failed\n");
-			return NO_IRQ;
-		}
-	}
-
-	if (irq_setup_virq(host, virq, hwirq))
-		return NO_IRQ;
-
-	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
-		hwirq, host->of_node ? host->of_node->full_name : "null", virq);
-
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
-
-unsigned int irq_create_of_mapping(struct device_node *controller,
-				   const u32 *intspec, unsigned int intsize)
-{
-	struct irq_host *host;
-	irq_hw_number_t hwirq;
-	unsigned int type = IRQ_TYPE_NONE;
-	unsigned int virq;
-
-	if (controller == NULL)
-		host = irq_default_host;
-	else
-		host = irq_find_host(controller);
-	if (host == NULL) {
-		printk(KERN_WARNING "irq: no irq host found for %s !\n",
-		       controller->full_name);
-		return NO_IRQ;
-	}
-
-	/* If host has no translation, then we assume interrupt line */
-	if (host->ops->xlate == NULL)
-		hwirq = intspec[0];
-	else {
-		if (host->ops->xlate(host, controller, intspec, intsize,
-				     &hwirq, &type))
-			return NO_IRQ;
-	}
-
-	/* Create mapping */
-	virq = irq_create_mapping(host, hwirq);
-	if (virq == NO_IRQ)
-		return virq;
-
-	/* Set type if specified and different than the current one */
-	if (type != IRQ_TYPE_NONE &&
-	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
-		irq_set_irq_type(virq, type);
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
-
-void irq_dispose_mapping(unsigned int virq)
-{
-	struct irq_host *host;
-	irq_hw_number_t hwirq;
-
-	if (virq == NO_IRQ)
-		return;
-
-	host = irq_map[virq].host;
-	if (WARN_ON(host == NULL))
-		return;
-
-	/* Never unmap legacy interrupts */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
-		return;
-
-	irq_set_status_flags(virq, IRQ_NOREQUEST);
-
-	/* remove chip and handler */
-	irq_set_chip_and_handler(virq, NULL, NULL);
-
-	/* Make sure it's completed */
-	synchronize_irq(virq);
-
-	/* Tell the PIC about it */
-	if (host->ops->unmap)
-		host->ops->unmap(host, virq);
-	smp_mb();
-
-	/* Clear reverse map */
-	hwirq = irq_map[virq].hwirq;
-	switch(host->revmap_type) {
-	case IRQ_HOST_MAP_LINEAR:
-		if (hwirq < host->revmap_data.linear.size)
-			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
-		break;
-	case IRQ_HOST_MAP_TREE:
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-		break;
-	}
-
-	/* Destroy map */
-	smp_mb();
-	irq_map[virq].hwirq = host->inval_irq;
-
-	irq_free_descs(virq, 1);
-	/* Free it */
-	irq_free_virt(virq, 1);
-}
-EXPORT_SYMBOL_GPL(irq_dispose_mapping);
-
-unsigned int irq_find_mapping(struct irq_host *host,
-			      irq_hw_number_t hwirq)
-{
-	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL)
-		return NO_IRQ;
-
-	/* legacy -> bail early */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
-		return hwirq;
-
-	/* Slow path does a linear search of the map */
-	if (hint < NUM_ISA_INTERRUPTS)
-		hint = NUM_ISA_INTERRUPTS;
-	i = hint;
-	do {
-		if (irq_map[i].host == host &&
-		    irq_map[i].hwirq == hwirq)
-			return i;
-		i++;
-		if (i >= irq_virq_count)
-			i = NUM_ISA_INTERRUPTS;
-	} while(i != hint);
-	return NO_IRQ;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
-
 #ifdef CONFIG_SMP
 int irq_choose_cpu(const struct cpumask *mask)
 {
@@ -929,232 +539,11 @@ int irq_choose_cpu(const struct cpumask *mask)
 }
 #endif
 
-unsigned int irq_radix_revmap_lookup(struct irq_host *host,
-				     irq_hw_number_t hwirq)
-{
-	struct irq_map_entry *ptr;
-	unsigned int virq;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
-		return irq_find_mapping(host, hwirq);
-
-	/*
-	 * The ptr returned references the static global irq_map.
-	 * but freeing an irq can delete nodes along the path to
-	 * do the lookup via call_rcu.
-	 */
-	rcu_read_lock();
-	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
-	rcu_read_unlock();
-
-	/*
-	 * If found in radix tree, then fine.
-	 * Else fallback to linear lookup - this should not happen in practice
-	 * as it means that we failed to insert the node in the radix tree.
-	 */
-	if (ptr)
-		virq = ptr - irq_map;
-	else
-		virq = irq_find_mapping(host, hwirq);
-
-	return virq;
-}
-
-void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
-			     irq_hw_number_t hwirq)
-{
-	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
-		return;
-
-	if (virq != NO_IRQ) {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&host->revmap_data.tree, hwirq,
-				  &irq_map[virq]);
-		mutex_unlock(&revmap_trees_mutex);
-	}
-}
-
-unsigned int irq_linear_revmap(struct irq_host *host,
-			       irq_hw_number_t hwirq)
-{
-	unsigned int *revmap;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check revmap bounds */
-	if (unlikely(hwirq >= host->revmap_data.linear.size))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check if revmap was allocated */
-	revmap = host->revmap_data.linear.revmap;
-	if (unlikely(revmap == NULL))
-		return irq_find_mapping(host, hwirq);
-
-	/* Fill up revmap with slow path if no mapping found */
-	if (unlikely(revmap[hwirq] == NO_IRQ))
-		revmap[hwirq] = irq_find_mapping(host, hwirq);
-
-	return revmap[hwirq];
-}
-
-unsigned int irq_alloc_virt(struct irq_host *host,
-			    unsigned int count,
-			    unsigned int hint)
-{
-	unsigned long flags;
-	unsigned int i, j, found = NO_IRQ;
-
-	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
-		return NO_IRQ;
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-	/* Use hint for 1 interrupt if any */
-	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
-	    hint < irq_virq_count && irq_map[hint].host == NULL) {
-		found = hint;
-		goto hint_found;
-	}
-
-	/* Look for count consecutive numbers in the allocatable
-	 * (non-legacy) space
-	 */
-	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
-		if (irq_map[i].host != NULL)
-			j = 0;
-		else
-			j++;
-
-		if (j == count) {
-			found = i - count + 1;
-			break;
-		}
-	}
-	if (found == NO_IRQ) {
-		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-		return NO_IRQ;
-	}
- hint_found:
-	for (i = found; i < (found + count); i++) {
-		irq_map[i].hwirq = host->inval_irq;
-		smp_wmb();
-		irq_map[i].host = host;
-	}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-	return found;
-}
-
-void irq_free_virt(unsigned int virq, unsigned int count)
-{
-	unsigned long flags;
-	unsigned int i;
-
-	WARN_ON (virq < NUM_ISA_INTERRUPTS);
-	WARN_ON (count == 0 || (virq + count) > irq_virq_count);
-
-	if (virq < NUM_ISA_INTERRUPTS) {
-		if (virq + count < NUM_ISA_INTERRUPTS)
-			return;
-		count =- NUM_ISA_INTERRUPTS - virq;
-		virq = NUM_ISA_INTERRUPTS;
-	}
-
-	if (count > irq_virq_count || virq > irq_virq_count - count) {
-		if (virq > irq_virq_count)
-			return;
-		count = irq_virq_count - virq;
-	}
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-	for (i = virq; i < (virq + count); i++) {
-		struct irq_host *host;
-
-		host = irq_map[i].host;
-		irq_map[i].hwirq = host->inval_irq;
-		smp_wmb();
-		irq_map[i].host = NULL;
-	}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
 int arch_early_irq_init(void)
 {
 	return 0;
 }
 
-#ifdef CONFIG_VIRQ_DEBUG
-static int virq_debug_show(struct seq_file *m, void *private)
-{
-	unsigned long flags;
-	struct irq_desc *desc;
-	const char *p;
-	static const char none[] = "none";
-	void *data;
-	int i;
-
-	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
-		   "chip name", "chip data", "host name");
-
-	for (i = 1; i < nr_irqs; i++) {
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
-		raw_spin_lock_irqsave(&desc->lock, flags);
-
-		if (desc->action && desc->action->handler) {
-			struct irq_chip *chip;
-
-			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
-
-			chip = irq_desc_get_chip(desc);
-			if (chip && chip->name)
-				p = chip->name;
-			else
-				p = none;
-			seq_printf(m, "%-15s ", p);
-
-			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, "0x%16p ", data);
-
-			if (irq_map[i].host && irq_map[i].host->of_node)
-				p = irq_map[i].host->of_node->full_name;
-			else
-				p = none;
-			seq_printf(m, "%s\n", p);
-		}
-
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
-
-	return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
-	.open = virq_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
-	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
-				NULL, &virq_debug_fops) == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-__initcall(irq_debugfs_init);
-#endif /* CONFIG_VIRQ_DEBUG */
-
 #ifdef CONFIG_PPC64
 static int __init setup_noirqdistrib(char *str)
 {