Diffstat (limited to 'arch')

 -rw-r--r--  arch/powerpc/Kconfig              1
 -rw-r--r--  arch/powerpc/include/asm/irq.h  144
 -rw-r--r--  arch/powerpc/kernel/irq.c       502

3 files changed, 1 insertions, 646 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1919634a9b32..303703d716fe 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
 	select IRQ_PER_CPU
+	select IRQ_DOMAIN
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
 	select IRQ_FORCED_THREADING
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index f80f262e0597..728cc30d04ea 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -42,155 +42,11 @@ extern atomic_t ppc_n_lost_interrupts;
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY	NUM_ISA_INTERRUPTS
 
-/*
- * The host code and data structures are fairly agnostic to the fact that
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart host->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
- */
-
 struct irq_data;
 extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**
- * irq_alloc_host - Allocate a new irq_domain data structure
- * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
- * @revmap_arg: for IRQ_DOMAIN_MAP_LINEAR linear only: size of the map
- * @ops: map/unmap host callbacks
- * @inval_irq: provide a hw number in that host space that is always invalid
- *
- * Allocates and initialize and irq_domain structure. Note that in the case of
- * IRQ_DOMAIN_MAP_LEGACY, the map() callback will be called before this returns
- * for all legacy interrupts except 0 (which is always the invalid irq for
- * a legacy controller). For a IRQ_DOMAIN_MAP_LINEAR, the map is allocated by
- * this call as well. For a IRQ_DOMAIN_MAP_TREE, the radix tree will be allocated
- * later during boot automatically (the reverse mapping will use the slow path
- * until that happens).
- */
-extern struct irq_domain *irq_alloc_host(struct device_node *of_node,
-					 unsigned int revmap_type,
-					 unsigned int revmap_arg,
-					 struct irq_domain_ops *ops,
-					 irq_hw_number_t inval_irq);
-
-
-/**
- * irq_find_host - Locates a host for a given device node
- * @node: device-tree node of the interrupt controller
- */
-extern struct irq_domain *irq_find_host(struct device_node *node);
-
-
-/**
- * irq_set_default_host - Set a "default" host
- * @host: default host pointer
- *
- * For convenience, it's possible to set a "default" host that will be used
- * whenever NULL is passed to irq_create_mapping(). It makes life easier for
- * platforms that want to manipulate a few hard coded interrupt numbers that
- * aren't properly represented in the device-tree.
- */
-extern void irq_set_default_host(struct irq_domain *host);
-
-
-/**
- * irq_set_virq_count - Set the maximum number of virt irqs
- * @count: number of linux virtual irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-extern void irq_set_virq_count(unsigned int count);
-
-
-/**
- * irq_create_mapping - Map a hardware interrupt into linux virq space
- * @host: host owning this hardware interrupt or NULL for default host
- * @hwirq: hardware irq number in that host space
- *
- * Only one mapping per hardware interrupt is permitted. Returns a linux
- * virq number.
- * If the sense/trigger is to be specified, set_irq_type() should be called
- * on the number returned from that call.
- */
-extern unsigned int irq_create_mapping(struct irq_domain *host,
-				       irq_hw_number_t hwirq);
-
-
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- */
-extern void irq_dispose_mapping(unsigned int virq);
-
-/**
- * irq_find_mapping - Find a linux virq from an hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
- */
-extern unsigned int irq_find_mapping(struct irq_domain *host,
-				     irq_hw_number_t hwirq);
-
-/**
- * irq_create_direct_mapping - Allocate a virq for direct mapping
- * @host: host to allocate the virq for or NULL for default host
- *
- * This routine is used for irq controllers which can choose the hardware
- * interrupt numbers they generate. In such a case it's simplest to use
- * the linux virq as the hardware interrupt number.
- */
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-
-/**
- * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
- * @host: host owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that host space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
-				    irq_hw_number_t hwirq);
-
-/**
- * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
-					    irq_hw_number_t hwirq);
-
-/**
- * irq_linear_revmap - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It does fallback to the slow path if the revmap doesn't exist
- * yet and will create the revmap entry with appropriate locking
- */
-
-extern unsigned int irq_linear_revmap(struct irq_domain *host,
-				      irq_hw_number_t hwirq);
-
-
-/**
  * irq_early_init - Init irq remapping subsystem
  */
 extern void irq_early_init(void);
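For context, the declarations removed above form the powerpc-private "irq host" API that this patch drops from the architecture header (presumably because an equivalent is now provided by the generic IRQ_DOMAIN code selected in the Kconfig hunk above). The sketch below is purely illustrative and not part of the patch: the mypic_* names, the 64-entry linear map size and the empty register callbacks are invented. It shows roughly how a board PIC driver of this era registered a host and mapped hardware interrupt numbers to Linux virqs with this API.

#include <linux/irq.h>
#include <linux/of.h>
#include <asm/irq.h>

/* Invented chip callbacks; a real driver would poke mask/unmask registers. */
static void mypic_mask(struct irq_data *d) { }
static void mypic_unmask(struct irq_data *d) { }

static struct irq_chip mypic_irq_chip = {
	.name		= "MYPIC",
	.irq_mask	= mypic_mask,
	.irq_unmask	= mypic_unmask,
};

/* Called once per hwirq when a mapping is created: attach chip and handler. */
static int mypic_map(struct irq_domain *h, unsigned int virq,
		     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &mypic_irq_chip, handle_level_irq);
	return 0;
}

/* Translate a one-cell device-tree interrupt specifier into a hwirq. */
static int mypic_xlate(struct irq_domain *h, struct device_node *ct,
		       const u32 *intspec, unsigned int intsize,
		       irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_domain_ops mypic_host_ops = {
	.map	= mypic_map,
	.xlate	= mypic_xlate,
};

static struct irq_domain *mypic_host;

static void __init mypic_init(struct device_node *np)
{
	unsigned int virq;

	/* Linear reverse map sized for 64 hardware sources (made-up count). */
	mypic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 64,
				    &mypic_host_ops, -1);
	if (!mypic_host)
		return;

	/* Each hardware source is then turned into a Linux virq on demand. */
	virq = irq_create_mapping(mypic_host, 3);
	if (virq != NO_IRQ)
		pr_debug("mypic: hwirq 3 mapped to virq %u\n", virq);
}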
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 269fbd5ac62f..e3673ff6b7a0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -486,17 +486,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-
-/*
- * IRQ controller and virtual interrupts
- */
-
-static LIST_HEAD(irq_domain_list);
-static DEFINE_MUTEX(irq_domain_mutex);
-static DEFINE_MUTEX(revmap_trees_mutex);
-static unsigned int irq_virq_count = NR_IRQS;
-static struct irq_domain *irq_default_host;
-
 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
@@ -510,362 +499,6 @@ irq_hw_number_t virq_to_hw(unsigned int virq)
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
 
-static int default_irq_host_match(struct irq_domain *h, struct device_node *np)
-{
-	return h->of_node != NULL && h->of_node == np;
-}
-
-struct irq_domain *irq_alloc_host(struct device_node *of_node,
-				  unsigned int revmap_type,
-				  unsigned int revmap_arg,
-				  struct irq_domain_ops *ops,
-				  irq_hw_number_t inval_irq)
-{
-	struct irq_domain *host, *h;
-	unsigned int size = sizeof(struct irq_domain);
-	unsigned int i;
-	unsigned int *rmap;
-
-	/* Allocate structure and revmap table if using linear mapping */
-	if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
-		size += revmap_arg * sizeof(unsigned int);
-	host = kzalloc(size, GFP_KERNEL);
-	if (host == NULL)
-		return NULL;
-
-	/* Fill structure */
-	host->revmap_type = revmap_type;
-	host->inval_irq = inval_irq;
-	host->ops = ops;
-	host->of_node = of_node_get(of_node);
-
-	if (host->ops->match == NULL)
-		host->ops->match = default_irq_host_match;
-
-	mutex_lock(&irq_domain_mutex);
-	/* Make sure only one legacy controller can be created */
-	if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
-		list_for_each_entry(h, &irq_domain_list, link) {
-			if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
-				mutex_unlock(&irq_domain_mutex);
-				of_node_put(host->of_node);
-				kfree(host);
-				return NULL;
-			}
-		}
-	}
-	list_add(&host->link, &irq_domain_list);
-	mutex_unlock(&irq_domain_mutex);
-
-	/* Additional setups per revmap type */
-	switch(revmap_type) {
-	case IRQ_DOMAIN_MAP_LEGACY:
-		/* 0 is always the invalid number for legacy */
-		host->inval_irq = 0;
-		/* setup us as the host for all legacy interrupts */
-		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
-			struct irq_data *irq_data = irq_get_irq_data(i);
-			irq_data->hwirq = i;
-			irq_data->domain = host;
-
-			/* Legacy flags are left to default at this point,
-			 * one can then use irq_create_mapping() to
-			 * explicitly change them
-			 */
-			ops->map(host, i, i);
-
-			/* Clear norequest flags */
-			irq_clear_status_flags(i, IRQ_NOREQUEST);
-		}
-		break;
-	case IRQ_DOMAIN_MAP_LINEAR:
-		rmap = (unsigned int *)(host + 1);
-		for (i = 0; i < revmap_arg; i++)
-			rmap[i] = NO_IRQ;
-		host->revmap_data.linear.size = revmap_arg;
-		host->revmap_data.linear.revmap = rmap;
-		break;
-	case IRQ_DOMAIN_MAP_TREE:
-		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
-		break;
-	default:
-		break;
-	}
-
-	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
-
-	return host;
-}
-
-struct irq_domain *irq_find_host(struct device_node *node)
-{
-	struct irq_domain *h, *found = NULL;
-
-	/* We might want to match the legacy controller last since
-	 * it might potentially be set to match all interrupts in
-	 * the absence of a device node. This isn't a problem so far
-	 * yet though...
-	 */
-	mutex_lock(&irq_domain_mutex);
-	list_for_each_entry(h, &irq_domain_list, link)
-		if (h->ops->match(h, node)) {
-			found = h;
-			break;
-		}
-	mutex_unlock(&irq_domain_mutex);
-	return found;
-}
-EXPORT_SYMBOL_GPL(irq_find_host);
-
-void irq_set_default_host(struct irq_domain *host)
-{
-	pr_debug("irq: Default host set to @0x%p\n", host);
-
-	irq_default_host = host;
-}
-
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NUM_ISA_INTERRUPTS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
-static int irq_setup_virq(struct irq_domain *host, unsigned int virq,
-			  irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-
-	irq_data->hwirq = hwirq;
-	irq_data->domain = host;
-	if (host->ops->map(host, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
-		irq_data->domain = NULL;
-		irq_data->hwirq = 0;
-		return -1;
-	}
-
-	irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
-	return 0;
-}
-
-unsigned int irq_create_direct_mapping(struct irq_domain *host)
-{
-	unsigned int virq;
-
-	if (host == NULL)
-		host = irq_default_host;
-
-	BUG_ON(host == NULL);
-	WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
-
-	virq = irq_alloc_desc_from(1, 0);
-	if (virq == NO_IRQ) {
-		pr_debug("irq: create_direct virq allocation failed\n");
-		return NO_IRQ;
-	}
-	if (virq >= irq_virq_count) {
-		pr_err("ERROR: no free irqs available below %i maximum\n",
-			irq_virq_count);
-		irq_free_desc(virq);
-		return 0;
-	}
-
-	pr_debug("irq: create_direct obtained virq %d\n", virq);
-
-	if (irq_setup_virq(host, virq, virq)) {
-		irq_free_desc(virq);
-		return NO_IRQ;
-	}
-
-	return virq;
-}
-
-unsigned int irq_create_mapping(struct irq_domain *host,
-				irq_hw_number_t hwirq)
-{
-	unsigned int virq, hint;
-
-	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL) {
-		printk(KERN_WARNING "irq_create_mapping called for"
-		       " NULL host, hwirq=%lx\n", hwirq);
-		WARN_ON(1);
-		return NO_IRQ;
-	}
-	pr_debug("irq: -> using host @%p\n", host);
-
-	/* Check if mapping already exists */
-	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ) {
-		pr_debug("irq: -> existing mapping on virq %d\n", virq);
-		return virq;
-	}
-
-	/* Get a virtual interrupt number */
-	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
-		/* Handle legacy */
-		virq = (unsigned int)hwirq;
-		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
-			return NO_IRQ;
-		return virq;
-	} else {
-		/* Allocate a virtual interrupt number */
-		hint = hwirq % irq_virq_count;
-		if (hint == 0)
-			hint = 1;
-		virq = irq_alloc_desc_from(hint, 0);
-		if (!virq)
-			virq = irq_alloc_desc_from(1, 0);
-		if (virq == NO_IRQ) {
-			pr_debug("irq: -> virq allocation failed\n");
-			return NO_IRQ;
-		}
-	}
-
-	if (irq_setup_virq(host, virq, hwirq)) {
-		if (host->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
-			irq_free_desc(virq);
-		return NO_IRQ;
-	}
-
-	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
-		hwirq, host->of_node ? host->of_node->full_name : "null", virq);
-
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
-
-unsigned int irq_create_of_mapping(struct device_node *controller,
-				   const u32 *intspec, unsigned int intsize)
-{
-	struct irq_domain *host;
-	irq_hw_number_t hwirq;
-	unsigned int type = IRQ_TYPE_NONE;
-	unsigned int virq;
-
-	if (controller == NULL)
-		host = irq_default_host;
-	else
-		host = irq_find_host(controller);
-	if (host == NULL) {
-		printk(KERN_WARNING "irq: no irq host found for %s !\n",
-		       controller->full_name);
-		return NO_IRQ;
-	}
-
-	/* If host has no translation, then we assume interrupt line */
-	if (host->ops->xlate == NULL)
-		hwirq = intspec[0];
-	else {
-		if (host->ops->xlate(host, controller, intspec, intsize,
-				     &hwirq, &type))
-			return NO_IRQ;
-	}
-
-	/* Create mapping */
-	virq = irq_create_mapping(host, hwirq);
-	if (virq == NO_IRQ)
-		return virq;
-
-	/* Set type if specified and different than the current one */
-	if (type != IRQ_TYPE_NONE &&
-	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
-		irq_set_irq_type(virq, type);
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
-
-void irq_dispose_mapping(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	struct irq_domain *host;
-	irq_hw_number_t hwirq;
-
-	if (virq == NO_IRQ || !irq_data)
-		return;
-
-	host = irq_data->domain;
-	if (WARN_ON(host == NULL))
-		return;
-
-	/* Never unmap legacy interrupts */
-	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return;
-
-	irq_set_status_flags(virq, IRQ_NOREQUEST);
-
-	/* remove chip and handler */
-	irq_set_chip_and_handler(virq, NULL, NULL);
-
-	/* Make sure it's completed */
-	synchronize_irq(virq);
-
-	/* Tell the PIC about it */
-	if (host->ops->unmap)
-		host->ops->unmap(host, virq);
-	smp_mb();
-
-	/* Clear reverse map */
-	hwirq = irq_data->hwirq;
-	switch(host->revmap_type) {
-	case IRQ_DOMAIN_MAP_LINEAR:
-		if (hwirq < host->revmap_data.linear.size)
-			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
-		break;
-	case IRQ_DOMAIN_MAP_TREE:
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-		break;
-	}
-
-	/* Destroy map */
-	irq_data->hwirq = host->inval_irq;
-
-	irq_free_desc(virq);
-}
-EXPORT_SYMBOL_GPL(irq_dispose_mapping);
-
-unsigned int irq_find_mapping(struct irq_domain *host,
-			      irq_hw_number_t hwirq)
-{
-	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL)
-		return NO_IRQ;
-
-	/* legacy -> bail early */
-	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return hwirq;
-
-	/* Slow path does a linear search of the map */
-	if (hint == 0)
-		hint = 1;
-	i = hint;
-	do {
-		struct irq_data *data = irq_get_irq_data(i);
-		if (data && (data->domain == host) && (data->hwirq == hwirq))
-			return i;
-		i++;
-		if (i >= irq_virq_count)
-			i = 1;
-	} while(i != hint);
-	return NO_IRQ;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
-
 #ifdef CONFIG_SMP
 int irq_choose_cpu(const struct cpumask *mask)
 {
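The hunk that follows removes the reverse-map fast paths (radix-tree and linear lookups). As context only, and not part of the patch, the sketch below shows the typical consumer of those lookups: a cascaded PIC's flow handler that converts the pending hardware source number into a Linux virq before handing it to the generic layer. The mypic_* names and the mypic_read_pending() accessor are invented.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/irq.h>

static struct irq_domain *mypic_host;	/* set up at init time (see earlier sketch) */

/* Invented accessor returning the pending hardware source number. */
static irq_hw_number_t mypic_read_pending(void)
{
	return 0;
}

/* Chained flow handler installed on the parent interrupt of the cascade. */
static void mypic_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq;

	/* Fast path: array lookup; falls back to the slow linear search. */
	virq = irq_linear_revmap(mypic_host, mypic_read_pending());
	if (virq != NO_IRQ)
		generic_handle_irq(virq);

	/* Acknowledge the cascade interrupt on the parent controller. */
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}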
@@ -902,146 +535,11 @@ int irq_choose_cpu(const struct cpumask *mask)
 }
 #endif
 
-unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
-				     irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
-		return irq_find_mapping(host, hwirq);
-
-	/*
-	 * Freeing an irq can delete nodes along the path to
-	 * do the lookup via call_rcu.
-	 */
-	rcu_read_lock();
-	irq_data = radix_tree_lookup(&host->revmap_data.tree, hwirq);
-	rcu_read_unlock();
-
-	/*
-	 * If found in radix tree, then fine.
-	 * Else fallback to linear lookup - this should not happen in practice
-	 * as it means that we failed to insert the node in the radix tree.
-	 */
-	return irq_data ? irq_data->irq : irq_find_mapping(host, hwirq);
-}
-
-void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
-			     irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-
-	if (WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
-		return;
-
-	if (virq != NO_IRQ) {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&host->revmap_data.tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
-}
-
-unsigned int irq_linear_revmap(struct irq_domain *host,
-			       irq_hw_number_t hwirq)
-{
-	unsigned int *revmap;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check revmap bounds */
-	if (unlikely(hwirq >= host->revmap_data.linear.size))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check if revmap was allocated */
-	revmap = host->revmap_data.linear.revmap;
-	if (unlikely(revmap == NULL))
-		return irq_find_mapping(host, hwirq);
-
-	/* Fill up revmap with slow path if no mapping found */
-	if (unlikely(revmap[hwirq] == NO_IRQ))
-		revmap[hwirq] = irq_find_mapping(host, hwirq);
-
-	return revmap[hwirq];
-}
-
 int arch_early_irq_init(void)
 {
 	return 0;
 }
 
-#ifdef CONFIG_VIRQ_DEBUG
-static int virq_debug_show(struct seq_file *m, void *private)
-{
-	unsigned long flags;
-	struct irq_desc *desc;
-	const char *p;
-	static const char none[] = "none";
-	void *data;
-	int i;
-
-	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
-		"chip name", "chip data", "host name");
-
-	for (i = 1; i < nr_irqs; i++) {
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
-		raw_spin_lock_irqsave(&desc->lock, flags);
-
-		if (desc->action && desc->action->handler) {
-			struct irq_chip *chip;
-
-			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
-
-			chip = irq_desc_get_chip(desc);
-			if (chip && chip->name)
-				p = chip->name;
-			else
-				p = none;
-			seq_printf(m, "%-15s ", p);
-
-			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, "0x%16p ", data);
-
-			if (desc->irq_data.domain->of_node)
-				p = desc->irq_data.domain->of_node->full_name;
-			else
-				p = none;
-			seq_printf(m, "%s\n", p);
-		}
-
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
-
-	return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
-	.open = virq_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
-	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
-				 NULL, &virq_debug_fops) == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-__initcall(irq_debugfs_init);
-#endif /* CONFIG_VIRQ_DEBUG */
-
 #ifdef CONFIG_PPC64
 static int __init setup_noirqdistrib(char *str)
 {