author    Grant Likely <grant.likely@secretlab.ca>  2012-02-14 16:06:51 -0500
committer Grant Likely <grant.likely@secretlab.ca>  2012-02-14 16:06:51 -0500
commit    4bbdd45afdae208a7c4ade89cf602f89a6397cff (patch)
tree      f6a061599d777e3177a369165f58bed7132cb30b /arch/powerpc/kernel
parent    bae1d8f19983fbfa25559aa3cb6a81a84aa82a18 (diff)
irq_domain/powerpc: eliminate irq_map; use irq_alloc_desc() instead
This patch drops the powerpc-specific irq_map table and replaces it with direct use of the irq_alloc_desc()/irq_free_desc() interfaces for allocating and freeing irq_desc structures. This patch is a preparation step for generalizing the powerpc-specific virq infrastructure to become irq_domains.

As part of this change, irq_big_lock is changed from a raw spinlock to a mutex. There is no longer any need to use a spin lock since the irq_desc allocation code is now responsible for the critical section of finding an unused range of irq numbers.

The radix lookup table is also changed to store the irq_data pointer instead of the irq_map entry since the irq_map is removed. This should end up being functionally equivalent since only allocated irq_descs are ever added to the radix tree.

v5: - Really don't ever allocate virq 0. The previous version could still do it if hint == 0.
    - Respect the irq_virq_count setting for NOMAP. Some NOMAP domains cannot use virq values above irq_virq_count.
    - Use numa_node_id() when allocating irq_descs. Ideally the API should obtain that value from the caller, but that touches a lot of call sites so it will be deferred to a follow-on patch.
    - Fix irq_find_mapping() to include irq numbers lower than NUM_ISA_INTERRUPTS. With the switch to irq_alloc_desc*(), the lowest possible allocated irq is now returned by arch_probe_nr_irqs().
v4: - Fix incorrect access to the irq_data structure in the debugfs code.
    - Don't ever allocate virq 0.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Milton Miller <miltonm@bga.com>
Tested-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch/powerpc/kernel')
 -rw-r--r--  arch/powerpc/kernel/irq.c | 240
 1 file changed, 69 insertions(+), 171 deletions(-)
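Before the diff itself, a minimal sketch (not part of the patch) of the lookup path this change moves to: with the irq_map[] table gone, the hwirq for a Linux irq number is read from the irq_desc's embedded irq_data via the generic irq_get_irq_data() helper. The example_virq_to_hw() name is purely illustrative and mirrors the new-style virq_to_hw() in the patch below.

#include <linux/irq.h>          /* irq_get_irq_data(), struct irq_data */

/* Illustrative only: reads the hwirq from the irq_desc's irq_data. */
static irq_hw_number_t example_virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        /* NULL means no irq_desc was ever allocated for this virq */
        if (WARN_ON(!irq_data))
                return 0;

        return irq_data->hwirq;
}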
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7305f2f65534..03c95f03d792 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -491,38 +491,29 @@ void do_softirq(void)
  * IRQ controller and virtual interrupts
  */

-/* The main irq map itself is an array of NR_IRQ entries containing the
- * associate host and irq number. An entry with a host of NULL is free.
- * An entry can be allocated if it's free, the allocator always then sets
- * hwirq first to the host's invalid irq number and then fills ops.
- */
-struct irq_map_entry {
-        irq_hw_number_t hwirq;
-        struct irq_domain *host;
-};
-
 static LIST_HEAD(irq_domain_list);
-static DEFINE_RAW_SPINLOCK(irq_big_lock);
+static DEFINE_MUTEX(irq_domain_mutex);
 static DEFINE_MUTEX(revmap_trees_mutex);
-static struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_domain *irq_default_host;

 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
-        return irq_map[d->irq].hwirq;
+        return d->hwirq;
 }
 EXPORT_SYMBOL_GPL(irqd_to_hwirq);

 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
-        return irq_map[virq].hwirq;
+        struct irq_data *irq_data = irq_get_irq_data(virq);
+        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);

 bool virq_is_host(unsigned int virq, struct irq_domain *host)
 {
-        return irq_map[virq].host == host;
+        struct irq_data *irq_data = irq_get_irq_data(virq);
+        return irq_data ? irq_data->domain == host : false;
 }
 EXPORT_SYMBOL_GPL(virq_is_host);

@@ -537,11 +528,10 @@ struct irq_domain *irq_alloc_host(struct device_node *of_node,
                                 struct irq_domain_ops *ops,
                                 irq_hw_number_t inval_irq)
 {
-        struct irq_domain *host;
+        struct irq_domain *host, *h;
         unsigned int size = sizeof(struct irq_domain);
         unsigned int i;
         unsigned int *rmap;
-        unsigned long flags;

         /* Allocate structure and revmap table if using linear mapping */
         if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
@@ -559,23 +549,20 @@ struct irq_domain *irq_alloc_host(struct device_node *of_node,
         if (host->ops->match == NULL)
                 host->ops->match = default_irq_host_match;

-        raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-        /* If it's a legacy controller, check for duplicates and
-         * mark it as allocated (we use irq 0 host pointer for that
-         */
+        mutex_lock(&irq_domain_mutex);
+        /* Make sure only one legacy controller can be created */
         if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
-                if (irq_map[0].host != NULL) {
-                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-                        of_node_put(host->of_node);
-                        kfree(host);
-                        return NULL;
+                list_for_each_entry(h, &irq_domain_list, link) {
+                        if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
+                                mutex_unlock(&irq_domain_mutex);
+                                of_node_put(host->of_node);
+                                kfree(host);
+                                return NULL;
+                        }
                 }
-                irq_map[0].host = host;
         }
-
         list_add(&host->link, &irq_domain_list);
-        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+        mutex_unlock(&irq_domain_mutex);

         /* Additional setups per revmap type */
         switch(revmap_type) {
@@ -584,10 +571,9 @@ struct irq_domain *irq_alloc_host(struct device_node *of_node,
                 host->inval_irq = 0;
                 /* setup us as the host for all legacy interrupts */
                 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
-                        irq_map[i].hwirq = i;
-                        smp_wmb();
-                        irq_map[i].host = host;
-                        smp_wmb();
+                        struct irq_data *irq_data = irq_get_irq_data(i);
+                        irq_data->hwirq = i;
+                        irq_data->domain = host;

                         /* Legacy flags are left to default at this point,
                          * one can then use irq_create_mapping() to
@@ -604,7 +590,6 @@ struct irq_domain *irq_alloc_host(struct device_node *of_node,
                 for (i = 0; i < revmap_arg; i++)
                         rmap[i] = NO_IRQ;
                 host->revmap_data.linear.size = revmap_arg;
-                smp_wmb();
                 host->revmap_data.linear.revmap = rmap;
                 break;
         case IRQ_DOMAIN_MAP_TREE:
@@ -622,20 +607,19 @@ struct irq_domain *irq_alloc_host(struct device_node *of_node,
 struct irq_domain *irq_find_host(struct device_node *node)
 {
         struct irq_domain *h, *found = NULL;
-        unsigned long flags;

         /* We might want to match the legacy controller last since
          * it might potentially be set to match all interrupts in
          * the absence of a device node. This isn't a problem so far
          * yet though...
          */
-        raw_spin_lock_irqsave(&irq_big_lock, flags);
+        mutex_lock(&irq_domain_mutex);
         list_for_each_entry(h, &irq_domain_list, link)
                 if (h->ops->match(h, node)) {
                         found = h;
                         break;
                 }
-        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+        mutex_unlock(&irq_domain_mutex);
         return found;
 }
 EXPORT_SYMBOL_GPL(irq_find_host);
@@ -659,33 +643,20 @@ void irq_set_virq_count(unsigned int count)
 static int irq_setup_virq(struct irq_domain *host, unsigned int virq,
                           irq_hw_number_t hwirq)
 {
-        int res;
-
-        res = irq_alloc_desc_at(virq, 0);
-        if (res != virq) {
-                pr_debug("irq: -> allocating desc failed\n");
-                goto error;
-        }
-
-        /* map it */
-        smp_wmb();
-        irq_map[virq].hwirq = hwirq;
-        smp_mb();
+        struct irq_data *irq_data = irq_get_irq_data(virq);

+        irq_data->hwirq = hwirq;
+        irq_data->domain = host;
         if (host->ops->map(host, virq, hwirq)) {
                 pr_debug("irq: -> mapping failed, freeing\n");
-                goto errdesc;
+                irq_data->domain = NULL;
+                irq_data->hwirq = 0;
+                return -1;
         }

         irq_clear_status_flags(virq, IRQ_NOREQUEST);

         return 0;
-
-errdesc:
-        irq_free_descs(virq, 1);
-error:
-        irq_free_virt(virq, 1);
-        return -1;
 }

 unsigned int irq_create_direct_mapping(struct irq_domain *host)
@@ -698,16 +669,24 @@ unsigned int irq_create_direct_mapping(struct irq_domain *host)
         BUG_ON(host == NULL);
         WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_NOMAP);

-        virq = irq_alloc_virt(host, 1, 0);
+        virq = irq_alloc_desc_from(1, 0);
         if (virq == NO_IRQ) {
                 pr_debug("irq: create_direct virq allocation failed\n");
                 return NO_IRQ;
         }
+        if (virq >= irq_virq_count) {
+                pr_err("ERROR: no free irqs available below %i maximum\n",
+                       irq_virq_count);
+                irq_free_desc(virq);
+                return 0;
+        }

         pr_debug("irq: create_direct obtained virq %d\n", virq);

-        if (irq_setup_virq(host, virq, virq))
+        if (irq_setup_virq(host, virq, virq)) {
+                irq_free_desc(virq);
                 return NO_IRQ;
+        }

         return virq;
 }
@@ -747,15 +726,22 @@ unsigned int irq_create_mapping(struct irq_domain *host,
         } else {
                 /* Allocate a virtual interrupt number */
                 hint = hwirq % irq_virq_count;
-                virq = irq_alloc_virt(host, 1, hint);
+                if (hint == 0)
+                        hint = 1;
+                virq = irq_alloc_desc_from(hint, 0);
+                if (!virq)
+                        virq = irq_alloc_desc_from(1, 0);
                 if (virq == NO_IRQ) {
                         pr_debug("irq: -> virq allocation failed\n");
                         return NO_IRQ;
                 }
         }

-        if (irq_setup_virq(host, virq, hwirq))
+        if (irq_setup_virq(host, virq, hwirq)) {
+                if (host->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+                        irq_free_desc(virq);
                 return NO_IRQ;
+        }

         pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
                 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
@@ -806,13 +792,14 @@ EXPORT_SYMBOL_GPL(irq_create_of_mapping);

 void irq_dispose_mapping(unsigned int virq)
 {
+        struct irq_data *irq_data = irq_get_irq_data(virq);
         struct irq_domain *host;
         irq_hw_number_t hwirq;

-        if (virq == NO_IRQ)
+        if (virq == NO_IRQ || !irq_data)
                 return;

-        host = irq_map[virq].host;
+        host = irq_data->domain;
         if (WARN_ON(host == NULL))
                 return;

@@ -834,7 +821,7 @@ void irq_dispose_mapping(unsigned int virq)
         smp_mb();

         /* Clear reverse map */
-        hwirq = irq_map[virq].hwirq;
+        hwirq = irq_data->hwirq;
         switch(host->revmap_type) {
         case IRQ_DOMAIN_MAP_LINEAR:
                 if (hwirq < host->revmap_data.linear.size)
@@ -848,12 +835,9 @@ void irq_dispose_mapping(unsigned int virq)
         }

         /* Destroy map */
-        smp_mb();
-        irq_map[virq].hwirq = host->inval_irq;
+        irq_data->hwirq = host->inval_irq;

-        irq_free_descs(virq, 1);
-        /* Free it */
-        irq_free_virt(virq, 1);
+        irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);

@@ -874,16 +858,16 @@ unsigned int irq_find_mapping(struct irq_domain *host,
                 return hwirq;

         /* Slow path does a linear search of the map */
-        if (hint < NUM_ISA_INTERRUPTS)
-                hint = NUM_ISA_INTERRUPTS;
+        if (hint == 0)
+                hint = 1;
         i = hint;
         do {
-                if (irq_map[i].host == host &&
-                    irq_map[i].hwirq == hwirq)
+                struct irq_data *data = irq_get_irq_data(i);
+                if (data && (data->domain == host) && (data->hwirq == hwirq))
                         return i;
                 i++;
                 if (i >= irq_virq_count)
-                        i = NUM_ISA_INTERRUPTS;
+                        i = 1;
         } while(i != hint);
         return NO_IRQ;
 }
@@ -928,19 +912,17 @@ int irq_choose_cpu(const struct cpumask *mask)
 unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
                                      irq_hw_number_t hwirq)
 {
-        struct irq_map_entry *ptr;
-        unsigned int virq;
+        struct irq_data *irq_data;

         if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
                 return irq_find_mapping(host, hwirq);

         /*
-         * The ptr returned references the static global irq_map.
-         * but freeing an irq can delete nodes along the path to
+         * Freeing an irq can delete nodes along the path to
          * do the lookup via call_rcu.
          */
         rcu_read_lock();
-        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
+        irq_data = radix_tree_lookup(&host->revmap_data.tree, hwirq);
         rcu_read_unlock();

         /*
@@ -948,24 +930,20 @@ unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
          * Else fallback to linear lookup - this should not happen in practice
          * as it means that we failed to insert the node in the radix tree.
          */
-        if (ptr)
-                virq = ptr - irq_map;
-        else
-                virq = irq_find_mapping(host, hwirq);
-
-        return virq;
+        return irq_data ? irq_data->irq : irq_find_mapping(host, hwirq);
 }

 void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
                              irq_hw_number_t hwirq)
 {
+        struct irq_data *irq_data = irq_get_irq_data(virq);
+
         if (WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
                 return;

         if (virq != NO_IRQ) {
                 mutex_lock(&revmap_trees_mutex);
-                radix_tree_insert(&host->revmap_data.tree, hwirq,
-                                  &irq_map[virq]);
+                radix_tree_insert(&host->revmap_data.tree, hwirq, irq_data);
                 mutex_unlock(&revmap_trees_mutex);
         }
 }
@@ -994,86 +972,6 @@ unsigned int irq_linear_revmap(struct irq_domain *host,
         return revmap[hwirq];
 }

-unsigned int irq_alloc_virt(struct irq_domain *host,
-                            unsigned int count,
-                            unsigned int hint)
-{
-        unsigned long flags;
-        unsigned int i, j, found = NO_IRQ;
-
-        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
-                return NO_IRQ;
-
-        raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-        /* Use hint for 1 interrupt if any */
-        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
-            hint < irq_virq_count && irq_map[hint].host == NULL) {
-                found = hint;
-                goto hint_found;
-        }
-
-        /* Look for count consecutive numbers in the allocatable
-         * (non-legacy) space
-         */
-        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
-                if (irq_map[i].host != NULL)
-                        j = 0;
-                else
-                        j++;
-
-                if (j == count) {
-                        found = i - count + 1;
-                        break;
-                }
-        }
-        if (found == NO_IRQ) {
-                raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-                return NO_IRQ;
-        }
- hint_found:
-        for (i = found; i < (found + count); i++) {
-                irq_map[i].hwirq = host->inval_irq;
-                smp_wmb();
-                irq_map[i].host = host;
-        }
-        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-        return found;
-}
-
-void irq_free_virt(unsigned int virq, unsigned int count)
-{
-        unsigned long flags;
-        unsigned int i;
-
-        WARN_ON (virq < NUM_ISA_INTERRUPTS);
-        WARN_ON (count == 0 || (virq + count) > irq_virq_count);
-
-        if (virq < NUM_ISA_INTERRUPTS) {
-                if (virq + count < NUM_ISA_INTERRUPTS)
-                        return;
-                count =- NUM_ISA_INTERRUPTS - virq;
-                virq = NUM_ISA_INTERRUPTS;
-        }
-
-        if (count > irq_virq_count || virq > irq_virq_count - count) {
-                if (virq > irq_virq_count)
-                        return;
-                count = irq_virq_count - virq;
-        }
-
-        raw_spin_lock_irqsave(&irq_big_lock, flags);
-        for (i = virq; i < (virq + count); i++) {
-                struct irq_domain *host;
-
-                host = irq_map[i].host;
-                irq_map[i].hwirq = host->inval_irq;
-                smp_wmb();
-                irq_map[i].host = NULL;
-        }
-        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
 int arch_early_irq_init(void)
 {
         return 0;
@@ -1103,7 +1001,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
                 struct irq_chip *chip;

                 seq_printf(m, "%5d ", i);
-                seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
+                seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);

                 chip = irq_desc_get_chip(desc);
                 if (chip && chip->name)
@@ -1115,8 +1013,8 @@ static int virq_debug_show(struct seq_file *m, void *private)
                 data = irq_desc_get_chip_data(desc);
                 seq_printf(m, "0x%16p ", data);

-                if (irq_map[i].host && irq_map[i].host->of_node)
-                        p = irq_map[i].host->of_node->full_name;
+                if (desc->irq_data.domain->of_node)
+                        p = desc->irq_data.domain->of_node->full_name;
                 else
                         p = none;
                 seq_printf(m, "%s\n", p);