author     Grant Likely <grant.likely@secretlab.ca>   2012-02-14 16:06:50 -0500
committer  Grant Likely <grant.likely@secretlab.ca>   2012-02-14 16:06:50 -0500
commit     bae1d8f19983fbfa25559aa3cb6a81a84aa82a18
tree       387012cc698159bfb5851c5022d5b55db2dafadc   /arch/powerpc/kernel/irq.c
parent     644bd954313254b54e08b69077e16831b6e04dfa
irq_domain/powerpc: Use common irq_domain structure instead of irq_host
This patch drops the powerpc-specific irq_host structures and uses the common
irq_domain structures defined in linux/irqdomain.h. It also updates all
users to the new structure names.
Renaming irq_host to irq_domain has been discussed for a long time, and this
patch is a step in the process of generalizing the powerpc virq code to be
usable by all architectures.
An astute reader will notice that this patch actually removes the irq_host
structure instead of renaming it. This is because the irq_domain structure
already exists in include/linux/irqdomain.h and has the needed data members.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Milton Miller <miltonm@bga.com>
Tested-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--   arch/powerpc/kernel/irq.c   78
1 file changed, 39 insertions(+), 39 deletions(-)
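For readers following the rename, the sketch below shows what a typical caller-side update looks like after this patch. It is an illustration only, not part of the commit: the controller name foo_pic, its (empty) ops table, the linear map size of 64, and the header choices are assumptions; the irq_alloc_host() signature, the struct irq_domain / struct irq_domain_ops types, and the IRQ_DOMAIN_MAP_LINEAR constant are as used in the hunks below.

```c
/*
 * Illustration only -- not part of this commit.  "foo_pic" and its ops
 * table are hypothetical; the types and constants match the hunks below.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/irqdomain.h>

static struct irq_domain *foo_pic_host;		/* was: struct irq_host * */
static struct irq_domain_ops foo_pic_host_ops;	/* was: struct irq_host_ops */

static void __init foo_pic_init(struct device_node *np)
{
	/* Same call as before the patch; only the type names change. */
	foo_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 64,
				      &foo_pic_host_ops, 0);
	if (foo_pic_host == NULL)
		pr_err("foo_pic: failed to allocate irq_domain\n");
}
```

The call itself is unchanged from the irq_host days; only type and constant names differ, which is why the diff below is a pure rename with matching insertion and deletion counts.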
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f..7305f2f6553 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -498,15 +498,15 @@ void do_softirq(void)
  */
 struct irq_map_entry {
 	irq_hw_number_t hwirq;
-	struct irq_host *host;
+	struct irq_domain *host;
 };
 
-static LIST_HEAD(irq_hosts);
+static LIST_HEAD(irq_domain_list);
 static DEFINE_RAW_SPINLOCK(irq_big_lock);
 static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
-static struct irq_host *irq_default_host;
+static struct irq_domain *irq_default_host;
 
 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
@@ -520,31 +520,31 @@ irq_hw_number_t virq_to_hw(unsigned int virq)
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
 
-bool virq_is_host(unsigned int virq, struct irq_host *host)
+bool virq_is_host(unsigned int virq, struct irq_domain *host)
 {
 	return irq_map[virq].host == host;
 }
 EXPORT_SYMBOL_GPL(virq_is_host);
 
-static int default_irq_host_match(struct irq_host *h, struct device_node *np)
+static int default_irq_host_match(struct irq_domain *h, struct device_node *np)
 {
 	return h->of_node != NULL && h->of_node == np;
 }
 
-struct irq_host *irq_alloc_host(struct device_node *of_node,
+struct irq_domain *irq_alloc_host(struct device_node *of_node,
 				unsigned int revmap_type,
 				unsigned int revmap_arg,
-				struct irq_host_ops *ops,
+				struct irq_domain_ops *ops,
 				irq_hw_number_t inval_irq)
 {
-	struct irq_host *host;
-	unsigned int size = sizeof(struct irq_host);
+	struct irq_domain *host;
+	unsigned int size = sizeof(struct irq_domain);
 	unsigned int i;
 	unsigned int *rmap;
 	unsigned long flags;
 
 	/* Allocate structure and revmap table if using linear mapping */
-	if (revmap_type == IRQ_HOST_MAP_LINEAR)
+	if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
 		size += revmap_arg * sizeof(unsigned int);
 	host = kzalloc(size, GFP_KERNEL);
 	if (host == NULL)
@@ -564,7 +564,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	/* If it's a legacy controller, check for duplicates and
 	 * mark it as allocated (we use irq 0 host pointer for that
 	 */
-	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
+	if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
 		if (irq_map[0].host != NULL) {
 			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 			of_node_put(host->of_node);
@@ -574,12 +574,12 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 		irq_map[0].host = host;
 	}
 
-	list_add(&host->link, &irq_hosts);
+	list_add(&host->link, &irq_domain_list);
 	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 
 	/* Additional setups per revmap type */
 	switch(revmap_type) {
-	case IRQ_HOST_MAP_LEGACY:
+	case IRQ_DOMAIN_MAP_LEGACY:
 		/* 0 is always the invalid number for legacy */
 		host->inval_irq = 0;
 		/* setup us as the host for all legacy interrupts */
@@ -599,7 +599,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 			irq_clear_status_flags(i, IRQ_NOREQUEST);
 		}
 		break;
-	case IRQ_HOST_MAP_LINEAR:
+	case IRQ_DOMAIN_MAP_LINEAR:
 		rmap = (unsigned int *)(host + 1);
 		for (i = 0; i < revmap_arg; i++)
 			rmap[i] = NO_IRQ;
@@ -607,7 +607,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 		smp_wmb();
 		host->revmap_data.linear.revmap = rmap;
 		break;
-	case IRQ_HOST_MAP_TREE:
+	case IRQ_DOMAIN_MAP_TREE:
 		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
 		break;
 	default:
@@ -619,9 +619,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	return host;
 }
 
-struct irq_host *irq_find_host(struct device_node *node)
+struct irq_domain *irq_find_host(struct device_node *node)
 {
-	struct irq_host *h, *found = NULL;
+	struct irq_domain *h, *found = NULL;
 	unsigned long flags;
 
 	/* We might want to match the legacy controller last since
@@ -630,7 +630,7 @@ struct irq_host *irq_find_host(struct device_node *node)
 	 * yet though...
 	 */
 	raw_spin_lock_irqsave(&irq_big_lock, flags);
-	list_for_each_entry(h, &irq_hosts, link)
+	list_for_each_entry(h, &irq_domain_list, link)
 		if (h->ops->match(h, node)) {
 			found = h;
 			break;
@@ -640,7 +640,7 @@ struct irq_host *irq_find_host(struct device_node *node)
 }
 EXPORT_SYMBOL_GPL(irq_find_host);
 
-void irq_set_default_host(struct irq_host *host)
+void irq_set_default_host(struct irq_domain *host)
 {
 	pr_debug("irq: Default host set to @0x%p\n", host);
 
@@ -656,7 +656,7 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
-static int irq_setup_virq(struct irq_host *host, unsigned int virq,
+static int irq_setup_virq(struct irq_domain *host, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
 	int res;
@@ -688,7 +688,7 @@ error:
 	return -1;
 }
 
-unsigned int irq_create_direct_mapping(struct irq_host *host)
+unsigned int irq_create_direct_mapping(struct irq_domain *host)
 {
 	unsigned int virq;
 
@@ -696,7 +696,7 @@ unsigned int irq_create_direct_mapping(struct irq_host *host)
 		host = irq_default_host;
 
 	BUG_ON(host == NULL);
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
+	WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
 
 	virq = irq_alloc_virt(host, 1, 0);
 	if (virq == NO_IRQ) {
@@ -712,7 +712,7 @@ unsigned int irq_create_direct_mapping(struct irq_host *host)
 	return virq;
 }
 
-unsigned int irq_create_mapping(struct irq_host *host,
+unsigned int irq_create_mapping(struct irq_domain *host,
 				irq_hw_number_t hwirq)
 {
 	unsigned int virq, hint;
@@ -738,7 +738,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
 	}
 
 	/* Get a virtual interrupt number */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
+	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
 		/* Handle legacy */
 		virq = (unsigned int)hwirq;
 		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
@@ -767,7 +767,7 @@ EXPORT_SYMBOL_GPL(irq_create_mapping);
 unsigned int irq_create_of_mapping(struct device_node *controller,
 				   const u32 *intspec, unsigned int intsize)
 {
-	struct irq_host *host;
+	struct irq_domain *host;
 	irq_hw_number_t hwirq;
 	unsigned int type = IRQ_TYPE_NONE;
 	unsigned int virq;
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
 void irq_dispose_mapping(unsigned int virq)
 {
-	struct irq_host *host;
+	struct irq_domain *host;
 	irq_hw_number_t hwirq;
 
 	if (virq == NO_IRQ)
@@ -817,7 +817,7 @@ void irq_dispose_mapping(unsigned int virq)
 		return;
 
 	/* Never unmap legacy interrupts */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
 		return;
 
 	irq_set_status_flags(virq, IRQ_NOREQUEST);
@@ -836,11 +836,11 @@ void irq_dispose_mapping(unsigned int virq)
 	/* Clear reverse map */
 	hwirq = irq_map[virq].hwirq;
 	switch(host->revmap_type) {
-	case IRQ_HOST_MAP_LINEAR:
+	case IRQ_DOMAIN_MAP_LINEAR:
 		if (hwirq < host->revmap_data.linear.size)
 			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
 		break;
-	case IRQ_HOST_MAP_TREE:
+	case IRQ_DOMAIN_MAP_TREE:
 		mutex_lock(&revmap_trees_mutex);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
 		mutex_unlock(&revmap_trees_mutex);
@@ -857,7 +857,7 @@ void irq_dispose_mapping(unsigned int virq)
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
-unsigned int irq_find_mapping(struct irq_host *host,
+unsigned int irq_find_mapping(struct irq_domain *host,
 			      irq_hw_number_t hwirq)
 {
 	unsigned int i;
@@ -870,7 +870,7 @@ unsigned int irq_find_mapping(struct irq_host *host,
 		return NO_IRQ;
 
 	/* legacy -> bail early */
-	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
 		return hwirq;
 
 	/* Slow path does a linear search of the map */
@@ -925,13 +925,13 @@ int irq_choose_cpu(const struct cpumask *mask)
 }
 #endif
 
-unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
 				     irq_hw_number_t hwirq)
 {
 	struct irq_map_entry *ptr;
 	unsigned int virq;
 
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
+	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
 		return irq_find_mapping(host, hwirq);
 
 	/*
@@ -956,10 +956,10 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
 	return virq;
 }
 
-void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
 			     irq_hw_number_t hwirq)
 {
-	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
+	if (WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
 		return;
 
 	if (virq != NO_IRQ) {
@@ -970,12 +970,12 @@ void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
 	}
 }
 
-unsigned int irq_linear_revmap(struct irq_host *host,
+unsigned int irq_linear_revmap(struct irq_domain *host,
 			       irq_hw_number_t hwirq)
 {
 	unsigned int *revmap;
 
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
 		return irq_find_mapping(host, hwirq);
 
 	/* Check revmap bounds */
@@ -994,7 +994,7 @@ unsigned int irq_linear_revmap(struct irq_host *host,
 	return revmap[hwirq];
 }
 
-unsigned int irq_alloc_virt(struct irq_host *host,
+unsigned int irq_alloc_virt(struct irq_domain *host,
 			    unsigned int count,
 			    unsigned int hint)
 {
@@ -1064,7 +1064,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 
 	raw_spin_lock_irqsave(&irq_big_lock, flags);
 	for (i = virq; i < (virq + count); i++) {
-		struct irq_host *host;
+		struct irq_domain *host;
 
 		host = irq_map[i].host;
 		irq_map[i].hwirq = host->inval_irq;