diff options
author | Sebastien Dugue <sebastien.dugue@bull.net> | 2008-09-04 08:37:07 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-09-15 14:08:44 -0400 |
commit | 967e012ef306e99cfddcd7423f37414e6b568361 (patch) | |
tree | b265dc79fcc5d46d8397a01bd310cabfa3a2e28e /arch/powerpc/kernel | |
parent | aaf4a9b0f78786e6915077cbbb1d6f4fb6a8ee0b (diff) |
powerpc: Separate the irq radix tree insertion and lookup
irq_radix_revmap() currently serves 2 purposes, irq mapping lookup
and insertion which happen in interrupt and process context respectively.
Separate the function into its 2 components, one for lookup only and one
for insertion only.
Fix the only user of the revmap tree (XICS) to use the new functions.
Also, move the insertion into the radix tree of those irqs that were
requested before it was initialized at said tree initialization.
Mutual exclusion between the tree initialization and readers/writers is
handled via a state variable (revmap_trees_allocated) set to 1 when the tree
has been initialized and set to 2 after the already requested irqs have been
inserted in the tree by the init path. This state is checked before any reader
or writer access just like we used to check for tree.gfp_mask != 0 before.
Finally, now that we're no longer inserting nodes into the radix-tree
in interrupt context, turn the GFP_ATOMIC allocations into GFP_KERNEL ones.
Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/irq.c | 97 |
1 files changed, 76 insertions, 21 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index d972decf0324..2656924415da 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -441,6 +441,7 @@ static LIST_HEAD(irq_hosts); | |||
441 | static DEFINE_SPINLOCK(irq_big_lock); | 441 | static DEFINE_SPINLOCK(irq_big_lock); |
442 | static DEFINE_PER_CPU(unsigned int, irq_radix_reader); | 442 | static DEFINE_PER_CPU(unsigned int, irq_radix_reader); |
443 | static unsigned int irq_radix_writer; | 443 | static unsigned int irq_radix_writer; |
444 | static unsigned int revmap_trees_allocated; | ||
444 | struct irq_map_entry irq_map[NR_IRQS]; | 445 | struct irq_map_entry irq_map[NR_IRQS]; |
445 | static unsigned int irq_virq_count = NR_IRQS; | 446 | static unsigned int irq_virq_count = NR_IRQS; |
446 | static struct irq_host *irq_default_host; | 447 | static struct irq_host *irq_default_host; |
@@ -821,8 +822,12 @@ void irq_dispose_mapping(unsigned int virq) | |||
821 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; | 822 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; |
822 | break; | 823 | break; |
823 | case IRQ_HOST_MAP_TREE: | 824 | case IRQ_HOST_MAP_TREE: |
824 | /* Check if radix tree allocated yet */ | 825 | /* |
825 | if (host->revmap_data.tree.gfp_mask == 0) | 826 | * Check if radix tree allocated yet, if not then nothing to |
827 | * remove. | ||
828 | */ | ||
829 | smp_rmb(); | ||
830 | if (revmap_trees_allocated < 1) | ||
826 | break; | 831 | break; |
827 | irq_radix_wrlock(&flags); | 832 | irq_radix_wrlock(&flags); |
828 | radix_tree_delete(&host->revmap_data.tree, hwirq); | 833 | radix_tree_delete(&host->revmap_data.tree, hwirq); |
@@ -875,43 +880,62 @@ unsigned int irq_find_mapping(struct irq_host *host, | |||
875 | EXPORT_SYMBOL_GPL(irq_find_mapping); | 880 | EXPORT_SYMBOL_GPL(irq_find_mapping); |
876 | 881 | ||
877 | 882 | ||
878 | unsigned int irq_radix_revmap(struct irq_host *host, | 883 | unsigned int irq_radix_revmap_lookup(struct irq_host *host, |
879 | irq_hw_number_t hwirq) | 884 | irq_hw_number_t hwirq) |
880 | { | 885 | { |
881 | struct radix_tree_root *tree; | ||
882 | struct irq_map_entry *ptr; | 886 | struct irq_map_entry *ptr; |
883 | unsigned int virq; | 887 | unsigned int virq; |
884 | unsigned long flags; | 888 | unsigned long flags; |
885 | 889 | ||
886 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 890 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); |
887 | 891 | ||
888 | /* Check if the radix tree exist yet. We test the value of | 892 | /* |
889 | * the gfp_mask for that. Sneaky but saves another int in the | 893 | * Check if the radix tree exists and has been initialized. |
890 | * structure. If not, we fallback to slow mode | 894 | * If not, we fallback to slow mode |
891 | */ | 895 | */ |
892 | tree = &host->revmap_data.tree; | 896 | if (revmap_trees_allocated < 2) |
893 | if (tree->gfp_mask == 0) | ||
894 | return irq_find_mapping(host, hwirq); | 897 | return irq_find_mapping(host, hwirq); |
895 | 898 | ||
896 | /* Now try to resolve */ | 899 | /* Now try to resolve */ |
897 | irq_radix_rdlock(&flags); | 900 | irq_radix_rdlock(&flags); |
898 | ptr = radix_tree_lookup(tree, hwirq); | 901 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
899 | irq_radix_rdunlock(flags); | 902 | irq_radix_rdunlock(flags); |
900 | 903 | ||
901 | /* Found it, return */ | 904 | /* |
902 | if (ptr) { | 905 | * If found in radix tree, then fine. |
906 | * Else fallback to linear lookup - this should not happen in practice | ||
907 | * as it means that we failed to insert the node in the radix tree. | ||
908 | */ | ||
909 | if (ptr) | ||
903 | virq = ptr - irq_map; | 910 | virq = ptr - irq_map; |
904 | return virq; | 911 | else |
905 | } | 912 | virq = irq_find_mapping(host, hwirq); |
913 | |||
914 | return virq; | ||
915 | } | ||
916 | |||
917 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | ||
918 | irq_hw_number_t hwirq) | ||
919 | { | ||
920 | unsigned long flags; | ||
921 | |||
922 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | ||
923 | |||
924 | /* | ||
925 | * Check if the radix tree exists yet. | ||
926 | * If not, then the irq will be inserted into the tree when it gets | ||
927 | * initialized. | ||
928 | */ | ||
929 | smp_rmb(); | ||
930 | if (revmap_trees_allocated < 1) | ||
931 | return; | ||
906 | 932 | ||
907 | /* If not there, try to insert it */ | ||
908 | virq = irq_find_mapping(host, hwirq); | ||
909 | if (virq != NO_IRQ) { | 933 | if (virq != NO_IRQ) { |
910 | irq_radix_wrlock(&flags); | 934 | irq_radix_wrlock(&flags); |
911 | radix_tree_insert(tree, hwirq, &irq_map[virq]); | 935 | radix_tree_insert(&host->revmap_data.tree, hwirq, |
936 | &irq_map[virq]); | ||
912 | irq_radix_wrunlock(flags); | 937 | irq_radix_wrunlock(flags); |
913 | } | 938 | } |
914 | return virq; | ||
915 | } | 939 | } |
916 | 940 | ||
917 | unsigned int irq_linear_revmap(struct irq_host *host, | 941 | unsigned int irq_linear_revmap(struct irq_host *host, |
@@ -1021,14 +1045,45 @@ static int irq_late_init(void) | |||
1021 | { | 1045 | { |
1022 | struct irq_host *h; | 1046 | struct irq_host *h; |
1023 | unsigned long flags; | 1047 | unsigned long flags; |
1048 | unsigned int i; | ||
1024 | 1049 | ||
1025 | irq_radix_wrlock(&flags); | 1050 | /* |
1051 | * No mutual exclusion with respect to accessors of the tree is needed | ||
1052 | * here as the synchronization is done via the state variable | ||
1053 | * revmap_trees_allocated. | ||
1054 | */ | ||
1026 | list_for_each_entry(h, &irq_hosts, link) { | 1055 | list_for_each_entry(h, &irq_hosts, link) { |
1027 | if (h->revmap_type == IRQ_HOST_MAP_TREE) | 1056 | if (h->revmap_type == IRQ_HOST_MAP_TREE) |
1028 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); | 1057 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); |
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * Make sure the radix trees inits are visible before setting | ||
1062 | * the flag | ||
1063 | */ | ||
1064 | smp_wmb(); | ||
1065 | revmap_trees_allocated = 1; | ||
1066 | |||
1067 | /* | ||
1068 | * Insert the reverse mapping for those interrupts already present | ||
1069 | * in irq_map[]. | ||
1070 | */ | ||
1071 | irq_radix_wrlock(&flags); | ||
1072 | for (i = 0; i < irq_virq_count; i++) { | ||
1073 | if (irq_map[i].host && | ||
1074 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) | ||
1075 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, | ||
1076 | irq_map[i].hwirq, &irq_map[i]); | ||
1029 | } | 1077 | } |
1030 | irq_radix_wrunlock(flags); | 1078 | irq_radix_wrunlock(flags); |
1031 | 1079 | ||
1080 | /* | ||
1081 | * Make sure the radix trees insertions are visible before setting | ||
1082 | * the flag | ||
1083 | */ | ||
1084 | smp_wmb(); | ||
1085 | revmap_trees_allocated = 2; | ||
1086 | |||
1032 | return 0; | 1087 | return 0; |
1033 | } | 1088 | } |
1034 | arch_initcall(irq_late_init); | 1089 | arch_initcall(irq_late_init); |