author     Milton Miller <miltonm@bga.com>                     2011-05-10 15:29:53 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2011-05-19 01:31:35 -0400
commit     3af259d1555a93b3b6f6545af13e0eb99b0d5d32 (patch)
tree       8b907637ca8816014e87260d6d5ac72911605956 /arch/powerpc/kernel/irq.c
parent     e085255ebce87c0b85d4752638d8a7d4f35f5b64 (diff)
powerpc: Radix trees are available before init_IRQ
Since the generic irq code uses a radix tree for sparse interrupts, the
initcall ordering has been changed to initialize radix trees before irqs.
We no longer need to defer creating revmap radix trees to the arch_initcall
irq_late_init.

Also, the kmem caches are allocated so we don't need to use
zalloc_maybe_bootmem.

Signed-off-by: Milton Miller <miltonm@bga.com>
Reviewed-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--  arch/powerpc/kernel/irq.c  78
1 file changed, 4 insertions(+), 74 deletions(-)
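As an aside, here is a minimal userspace C sketch of the pattern this patch adopts: initializing the reverse-map tree at host allocation time instead of deferring it to a late initcall guarded by a readiness flag. All names below (revmap_tree, host_alloc, MAP_TREE) are hypothetical stand-ins for illustration, not the kernel's API.

/*
 * Minimal userspace sketch (hypothetical names, not the kernel API) of the
 * simplification made by this patch: because the tree machinery is usable
 * by the time a host is created, the tree can be set up in the allocator
 * itself rather than deferred behind a state flag such as
 * revmap_trees_allocated.
 */
#include <stdlib.h>
#include <string.h>

struct revmap_tree {
        void *root;                     /* stand-in for a radix tree root */
};

static void tree_init(struct revmap_tree *t)
{
        memset(t, 0, sizeof(*t));       /* analogous to INIT_RADIX_TREE() */
}

enum map_type { MAP_LINEAR, MAP_TREE };

struct host {
        enum map_type type;
        struct revmap_tree tree;
};

/*
 * "After" shape of the allocator: plain zeroed allocation plus immediate
 * tree initialization, so insert/delete/lookup paths never need to check
 * whether the tree exists yet.
 */
static struct host *host_alloc(enum map_type type)
{
        struct host *h = calloc(1, sizeof(*h)); /* kzalloc() analogue */

        if (!h)
                return NULL;
        h->type = type;
        if (type == MAP_TREE)
                tree_init(&h->tree);    /* no longer deferred to a late initcall */
        return h;
}

int main(void)
{
        struct host *h = host_alloc(MAP_TREE);
        int ok = (h != NULL);

        free(h);
        return ok ? 0 : 1;
}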
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 826552cecebd..f42e869ee3cc 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -493,7 +493,6 @@ struct irq_map_entry {
 
 static LIST_HEAD(irq_hosts);
 static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static unsigned int revmap_trees_allocated;
 static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
@@ -537,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	/* Allocate structure and revmap table if using linear mapping */
 	if (revmap_type == IRQ_HOST_MAP_LINEAR)
 		size += revmap_arg * sizeof(unsigned int);
-	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
+	host = kzalloc(size, GFP_KERNEL);
 	if (host == NULL)
 		return NULL;
 
@@ -605,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 		smp_wmb();
 		host->revmap_data.linear.revmap = rmap;
 		break;
+	case IRQ_HOST_MAP_TREE:
+		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+		break;
 	default:
 		break;
 	}
@@ -839,13 +841,6 @@ void irq_dispose_mapping(unsigned int virq)
 		host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
 		break;
 	case IRQ_HOST_MAP_TREE:
-		/*
-		 * Check if radix tree allocated yet, if not then nothing to
-		 * remove.
-		 */
-		smp_rmb();
-		if (revmap_trees_allocated < 1)
-			break;
 		mutex_lock(&revmap_trees_mutex);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
 		mutex_unlock(&revmap_trees_mutex);
@@ -906,14 +901,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
 
 	/*
-	 * Check if the radix tree exists and has bee initialized.
-	 * If not, we fallback to slow mode
-	 */
-	if (revmap_trees_allocated < 2)
-		return irq_find_mapping(host, hwirq);
-
-	/* Now try to resolve */
-	/*
 	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
 	 * as it's referencing an entry in the static irq_map table.
 	 */
@@ -935,18 +922,8 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
 			     irq_hw_number_t hwirq)
 {
-
 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
 
-	/*
-	 * Check if the radix tree exists yet.
-	 * If not, then the irq will be inserted into the tree when it gets
-	 * initialized.
-	 */
-	smp_rmb();
-	if (revmap_trees_allocated < 1)
-		return;
-
 	if (virq != NO_IRQ) {
 		mutex_lock(&revmap_trees_mutex);
 		radix_tree_insert(&host->revmap_data.tree, hwirq,
@@ -1054,53 +1031,6 @@ int arch_early_irq_init(void)
 	return 0;
 }
 
-/* We need to create the radix trees late */
-static int irq_late_init(void)
-{
-	struct irq_host *h;
-	unsigned int i;
-
-	/*
-	 * No mutual exclusion with respect to accessors of the tree is needed
-	 * here as the synchronization is done via the state variable
-	 * revmap_trees_allocated.
-	 */
-	list_for_each_entry(h, &irq_hosts, link) {
-		if (h->revmap_type == IRQ_HOST_MAP_TREE)
-			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
-	}
-
-	/*
-	 * Make sure the radix trees inits are visible before setting
-	 * the flag
-	 */
-	smp_wmb();
-	revmap_trees_allocated = 1;
-
-	/*
-	 * Insert the reverse mapping for those interrupts already present
-	 * in irq_map[].
-	 */
-	mutex_lock(&revmap_trees_mutex);
-	for (i = 0; i < irq_virq_count; i++) {
-		if (irq_map[i].host &&
-		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
-			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
-					  irq_map[i].hwirq, &irq_map[i]);
-	}
-	mutex_unlock(&revmap_trees_mutex);
-
-	/*
-	 * Make sure the radix trees insertions are visible before setting
-	 * the flag
-	 */
-	smp_wmb();
-	revmap_trees_allocated = 2;
-
-	return 0;
-}
-arch_initcall(irq_late_init);
-
 #ifdef CONFIG_VIRQ_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
 {