path: root/kernel/irq
author    Grant Likely <grant.likely@linaro.org>    2013-06-08 07:03:59 -0400
committer Grant Likely <grant.likely@linaro.org>    2013-06-10 06:52:09 -0400
commit    1aa0dd94ca07df818cf14588c9031ab1d7fd84d3 (patch)
tree      9305dbac0984fdb76677bf558f545f716d737f40 /kernel/irq
parent    cef5075c8c238ffd04c86a77a5a9bdbd18031137 (diff)
irqdomain: Eliminate revmap type
The NOMAP irq_domain type is only used by a handful of interrupt controllers, and it unnecessarily complicates the code by adding special cases on how to look up mappings; a different revmap function is used for each type, and each one must validate that the correct type was passed in before performing the reverse map. Eliminating the revmap_type and using a single reverse mapping function simplifies the code. It also shouldn't be any slower than having separate revmap functions, because the type of the revmap needed to be checked anyway.

The linear and tree revmap types were already merged in a previous patch. This patch rolls the NOMAP (direct mapping) behaviour into the same domain code, making it possible for an irq domain to use any mapping type (linear, tree or direct) while keeping the mapping transparent to the interrupt controller driver.

With this change, direct mappings get stored in the linear or tree map for consistency, and reverse mapping from hwirq to virq goes through the normal lookup process. However, any controller using a direct mapping knows that hwirq == virq for its mapped interrupts and can skip the revmap lookup when handling IRQs.

Signed-off-by: Grant Likely <grant.likely@linaro.org>
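For illustration, the consolidated reverse-map path after this patch amounts to roughly the following sketch. It mirrors the new irq_find_mapping()/irq_linear_revmap() logic from the diff below; the function name is illustrative only, not the kernel's.

    /* Sketch: single lookup path covering direct, linear and tree revmaps. */
    static unsigned int revmap_lookup_sketch(struct irq_domain *domain,
                                             irq_hw_number_t hwirq)
    {
            struct irq_data *data;

            /* Direct (NOMAP-style) domains: hwirq == virq below the limit. */
            if (hwirq < domain->revmap_direct_max_irq) {
                    data = irq_get_irq_data(hwirq);
                    if (data && data->domain == domain && data->hwirq == hwirq)
                            return hwirq;
            }

            /* Small hwirqs live in the linear array... */
            if (hwirq < domain->revmap_size)
                    return domain->linear_revmap[hwirq];

            /* ...everything else falls back to the radix tree. */
            rcu_read_lock();
            data = radix_tree_lookup(&domain->revmap_tree, hwirq);
            rcu_read_unlock();
            return data ? data->irq : 0;
    }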
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/generic-chip.c    5
-rw-r--r--  kernel/irq/irqdomain.c      55
2 files changed, 23 insertions, 37 deletions
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index ca98cc5d6308..4b011064e146 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -270,10 +270,7 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 	if (d->gc)
 		return -EBUSY;
 
-	if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
-		return -EINVAL;
-
-	numchips = d->revmap_data.linear.size / irqs_per_chip;
+	numchips = d->revmap_size / irqs_per_chip;
 	if (!numchips)
 		return -EINVAL;
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a1d8ec8509e..c38be78fceb4 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -25,7 +25,6 @@ static struct irq_domain *irq_default_domain;
 /**
  * irq_domain_alloc() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -34,7 +33,7 @@ static struct irq_domain *irq_default_domain;
  * to IRQ domain, or NULL on failure.
  */
 static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
-					    unsigned int revmap_type, int size,
+					    int size,
 					    const struct irq_domain_ops *ops,
 					    void *host_data)
 {
@@ -46,12 +45,11 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
 		return NULL;
 
 	/* Fill structure */
-	INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
-	domain->revmap_type = revmap_type;
+	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
-	domain->revmap_data.linear.size = size;
+	domain->revmap_size = size;
 
 	return domain;
 }
@@ -67,8 +65,7 @@ static void irq_domain_add(struct irq_domain *domain)
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("Allocated domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Added domain %s\n", domain->name);
 }
 
 /**
@@ -88,7 +85,7 @@ void irq_domain_remove(struct irq_domain *domain)
 	 * node when all entries are removed. Shout if there are
 	 * any mappings left.
 	 */
-	WARN_ON(domain->revmap_data.tree.height);
+	WARN_ON(domain->revmap_tree.height);
 
 	list_del(&domain->link);
 
@@ -100,8 +97,7 @@ void irq_domain_remove(struct irq_domain *domain)
 
 	mutex_unlock(&irq_domain_mutex);
 
-	pr_debug("Removed domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Removed domain %s\n", domain->name);
 
 	irq_domain_free(domain);
 }
@@ -216,7 +212,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 {
 	struct irq_domain *domain;
 
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, size, ops, host_data);
+	domain = irq_domain_alloc(of_node, size, ops, host_data);
 	if (!domain)
 		return NULL;
 
@@ -230,10 +226,9 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-					IRQ_DOMAIN_MAP_NOMAP, 0, ops, host_data);
+	struct irq_domain *domain = irq_domain_alloc(of_node, 0, ops, host_data);
 	if (domain) {
-		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+		domain->revmap_direct_max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
 	}
 	return domain;
@@ -321,11 +316,11 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
 		irq_data->hwirq = 0;
 
 		/* Clear reverse map for this hwirq */
-		if (hwirq < domain->revmap_data.linear.size) {
+		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = 0;
 		} else {
 			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_data.tree, hwirq);
+			radix_tree_delete(&domain->revmap_tree, hwirq);
 			mutex_unlock(&revmap_trees_mutex);
 		}
 	}
@@ -378,11 +373,11 @@ int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 			domain->name = irq_data->chip->name;
 		}
 
-		if (hwirq < domain->revmap_data.linear.size) {
+		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = virq;
 		} else {
 			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+			radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
 			mutex_unlock(&revmap_trees_mutex);
 		}
 
@@ -399,7 +394,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  *
  * This routine is used for irq controllers which can choose the hardware
  * interrupt numbers they generate. In such a case it's simplest to use
- * the linux irq as the hardware interrupt number.
+ * the linux irq as the hardware interrupt number. It still uses the linear
+ * or radix tree to store the mapping, but the irq controller can optimize
+ * the revmap path by using the hwirq directly.
  */
 unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
@@ -408,17 +405,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	if (domain == NULL)
 		domain = irq_default_domain;
 
-	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
-		return 0;
-
 	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (!virq) {
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= domain->revmap_data.nomap.max_irq) {
+	if (virq >= domain->revmap_direct_max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-		       domain->revmap_data.nomap.max_irq);
+		       domain->revmap_direct_max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
@@ -617,17 +611,13 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		return 0;
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LINEAR:
-		return irq_linear_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_NOMAP:
+	if (hwirq < domain->revmap_direct_max_irq) {
 		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return hwirq;
-		break;
 	}
 
-	return 0;
+	return irq_linear_revmap(domain, hwirq);
 }
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
@@ -643,12 +633,11 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
 			     irq_hw_number_t hwirq)
 {
 	struct irq_data *data;
-	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
 
 	/* Check revmap bounds; complain if exceeded */
-	if (hwirq >= domain->revmap_data.linear.size) {
+	if (hwirq >= domain->revmap_size) {
 		rcu_read_lock();
-		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 		rcu_read_unlock();
 		return data ? data->irq : 0;
 	}
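As noted in the commit message, a controller registered through irq_domain_add_nomap()/irq_create_direct_mapping() knows that hwirq == virq for every interrupt it has mapped, so its flow handler can skip the revmap lookup. A minimal, hypothetical handler illustrating this (the example_read_pending_hwirq() register accessor is made up for illustration and is not part of this patch):

    static void example_direct_handle_irq(void)
    {
            /* Hypothetical: ask the controller which hwirq is pending. */
            unsigned int hwirq = example_read_pending_hwirq();

            /*
             * Direct mapping guarantees hwirq == virq, so the generic
             * irq_find_mapping() lookup can be bypassed.
             */
            generic_handle_irq(hwirq);
    }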