-rw-r--r--  include/linux/irqdomain.h  | 48
-rw-r--r--  kernel/irq/generic-chip.c  |  5
-rw-r--r--  kernel/irq/irqdomain.c     | 55
3 files changed, 43 insertions, 65 deletions
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 1cbb7413c121..51ef84a3c990 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -73,50 +73,42 @@ struct irq_domain_chip_generic;
 /**
  * struct irq_domain - Hardware interrupt number translation object
  * @link: Element in global irq_domain list.
- * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
- *               will be one of the IRQ_DOMAIN_MAP_* values.
+ * @name: Name of interrupt domain
  * @ops: pointer to irq_domain methods
  * @host_data: private data pointer for use by owner. Not touched by irq_domain
  *             core code.
- * @irq_base: Start of irq_desc range assigned to the irq_domain. The creator
- *            of the irq_domain is responsible for allocating the array of
- *            irq_desc structures.
- * @nr_irq: Number of irqs managed by the irq domain
- * @hwirq_base: Starting number for hwirqs managed by the irq domain
- * @of_node: (optional) Pointer to device tree nodes associated with the
- *           irq_domain. Used when decoding device tree interrupt specifiers.
+ *
+ * Optional elements
+ * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
+ *           when decoding device tree interrupt specifiers.
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ *      setting up one or more generic chips for interrupt controllers
+ *      drivers using the generic chip library which uses this pointer.
+ *
+ * Revmap data, used internally by irq_domain
+ * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
+ *                         support direct mapping
+ * @revmap_size: Size of the linear map table @linear_revmap[]
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @linear_revmap: Linear table of hwirq->virq reverse mappings
  */
 struct irq_domain {
 	struct list_head link;
 	const char *name;
-
-	/* type of reverse mapping_technique */
-	unsigned int revmap_type;
-	struct {
-		struct {
-			unsigned int size;
-		} linear;
-		struct {
-			unsigned int max_irq;
-		} nomap;
-		struct radix_tree_root tree;
-	} revmap_data;
 	const struct irq_domain_ops *ops;
 	void *host_data;
-	irq_hw_number_t inval_irq;
 
-	/* Optional device node pointer */
+	/* Optional data */
 	struct device_node *of_node;
-	/* Optional pointer to generic interrupt chips */
 	struct irq_domain_chip_generic *gc;
 
-	/* Linear reverse map */
+	/* reverse map data. The linear map gets appended to the irq_domain */
+	unsigned int revmap_direct_max_irq;
+	unsigned int revmap_size;
+	struct radix_tree_root revmap_tree;
 	unsigned int linear_revmap[];
 };
 
-#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
-
 #ifdef CONFIG_IRQ_DOMAIN
 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 					 unsigned int size,
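
To illustrate the consolidated layout from a driver's point of view, here is a minimal sketch; it is not part of this patch, and the "foo" names and FOO_NR_IRQS value are assumptions. It shows a linear domain whose reverse map now lives directly in struct irq_domain, with linear_revmap[] appended to the allocation. Nothing on the driver side changes with this patch; only the bookkeeping inside struct irq_domain does.

/* Hypothetical driver sketch; the "foo" names and FOO_NR_IRQS are assumptions. */
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	32	/* assumed number of hwirqs on the controller */

static struct irq_domain *foo_domain;

static int foo_probe_irqs(struct device_node *np)
{
	unsigned int virq;

	/*
	 * Allocates one struct irq_domain with revmap_size == FOO_NR_IRQS and
	 * FOO_NR_IRQS entries of linear_revmap[] appended to it.
	 */
	foo_domain = irq_domain_add_linear(np, FOO_NR_IRQS,
					   &irq_domain_simple_ops, NULL);
	if (!foo_domain)
		return -ENOMEM;

	/* hwirq 3 is below revmap_size, so the virq lands in linear_revmap[3] */
	virq = irq_create_mapping(foo_domain, 3);
	return virq ? 0 : -ENOMEM;
}
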
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index ca98cc5d6308..4b011064e146 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -270,10 +270,7 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 	if (d->gc)
 		return -EBUSY;
 
-	if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
-		return -EINVAL;
-
-	numchips = d->revmap_data.linear.size / irqs_per_chip;
+	numchips = d->revmap_size / irqs_per_chip;
 	if (!numchips)
 		return -EINVAL;
 
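
The generic-chip helper can now size itself from revmap_size alone instead of first checking revmap_type. A hedged example of the call (the driver name and numbers are assumptions, not from this patch): a linear domain created with size 64 and irqs_per_chip == 32 yields numchips == 2.

/* Assumed driver code; only illustrates how numchips is derived. */
static int foo_setup_chips(struct irq_domain *d)
{
	/*
	 * With d->revmap_size == 64, this allocates 64 / 32 == 2 generic
	 * chip instances, each handling 32 interrupts.
	 */
	return irq_alloc_domain_generic_chips(d, 32, 1, "FOO",
					      handle_level_irq,
					      IRQ_NOREQUEST, 0, 0);
}
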
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a1d8ec8509e..c38be78fceb4 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -25,7 +25,6 @@ static struct irq_domain *irq_default_domain;
 /**
  * irq_domain_alloc() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -34,7 +33,7 @@ static struct irq_domain *irq_default_domain;
  * to IRQ domain, or NULL on failure.
  */
 static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
-					   unsigned int revmap_type, int size,
+					   int size,
 					   const struct irq_domain_ops *ops,
 					   void *host_data)
 {
@@ -46,12 +45,11 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
 		return NULL;
 
 	/* Fill structure */
-	INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
-	domain->revmap_type = revmap_type;
+	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
-	domain->revmap_data.linear.size = size;
+	domain->revmap_size = size;
 
 	return domain;
 }
@@ -67,8 +65,7 @@ static void irq_domain_add(struct irq_domain *domain)
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("Allocated domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Added domain %s\n", domain->name);
 }
 
 /**
@@ -88,7 +85,7 @@ void irq_domain_remove(struct irq_domain *domain)
 	 * node when all entries are removed. Shout if there are
 	 * any mappings left.
 	 */
-	WARN_ON(domain->revmap_data.tree.height);
+	WARN_ON(domain->revmap_tree.height);
 
 	list_del(&domain->link);
 
@@ -100,8 +97,7 @@ void irq_domain_remove(struct irq_domain *domain)
 
 	mutex_unlock(&irq_domain_mutex);
 
-	pr_debug("Removed domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Removed domain %s\n", domain->name);
 
 	irq_domain_free(domain);
 }
@@ -216,7 +212,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 {
 	struct irq_domain *domain;
 
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, size, ops, host_data);
+	domain = irq_domain_alloc(of_node, size, ops, host_data);
 	if (!domain)
 		return NULL;
 
@@ -230,10 +226,9 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-				     IRQ_DOMAIN_MAP_NOMAP, 0, ops, host_data);
+	struct irq_domain *domain = irq_domain_alloc(of_node, 0, ops, host_data);
 	if (domain) {
-		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+		domain->revmap_direct_max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
 	}
 	return domain;
@@ -321,11 +316,11 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
 		irq_data->hwirq = 0;
 
 		/* Clear reverse map for this hwirq */
-		if (hwirq < domain->revmap_data.linear.size) {
+		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = 0;
 		} else {
 			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_data.tree, hwirq);
+			radix_tree_delete(&domain->revmap_tree, hwirq);
 			mutex_unlock(&revmap_trees_mutex);
 		}
 	}
@@ -378,11 +373,11 @@ int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 			domain->name = irq_data->chip->name;
 		}
 
-		if (hwirq < domain->revmap_data.linear.size) {
+		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = virq;
 		} else {
 			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+			radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
 			mutex_unlock(&revmap_trees_mutex);
 		}
 
@@ -399,7 +394,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  *
  * This routine is used for irq controllers which can choose the hardware
  * interrupt numbers they generate. In such a case it's simplest to use
- * the linux irq as the hardware interrupt number.
+ * the linux irq as the hardware interrupt number. It still uses the linear
+ * or radix tree to store the mapping, but the irq controller can optimize
+ * the revmap path by using the hwirq directly.
  */
 unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
@@ -408,17 +405,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	if (domain == NULL)
 		domain = irq_default_domain;
 
-	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
-		return 0;
-
 	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (!virq) {
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= domain->revmap_data.nomap.max_irq) {
+	if (virq >= domain->revmap_direct_max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-			domain->revmap_data.nomap.max_irq);
+			domain->revmap_direct_max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
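
For context, a hedged sketch of how a nomap domain is typically used after this change; the "bar" names and the limit of 64 are assumptions, not from this patch. irq_domain_add_nomap() stores the limit in revmap_direct_max_irq, and irq_create_direct_mapping() hands back a virq that doubles as the hwirq.

/* Hypothetical nomap-domain user; "bar" and the limit 64 are assumptions. */
static struct irq_domain *bar_domain;

static int bar_setup(struct device_node *np)
{
	unsigned int virq;

	/* revmap_direct_max_irq becomes 64 */
	bar_domain = irq_domain_add_nomap(np, 64, &irq_domain_simple_ops, NULL);
	if (!bar_domain)
		return -ENOMEM;

	/* virq == hwirq; fails once the descriptor number reaches 64 */
	virq = irq_create_direct_mapping(bar_domain);
	if (!virq)
		return -EBUSY;

	/* program the hardware to raise interrupt number "virq" here */
	return 0;
}
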
@@ -617,17 +611,13 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		return 0;
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LINEAR:
-		return irq_linear_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_NOMAP:
+	if (hwirq < domain->revmap_direct_max_irq) {
 		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return hwirq;
-		break;
 	}
 
-	return 0;
+	return irq_linear_revmap(domain, hwirq);
 }
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
@@ -643,12 +633,11 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
 			       irq_hw_number_t hwirq)
 {
 	struct irq_data *data;
-	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
 
 	/* Check revmap bounds; complain if exceeded */
-	if (hwirq >= domain->revmap_data.linear.size) {
+	if (hwirq >= domain->revmap_size) {
 		rcu_read_lock();
-		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 		rcu_read_unlock();
 		return data ? data->irq : 0;
 	}
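
Taken together, irq_find_mapping() and irq_linear_revmap() now implement one lookup order for every domain: the direct (virq == hwirq) fast path guarded by revmap_direct_max_irq, then the linear table guarded by revmap_size, then the radix tree. The stand-alone model below is plain userspace C, an assumption for illustration rather than kernel code, with small arrays standing in for the radix tree; it only demonstrates that order. In the kernel, the direct fast path additionally checks through irq_get_irq_data() that the descriptor really belongs to the domain before trusting the identity mapping.

/* Userspace model of the unified reverse-map lookup; not kernel code. */
#include <stdio.h>

struct model_domain {
	unsigned int direct_max;	/* models revmap_direct_max_irq */
	unsigned int revmap_size;	/* models revmap_size */
	unsigned int linear[8];		/* models linear_revmap[] */
	unsigned int tree_hwirq[8];	/* models the radix tree keys */
	unsigned int tree_virq[8];	/* models the radix tree values */
	unsigned int tree_used;
};

static unsigned int find_mapping(const struct model_domain *d, unsigned int hwirq)
{
	unsigned int i;

	if (hwirq < d->direct_max)		/* nomap fast path: virq == hwirq */
		return hwirq;
	if (hwirq < d->revmap_size)		/* linear fast path */
		return d->linear[hwirq];
	for (i = 0; i < d->tree_used; i++)	/* radix-tree fallback */
		if (d->tree_hwirq[i] == hwirq)
			return d->tree_virq[i];
	return 0;				/* no mapping */
}

int main(void)
{
	struct model_domain linear = { .revmap_size = 4 };
	struct model_domain nomap = { .direct_max = 64 };

	linear.linear[2] = 17;		/* hwirq 2 -> virq 17 via linear table */
	linear.tree_hwirq[0] = 100;	/* hwirq 100 -> virq 42 via tree */
	linear.tree_virq[0] = 42;
	linear.tree_used = 1;

	printf("%u %u %u\n", find_mapping(&linear, 2),
	       find_mapping(&linear, 100), find_mapping(&nomap, 9));
	/* prints: 17 42 9 */
	return 0;
}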