author		Grant Likely <grant.likely@secretlab.ca>	2012-07-11 12:24:31 -0400
committer	Grant Likely <grant.likely@linaro.org>	2013-06-10 06:52:09 -0400
commit		cef5075c8c238ffd04c86a77a5a9bdbd18031137
tree		0163ec330ce81f90375ee1208c27d4e063a0e89f /kernel/irq
parent		0bb4afb45dd1add73ca643a865daa38716aeff0c
irqdomain: merge linear and tree reverse mappings.
Keeping them separate makes irq_domain more complex and adds a lot of code (as proven by the diffstat). Merging them simplifies the whole scheme. This change makes it so both the tree and linear methods can be used by the same irq_domain instance. If the hwirq is less than the ->linear_size, then the linear map is used to reverse map the hwirq. Otherwise the radix tree is used. The test for which map to use is no more expensive than the existing code, so the performance of the fast path is preserved.

It also means that complex interrupt controllers can use both the linear map and a tree in the same domain. This may be useful for an interrupt controller with a base set of core irqs and a large number of GPIOs which might be used as irqs. The linear map could cover the core irqs, and the tree used for the gpios.

v2: Drop reorganization of revmap data

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rob Herring <rob.herring@calxeda.com>
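The scheme the message describes reduces to a small amount of logic, captured in this stand-alone C sketch (illustrative only, not part of the patch: the demo_* names are invented, and a simple linked list stands in for the kernel's radix tree). A hwirq below the linear size resolves through a flat array; anything above it goes through the sparse fallback.

#include <stdio.h>
#include <stdlib.h>

/* Sparse fallback entry; a stand-in for the kernel's radix tree. */
struct sparse_entry {
	unsigned long hwirq;
	unsigned int virq;
	struct sparse_entry *next;
};

/* Illustrative domain: flat array for small hwirqs, list for the rest. */
struct demo_domain {
	unsigned long linear_size;
	unsigned int *linear_revmap;	/* hwirq -> virq, 0 = unmapped */
	struct sparse_entry *sparse;
};

static void demo_associate(struct demo_domain *d, unsigned long hwirq,
			   unsigned int virq)
{
	if (hwirq < d->linear_size) {
		d->linear_revmap[hwirq] = virq;		/* fast path */
	} else {
		struct sparse_entry *e = malloc(sizeof(*e));
		if (!e)
			return;
		e->hwirq = hwirq;
		e->virq = virq;
		e->next = d->sparse;
		d->sparse = e;				/* slow path */
	}
}

static unsigned int demo_revmap(struct demo_domain *d, unsigned long hwirq)
{
	if (hwirq < d->linear_size)
		return d->linear_revmap[hwirq];
	for (struct sparse_entry *e = d->sparse; e; e = e->next)
		if (e->hwirq == hwirq)
			return e->virq;
	return 0;
}

int main(void)
{
	struct demo_domain d = {
		.linear_size = 16,
		.linear_revmap = calloc(16, sizeof(unsigned int)),
	};

	demo_associate(&d, 3, 35);	/* core irq: linear map */
	demo_associate(&d, 200, 77);	/* gpio-style irq: sparse fallback */

	printf("hwirq 3   -> virq %u\n", demo_revmap(&d, 3));
	printf("hwirq 200 -> virq %u\n", demo_revmap(&d, 200));
	return 0;
}

In the patch itself the array is tail-allocated together with the domain (see the kzalloc_node() change below), so the fast path costs one bounds check plus an array load.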
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/irqdomain.c	| 107
1 file changed, 29 insertions(+), 78 deletions(-)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index b1b5e6793fd2..5a1d8ec8509e 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -34,22 +34,24 @@ static struct irq_domain *irq_default_domain;
  * to IRQ domain, or NULL on failure.
  */
 static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
-					   unsigned int revmap_type,
+					   unsigned int revmap_type, int size,
 					   const struct irq_domain_ops *ops,
 					   void *host_data)
 {
 	struct irq_domain *domain;
 
-	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
-			      of_node_to_nid(of_node));
+	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
+			      GFP_KERNEL, of_node_to_nid(of_node));
 	if (WARN_ON(!domain))
 		return NULL;
 
 	/* Fill structure */
+	INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
 	domain->revmap_type = revmap_type;
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
+	domain->revmap_data.linear.size = size;
 
 	return domain;
 }
@@ -81,22 +83,12 @@ void irq_domain_remove(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_TREE:
-		/*
-		 * radix_tree_delete() takes care of destroying the root
-		 * node when all entries are removed. Shout if there are
-		 * any mappings left.
-		 */
-		WARN_ON(domain->revmap_data.tree.height);
-		break;
-	case IRQ_DOMAIN_MAP_LINEAR:
-		kfree(domain->revmap_data.linear.revmap);
-		domain->revmap_data.linear.size = 0;
-		break;
-	case IRQ_DOMAIN_MAP_NOMAP:
-		break;
-	}
+	/*
+	 * radix_tree_delete() takes care of destroying the root
+	 * node when all entries are removed. Shout if there are
+	 * any mappings left.
+	 */
+	WARN_ON(domain->revmap_data.tree.height);
 
 	list_del(&domain->link);
 
@@ -223,20 +215,11 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 					 void *host_data)
 {
 	struct irq_domain *domain;
-	unsigned int *revmap;
 
-	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
-			      of_node_to_nid(of_node));
-	if (WARN_ON(!revmap))
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, size, ops, host_data);
+	if (!domain)
 		return NULL;
 
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
-	if (!domain) {
-		kfree(revmap);
-		return NULL;
-	}
-	domain->revmap_data.linear.size = size;
-	domain->revmap_data.linear.revmap = revmap;
 	irq_domain_add(domain);
 	return domain;
 }
@@ -248,7 +231,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					 void *host_data)
 {
 	struct irq_domain *domain = irq_domain_alloc(of_node,
-				IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+				IRQ_DOMAIN_MAP_NOMAP, 0, ops, host_data);
 	if (domain) {
 		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
@@ -258,28 +241,6 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
258EXPORT_SYMBOL_GPL(irq_domain_add_nomap); 241EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
259 242
260/** 243/**
261 * irq_domain_add_tree()
262 * @of_node: pointer to interrupt controller's device tree node.
263 * @ops: map/unmap domain callbacks
264 *
265 * Note: The radix tree will be allocated later during boot automatically
266 * (the reverse mapping will use the slow path until that happens).
267 */
268struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
269 const struct irq_domain_ops *ops,
270 void *host_data)
271{
272 struct irq_domain *domain = irq_domain_alloc(of_node,
273 IRQ_DOMAIN_MAP_TREE, ops, host_data);
274 if (domain) {
275 INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
276 irq_domain_add(domain);
277 }
278 return domain;
279}
280EXPORT_SYMBOL_GPL(irq_domain_add_tree);
281
282/**
283 * irq_find_host() - Locates a domain for a given device node 244 * irq_find_host() - Locates a domain for a given device node
284 * @node: device-tree node of the interrupt controller 245 * @node: device-tree node of the interrupt controller
285 */ 246 */
@@ -359,17 +320,13 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
 		irq_data->domain = NULL;
 		irq_data->hwirq = 0;
 
-		/* Clear reverse map */
-		switch(domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = 0;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
+		/* Clear reverse map for this hwirq */
+		if (hwirq < domain->revmap_data.linear.size) {
+			domain->linear_revmap[hwirq] = 0;
+		} else {
 			mutex_lock(&revmap_trees_mutex);
 			radix_tree_delete(&domain->revmap_data.tree, hwirq);
 			mutex_unlock(&revmap_trees_mutex);
-			break;
 		}
 	}
 }
@@ -421,16 +378,12 @@ int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 			domain->name = irq_data->chip->name;
 		}
 
-		switch (domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = virq;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
+		if (hwirq < domain->revmap_data.linear.size) {
+			domain->linear_revmap[hwirq] = virq;
+		} else {
 			mutex_lock(&revmap_trees_mutex);
 			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
 			mutex_unlock(&revmap_trees_mutex);
-			break;
 		}
 
 		irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -667,13 +620,6 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	switch (domain->revmap_type) {
 	case IRQ_DOMAIN_MAP_LINEAR:
 		return irq_linear_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_TREE:
-		rcu_read_lock();
-		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
-		rcu_read_unlock();
-		if (data)
-			return data->irq;
-		break;
 	case IRQ_DOMAIN_MAP_NOMAP:
 		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
@@ -696,13 +642,18 @@ EXPORT_SYMBOL_GPL(irq_find_mapping);
 unsigned int irq_linear_revmap(struct irq_domain *domain,
 			       irq_hw_number_t hwirq)
 {
+	struct irq_data *data;
 	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
 
 	/* Check revmap bounds; complain if exceeded */
-	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
-		return 0;
+	if (hwirq >= domain->revmap_data.linear.size) {
+		rcu_read_lock();
+		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+		rcu_read_unlock();
+		return data ? data->irq : 0;
+	}
 
-	return domain->revmap_data.linear.revmap[hwirq];
+	return domain->linear_revmap[hwirq];
 }
 EXPORT_SYMBOL_GPL(irq_linear_revmap);
 
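For context, this is how a driver might exercise the merged behaviour once the patch is applied (an illustrative sketch, not from the commit: demo_domain, demo_probe and the size of 32 are assumptions; irq_domain_add_linear(), irq_create_mapping() and irq_domain_simple_ops are the existing APIs):

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* One linear domain sized for the 32 "core" hwirqs; any larger hwirq
 * is transparently tracked in the domain's radix tree. */
static struct irq_domain *demo_domain;

static int demo_probe(struct device_node *np)
{
	demo_domain = irq_domain_add_linear(np, 32, &irq_domain_simple_ops,
					    NULL);
	if (!demo_domain)
		return -ENOMEM;

	irq_create_mapping(demo_domain, 5);	/* hits linear_revmap[] */
	irq_create_mapping(demo_domain, 200);	/* lands in revmap_data.tree */
	return 0;
}

Both mappings are later resolved through the same irq_find_mapping()/irq_linear_revmap() path shown above.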