author	Paul Mundt <lethal@linux-sh.org>	2012-08-01 04:14:52 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2012-08-01 04:14:52 -0400
commit	f38770477a30d03b6296570071ca2fcd6d3a5f11 (patch)
tree	3dd11460c4115d97137a6ff82683e593a3813607
parent	1ca8fe38a6c958babe6571e39cb0115a40b94603 (diff)
parent	1d6a21b0a672fb29b01ccf397d478e0541e17716 (diff)
Merge branch 'common/irqdomain' into sh-latest
-rw-r--r--	Documentation/IRQ-domain.txt	5
-rw-r--r--	arch/powerpc/sysdev/xics/icp-hv.c	2
-rw-r--r--	arch/powerpc/sysdev/xics/icp-native.c	2
-rw-r--r--	arch/powerpc/sysdev/xics/xics-common.c	3
-rw-r--r--	drivers/sh/intc/Kconfig	4
-rw-r--r--	drivers/sh/intc/Makefile	2
-rw-r--r--	drivers/sh/intc/core.c	11
-rw-r--r--	drivers/sh/intc/internals.h	5
-rw-r--r--	drivers/sh/intc/irqdomain.c	68
-rw-r--r--	include/linux/irqdomain.h	28
-rw-r--r--	include/linux/of.h	15
-rw-r--r--	kernel/irq/irqdomain.c	362
12 files changed, 333 insertions, 174 deletions
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 27dcaabfb4db..1401cece745a 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -93,6 +93,7 @@ Linux IRQ number into the hardware.
 Most drivers cannot use this mapping.
 
 ==== Legacy ====
+irq_domain_add_simple()
 irq_domain_add_legacy()
 irq_domain_add_legacy_isa()
 
@@ -115,3 +116,7 @@ The legacy map should only be used if fixed IRQ mappings must be
 supported. For example, ISA controllers would use the legacy map for
 mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
 numbers.
+
+Most users of legacy mappings should use irq_domain_add_simple() which
+will use a legacy domain only if an IRQ range is supplied by the
+system and will otherwise use a linear domain mapping.
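
For illustration only (not part of this commit; the controller name and sizes are hypothetical), a minimal sketch of the driver-side usage described above: with a positive first_irq the call behaves like irq_domain_add_legacy(), otherwise it sets up a linear domain.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	32	/* hypothetical controller size */

static struct irq_domain *foo_domain;

static const struct irq_domain_ops foo_irq_ops = {
	.xlate	= irq_domain_xlate_onecell,
};

static int foo_intc_init(struct device_node *np, unsigned int first_irq)
{
	/*
	 * first_irq > 0: fixed Linux IRQ numbers (legacy mapping);
	 * first_irq == 0: linear domain, IRQs allocated on demand.
	 */
	foo_domain = irq_domain_add_simple(np, FOO_NR_IRQS, first_irq,
					   &foo_irq_ops, NULL);
	if (!foo_domain)
		return -ENOMEM;

	return 0;
}
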
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 253dce98c16e..14469cf9df68 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -111,7 +111,7 @@ static unsigned int icp_hv_get_irq(void)
 	if (vec == XICS_IRQ_SPURIOUS)
 		return NO_IRQ;
 
-	irq = irq_radix_revmap_lookup(xics_host, vec);
+	irq = irq_find_mapping(xics_host, vec);
 	if (likely(irq != NO_IRQ)) {
 		xics_push_cppr(vec);
 		return irq;
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 4c79b6fbee1c..48861d3fcd07 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -119,7 +119,7 @@ static unsigned int icp_native_get_irq(void)
 	if (vec == XICS_IRQ_SPURIOUS)
 		return NO_IRQ;
 
-	irq = irq_radix_revmap_lookup(xics_host, vec);
+	irq = irq_find_mapping(xics_host, vec);
 	if (likely(irq != NO_IRQ)) {
 		xics_push_cppr(vec);
 		return irq;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index cd1d18db92c6..9049d9f44485 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -329,9 +329,6 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
 
 	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
 
-	/* Insert the interrupt mapping into the radix tree for fast lookup */
-	irq_radix_revmap_insert(xics_host, virq, hw);
-
 	/* They aren't all level sensitive but we just don't really know */
 	irq_set_status_flags(virq, IRQ_LEVEL);
 
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index c88cbccc62b0..a305731742a9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -1,3 +1,7 @@
+config SH_INTC
+	def_bool y
+	select IRQ_DOMAIN
+
 comment "Interrupt controller options"
 
 config INTC_USERIMASK
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
index 44f006d09471..54ec2a0643df 100644
--- a/drivers/sh/intc/Makefile
+++ b/drivers/sh/intc/Makefile
@@ -1,4 +1,4 @@
-obj-y 	:= access.o chip.o core.o handle.o virq.o
+obj-y 	:= access.o chip.o core.o handle.o irqdomain.o virq.o
 
 obj-$(CONFIG_INTC_BALANCING) += balancing.o
 obj-$(CONFIG_INTC_USERIMASK) += userimask.o
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 7e562ccb6997..2374468615ed 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,6 +25,7 @@
 #include <linux/stat.h>
 #include <linux/interrupt.h>
 #include <linux/sh_intc.h>
+#include <linux/irqdomain.h>
 #include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/list.h>
@@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 
 	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
 
+	intc_irq_domain_init(d, hw);
+
 	/* register the vectors one by one */
 	for (i = 0; i < hw->nr_vectors; i++) {
 		struct intc_vect *vect = hw->vectors + i;
@@ -319,8 +322,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 		if (!vect->enum_id)
 			continue;
 
-		res = irq_alloc_desc_at(irq, numa_node_id());
-		if (res != irq && res != -EEXIST) {
+		res = irq_create_identity_mapping(d->domain, irq);
+		if (unlikely(res)) {
 			pr_err("can't get irq_desc for %d\n", irq);
 			continue;
 		}
@@ -340,8 +343,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 			 * IRQ support, each vector still needs to have
 			 * its own backing irq_desc.
 			 */
-			res = irq_alloc_desc_at(irq2, numa_node_id());
-			if (res != irq2 && res != -EEXIST) {
+			res = irq_create_identity_mapping(d->domain, irq2);
+			if (unlikely(res)) {
 				pr_err("can't get irq_desc for %d\n", irq2);
 				continue;
 			}
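
A minimal sketch of what the conversion above amounts to (the IRQ number is hypothetical, not from the commit): irq_create_identity_mapping() reserves the irq_desc and associates it with the domain in one step, which is why the separate irq_alloc_desc_at() call can go away.

#include <linux/irqdomain.h>
#include <linux/kernel.h>

/* hypothetical: back Linux IRQ 16 with hwirq 16 in the given domain */
static int example_register_vector(struct irq_domain *domain)
{
	unsigned int irq = 16;
	int res;

	res = irq_create_identity_mapping(domain, irq);
	if (unlikely(res)) {
		pr_err("can't get irq_desc for %d\n", irq);
		return res;
	}

	return 0;
}
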
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index f034a979a16f..7dff08e2a071 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -1,5 +1,6 @@
 #include <linux/sh_intc.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -66,6 +67,7 @@ struct intc_desc_int {
 	unsigned int nr_sense;
 	struct intc_window *window;
 	unsigned int nr_windows;
+	struct irq_domain *domain;
 	struct irq_chip chip;
 	bool skip_suspend;
 };
@@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq);
 void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
 			      intc_enum enum_id, int enable);
 
+/* irqdomain.c */
+void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw);
+
 /* virq.c */
 void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
 void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c
new file mode 100644
index 000000000000..3968f1c3c5c3
--- /dev/null
+++ b/drivers/sh/intc/irqdomain.c
@@ -0,0 +1,68 @@
+/*
+ * IRQ domain support for SH INTC subsystem
+ *
+ * Copyright (C) 2012  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/irqdomain.h>
+#include <linux/sh_intc.h>
+#include <linux/export.h>
+#include "internals.h"
+
+/**
+ * intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs.
+ *
+ * This takes care of exception vector to hwirq translation through
+ * by way of evt2irq() translation.
+ *
+ * Note: For platforms that use a flat vector space without INTEVT this
+ * basically just mimics irq_domain_xlate_onecell() by way of a nopped
+ * out evt2irq() implementation.
+ */
+static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
+			  const u32 *intspec, unsigned int intsize,
+			  unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+
+	*out_hwirq = evt2irq(intspec[0]);
+	*out_type = IRQ_TYPE_NONE;
+
+	return 0;
+}
+
+static const struct irq_domain_ops intc_evt_ops = {
+	.xlate		= intc_evt_xlate,
+};
+
+void __init intc_irq_domain_init(struct intc_desc_int *d,
+				 struct intc_hw_desc *hw)
+{
+	unsigned int irq_base, irq_end;
+
+	/*
+	 * Quick linear revmap check
+	 */
+	irq_base = evt2irq(hw->vectors[0].vect);
+	irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect);
+
+	/*
+	 * Linear domains have a hard-wired assertion that IRQs start at
+	 * 0 in order to make some performance optimizations. Lamely
+	 * restrict the linear case to these conditions here, taking the
+	 * tree penalty for linear cases with non-zero hwirq bases.
+	 */
+	if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
+		d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
+						  &intc_evt_ops, NULL);
+	else
+		d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
+
+	BUG_ON(!d->domain);
+}
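
To make the xlate path above concrete, a hedged sketch (the device tree cell value and node are hypothetical): irq_create_of_mapping() hands the one-cell specifier to the domain's .xlate, which here returns evt2irq() of the INTEVT vector as the hwirq to be mapped.

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/types.h>

/* hypothetical: a node carries "interrupts = <0x620>;" */
static unsigned int example_of_irq(struct device_node *controller)
{
	u32 intspec[1] = { 0x620 };

	/* resolves to the Linux IRQ for hwirq evt2irq(0x620) */
	return irq_create_of_mapping(controller, intspec, 1);
}
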
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 5abb533eb8eb..0d5b17bf5e51 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -112,6 +112,11 @@ struct irq_domain {
 };
 
 #ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
 struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 unsigned int size,
 					 unsigned int first_irq,
@@ -144,16 +149,31 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
 
 extern void irq_domain_remove(struct irq_domain *host);
 
+extern int irq_domain_associate_many(struct irq_domain *domain,
+				     unsigned int irq_base,
+				     irq_hw_number_t hwirq_base, int count);
+static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
+				       irq_hw_number_t hwirq)
+{
+	return irq_domain_associate_many(domain, irq, hwirq, 1);
+}
+
 extern unsigned int irq_create_mapping(struct irq_domain *host,
 				       irq_hw_number_t hwirq);
 extern void irq_dispose_mapping(unsigned int virq);
 extern unsigned int irq_find_mapping(struct irq_domain *host,
 				     irq_hw_number_t hwirq);
 extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
-				    irq_hw_number_t hwirq);
-extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
-					    irq_hw_number_t hwirq);
+extern int irq_create_strict_mappings(struct irq_domain *domain,
+				      unsigned int irq_base,
+				      irq_hw_number_t hwirq_base, int count);
+
+static inline int irq_create_identity_mapping(struct irq_domain *host,
+					      irq_hw_number_t hwirq)
+{
+	return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+}
+
 extern unsigned int irq_linear_revmap(struct irq_domain *host,
 				      irq_hw_number_t hwirq);
 
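
For illustration (IRQ and hwirq numbers are hypothetical, not from the commit), a sketch of the two new helpers declared above: irq_create_strict_mappings() reserves and associates a whole pre-defined range, and irq_create_identity_mapping() is the one-at-a-time case where the Linux IRQ number equals the hwirq.

#include <linux/irqdomain.h>

/* hypothetical fixed layout: hwirqs 32..47 must land on Linux IRQs 64..79 */
static int example_install_fixed_range(struct irq_domain *domain)
{
	int ret;

	ret = irq_create_strict_mappings(domain, 64, 32, 16);
	if (ret)
		return ret;

	/* single identity mapping: Linux IRQ 100 backs hwirq 100 */
	return irq_create_identity_mapping(domain, 100);
}
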
diff --git a/include/linux/of.h b/include/linux/of.h
index 42c2a58328c1..5919ee33f2b7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -21,6 +21,7 @@
 #include <linux/kref.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spinlock.h>
+#include <linux/topology.h>
 
 #include <asm/byteorder.h>
 #include <asm/errno.h>
@@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 
 #define OF_BAD_ADDR	((u64)-1)
 
-#ifndef of_node_to_nid
-static inline int of_node_to_nid(struct device_node *np) { return -1; }
-#define of_node_to_nid of_node_to_nid
-#endif
-
 static inline const char* of_node_full_name(struct device_node *np)
 {
 	return np ? np->full_name : "<no-node>";
@@ -427,6 +423,15 @@ static inline int of_machine_is_compatible(const char *compat)
 	while (0)
 #endif /* CONFIG_OF */
 
+#ifndef of_node_to_nid
+static inline int of_node_to_nid(struct device_node *np)
+{
+	return numa_node_id();
+}
+
+#define of_node_to_nid of_node_to_nid
+#endif
+
 /**
  * of_property_read_bool - Findfrom a property
  * @np: device node from which the property value is to be read.
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 38c5eb839c92..49a77727db42 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -45,7 +46,8 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
 {
 	struct irq_domain *domain;
 
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
+			      of_node_to_nid(of_node));
 	if (WARN_ON(!domain))
 		return NULL;
 
@@ -138,6 +140,36 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
 }
 
 /**
+ * irq_domain_add_simple() - Allocate and register a simple irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates a legacy irq_domain if irq_base is positive or a linear
+ * domain otherwise.
+ *
+ * This is intended to implement the expected behaviour for most
+ * interrupt controllers which is that a linear mapping should
+ * normally be used unless the system requires a legacy mapping in
+ * order to support supplying interrupt numbers during non-DT
+ * registration of devices.
+ */
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	if (first_irq > 0)
+		return irq_domain_add_legacy(of_node, size, first_irq, 0,
+					     ops, host_data);
+	else
+		return irq_domain_add_linear(of_node, size, ops, host_data);
+}
+
+/**
  * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: total number of irqs in legacy mapping
@@ -203,7 +235,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 		 * one can then use irq_create_mapping() to
 		 * explicitly change them
 		 */
-		ops->map(domain, irq, hwirq);
+		if (ops->map)
+			ops->map(domain, irq, hwirq);
 
 		/* Clear norequest flags */
 		irq_clear_status_flags(irq, IRQ_NOREQUEST);
@@ -215,7 +248,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: Number of interrupts in the domain.
  * @ops: map/unmap domain callbacks
@@ -229,7 +262,8 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 	struct irq_domain *domain;
 	unsigned int *revmap;
 
-	revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
+			      of_node_to_nid(of_node));
 	if (WARN_ON(!revmap))
 		return NULL;
 
@@ -330,24 +364,112 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
-static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
-			  irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-
-	irq_data->hwirq = hwirq;
-	irq_data->domain = domain;
-	if (domain->ops->map(domain, virq, hwirq)) {
-		pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
-		irq_data->domain = NULL;
-		irq_data->hwirq = 0;
-		return -1;
-	}
-
-	irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
-	return 0;
-}
+static void irq_domain_disassociate_many(struct irq_domain *domain,
+					 unsigned int irq_base, int count)
+{
+	/*
+	 * disassociate in reverse order;
+	 * not strictly necessary, but nice for unwinding
+	 */
+	while (count--) {
+		int irq = irq_base + count;
+		struct irq_data *irq_data = irq_get_irq_data(irq);
+		irq_hw_number_t hwirq = irq_data->hwirq;
+
+		if (WARN_ON(!irq_data || irq_data->domain != domain))
+			continue;
+
+		irq_set_status_flags(irq, IRQ_NOREQUEST);
+
+		/* remove chip and handler */
+		irq_set_chip_and_handler(irq, NULL, NULL);
+
+		/* Make sure it's completed */
+		synchronize_irq(irq);
+
+		/* Tell the PIC about it */
+		if (domain->ops->unmap)
+			domain->ops->unmap(domain, irq);
+		smp_mb();
+
+		irq_data->domain = NULL;
+		irq_data->hwirq = 0;
+
+		/* Clear reverse map */
+		switch(domain->revmap_type) {
+		case IRQ_DOMAIN_MAP_LINEAR:
+			if (hwirq < domain->revmap_data.linear.size)
+				domain->revmap_data.linear.revmap[hwirq] = 0;
+			break;
+		case IRQ_DOMAIN_MAP_TREE:
+			mutex_lock(&revmap_trees_mutex);
+			radix_tree_delete(&domain->revmap_data.tree, hwirq);
+			mutex_unlock(&revmap_trees_mutex);
+			break;
+		}
+	}
+}
+
+int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+			      irq_hw_number_t hwirq_base, int count)
+{
+	unsigned int virq = irq_base;
+	irq_hw_number_t hwirq = hwirq_base;
+	int i, ret;
+
+	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+	for (i = 0; i < count; i++) {
+		struct irq_data *irq_data = irq_get_irq_data(virq + i);
+
+		if (WARN(!irq_data, "error: irq_desc not allocated; "
+			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+			return -EINVAL;
+		if (WARN(irq_data->domain, "error: irq_desc already associated; "
+			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+			return -EINVAL;
+	};
+
+	for (i = 0; i < count; i++, virq++, hwirq++) {
+		struct irq_data *irq_data = irq_get_irq_data(virq);
+
+		irq_data->hwirq = hwirq;
+		irq_data->domain = domain;
+		if (domain->ops->map) {
+			ret = domain->ops->map(domain, virq, hwirq);
+			if (ret != 0) {
+				pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
+				       virq, hwirq, ret);
+				WARN_ON(1);
+				irq_data->domain = NULL;
+				irq_data->hwirq = 0;
+				goto err_unmap;
+			}
+		}
+
+		switch (domain->revmap_type) {
+		case IRQ_DOMAIN_MAP_LINEAR:
+			if (hwirq < domain->revmap_data.linear.size)
+				domain->revmap_data.linear.revmap[hwirq] = virq;
+			break;
+		case IRQ_DOMAIN_MAP_TREE:
+			mutex_lock(&revmap_trees_mutex);
+			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+			mutex_unlock(&revmap_trees_mutex);
+			break;
+		}
+
+		irq_clear_status_flags(virq, IRQ_NOREQUEST);
+	}
+
+	return 0;
+
+ err_unmap:
+	irq_domain_disassociate_many(domain, irq_base, i);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 
 /**
  * irq_create_direct_mapping() - Allocate an irq for direct mapping
@@ -364,10 +486,10 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	if (domain == NULL)
 		domain = irq_default_domain;
 
-	BUG_ON(domain == NULL);
-	WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
+		return 0;
 
-	virq = irq_alloc_desc_from(1, 0);
+	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (!virq) {
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
@@ -380,7 +502,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	}
 	pr_debug("create_direct obtained virq %d\n", virq);
 
-	if (irq_setup_virq(domain, virq, virq)) {
+	if (irq_domain_associate(domain, virq, virq)) {
 		irq_free_desc(virq);
 		return 0;
 	}
@@ -433,17 +555,16 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	hint = hwirq % nr_irqs;
 	if (hint == 0)
 		hint++;
-	virq = irq_alloc_desc_from(hint, 0);
+	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
 	if (virq <= 0)
-		virq = irq_alloc_desc_from(1, 0);
+		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (virq <= 0) {
 		pr_debug("-> virq allocation failed\n");
 		return 0;
 	}
 
-	if (irq_setup_virq(domain, virq, hwirq)) {
-		if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
-			irq_free_desc(virq);
+	if (irq_domain_associate(domain, virq, hwirq)) {
+		irq_free_desc(virq);
 		return 0;
 	}
 
@@ -454,6 +575,44 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping);
 
+/**
+ * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
+ * @domain: domain owning the interrupt range
+ * @irq_base: beginning of linux IRQ range
+ * @hwirq_base: beginning of hardware IRQ range
+ * @count: Number of interrupts to map
+ *
+ * This routine is used for allocating and mapping a range of hardware
+ * irqs to linux irqs where the linux irq numbers are at pre-defined
+ * locations. For use by controllers that already have static mappings
+ * to insert in to the domain.
+ *
+ * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
+ * domain insertion.
+ *
+ * 0 is returned upon success, while any failure to establish a static
+ * mapping is treated as an error.
+ */
+int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
+			       irq_hw_number_t hwirq_base, int count)
+{
+	int ret;
+
+	ret = irq_alloc_descs(irq_base, irq_base, count,
+			      of_node_to_nid(domain->of_node));
+	if (unlikely(ret < 0))
+		return ret;
+
+	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
+	if (unlikely(ret < 0)) {
+		irq_free_descs(irq_base, count);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
+
 unsigned int irq_create_of_mapping(struct device_node *controller,
 				   const u32 *intspec, unsigned int intsize)
 {
@@ -511,7 +670,6 @@ void irq_dispose_mapping(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
 	struct irq_domain *domain;
-	irq_hw_number_t hwirq;
 
 	if (!virq || !irq_data)
 		return;
@@ -524,33 +682,7 @@ void irq_dispose_mapping(unsigned int virq)
 	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
 		return;
 
-	irq_set_status_flags(virq, IRQ_NOREQUEST);
-
-	/* remove chip and handler */
-	irq_set_chip_and_handler(virq, NULL, NULL);
-
-	/* Make sure it's completed */
-	synchronize_irq(virq);
-
-	/* Tell the PIC about it */
-	if (domain->ops->unmap)
-		domain->ops->unmap(domain, virq);
-	smp_mb();
-
-	/* Clear reverse map */
-	hwirq = irq_data->hwirq;
-	switch(domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LINEAR:
-		if (hwirq < domain->revmap_data.linear.size)
-			domain->revmap_data.linear.revmap[hwirq] = 0;
-		break;
-	case IRQ_DOMAIN_MAP_TREE:
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&domain->revmap_data.tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-		break;
-	}
-
+	irq_domain_disassociate_many(domain, virq, 1);
 	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@@ -559,16 +691,11 @@ EXPORT_SYMBOL_GPL(irq_dispose_mapping);
  * irq_find_mapping() - Find a linux irq from an hw irq number.
  * @domain: domain owning this hardware interrupt
  * @hwirq: hardware irq number in that domain space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
  */
 unsigned int irq_find_mapping(struct irq_domain *domain,
 			      irq_hw_number_t hwirq)
 {
-	unsigned int i;
-	unsigned int hint = hwirq % nr_irqs;
+	struct irq_data *data;
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
@@ -576,115 +703,47 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		return 0;
 
-	/* legacy -> bail early */
-	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+	switch (domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LEGACY:
 		return irq_domain_legacy_revmap(domain, hwirq);
-
-	/* Slow path does a linear search of the map */
-	if (hint == 0)
-		hint = 1;
-	i = hint;
-	do {
-		struct irq_data *data = irq_get_irq_data(i);
+	case IRQ_DOMAIN_MAP_LINEAR:
+		return irq_linear_revmap(domain, hwirq);
+	case IRQ_DOMAIN_MAP_TREE:
+		rcu_read_lock();
+		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+		rcu_read_unlock();
+		if (data)
+			return data->irq;
+		break;
+	case IRQ_DOMAIN_MAP_NOMAP:
+		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
-			return i;
-		i++;
-		if (i >= nr_irqs)
-			i = 1;
-	} while(i != hint);
+			return hwirq;
+		break;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
 /**
- * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
-				     irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data;
-
-	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
-		return irq_find_mapping(domain, hwirq);
-
-	/*
-	 * Freeing an irq can delete nodes along the path to
-	 * do the lookup via call_rcu.
-	 */
-	rcu_read_lock();
-	irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
-	rcu_read_unlock();
-
-	/*
-	 * If found in radix tree, then fine.
-	 * Else fallback to linear lookup - this should not happen in practice
-	 * as it means that we failed to insert the node in the radix tree.
-	 */
-	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
-
-/**
- * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
- * @domain: domain owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that domain space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
-			     irq_hw_number_t hwirq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-
-	if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
-		return;
-
-	if (virq) {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
-
-/**
  * irq_linear_revmap() - Find a linux irq from a hw irq number.
  * @domain: domain owning this hardware interrupt
  * @hwirq: hardware irq number in that domain space
  *
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It does fallback to the slow path if the revmap doesn't exist
- * yet and will create the revmap entry with appropriate locking
+ * This is a fast path that can be called directly by irq controller code to
+ * save a handful of instructions.
  */
 unsigned int irq_linear_revmap(struct irq_domain *domain,
 			       irq_hw_number_t hwirq)
 {
-	unsigned int *revmap;
-
-	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
-		return irq_find_mapping(domain, hwirq);
+	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
 
-	/* Check revmap bounds */
-	if (unlikely(hwirq >= domain->revmap_data.linear.size))
-		return irq_find_mapping(domain, hwirq);
-
-	/* Check if revmap was allocated */
-	revmap = domain->revmap_data.linear.revmap;
-	if (unlikely(revmap == NULL))
-		return irq_find_mapping(domain, hwirq);
-
-	/* Fill up revmap with slow path if no mapping found */
-	if (unlikely(!revmap[hwirq]))
-		revmap[hwirq] = irq_find_mapping(domain, hwirq);
+	/* Check revmap bounds; complain if exceeded */
+	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
+		return 0;
 
-	return revmap[hwirq];
+	return domain->revmap_data.linear.revmap[hwirq];
 }
 EXPORT_SYMBOL_GPL(irq_linear_revmap);
 
@@ -761,12 +820,6 @@ static int __init irq_debugfs_init(void)
 __initcall(irq_debugfs_init);
 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
 
-static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
-				 irq_hw_number_t hwirq)
-{
-	return 0;
-}
-
 /**
  * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
  *
@@ -829,7 +882,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d,
 EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
 
 const struct irq_domain_ops irq_domain_simple_ops = {
-	.map = irq_domain_simple_map,
 	.xlate = irq_domain_xlate_onetwocell,
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
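
Closing sketch (controller details are hypothetical, not part of the commit) of the intended fast-path use of irq_linear_revmap(): a chained demux handler that knows its domain is linear can translate pending hwirqs without going through irq_find_mapping().

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

/* hypothetical chained handler for a controller using a linear revmap */
static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	void __iomem *pending_reg = irq_desc_get_chip_data(desc);
	unsigned long pending = readl(pending_reg);
	unsigned int hwirq, virq;

	for_each_set_bit(hwirq, &pending, 32) {
		virq = irq_linear_revmap(domain, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}
}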