Diffstat (limited to 'kernel/irq/irqdomain.c')

 -rw-r--r--  kernel/irq/irqdomain.c | 587 ++++++++++-----------------
 1 file changed, 183 insertions(+), 404 deletions(-)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 54a4d5223238..2d7cd3428365 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -16,12 +16,6 @@
 #include <linux/smp.h>
 #include <linux/fs.h>
 
-#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
-				 * ie. legacy 8259, gets irqs 1..15 */
-#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
-
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
@@ -29,9 +23,11 @@ static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
 /**
- * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * __irq_domain_add() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
+ * @size: Size of linear map; 0 for radix mapping only
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ *              direct mapping
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -39,41 +35,35 @@ static struct irq_domain *irq_default_domain;
  * register allocated irq_domain with irq_domain_register(). Returns pointer
  * to IRQ domain, or NULL on failure.
  */
-static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
-					   unsigned int revmap_type,
-					   const struct irq_domain_ops *ops,
-					   void *host_data)
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+				    irq_hw_number_t hwirq_max, int direct_max,
+				    const struct irq_domain_ops *ops,
+				    void *host_data)
 {
 	struct irq_domain *domain;
 
-	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
-			      of_node_to_nid(of_node));
+	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
+			      GFP_KERNEL, of_node_to_nid(of_node));
 	if (WARN_ON(!domain))
 		return NULL;
 
 	/* Fill structure */
-	domain->revmap_type = revmap_type;
+	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
+	domain->hwirq_max = hwirq_max;
+	domain->revmap_size = size;
+	domain->revmap_direct_max_irq = direct_max;
 
-	return domain;
-}
-
-static void irq_domain_free(struct irq_domain *domain)
-{
-	of_node_put(domain->of_node);
-	kfree(domain);
-}
-
-static void irq_domain_add(struct irq_domain *domain)
-{
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("Allocated domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+
+	pr_debug("Added domain %s\n", domain->name);
+	return domain;
 }
+EXPORT_SYMBOL_GPL(__irq_domain_add);
 
 /**
  * irq_domain_remove() - Remove an irq domain.
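
The three type-specific constructors collapse into this single allocator, and the
linear reverse map becomes a tail array folded into the same kzalloc_node() call;
that is what the added sizeof(unsigned int) * size term pays for. A minimal sketch
of the reverse-map fields involved (the real struct irq_domain in
include/linux/irqdomain.h carries further members):

/* Sketch only, not the complete structure definition. */
struct irq_domain {
	struct list_head link;
	const char *name;
	const struct irq_domain_ops *ops;
	void *host_data;
	struct device_node *of_node;

	/* reverse map data */
	irq_hw_number_t hwirq_max;		/* largest hwirq accepted */
	unsigned int revmap_direct_max_irq;	/* hwirq == virq window */
	unsigned int revmap_size;		/* length of linear_revmap[] */
	struct radix_tree_root revmap_tree;	/* hwirqs >= revmap_size */
	unsigned int linear_revmap[];		/* the tail array */
};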
@@ -87,29 +77,12 @@ void irq_domain_remove(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LEGACY:
-		/*
-		 * Legacy domains don't manage their own irq_desc
-		 * allocations, we expect the caller to handle irq_desc
-		 * freeing on their own.
-		 */
-		break;
-	case IRQ_DOMAIN_MAP_TREE:
-		/*
-		 * radix_tree_delete() takes care of destroying the root
-		 * node when all entries are removed. Shout if there are
-		 * any mappings left.
-		 */
-		WARN_ON(domain->revmap_data.tree.height);
-		break;
-	case IRQ_DOMAIN_MAP_LINEAR:
-		kfree(domain->revmap_data.linear.revmap);
-		domain->revmap_data.linear.size = 0;
-		break;
-	case IRQ_DOMAIN_MAP_NOMAP:
-		break;
-	}
+	/*
+	 * radix_tree_delete() takes care of destroying the root
+	 * node when all entries are removed. Shout if there are
+	 * any mappings left.
+	 */
+	WARN_ON(domain->revmap_tree.height);
 
 	list_del(&domain->link);
 
@@ -121,44 +94,30 @@ void irq_domain_remove(struct irq_domain *domain)
 
 	mutex_unlock(&irq_domain_mutex);
 
-	pr_debug("Removed domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Removed domain %s\n", domain->name);
 
-	irq_domain_free(domain);
+	of_node_put(domain->of_node);
+	kfree(domain);
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
 
-static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
-					     irq_hw_number_t hwirq)
-{
-	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
-	int size = domain->revmap_data.legacy.size;
-
-	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
-		return 0;
-	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
-}
-
 /**
- * irq_domain_add_simple() - Allocate and register a simple irq_domain.
+ * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: total number of irqs in mapping
  * @first_irq: first number of irq block assigned to the domain,
- *	pass zero to assign irqs on-the-fly. This will result in a
- *	linear IRQ domain so it is important to use irq_create_mapping()
- *	for each used IRQ, especially when SPARSE_IRQ is enabled.
+ *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
+ *	pre-map all of the irqs in the domain to virqs starting at first_irq.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
- * Allocates a legacy irq_domain if irq_base is positive or a linear
- * domain otherwise. For the legacy domain, IRQ descriptors will also
- * be allocated.
+ * Allocates an irq_domain, and optionally if first_irq is positive then also
+ * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
  *
  * This is intended to implement the expected behaviour for most
- * interrupt controllers which is that a linear mapping should
- * normally be used unless the system requires a legacy mapping in
- * order to support supplying interrupt numbers during non-DT
- * registration of devices.
+ * interrupt controllers. If device tree is used, then first_irq will be 0 and
+ * irqs get mapped dynamically on the fly. However, if the controller requires
+ * static virq assignments (non-DT boot) then it will set that up correctly.
  */
 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 					 unsigned int size,
@@ -166,33 +125,25 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	if (first_irq > 0) {
-		int irq_base;
+	struct irq_domain *domain;
+
+	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
+	if (!domain)
+		return NULL;
 
+	if (first_irq > 0) {
 		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
-			/*
-			 * Set the descriptor allocator to search for a
-			 * 1-to-1 mapping, such as irq_alloc_desc_at().
-			 * Use of_node_to_nid() which is defined to
-			 * numa_node_id() on platforms that have no custom
-			 * implementation.
-			 */
-			irq_base = irq_alloc_descs(first_irq, first_irq, size,
-						   of_node_to_nid(of_node));
-			if (irq_base < 0) {
+			/* attempt to allocated irq_descs */
+			int rc = irq_alloc_descs(first_irq, first_irq, size,
+						 of_node_to_nid(of_node));
+			if (rc < 0)
 				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 					first_irq);
-				irq_base = first_irq;
-			}
-		} else
-			irq_base = first_irq;
-
-		return irq_domain_add_legacy(of_node, size, irq_base, 0,
-					     ops, host_data);
+		}
+		irq_domain_associate_many(domain, first_irq, 0, size);
 	}
 
-	/* A linear domain is the default */
-	return irq_domain_add_linear(of_node, size, ops, host_data);
+	return domain;
 }
 EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 
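
irq_domain_add_simple() now covers both boot styles with one call: a DT driver
passes first_irq == 0 and maps virqs lazily via irq_create_mapping(), while a
board file that needs fixed numbers passes its static base. A hedged usage
sketch (np, FOO_IRQ_BASE and the foo_* names are hypothetical):

static const struct irq_domain_ops foo_domain_ops = {
	.map   = foo_irq_map,		/* hypothetical .map callback */
	.xlate = irq_domain_xlate_onecell,
};

/* DT boot: virqs are allocated on demand as mappings are created */
domain = irq_domain_add_simple(np, 32, 0, &foo_domain_ops, NULL);

/* non-DT boot: hwirqs 0..31 are pre-mapped to virqs FOO_IRQ_BASE.. */
domain = irq_domain_add_simple(NULL, 32, FOO_IRQ_BASE,
			       &foo_domain_ops, NULL);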
@@ -219,131 +170,19 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 void *host_data)
 {
 	struct irq_domain *domain;
-	unsigned int i;
 
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+	domain = __irq_domain_add(of_node, first_hwirq + size,
+				  first_hwirq + size, 0, ops, host_data);
 	if (!domain)
 		return NULL;
 
-	domain->revmap_data.legacy.first_irq = first_irq;
-	domain->revmap_data.legacy.first_hwirq = first_hwirq;
-	domain->revmap_data.legacy.size = size;
-
-	mutex_lock(&irq_domain_mutex);
-	/* Verify that all the irqs are available */
-	for (i = 0; i < size; i++) {
-		int irq = first_irq + i;
-		struct irq_data *irq_data = irq_get_irq_data(irq);
+	irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 
-		if (WARN_ON(!irq_data || irq_data->domain)) {
-			mutex_unlock(&irq_domain_mutex);
-			irq_domain_free(domain);
-			return NULL;
-		}
-	}
-
-	/* Claim all of the irqs before registering a legacy domain */
-	for (i = 0; i < size; i++) {
-		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
-		irq_data->hwirq = first_hwirq + i;
-		irq_data->domain = domain;
-	}
-	mutex_unlock(&irq_domain_mutex);
-
-	for (i = 0; i < size; i++) {
-		int irq = first_irq + i;
-		int hwirq = first_hwirq + i;
-
-		/* IRQ0 gets ignored */
-		if (!irq)
-			continue;
-
-		/* Legacy flags are left to default at this point,
-		 * one can then use irq_create_mapping() to
-		 * explicitly change them
-		 */
-		if (ops->map)
-			ops->map(domain, irq, hwirq);
-
-		/* Clear norequest flags */
-		irq_clear_status_flags(irq, IRQ_NOREQUEST);
-	}
-
-	irq_domain_add(domain);
 	return domain;
 }
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
-					 unsigned int size,
-					 const struct irq_domain_ops *ops,
-					 void *host_data)
-{
-	struct irq_domain *domain;
-	unsigned int *revmap;
-
-	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
-			      of_node_to_nid(of_node));
-	if (WARN_ON(!revmap))
-		return NULL;
-
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
-	if (!domain) {
-		kfree(revmap);
-		return NULL;
-	}
-	domain->revmap_data.linear.size = size;
-	domain->revmap_data.linear.revmap = revmap;
-	irq_domain_add(domain);
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_linear);
-
-struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
-					unsigned int max_irq,
-					const struct irq_domain_ops *ops,
-					void *host_data)
-{
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
-	if (domain) {
-		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
-		irq_domain_add(domain);
-	}
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
-
-/**
- * irq_domain_add_tree()
- * @of_node: pointer to interrupt controller's device tree node.
- * @ops: map/unmap domain callbacks
- *
- * Note: The radix tree will be allocated later during boot automatically
- * (the reverse mapping will use the slow path until that happens).
- */
-struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
-					 const struct irq_domain_ops *ops,
-					 void *host_data)
-{
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-					IRQ_DOMAIN_MAP_TREE, ops, host_data);
-	if (domain) {
-		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
-		irq_domain_add(domain);
-	}
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_tree);
-
-/**
  * irq_find_host() - Locates a domain for a given device node
  * @node: device-tree node of the interrupt controller
  */
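
With the body reduced to __irq_domain_add() plus irq_domain_associate_many(), a
legacy domain is simply a pre-associated linear domain sized first_hwirq + size;
revmap entries below first_hwirq stay zero, and the old irq_domain_legacy_revmap()
arithmetic (virq = hwirq - first_hwirq + first_irq) falls out of the association
loop. A sketch modeled on classic 8259-style usage (node and ops are platform
specific):

/* 16 hwirqs pre-mapped 1:1, so irq_find_mapping(d, n) returns n */
struct irq_domain *d = irq_domain_add_legacy(node, 16 /* size */,
					     0 /* first_irq */,
					     0 /* first_hwirq */,
					     &irq_domain_simple_ops, NULL);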
@@ -391,125 +230,108 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
-static void irq_domain_disassociate_many(struct irq_domain *domain,
-					 unsigned int irq_base, int count)
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
-	/*
-	 * disassociate in reverse order;
-	 * not strictly necessary, but nice for unwinding
-	 */
-	while (count--) {
-		int irq = irq_base + count;
-		struct irq_data *irq_data = irq_get_irq_data(irq);
-		irq_hw_number_t hwirq;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq;
 
-		if (WARN_ON(!irq_data || irq_data->domain != domain))
-			continue;
+	if (WARN(!irq_data || irq_data->domain != domain,
+		 "virq%i doesn't exist; cannot disassociate\n", irq))
+		return;
 
-		hwirq = irq_data->hwirq;
-		irq_set_status_flags(irq, IRQ_NOREQUEST);
+	hwirq = irq_data->hwirq;
+	irq_set_status_flags(irq, IRQ_NOREQUEST);
 
-		/* remove chip and handler */
-		irq_set_chip_and_handler(irq, NULL, NULL);
+	/* remove chip and handler */
+	irq_set_chip_and_handler(irq, NULL, NULL);
 
-		/* Make sure it's completed */
-		synchronize_irq(irq);
+	/* Make sure it's completed */
+	synchronize_irq(irq);
 
-		/* Tell the PIC about it */
-		if (domain->ops->unmap)
-			domain->ops->unmap(domain, irq);
-		smp_mb();
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, irq);
+	smp_mb();
 
-		irq_data->domain = NULL;
-		irq_data->hwirq = 0;
+	irq_data->domain = NULL;
+	irq_data->hwirq = 0;
 
-		/* Clear reverse map */
-		switch(domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = 0;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_data.tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-			break;
-		}
+	/* Clear reverse map for this hwirq */
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
 	}
 }
 
-int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
-			      irq_hw_number_t hwirq_base, int count)
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+			 irq_hw_number_t hwirq)
 {
-	unsigned int virq = irq_base;
-	irq_hw_number_t hwirq = hwirq_base;
-	int i, ret;
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	int ret;
 
-	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
-		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+	if (WARN(hwirq >= domain->hwirq_max,
+		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
+		return -EINVAL;
+	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
+		return -EINVAL;
+	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+		return -EINVAL;
 
-	for (i = 0; i < count; i++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq + i);
-
-		if (WARN(!irq_data, "error: irq_desc not allocated; "
-			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-		if (WARN(irq_data->domain, "error: irq_desc already associated; "
-			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-	};
-
-	for (i = 0; i < count; i++, virq++, hwirq++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq);
-
-		irq_data->hwirq = hwirq;
-		irq_data->domain = domain;
-		if (domain->ops->map) {
-			ret = domain->ops->map(domain, virq, hwirq);
-			if (ret != 0) {
-				/*
-				 * If map() returns -EPERM, this interrupt is protected
-				 * by the firmware or some other service and shall not
-				 * be mapped.
-				 *
-				 * Since on some platforms we blindly try to map everything
-				 * we end up with a log full of backtraces.
-				 *
-				 * So instead, we silently fail on -EPERM, it is the
-				 * responsibility of the PIC driver to display a relevant
-				 * message if needed.
-				 */
-				if (ret != -EPERM) {
-					pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
-					       virq, hwirq, ret);
-					WARN_ON(1);
-				}
-				irq_data->domain = NULL;
-				irq_data->hwirq = 0;
-				goto err_unmap;
+	mutex_lock(&irq_domain_mutex);
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map) {
+		ret = domain->ops->map(domain, virq, hwirq);
+		if (ret != 0) {
+			/*
+			 * If map() returns -EPERM, this interrupt is protected
+			 * by the firmware or some other service and shall not
+			 * be mapped. Don't bother telling the user about it.
+			 */
+			if (ret != -EPERM) {
+				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
+					domain->name, hwirq, virq, ret);
 			}
+			irq_data->domain = NULL;
+			irq_data->hwirq = 0;
+			mutex_unlock(&irq_domain_mutex);
+			return ret;
 		}
 
-		switch (domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = virq;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
-			mutex_unlock(&revmap_trees_mutex);
-			break;
-		}
+		/* If not already assigned, give the domain the chip's name */
+		if (!domain->name && irq_data->chip)
+			domain->name = irq_data->chip->name;
+	}
 
-		irq_clear_status_flags(virq, IRQ_NOREQUEST);
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = virq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
 	}
+	mutex_unlock(&irq_domain_mutex);
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
 	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate);
 
-err_unmap:
-	irq_domain_disassociate_many(domain, irq_base, i);
-	return -EINVAL;
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+			       irq_hw_number_t hwirq_base, int count)
+{
+	int i;
+
+	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+	for (i = 0; i < count; i++) {
+		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
+	}
 }
 EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 
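
The -EPERM convention carries over: a .map callback can refuse firmware-owned
lines, and the core now skips just that one association quietly instead of
dumping a backtrace. A sketch of a callback that relies on it
(foo_hwirq_is_secure and foo_chip are hypothetical):

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hwirq)
{
	/* hypothetical check: lines owned by secure firmware must not
	 * be mapped; -EPERM makes the core skip them silently */
	if (foo_hwirq_is_secure(hwirq))
		return -EPERM;

	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
	return 0;
}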
@@ -519,7 +341,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  *
  * This routine is used for irq controllers which can choose the hardware
  * interrupt numbers they generate. In such a case it's simplest to use
- * the linux irq as the hardware interrupt number.
+ * the linux irq as the hardware interrupt number. It still uses the linear
+ * or radix tree to store the mapping, but the irq controller can optimize
+ * the revmap path by using the hwirq directly.
  */
 unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
@@ -528,17 +352,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	if (domain == NULL)
 		domain = irq_default_domain;
 
-	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
-		return 0;
-
 	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (!virq) {
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= domain->revmap_data.nomap.max_irq) {
+	if (virq >= domain->revmap_direct_max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-		       domain->revmap_data.nomap.max_irq);
+		       domain->revmap_direct_max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
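
With the NOMAP check gone, any domain created with a non-zero direct_max can
hand out direct mappings where virq == hwirq; the caller then programs the
hardware to raise exactly that number. Usage sketch (foo_hw_set_vector is
hypothetical):

unsigned int virq = irq_create_direct_mapping(domain);
if (!virq)
	return -ENOSPC;
foo_hw_set_vector(virq);	/* hardware will raise hwirq == virq */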
@@ -575,9 +396,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL) {
-		pr_warning("irq_create_mapping called for"
-			   " NULL domain, hwirq=%lx\n", hwirq);
-		WARN_ON(1);
+		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
 		return 0;
 	}
 	pr_debug("-> using domain @%p\n", domain);
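
irq_create_mapping() stays the normal driver entry point: it returns the
existing virq when the hwirq is already mapped, and otherwise allocates a
descriptor and associates it. Typical caller pattern (foo_handler is
hypothetical):

unsigned int virq = irq_create_mapping(domain, hwirq);
if (!virq)
	return -EINVAL;
return request_irq(virq, foo_handler, 0, "foo", NULL);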
@@ -589,10 +408,6 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 		return virq;
 	}
 
-	/* Get a virtual interrupt number */
-	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return irq_domain_legacy_revmap(domain, hwirq);
-
 	/* Allocate a virtual interrupt number */
 	hint = hwirq % nr_irqs;
 	if (hint == 0)
@@ -645,12 +460,7 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 	if (unlikely(ret < 0))
 		return ret;
 
-	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
-	if (unlikely(ret < 0)) {
-		irq_free_descs(irq_base, count);
-		return ret;
-	}
-
+	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
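
Because irq_domain_associate_many() is now void (individual failures are warned
about and skipped inside irq_domain_associate()), the unwind path above
disappears; only descriptor allocation can still fail. The caller contract is
unchanged, as in this sketch (values illustrative):

/* claim virqs 16..19 for hwirqs 4..7 */
int rc = irq_create_strict_mappings(domain, 16, 4, 4);
if (rc < 0)
	return rc;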
@@ -677,8 +487,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 		if (intsize > 0)
 			return intspec[0];
 #endif
-		pr_warning("no irq domain found for %s !\n",
-			   of_node_full_name(controller));
+		pr_warn("no irq domain found for %s !\n",
+			of_node_full_name(controller));
 		return 0;
 	}
 
@@ -698,7 +508,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	/* Set type if specified and different than the current one */
 	if (type != IRQ_TYPE_NONE &&
-	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+	    type != irq_get_trigger_type(virq))
 		irq_set_irq_type(virq, type);
 	return virq;
 }
@@ -720,11 +530,7 @@ void irq_dispose_mapping(unsigned int virq)
 	if (WARN_ON(domain == NULL))
 		return;
 
-	/* Never unmap legacy interrupts */
-	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return;
-
-	irq_domain_disassociate_many(domain, virq, 1);
+	irq_domain_disassociate(domain, virq);
 	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@@ -745,63 +551,51 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		return 0;
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LEGACY:
-		return irq_domain_legacy_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_LINEAR:
-		return irq_linear_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_TREE:
-		rcu_read_lock();
-		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
-		rcu_read_unlock();
-		if (data)
-			return data->irq;
-		break;
-	case IRQ_DOMAIN_MAP_NOMAP:
+	if (hwirq < domain->revmap_direct_max_irq) {
 		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return hwirq;
-		break;
 	}
 
-	return 0;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
+	/* Check if the hwirq is in the linear revmap. */
+	if (hwirq < domain->revmap_size)
+		return domain->linear_revmap[hwirq];
 
-/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
- *
- * This is a fast path that can be called directly by irq controller code to
- * save a handful of instructions.
- */
-unsigned int irq_linear_revmap(struct irq_domain *domain,
-			       irq_hw_number_t hwirq)
-{
-	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
-
-	/* Check revmap bounds; complain if exceeded */
-	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
-		return 0;
-
-	return domain->revmap_data.linear.revmap[hwirq];
+	rcu_read_lock();
+	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
+	rcu_read_unlock();
+	return data ? data->irq : 0;
 }
-EXPORT_SYMBOL_GPL(irq_linear_revmap);
+EXPORT_SYMBOL_GPL(irq_find_mapping);
 
 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
 {
 	unsigned long flags;
 	struct irq_desc *desc;
-	const char *p;
-	static const char none[] = "none";
-	void *data;
+	struct irq_domain *domain;
+	struct radix_tree_iter iter;
+	void *data, **slot;
 	int i;
 
-	seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
+	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
+		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(domain, &irq_domain_list, link) {
+		int count = 0;
+		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
+			count++;
+		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
+			   domain == irq_default_domain ? '*' : ' ', domain->name,
+			   domain->revmap_size + count, domain->revmap_size,
+			   domain->revmap_direct_max_irq,
+			   domain->of_node ? of_node_full_name(domain->of_node) : "");
+	}
+	mutex_unlock(&irq_domain_mutex);
+
+	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
 		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
-		      "domain name");
+		      "active", "type", "domain");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
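
irq_linear_revmap() leaves this file without a replacement here because the same
series turns it into a static inline in include/linux/irqdomain.h, keeping the
driver fast path. Paraphrased, its shape there is:

static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
					     irq_hw_number_t hwirq)
{
	/* read the linear tail array directly; 0 means no mapping */
	return hwirq < domain->revmap_size ?
	       domain->linear_revmap[hwirq] : 0;
}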
@@ -809,28 +603,28 @@ static int virq_debug_show(struct seq_file *m, void *private)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
+		domain = desc->irq_data.domain;
 
-		if (desc->action && desc->action->handler) {
+		if (domain) {
 			struct irq_chip *chip;
+			int hwirq = desc->irq_data.hwirq;
+			bool direct;
 
 			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
+			seq_printf(m, "0x%05x ", hwirq);
 
 			chip = irq_desc_get_chip(desc);
-			if (chip && chip->name)
-				p = chip->name;
-			else
-				p = none;
-			seq_printf(m, "%-15s ", p);
+			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
 
 			data = irq_desc_get_chip_data(desc);
 			seq_printf(m, data ? "0x%p " : " %p ", data);
 
-			if (desc->irq_data.domain)
-				p = of_node_full_name(desc->irq_data.domain->of_node);
-			else
-				p = none;
-			seq_printf(m, "%s\n", p);
+			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
+			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
+			seq_printf(m, "%6s%-8s ",
+				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
+				   direct ? "(DIRECT)" : "");
+			seq_printf(m, "%s\n", desc->irq_data.domain->name);
 		}
 
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -927,18 +721,3 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 	.xlate = irq_domain_xlate_onetwocell,
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
-
-#ifdef CONFIG_OF_IRQ
-void irq_domain_generate_simple(const struct of_device_id *match,
-				u64 phys_base, unsigned int irq_start)
-{
-	struct device_node *node;
-	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
-		 (unsigned long long) phys_base, (int) irq_start);
-	node = of_find_matching_node_by_address(NULL, match, phys_base);
-	if (node)
-		irq_domain_add_legacy(node, 32, irq_start, 0,
-				      &irq_domain_simple_ops, NULL);
-}
-EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif