author		Grant Likely <grant.likely@linaro.org>	2013-06-09 20:06:02 -0400
committer	Grant Likely <grant.likely@linaro.org>	2013-06-24 09:01:42 -0400
commit		ddaf144c61da45ae5c49ae38556c3ac4524f9318
tree		6992bedc5f2a620f7b4e4dadfce76658e7667d35
parent		1400ea86025a22862f97e7fe544433751b43ecec
irqdomain: Refactor irq_domain_associate_many()
Originally, irq_domain_associate_many() was designed to unwind the
mapped irqs on a failure of any individual association. However, that
proved to be a problem with certain IRQ controllers. Some of them only
support a subset of irqs, and will fail when attempting to map a
reserved IRQ. In those cases we want to map as many IRQs as possible, so
instead it is better for irq_domain_associate_many() to make a
best-effort attempt to map irqs, but not fail if any or all of them
don't succeed. If a caller really cares about how many irqs got
associated, then it should instead go back and check that all of the
irqs it cares about were mapped.
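A caller that does need specific mappings can verify them after the
best-effort call. A minimal sketch of that pattern, assuming domain,
irq_base, hwirq_base, count, and i are already in scope:

	/* Best effort: associates what it can, logs the rest. */
	irq_domain_associate_many(domain, irq_base, hwirq_base, count);

	/* A caller that requires particular hwirqs checks them itself. */
	for (i = 0; i < count; i++) {
		if (!irq_find_mapping(domain, hwirq_base + i))
			pr_warn("hwirq %lu was not associated\n",
				(unsigned long)(hwirq_base + i));
	}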
The original design open-coded the individual association logic into the
body of irq_domain_associate_many(). Now that associations no longer
need to be unwound, the code becomes simpler if it is split up:
irq_domain_associate() contains the bulk of the logic, and
irq_domain_associate_many() is reduced to a simple loop wrapper.
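Condensed from the kernel/irq/irqdomain.c hunk below (omitting only the
pr_debug trace), the wrapper reduces to:

	void irq_domain_associate_many(struct irq_domain *domain,
				       unsigned int irq_base,
				       irq_hw_number_t hwirq_base, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}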
This patch also adds a new error check to the associate path to make
sure it isn't called for a hwirq larger than the controller can handle,
and adds locking so that irq_domain_mutex is held while setting up a
new association.
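Excerpted (and abridged) from the associate path in the hunk below:

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	/* ... irq_data setup, ops->map() call, and revmap update ... */
	mutex_unlock(&irq_domain_mutex);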
v3: Fixup missing change to irq_domain_add_tree()
v2: Fixup x86 warning. irq_domain_associate_many() no longer returns an
error code, but reports errors to the printk log directly. In the
majority of cases we don't actually want to fail if there is a
problem, but rather log it and still try to boot the system.
Signed-off-by: Grant Likely <grant.likely@linaro.org>
irqdomain: Fix flubbed irq_domain_associate_many refactoring
commit d39046ec72, "irqdomain: Refactor irq_domain_associate_many()", was
missing the following hunk, which causes a boot failure on anything using
irq_domain_add_tree() to allocate an irq domain.
Signed-off-by: Grant Likely <grant.likely@linaro.org>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
-rw-r--r--	arch/x86/kernel/devicetree.c	  4
-rw-r--r--	include/linux/irqdomain.h	 24
-rw-r--r--	kernel/irq/irqdomain.c		185
3 files changed, 103 insertions(+), 110 deletions(-)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b1581527a236..4934890e4db2 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -364,9 +364,7 @@ static void dt_add_ioapic_domain(unsigned int ioapic_num,
 	 * and assigned so we can keep the 1:1 mapping which the ioapic
 	 * is having.
 	 */
-	ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
-	if (ret)
-		pr_err("Error mapping legacy IRQs: %d\n", ret);
+	irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
 
 	if (num > NR_IRQS_LEGACY) {
 		ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY,
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index fd4b26f8f44c..208d1352c40a 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -103,6 +103,7 @@ struct irq_domain {
 	struct irq_domain_chip_generic *gc;
 
 	/* reverse map data. The linear map gets appended to the irq_domain */
+	irq_hw_number_t hwirq_max;
 	unsigned int revmap_direct_max_irq;
 	unsigned int revmap_size;
 	struct radix_tree_root revmap_tree;
@@ -110,8 +111,8 @@ struct irq_domain {
 };
 
 #ifdef CONFIG_IRQ_DOMAIN
-struct irq_domain *__irq_domain_add(struct device_node *of_node,
-				    int size, int direct_max,
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+				    irq_hw_number_t hwirq_max, int direct_max,
 				    const struct irq_domain_ops *ops,
 				    void *host_data);
 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
@@ -140,14 +141,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	return __irq_domain_add(of_node, size, 0, ops, host_data);
+	return __irq_domain_add(of_node, size, size, 0, ops, host_data);
 }
 static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	return __irq_domain_add(of_node, 0, max_irq, ops, host_data);
+	return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
 }
 static inline struct irq_domain *irq_domain_add_legacy_isa(
 				struct device_node *of_node,
@@ -161,19 +162,16 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	return irq_domain_add_linear(of_node, 0, ops, host_data);
+	return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
 }
 
 extern void irq_domain_remove(struct irq_domain *host);
 
-extern int irq_domain_associate_many(struct irq_domain *domain,
-				     unsigned int irq_base,
-				     irq_hw_number_t hwirq_base, int count);
-static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
-				       irq_hw_number_t hwirq)
-{
-	return irq_domain_associate_many(domain, irq, hwirq, 1);
-}
+extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq);
+extern void irq_domain_associate_many(struct irq_domain *domain,
+				      unsigned int irq_base,
+				      irq_hw_number_t hwirq_base, int count);
 
 extern unsigned int irq_create_mapping(struct irq_domain *host,
 				       irq_hw_number_t hwirq);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 280b8047d8db..80e92492c77b 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -35,8 +35,8 @@ static struct irq_domain *irq_default_domain;
  * register allocated irq_domain with irq_domain_register(). Returns pointer
  * to IRQ domain, or NULL on failure.
  */
-struct irq_domain *__irq_domain_add(struct device_node *of_node,
-				    int size, int direct_max,
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+				    irq_hw_number_t hwirq_max, int direct_max,
 				    const struct irq_domain_ops *ops,
 				    void *host_data)
 {
@@ -52,6 +52,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node,
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
+	domain->hwirq_max = hwirq_max;
 	domain->revmap_size = size;
 	domain->revmap_direct_max_irq = direct_max;
 
@@ -126,7 +127,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 {
 	struct irq_domain *domain;
 
-	domain = __irq_domain_add(of_node, size, 0, ops, host_data);
+	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
 	if (!domain)
 		return NULL;
 
@@ -139,7 +140,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 					first_irq);
 		}
-		WARN_ON(irq_domain_associate_many(domain, first_irq, 0, size));
+		irq_domain_associate_many(domain, first_irq, 0, size);
 	}
 
 	return domain;
@@ -170,11 +171,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 {
 	struct irq_domain *domain;
 
-	domain = __irq_domain_add(of_node, first_hwirq + size, 0, ops, host_data);
+	domain = __irq_domain_add(of_node, first_hwirq + size,
+				  first_hwirq + size, 0, ops, host_data);
 	if (!domain)
 		return NULL;
 
-	WARN_ON(irq_domain_associate_many(domain, first_irq, first_hwirq, size));
+	irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 
 	return domain;
 }
@@ -228,109 +230,109 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
-static void irq_domain_disassociate_many(struct irq_domain *domain,
-					 unsigned int irq_base, int count)
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
-	/*
-	 * disassociate in reverse order;
-	 * not strictly necessary, but nice for unwinding
-	 */
-	while (count--) {
-		int irq = irq_base + count;
-		struct irq_data *irq_data = irq_get_irq_data(irq);
-		irq_hw_number_t hwirq;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq;
 
-		if (WARN_ON(!irq_data || irq_data->domain != domain))
-			continue;
+	if (WARN(!irq_data || irq_data->domain != domain,
+		 "virq%i doesn't exist; cannot disassociate\n", irq))
+		return;
 
-		hwirq = irq_data->hwirq;
-		irq_set_status_flags(irq, IRQ_NOREQUEST);
+	hwirq = irq_data->hwirq;
+	irq_set_status_flags(irq, IRQ_NOREQUEST);
 
-		/* remove chip and handler */
-		irq_set_chip_and_handler(irq, NULL, NULL);
+	/* remove chip and handler */
+	irq_set_chip_and_handler(irq, NULL, NULL);
 
-		/* Make sure it's completed */
-		synchronize_irq(irq);
+	/* Make sure it's completed */
+	synchronize_irq(irq);
 
-		/* Tell the PIC about it */
-		if (domain->ops->unmap)
-			domain->ops->unmap(domain, irq);
-		smp_mb();
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, irq);
+	smp_mb();
 
-		irq_data->domain = NULL;
-		irq_data->hwirq = 0;
+	irq_data->domain = NULL;
+	irq_data->hwirq = 0;
 
-		/* Clear reverse map for this hwirq */
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+	/* Clear reverse map for this hwirq */
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
 	}
 }
 
-int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
-			      irq_hw_number_t hwirq_base, int count)
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+			 irq_hw_number_t hwirq)
 {
-	unsigned int virq = irq_base;
-	irq_hw_number_t hwirq = hwirq_base;
-	int i, ret;
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	int ret;
 
-	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
-		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+	if (WARN(hwirq >= domain->hwirq_max,
+		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
+		return -EINVAL;
+	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
+		return -EINVAL;
+	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+		return -EINVAL;
 
-	for (i = 0; i < count; i++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq + i);
-
-		if (WARN(!irq_data, "error: irq_desc not allocated; "
-			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-		if (WARN(irq_data->domain, "error: irq_desc already associated; "
-			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-	};
-
-	for (i = 0; i < count; i++, virq++, hwirq++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq);
-
-		irq_data->hwirq = hwirq;
-		irq_data->domain = domain;
-		if (domain->ops->map) {
-			ret = domain->ops->map(domain, virq, hwirq);
-			if (ret != 0) {
-				/*
-				 * If map() returns -EPERM, this interrupt is protected
-				 * by the firmware or some other service and shall not
-				 * be mapped. Don't bother telling the user about it.
-				 */
-				if (ret != -EPERM) {
-					pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
-					       domain->name, hwirq, virq, ret);
-				}
-				irq_data->domain = NULL;
-				irq_data->hwirq = 0;
-				continue;
+	mutex_lock(&irq_domain_mutex);
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map) {
+		ret = domain->ops->map(domain, virq, hwirq);
+		if (ret != 0) {
+			/*
+			 * If map() returns -EPERM, this interrupt is protected
+			 * by the firmware or some other service and shall not
+			 * be mapped. Don't bother telling the user about it.
+			 */
+			if (ret != -EPERM) {
+				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
+				       domain->name, hwirq, virq, ret);
 			}
-			/* If not already assigned, give the domain the chip's name */
-			if (!domain->name && irq_data->chip)
-				domain->name = irq_data->chip->name;
+			irq_data->domain = NULL;
+			irq_data->hwirq = 0;
+			mutex_unlock(&irq_domain_mutex);
+			return ret;
 		}
 
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		/* If not already assigned, give the domain the chip's name */
+		if (!domain->name && irq_data->chip)
+			domain->name = irq_data->chip->name;
+	}
 
-		irq_clear_status_flags(virq, IRQ_NOREQUEST);
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = virq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
 	}
+	mutex_unlock(&irq_domain_mutex);
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(irq_domain_associate);
+
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+			       irq_hw_number_t hwirq_base, int count)
+{
+	int i;
+
+	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+	for (i = 0; i < count; i++) {
+		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
+	}
+}
 EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 
 /**
@@ -460,12 +462,7 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 	if (unlikely(ret < 0))
 		return ret;
 
-	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
-	if (unlikely(ret < 0)) {
-		irq_free_descs(irq_base, count);
-		return ret;
-	}
-
+	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
@@ -535,7 +532,7 @@ void irq_dispose_mapping(unsigned int virq)
 	if (WARN_ON(domain == NULL))
 		return;
 
-	irq_domain_disassociate_many(domain, virq, 1);
+	irq_domain_disassociate(domain, virq);
 	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
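For context, a minimal sketch of a consumer of the reworked API; every
my_/MY_ identifier is hypothetical, and only the irqdomain/irq calls are
real kernel interfaces:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/of.h>

	#define MY_VIRQ		16	/* hypothetical, assumed pre-allocated irq_desc */
	#define MY_HWIRQ	3	/* hypothetical hw irq line on the controller */

	static int my_irq_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
	{
		/* Wire up a trivial chip/handler for the new mapping. */
		irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
		return 0;
	}

	static const struct irq_domain_ops my_irq_ops = {
		.map	= my_irq_map,
		.xlate	= irq_domain_xlate_onecell,
	};

	static int my_probe(struct device_node *np)
	{
		struct irq_domain *d;

		/* irq_domain_add_tree() now sets hwirq_max internally (to ~0). */
		d = irq_domain_add_tree(np, &my_irq_ops, NULL);
		if (!d)
			return -ENOMEM;

		/*
		 * Association still returns an error code, but a caller may
		 * log the failure and keep booting rather than bail out.
		 */
		if (irq_domain_associate(d, MY_VIRQ, MY_HWIRQ))
			pr_warn("could not associate hwirq %d\n", MY_HWIRQ);

		return 0;
	}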