Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/chip.c      |   8
-rw-r--r--  kernel/irq/internals.h |   3
-rw-r--r--  kernel/irq/irqdomain.c | 106
-rw-r--r--  kernel/irq/manage.c    | 119
-rw-r--r--  kernel/irq/migration.c |  13
5 files changed, 161 insertions(+), 88 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc275e4f629b..eebd6d5cfb44 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
-	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
-	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	handle_irq_event(desc);
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b3b7d9..001fa5bab490 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+			       const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0e0ba5f840b2..41c1564103f1 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "irq: " fmt
+
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/interrupt.h>
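This new pr_fmt define is what allows the hand-written "irq: " prefixes to be deleted from the pr_debug()/pr_warning() calls throughout the rest of this file: the printk helpers paste pr_fmt(fmt) in front of every format string at compile time. A standalone sketch of the mechanism (plain C, not the kernel's actual printk machinery):

    #include <stdio.h>

    /* Define the prefix once; every pr_*() call picks it up. */
    #define pr_fmt(fmt) "irq: " fmt
    #define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_debug("Allocated domain of type %d\n", 2);
            /* prints: irq: Allocated domain of type 2 */
            return 0;
    }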
@@ -56,14 +58,73 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
 	return domain;
 }
 
+static void irq_domain_free(struct irq_domain *domain)
+{
+	of_node_put(domain->of_node);
+	kfree(domain);
+}
+
 static void irq_domain_add(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("irq: Allocated domain of type %d @0x%p\n",
+	pr_debug("Allocated domain of type %d @0x%p\n",
+		 domain->revmap_type, domain);
+}
+
+/**
+ * irq_domain_remove() - Remove an irq domain.
+ * @domain: domain to remove
+ *
+ * This routine is used to remove an irq domain. The caller must ensure
+ * that all mappings within the domain have been disposed of prior to
+ * use, depending on the revmap type.
+ */
+void irq_domain_remove(struct irq_domain *domain)
+{
+	mutex_lock(&irq_domain_mutex);
+
+	switch (domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LEGACY:
+		/*
+		 * Legacy domains don't manage their own irq_desc
+		 * allocations, we expect the caller to handle irq_desc
+		 * freeing on their own.
+		 */
+		break;
+	case IRQ_DOMAIN_MAP_TREE:
+		/*
+		 * radix_tree_delete() takes care of destroying the root
+		 * node when all entries are removed. Shout if there are
+		 * any mappings left.
+		 */
+		WARN_ON(domain->revmap_data.tree.height);
+		break;
+	case IRQ_DOMAIN_MAP_LINEAR:
+		kfree(domain->revmap_data.linear.revmap);
+		domain->revmap_data.linear.size = 0;
+		break;
+	case IRQ_DOMAIN_MAP_NOMAP:
+		break;
+	}
+
+	list_del(&domain->link);
+
+	/*
+	 * If the going away domain is the default one, reset it.
+	 */
+	if (unlikely(irq_default_domain == domain))
+		irq_set_default_host(NULL);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	pr_debug("Removed domain of type %d @0x%p\n",
 		 domain->revmap_type, domain);
+
+	irq_domain_free(domain);
 }
+EXPORT_SYMBOL_GPL(irq_domain_remove);
 
 static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
 					     irq_hw_number_t hwirq)
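With irq_domain_remove() available, an interrupt controller driver can tear its domain down, provided it disposes of every mapping first (note the WARN_ON for tree domains above). A sketch of the intended lifecycle: my_init()/my_teardown() and NR_HWIRQS are invented for illustration, while the irq_domain_*(), irq_find_mapping() and irq_dispose_mapping() calls are the existing API:

    #include <linux/errno.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    #define NR_HWIRQS 32

    static struct irq_domain *my_domain;

    static int my_init(struct device_node *np)
    {
            my_domain = irq_domain_add_linear(np, NR_HWIRQS,
                                              &irq_domain_simple_ops, NULL);
            return my_domain ? 0 : -ENOMEM;
    }

    static void my_teardown(void)
    {
            int hwirq;

            /* All mappings must be disposed of before the domain goes away. */
            for (hwirq = 0; hwirq < NR_HWIRQS; hwirq++) {
                    unsigned int virq = irq_find_mapping(my_domain, hwirq);

                    if (virq)
                            irq_dispose_mapping(virq);
            }
            irq_domain_remove(my_domain);
    }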
@@ -117,8 +178,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 
 		if (WARN_ON(!irq_data || irq_data->domain)) {
 			mutex_unlock(&irq_domain_mutex);
-			of_node_put(domain->of_node);
-			kfree(domain);
+			irq_domain_free(domain);
 			return NULL;
 		}
 	}
@@ -152,10 +212,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 	irq_domain_add(domain);
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
  * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
+ * @size: Number of interrupts in the domain.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  */
@@ -181,6 +243,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 	irq_domain_add(domain);
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_linear);
 
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					unsigned int max_irq,
@@ -195,6 +258,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 	}
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
 
 /**
  * irq_domain_add_tree()
@@ -216,6 +280,7 @@ struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
 	}
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_tree);
 
 /**
  * irq_find_host() - Locates a domain for a given device node
@@ -259,10 +324,11 @@ EXPORT_SYMBOL_GPL(irq_find_host);
  */
 void irq_set_default_host(struct irq_domain *domain)
 {
-	pr_debug("irq: Default domain set to @0x%p\n", domain);
+	pr_debug("Default domain set to @0x%p\n", domain);
 
 	irq_default_domain = domain;
 }
+EXPORT_SYMBOL_GPL(irq_set_default_host);
 
 static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 			  irq_hw_number_t hwirq)
@@ -272,7 +338,7 @@ static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 	irq_data->hwirq = hwirq;
 	irq_data->domain = domain;
 	if (domain->ops->map(domain, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
+		pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
 		irq_data->domain = NULL;
 		irq_data->hwirq = 0;
 		return -1;
@@ -303,7 +369,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 
 	virq = irq_alloc_desc_from(1, 0);
 	if (!virq) {
-		pr_debug("irq: create_direct virq allocation failed\n");
+		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
 	if (virq >= domain->revmap_data.nomap.max_irq) {
@@ -312,7 +378,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 		irq_free_desc(virq);
 		return 0;
 	}
-	pr_debug("irq: create_direct obtained virq %d\n", virq);
+	pr_debug("create_direct obtained virq %d\n", virq);
 
 	if (irq_setup_virq(domain, virq, virq)) {
 		irq_free_desc(virq);
@@ -321,6 +387,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 
 	return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 
 /**
  * irq_create_mapping() - Map a hardware interrupt into linux irq space
@@ -338,23 +405,23 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	unsigned int hint;
 	int virq;
 
-	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL) {
-		printk(KERN_WARNING "irq_create_mapping called for"
-		       " NULL domain, hwirq=%lx\n", hwirq);
+		pr_warning("irq_create_mapping called for"
+			   " NULL domain, hwirq=%lx\n", hwirq);
 		WARN_ON(1);
 		return 0;
 	}
-	pr_debug("irq: -> using domain @%p\n", domain);
+	pr_debug("-> using domain @%p\n", domain);
 
 	/* Check if mapping already exists */
 	virq = irq_find_mapping(domain, hwirq);
 	if (virq) {
-		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		pr_debug("-> existing mapping on virq %d\n", virq);
 		return virq;
 	}
 
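For reference, the consumer side of irq_create_mapping() that these messages describe: translate a domain-local hwirq into a Linux virq, then request it. The handler, the "my-device" name and the calling convention below are hypothetical; irq_create_mapping() and request_irq() are the real API:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/irqdomain.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int my_hook_up(struct irq_domain *domain, irq_hw_number_t hwirq,
                          void *dev)
    {
            unsigned int virq = irq_create_mapping(domain, hwirq);

            if (!virq)
                    return -EINVAL;         /* mapping failed, see pr_debug above */

            return request_irq(virq, my_handler, 0, "my-device", dev);
    }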
@@ -370,7 +437,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	if (virq <= 0)
 		virq = irq_alloc_desc_from(1, 0);
 	if (virq <= 0) {
-		pr_debug("irq: -> virq allocation failed\n");
+		pr_debug("-> virq allocation failed\n");
 		return 0;
 	}
 
@@ -380,7 +447,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 		return 0;
 	}
 
-	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
 		hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
 
 	return virq;
@@ -409,8 +476,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 	if (intsize > 0)
 		return intspec[0];
 #endif
-	printk(KERN_WARNING "irq: no irq domain found for %s !\n",
-	       controller->full_name);
+	pr_warning("no irq domain found for %s !\n",
+		   controller->full_name);
 	return 0;
 }
 
@@ -560,6 +627,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
 	 */
 	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
 }
+EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
 
 /**
  * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
@@ -584,6 +652,7 @@ void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
 		mutex_unlock(&revmap_trees_mutex);
 	}
 }
+EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
 
 /**
  * irq_linear_revmap() - Find a linux irq from a hw irq number.
@@ -617,6 +686,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
 
 	return revmap[hwirq];
 }
+EXPORT_SYMBOL_GPL(irq_linear_revmap);
 
 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
@@ -691,8 +761,8 @@ static int __init irq_debugfs_init(void)
 __initcall(irq_debugfs_init);
 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
 
-int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
-			  irq_hw_number_t hwirq)
+static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+				 irq_hw_number_t hwirq)
 {
 	return 0;
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326afe87..8c548232ba39 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+#define pr_fmt(fmt) "genirq: " fmt
+
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -139,6 +142,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	int ret;
+
+	ret = chip->irq_set_affinity(data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(data->affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
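Note the deliberate fallthrough in the switch above: IRQ_SET_MASK_OK copies the new mask into data->affinity and then, like IRQ_SET_MASK_OK_NOCOPY, updates the thread affinity; NOCOPY means the chip already wrote data->affinity itself. (Also note that at this point in the series the helper accepts a force argument but still passes false down to the chip.) A sketch of a chip callback that would take the NOCOPY leg; my_chip_set_affinity() and my_hw_route_irq() are invented, the return-value contract is the real one:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/irq.h>

    /* Hypothetical register write routing a hwirq to one CPU. */
    static void my_hw_route_irq(irq_hw_number_t hwirq, unsigned int cpu) { }

    static int my_chip_set_affinity(struct irq_data *data,
                                    const struct cpumask *mask, bool force)
    {
            unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* Hypothetical hardware can only target a single CPU, so the
             * chip narrows the mask and writes data->affinity itself. */
            my_hw_route_irq(data->hwirq, cpu);
            cpumask_copy(data->affinity, cpumask_of(cpu));

            return IRQ_SET_MASK_OK_NOCOPY;  /* tell the core not to copy over it */
    }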
@@ -149,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = chip->irq_set_affinity(data, mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(data->affinity, mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-			ret = 0;
-		}
+		ret = irq_do_set_affinity(data, mask, false);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -280,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret, node = desc->irq_data.node;
+	int node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -308,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(mask, nodemask))
 			cpumask_and(mask, mask, nodemask);
 	}
-	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-		cpumask_copy(desc->irq_data.affinity, mask);
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_set_thread_affinity(desc);
-	}
+	irq_do_set_affinity(&desc->irq_data, mask, false);
 	return 0;
 }
 #else
@@ -565,7 +573,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 			 chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -600,7 +608,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		ret = 0;
 		break;
 	default:
-		pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 		       flags, irq, chip->irq_set_type);
 	}
 	if (unmask)
@@ -773,11 +781,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -793,7 +829,9 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -815,44 +853,11 @@
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
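The per-task irq_thread flag and its do_exit() hook are gone: the cleanup is now a task_work destructor that fires only if the thread dies while the work is still queued (hence the PF_EXITING check in irq_thread_dtor() above), while the normal exit path cancels it first. The same pattern, sketched for a hypothetical kthread; my_exit_cleanup()/my_kthread() are invented, and the task_work API is used with the signatures introduced in this series (they changed in later kernels):

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/task_work.h>

    static void my_exit_cleanup(struct task_work *unused)
    {
            pr_err("kthread \"%s\" died with resources held\n", current->comm);
            /* release whatever the thread was responsible for */
    }

    static int my_kthread(void *data)
    {
            struct task_work on_exit;

            init_task_work(&on_exit, my_exit_cleanup, NULL);
            task_work_add(current, &on_exit, false);

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);

            /* Normal termination: make sure the destructor does not fire. */
            task_work_cancel(current, my_exit_cleanup);
            return 0;
    }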
@@ -1044,7 +1049,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			 * has. The type flags are unreliable as the
 			 * underlying chip implementation can override them.
 			 */
-			pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+			pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
 			       irq);
 			ret = -EINVAL;
 			goto out_mask;
@@ -1095,7 +1100,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		if (nmsk != omsk)
 			/* hope the handler works with current trigger mode */
-			pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+			pr_warning("irq %d uses trigger mode %u; requested %u\n",
 				   irq, nmsk, omsk);
 	}
 
@@ -1133,7 +1138,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 mismatch:
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
-		pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
 		       irq, new->flags, new->name, old->flags, old->name);
 #ifdef CONFIG_DEBUG_SHIRQ
 		dump_stack();
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c89751b327..ca3f4aaff707 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		int ret = chip->irq_set_affinity(&desc->irq_data,
-						 desc->pending_mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-		}
-	}
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
 	cpumask_clear(desc->pending_mask);
 }
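With all three open-coded copies of the mask/copy/thread-affinity dance (__irq_set_affinity_locked(), setup_affinity() and irq_move_masked_irq()) folded into irq_do_set_affinity(), a caller sees the same semantics whether the move happens immediately in process context or is deferred until the next interrupt. Driver-level view of the consolidated path; pin_irq_to_cpu() is an invented helper around the real irq_set_affinity():

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/interrupt.h>

    static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
    {
            cpumask_var_t mask;
            int ret;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_set_cpu(cpu, mask);
            /* Takes desc->lock, then one of the irq_do_set_affinity() paths above. */
            ret = irq_set_affinity(irq, mask);
            free_cpumask_var(mask);
            return ret;
    }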