Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/chip.c        5
-rw-r--r--  kernel/irq/irqdesc.c     1
-rw-r--r--  kernel/irq/irqdomain.c 106
-rw-r--r--  kernel/irq/manage.c    118
-rw-r--r--  kernel/irq/pm.c          7
-rw-r--r--  kernel/irq/resend.c      7
6 files changed, 168 insertions, 76 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6080f6bc8c33..fc275e4f629b 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -379,8 +379,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	 * If its disabled or no action available
 	 * keep it masked and get out of here
 	 */
-	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	handle_irq_event(desc);
 
@@ -518,6 +520,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL(handle_edge_irq);
 
 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 /**
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index d86e254b95eb..192a302d6cfd 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -112,6 +112,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	return radix_tree_lookup(&irq_desc_tree, irq);
 }
+EXPORT_SYMBOL(irq_to_desc);
 
 static void delete_irq_desc(unsigned int irq)
 {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0e0ba5f840b2..41c1564103f1 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "irq: " fmt
+
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/interrupt.h>
@@ -56,14 +58,73 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
 	return domain;
 }
 
+static void irq_domain_free(struct irq_domain *domain)
+{
+	of_node_put(domain->of_node);
+	kfree(domain);
+}
+
 static void irq_domain_add(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("irq: Allocated domain of type %d @0x%p\n",
+	pr_debug("Allocated domain of type %d @0x%p\n",
+		 domain->revmap_type, domain);
+}
+
+/**
+ * irq_domain_remove() - Remove an irq domain.
+ * @domain: domain to remove
+ *
+ * This routine is used to remove an irq domain. The caller must ensure
+ * that all mappings within the domain have been disposed of prior to
+ * use, depending on the revmap type.
+ */
+void irq_domain_remove(struct irq_domain *domain)
+{
+	mutex_lock(&irq_domain_mutex);
+
+	switch (domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LEGACY:
+		/*
+		 * Legacy domains don't manage their own irq_desc
+		 * allocations, we expect the caller to handle irq_desc
+		 * freeing on their own.
+		 */
+		break;
+	case IRQ_DOMAIN_MAP_TREE:
+		/*
+		 * radix_tree_delete() takes care of destroying the root
+		 * node when all entries are removed. Shout if there are
+		 * any mappings left.
+		 */
+		WARN_ON(domain->revmap_data.tree.height);
+		break;
+	case IRQ_DOMAIN_MAP_LINEAR:
+		kfree(domain->revmap_data.linear.revmap);
+		domain->revmap_data.linear.size = 0;
+		break;
+	case IRQ_DOMAIN_MAP_NOMAP:
+		break;
+	}
+
+	list_del(&domain->link);
+
+	/*
+	 * If the going away domain is the default one, reset it.
+	 */
+	if (unlikely(irq_default_domain == domain))
+		irq_set_default_host(NULL);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	pr_debug("Removed domain of type %d @0x%p\n",
 		 domain->revmap_type, domain);
+
+	irq_domain_free(domain);
 }
+EXPORT_SYMBOL_GPL(irq_domain_remove);
 
 static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
 					     irq_hw_number_t hwirq)
@@ -117,8 +178,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 
 		if (WARN_ON(!irq_data || irq_data->domain)) {
 			mutex_unlock(&irq_domain_mutex);
-			of_node_put(domain->of_node);
-			kfree(domain);
+			irq_domain_free(domain);
 			return NULL;
 		}
 	}
@@ -152,10 +212,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 	irq_domain_add(domain);
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
  * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
+ * @size: Number of interrupts in the domain.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  */
@@ -181,6 +243,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 	irq_domain_add(domain);
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_linear);
 
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 					 unsigned int max_irq,
@@ -195,6 +258,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 	}
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
 
 /**
  * irq_domain_add_tree()
@@ -216,6 +280,7 @@ struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
 	}
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_tree);
 
 /**
  * irq_find_host() - Locates a domain for a given device node
@@ -259,10 +324,11 @@ EXPORT_SYMBOL_GPL(irq_find_host);
  */
 void irq_set_default_host(struct irq_domain *domain)
 {
-	pr_debug("irq: Default domain set to @0x%p\n", domain);
+	pr_debug("Default domain set to @0x%p\n", domain);
 
 	irq_default_domain = domain;
 }
+EXPORT_SYMBOL_GPL(irq_set_default_host);
 
 static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 			  irq_hw_number_t hwirq)
@@ -272,7 +338,7 @@ static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 	irq_data->hwirq = hwirq;
 	irq_data->domain = domain;
 	if (domain->ops->map(domain, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
+		pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
 		irq_data->domain = NULL;
 		irq_data->hwirq = 0;
 		return -1;
@@ -303,7 +369,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 
 	virq = irq_alloc_desc_from(1, 0);
 	if (!virq) {
-		pr_debug("irq: create_direct virq allocation failed\n");
+		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
 	if (virq >= domain->revmap_data.nomap.max_irq) {
@@ -312,7 +378,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 		irq_free_desc(virq);
 		return 0;
 	}
-	pr_debug("irq: create_direct obtained virq %d\n", virq);
+	pr_debug("create_direct obtained virq %d\n", virq);
 
 	if (irq_setup_virq(domain, virq, virq)) {
 		irq_free_desc(virq);
@@ -321,6 +387,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 
 	return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 
 /**
  * irq_create_mapping() - Map a hardware interrupt into linux irq space
@@ -338,23 +405,23 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	unsigned int hint;
 	int virq;
 
-	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL) {
-		printk(KERN_WARNING "irq_create_mapping called for"
-		       " NULL domain, hwirq=%lx\n", hwirq);
+		pr_warning("irq_create_mapping called for"
+			   " NULL domain, hwirq=%lx\n", hwirq);
 		WARN_ON(1);
 		return 0;
 	}
-	pr_debug("irq: -> using domain @%p\n", domain);
+	pr_debug("-> using domain @%p\n", domain);
 
 	/* Check if mapping already exists */
 	virq = irq_find_mapping(domain, hwirq);
 	if (virq) {
-		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		pr_debug("-> existing mapping on virq %d\n", virq);
 		return virq;
 	}
 
@@ -370,7 +437,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	if (virq <= 0)
 		virq = irq_alloc_desc_from(1, 0);
 	if (virq <= 0) {
-		pr_debug("irq: -> virq allocation failed\n");
+		pr_debug("-> virq allocation failed\n");
 		return 0;
 	}
 
@@ -380,7 +447,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 		return 0;
 	}
 
-	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
 		 hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
 
 	return virq;
@@ -409,8 +476,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 		if (intsize > 0)
 			return intspec[0];
 #endif
-		printk(KERN_WARNING "irq: no irq domain found for %s !\n",
-		       controller->full_name);
+		pr_warning("no irq domain found for %s !\n",
+			   controller->full_name);
 		return 0;
 	}
 
@@ -560,6 +627,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
 	 */
 	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
 }
+EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
 
 /**
  * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
@@ -584,6 +652,7 @@ void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
 		mutex_unlock(&revmap_trees_mutex);
 	}
 }
+EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
 
 /**
  * irq_linear_revmap() - Find a linux irq from a hw irq number.
@@ -617,6 +686,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
 
 	return revmap[hwirq];
 }
+EXPORT_SYMBOL_GPL(irq_linear_revmap);
 
 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
@@ -691,8 +761,8 @@ static int __init irq_debugfs_init(void)
 __initcall(irq_debugfs_init);
 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
 
-int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
-			  irq_hw_number_t hwirq)
+static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+				 irq_hw_number_t hwirq)
 {
 	return 0;
 }
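
For context (not part of the patch): a minimal sketch of how an interrupt-controller driver might pair the irq_domain calls this patch exports, registering a linear domain at probe time and tearing it down with the new irq_domain_remove() once all mappings have been disposed of, as its kerneldoc requires. All foo_* names are invented for illustration, and dummy_irq_chip stands in for the controller's real irq_chip.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical controller driver; foo_* names are illustrative only. */
static struct irq_domain *foo_domain;
static unsigned int foo_virq;

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hwirq)
{
	/* dummy_irq_chip stands in for the controller's real irq_chip */
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int foo_probe(struct device_node *node)
{
	/* linear revmap over 32 hwirqs, cf. irq_domain_add_linear() above */
	foo_domain = irq_domain_add_linear(node, 32, &foo_domain_ops, NULL);
	if (!foo_domain)
		return -ENOMEM;

	foo_virq = irq_create_mapping(foo_domain, 5);	/* hwirq 5 -> virq */
	return foo_virq ? 0 : -EINVAL;
}

static void foo_remove(void)
{
	/* dispose of every mapping before irq_domain_remove() */
	irq_dispose_mapping(foo_virq);
	irq_domain_remove(foo_domain);
}
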
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 89a3ea82569b..ea0c6c2ae6f7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+#define pr_fmt(fmt) "genirq: " fmt
+
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -566,7 +569,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * flow-types?
 		 */
 		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
-			chip ? (chip->name ? : "unknown") : "unknown");
+			 chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
 
@@ -600,7 +603,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		ret = 0;
 		break;
 	default:
-		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 		       flags, irq, chip->irq_set_type);
 	}
 	if (unmask)
@@ -773,11 +776,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -793,7 +824,9 @@ static int irq_thread(void *data)
 	handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -815,45 +848,11 @@ static int irq_thread(void *data)
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	printk(KERN_ERR
-	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
@@ -878,7 +877,6 @@ static int
 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
-	const char *old_name = NULL;
 	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
 	cpumask_var_t mask;
@@ -972,10 +970,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
 		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
-		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
-			old_name = old->name;
+		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
 			goto mismatch;
-		}
 
 		/* All handlers must agree on per-cpuness */
 		if ((old->flags & IRQF_PERCPU) !=
@@ -1031,6 +1027,27 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * all existing action->thread_mask bits.
 		 */
 		new->thread_mask = 1 << ffz(thread_mask);
+
+	} else if (new->handler == irq_default_primary_handler) {
+		/*
+		 * The interrupt was requested with handler = NULL, so
+		 * we use the default primary handler for it. But it
+		 * does not have the oneshot flag set. In combination
+		 * with level interrupts this is deadly, because the
+		 * default primary handler just wakes the thread, then
+		 * the irq lines is reenabled, but the device still
+		 * has the level irq asserted. Rinse and repeat....
+		 *
+		 * While this works for edge type interrupts, we play
+		 * it safe and reject unconditionally because we can't
+		 * say for sure which type this interrupt really
+		 * has. The type flags are unreliable as the
+		 * underlying chip implementation can override them.
+		 */
+		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+		       irq);
+		ret = -EINVAL;
+		goto out_mask;
 	}
 
 	if (!shared) {
@@ -1078,7 +1095,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		if (nmsk != omsk)
 			/* hope the handler works with current trigger mode */
-			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+			pr_warning("irq %d uses trigger mode %u; requested %u\n",
 				   irq, nmsk, omsk);
 	}
 
@@ -1115,14 +1132,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	return 0;
 
 mismatch:
-#ifdef CONFIG_DEBUG_SHIRQ
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
-		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
-		if (old_name)
-			printk(KERN_ERR "current handler: %s\n", old_name);
+		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+		       irq, new->flags, new->name, old->flags, old->name);
+#ifdef CONFIG_DEBUG_SHIRQ
 		dump_stack();
-	}
 #endif
+	}
 	ret = -EBUSY;
 
 out_mask:
@@ -1204,12 +1220,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
-	/* Currently used only by UML, might disappear one day: */
-#ifdef CONFIG_IRQ_RELEASE_METHOD
-	if (desc->irq_data.chip->release)
-		desc->irq_data.chip->release(irq, dev_id);
-#endif
-
 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action)
 		irq_shutdown(desc);
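
For context (not part of the patch): the __setup_irq() hunk above now rejects a threaded request whose primary handler is NULL unless IRQF_ONESHOT is set, returning -EINVAL instead of risking an interrupt storm on level-triggered lines. A minimal sketch of the request shape the new check expects; the bar_* names are invented for illustration.

#include <linux/interrupt.h>

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	/* sleeping context: talk to the device over a slow bus, then ack it */
	return IRQ_HANDLED;
}

static int bar_request(unsigned int irq, void *bar_dev)
{
	/*
	 * NULL primary handler + IRQF_ONESHOT keeps a level-triggered line
	 * masked until bar_thread_fn() has run, which is exactly the case
	 * the new check in __setup_irq() enforces.
	 */
	return request_threaded_irq(irq, NULL, bar_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "bar", bar_dev);
}
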
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 15e53b1766a6..cb228bf21760 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -103,8 +103,13 @@ int check_wakeup_irqs(void)
 	int irq;
 
 	for_each_irq_desc(irq, desc) {
+		/*
+		 * Only interrupts which are marked as wakeup source
+		 * and have not been disabled before the suspend check
+		 * can abort suspend.
+		 */
 		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->istate & IRQS_PENDING)
+			if (desc->depth == 1 && desc->istate & IRQS_PENDING)
 				return -EBUSY;
 			continue;
 		}
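
For context (not part of the patch): check_wakeup_irqs() only looks at interrupts that a driver has marked as wakeup sources, typically via enable_irq_wake() from its suspend callback. A hedged sketch of that driver side; the baz_* names are invented for illustration.

#include <linux/device.h>
#include <linux/interrupt.h>

struct baz { int irq; };	/* hypothetical driver state */

static int baz_suspend(struct device *dev)
{
	struct baz *baz = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(baz->irq);	/* sets IRQD_WAKEUP_STATE */
	return 0;
}

static int baz_resume(struct device *dev)
{
	struct baz *baz = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(baz->irq);
	return 0;
}
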
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 14dd5761e8c9..6454db7b6a4d 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -58,10 +58,13 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	/*
 	 * We do not resend level type interrupts. Level type
 	 * interrupts are resent by hardware when they are still
-	 * active.
+	 * active. Clear the pending bit so suspend/resume does not
+	 * get confused.
 	 */
-	if (irq_settings_is_level(desc))
+	if (irq_settings_is_level(desc)) {
+		desc->istate &= ~IRQS_PENDING;
 		return;
+	}
 	if (desc->istate & IRQS_REPLAY)
 		return;
 	if (desc->istate & IRQS_PENDING) {