aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/irq
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/irq')
-rw-r--r--kernel/irq/devres.c16
-rw-r--r--kernel/irq/handle.c58
-rw-r--r--kernel/irq/manage.c194
-rw-r--r--kernel/irq/numa_migrate.c1
4 files changed, 244 insertions, 25 deletions
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 38a25b8d8bff..d06df9c41cba 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
26} 26}
27 27
28/** 28/**
29 * devm_request_irq - allocate an interrupt line for a managed device 29 * devm_request_threaded_irq - allocate an interrupt line for a managed device
30 * @dev: device to request interrupt for 30 * @dev: device to request interrupt for
31 * @irq: Interrupt line to allocate 31 * @irq: Interrupt line to allocate
32 * @handler: Function to be called when the IRQ occurs 32 * @handler: Function to be called when the IRQ occurs
33 * @thread_fn: function to be called in a threaded interrupt context. NULL
34 * for devices which handle everything in @handler
33 * @irqflags: Interrupt type flags 35 * @irqflags: Interrupt type flags
34 * @devname: An ascii name for the claiming device 36 * @devname: An ascii name for the claiming device
35 * @dev_id: A cookie passed back to the handler function 37 * @dev_id: A cookie passed back to the handler function
@@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
42 * If an IRQ allocated with this function needs to be freed 44 * If an IRQ allocated with this function needs to be freed
43 * separately, devm_free_irq() must be used. 45 * separately, devm_free_irq() must be used.
44 */ 46 */
45int devm_request_irq(struct device *dev, unsigned int irq, 47int devm_request_threaded_irq(struct device *dev, unsigned int irq,
46 irq_handler_t handler, unsigned long irqflags, 48 irq_handler_t handler, irq_handler_t thread_fn,
47 const char *devname, void *dev_id) 49 unsigned long irqflags, const char *devname,
50 void *dev_id)
48{ 51{
49 struct irq_devres *dr; 52 struct irq_devres *dr;
50 int rc; 53 int rc;
@@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
54 if (!dr) 57 if (!dr)
55 return -ENOMEM; 58 return -ENOMEM;
56 59
57 rc = request_irq(irq, handler, irqflags, devname, dev_id); 60 rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
61 dev_id);
58 if (rc) { 62 if (rc) {
59 devres_free(dr); 63 devres_free(dr);
60 return rc; 64 return rc;
@@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
66 70
67 return 0; 71 return 0;
68} 72}
69EXPORT_SYMBOL(devm_request_irq); 73EXPORT_SYMBOL(devm_request_threaded_irq);
70 74
71/** 75/**
72 * devm_free_irq - free an interrupt 76 * devm_free_irq - free an interrupt
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..26e08754744f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/rculist.h> 18#include <linux/rculist.h>
19#include <linux/hash.h> 19#include <linux/hash.h>
20#include <trace/irq.h>
20#include <linux/bootmem.h> 21#include <linux/bootmem.h>
21 22
22#include "internals.h" 23#include "internals.h"
@@ -338,6 +339,18 @@ irqreturn_t no_action(int cpl, void *dev_id)
338 return IRQ_NONE; 339 return IRQ_NONE;
339} 340}
340 341
342static void warn_no_thread(unsigned int irq, struct irqaction *action)
343{
344 if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
345 return;
346
347 printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
348 "but no thread function available.", irq, action->name);
349}
350
351DEFINE_TRACE(irq_handler_entry);
352DEFINE_TRACE(irq_handler_exit);
353
341/** 354/**
342 * handle_IRQ_event - irq action chain handler 355 * handle_IRQ_event - irq action chain handler
343 * @irq: the interrupt number 356 * @irq: the interrupt number
@@ -350,15 +363,54 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
350 irqreturn_t ret, retval = IRQ_NONE; 363 irqreturn_t ret, retval = IRQ_NONE;
351 unsigned int status = 0; 364 unsigned int status = 0;
352 365
353 WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
354
355 if (!(action->flags & IRQF_DISABLED)) 366 if (!(action->flags & IRQF_DISABLED))
356 local_irq_enable_in_hardirq(); 367 local_irq_enable_in_hardirq();
357 368
358 do { 369 do {
370 trace_irq_handler_entry(irq, action);
359 ret = action->handler(irq, action->dev_id); 371 ret = action->handler(irq, action->dev_id);
360 if (ret == IRQ_HANDLED) 372 trace_irq_handler_exit(irq, action, ret);
373
374 switch (ret) {
375 case IRQ_WAKE_THREAD:
376 /*
377 * Set result to handled so the spurious check
378 * does not trigger.
379 */
380 ret = IRQ_HANDLED;
381
382 /*
383 * Catch drivers which return WAKE_THREAD but
384 * did not set up a thread function
385 */
386 if (unlikely(!action->thread_fn)) {
387 warn_no_thread(irq, action);
388 break;
389 }
390
391 /*
392 * Wake up the handler thread for this
393 * action. In case the thread crashed and was
394 * killed we just pretend that we handled the
395 * interrupt. The hardirq handler above has
396 * disabled the device interrupt, so no irq
397 * storm is lurking.
398 */
399 if (likely(!test_bit(IRQTF_DIED,
400 &action->thread_flags))) {
401 set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
402 wake_up_process(action->thread);
403 }
404
405 /* Fall through to add to randomness */
406 case IRQ_HANDLED:
361 status |= action->flags; 407 status |= action->flags;
408 break;
409
410 default:
411 break;
412 }
413
362 retval |= ret; 414 retval |= ret;
363 action = action->next; 415 action = action->next;
364 } while (action); 416 } while (action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1516ab77355c..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
8 */ 8 */
9 9
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <linux/kthread.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/random.h> 13#include <linux/random.h>
13#include <linux/interrupt.h> 14#include <linux/interrupt.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/sched.h>
15 17
16#include "internals.h" 18#include "internals.h"
17 19
18#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
19cpumask_var_t irq_default_affinity;
20
21/** 20/**
22 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 21 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
23 * @irq: interrupt number to wait for 22 * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
53 52
54 /* Oops, that failed? */ 53 /* Oops, that failed? */
55 } while (status & IRQ_INPROGRESS); 54 } while (status & IRQ_INPROGRESS);
55
56 /*
57 * We made sure that no hardirq handler is running. Now verify
58 * that no threaded handlers are active.
59 */
60 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
56} 61}
57EXPORT_SYMBOL(synchronize_irq); 62EXPORT_SYMBOL(synchronize_irq);
58 63
64#ifdef CONFIG_SMP
65cpumask_var_t irq_default_affinity;
66
59/** 67/**
60 * irq_can_set_affinity - Check if the affinity of a given irq can be set 68 * irq_can_set_affinity - Check if the affinity of a given irq can be set
61 * @irq: Interrupt to check 69 * @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
72 return 1; 80 return 1;
73} 81}
74 82
83static void
84irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
85{
86 struct irqaction *action = desc->action;
87
88 while (action) {
89 if (action->thread)
90 set_cpus_allowed_ptr(action->thread, cpumask);
91 action = action->next;
92 }
93}
94
75/** 95/**
76 * irq_set_affinity - Set the irq affinity of a given irq 96 * irq_set_affinity - Set the irq affinity of a given irq
77 * @irq: Interrupt to set affinity 97 * @irq: Interrupt to set affinity
@@ -89,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
89 spin_lock_irqsave(&desc->lock, flags); 109 spin_lock_irqsave(&desc->lock, flags);
90 110
91#ifdef CONFIG_GENERIC_PENDING_IRQ 111#ifdef CONFIG_GENERIC_PENDING_IRQ
92 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 112 if (desc->status & IRQ_MOVE_PCNTXT)
93 cpumask_copy(desc->affinity, cpumask);
94 desc->chip->set_affinity(irq, cpumask); 113 desc->chip->set_affinity(irq, cpumask);
95 } else { 114 else {
96 desc->status |= IRQ_MOVE_PENDING; 115 desc->status |= IRQ_MOVE_PENDING;
97 cpumask_copy(desc->pending_mask, cpumask); 116 cpumask_copy(desc->pending_mask, cpumask);
98 } 117 }
@@ -100,6 +119,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
100 cpumask_copy(desc->affinity, cpumask); 119 cpumask_copy(desc->affinity, cpumask);
101 desc->chip->set_affinity(irq, cpumask); 120 desc->chip->set_affinity(irq, cpumask);
102#endif 121#endif
122 irq_set_thread_affinity(desc, cpumask);
103 desc->status |= IRQ_AFFINITY_SET; 123 desc->status |= IRQ_AFFINITY_SET;
104 spin_unlock_irqrestore(&desc->lock, flags); 124 spin_unlock_irqrestore(&desc->lock, flags);
105 return 0; 125 return 0;
@@ -150,6 +170,8 @@ int irq_select_affinity_usr(unsigned int irq)
150 170
151 spin_lock_irqsave(&desc->lock, flags); 171 spin_lock_irqsave(&desc->lock, flags);
152 ret = setup_affinity(irq, desc); 172 ret = setup_affinity(irq, desc);
173 if (!ret)
174 irq_set_thread_affinity(desc, desc->affinity);
153 spin_unlock_irqrestore(&desc->lock, flags); 175 spin_unlock_irqrestore(&desc->lock, flags);
154 176
155 return ret; 177 return ret;
@@ -401,6 +423,90 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
401 return ret; 423 return ret;
402} 424}
403 425
426static int irq_wait_for_interrupt(struct irqaction *action)
427{
428 while (!kthread_should_stop()) {
429 set_current_state(TASK_INTERRUPTIBLE);
430
431 if (test_and_clear_bit(IRQTF_RUNTHREAD,
432 &action->thread_flags)) {
433 __set_current_state(TASK_RUNNING);
434 return 0;
435 }
436 schedule();
437 }
438 return -1;
439}
440
441/*
442 * Interrupt handler thread
443 */
444static int irq_thread(void *data)
445{
446 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
447 struct irqaction *action = data;
448 struct irq_desc *desc = irq_to_desc(action->irq);
449 int wake;
450
451 sched_setscheduler(current, SCHED_FIFO, &param);
452 current->irqaction = action;
453
454 while (!irq_wait_for_interrupt(action)) {
455
456 atomic_inc(&desc->threads_active);
457
458 spin_lock_irq(&desc->lock);
459 if (unlikely(desc->status & IRQ_DISABLED)) {
460 /*
461 * CHECKME: We might need a dedicated
462 * IRQ_THREAD_PENDING flag here, which
463 * retriggers the thread in check_irq_resend()
464 * but AFAICT IRQ_PENDING should be fine as it
465 * retriggers the interrupt itself --- tglx
466 */
467 desc->status |= IRQ_PENDING;
468 spin_unlock_irq(&desc->lock);
469 } else {
470 spin_unlock_irq(&desc->lock);
471
472 action->thread_fn(action->irq, action->dev_id);
473 }
474
475 wake = atomic_dec_and_test(&desc->threads_active);
476
477 if (wake && waitqueue_active(&desc->wait_for_threads))
478 wake_up(&desc->wait_for_threads);
479 }
480
481 /*
482 * Clear irqaction. Otherwise exit_irq_thread() would make
483 * fuzz about an active irq thread going into nirvana.
484 */
485 current->irqaction = NULL;
486 return 0;
487}
488
489/*
490 * Called from do_exit()
491 */
492void exit_irq_thread(void)
493{
494 struct task_struct *tsk = current;
495
496 if (!tsk->irqaction)
497 return;
498
499 printk(KERN_ERR
500 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
501 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
502
503 /*
504 * Set the THREAD DIED flag to prevent further wakeups of the
505 * soon to be gone threaded handler.
506 */
507 set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
508}
509
404/* 510/*
405 * Internal function to register an irqaction - typically used to 511 * Internal function to register an irqaction - typically used to
406 * allocate special interrupts that are part of the architecture. 512 * allocate special interrupts that are part of the architecture.
@@ -437,6 +543,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
437 } 543 }
438 544
439 /* 545 /*
546 * Threaded handler ?
547 */
548 if (new->thread_fn) {
549 struct task_struct *t;
550
551 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
552 new->name);
553 if (IS_ERR(t))
554 return PTR_ERR(t);
555 /*
556 * We keep the reference to the task struct even if
557 * the thread dies to avoid that the interrupt code
558 * references an already freed task_struct.
559 */
560 get_task_struct(t);
561 new->thread = t;
562 wake_up_process(t);
563 }
564
565 /*
440 * The following block of code has to be executed atomically 566 * The following block of code has to be executed atomically
441 */ 567 */
442 spin_lock_irqsave(&desc->lock, flags); 568 spin_lock_irqsave(&desc->lock, flags);
@@ -473,15 +599,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
473 if (!shared) { 599 if (!shared) {
474 irq_chip_set_defaults(desc->chip); 600 irq_chip_set_defaults(desc->chip);
475 601
602 init_waitqueue_head(&desc->wait_for_threads);
603
476 /* Setup the type (level, edge polarity) if configured: */ 604 /* Setup the type (level, edge polarity) if configured: */
477 if (new->flags & IRQF_TRIGGER_MASK) { 605 if (new->flags & IRQF_TRIGGER_MASK) {
478 ret = __irq_set_trigger(desc, irq, 606 ret = __irq_set_trigger(desc, irq,
479 new->flags & IRQF_TRIGGER_MASK); 607 new->flags & IRQF_TRIGGER_MASK);
480 608
481 if (ret) { 609 if (ret)
482 spin_unlock_irqrestore(&desc->lock, flags); 610 goto out_thread;
483 return ret;
484 }
485 } else 611 } else
486 compat_irq_chip_set_default_handler(desc); 612 compat_irq_chip_set_default_handler(desc);
487#if defined(CONFIG_IRQ_PER_CPU) 613#if defined(CONFIG_IRQ_PER_CPU)
@@ -549,8 +675,19 @@ mismatch:
549 dump_stack(); 675 dump_stack();
550 } 676 }
551#endif 677#endif
678 ret = -EBUSY;
679
680out_thread:
552 spin_unlock_irqrestore(&desc->lock, flags); 681 spin_unlock_irqrestore(&desc->lock, flags);
553 return -EBUSY; 682 if (new->thread) {
683 struct task_struct *t = new->thread;
684
685 new->thread = NULL;
686 if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
687 kthread_stop(t);
688 put_task_struct(t);
689 }
690 return ret;
554} 691}
555 692
556/** 693/**
@@ -576,6 +713,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
576{ 713{
577 struct irq_desc *desc = irq_to_desc(irq); 714 struct irq_desc *desc = irq_to_desc(irq);
578 struct irqaction *action, **action_ptr; 715 struct irqaction *action, **action_ptr;
716 struct task_struct *irqthread;
579 unsigned long flags; 717 unsigned long flags;
580 718
581 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 719 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -622,6 +760,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
622 else 760 else
623 desc->chip->disable(irq); 761 desc->chip->disable(irq);
624 } 762 }
763
764 irqthread = action->thread;
765 action->thread = NULL;
766
625 spin_unlock_irqrestore(&desc->lock, flags); 767 spin_unlock_irqrestore(&desc->lock, flags);
626 768
627 unregister_handler_proc(irq, action); 769 unregister_handler_proc(irq, action);
@@ -629,6 +771,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
629 /* Make sure it's not being used on another CPU: */ 771 /* Make sure it's not being used on another CPU: */
630 synchronize_irq(irq); 772 synchronize_irq(irq);
631 773
774 if (irqthread) {
775 if (!test_bit(IRQTF_DIED, &action->thread_flags))
776 kthread_stop(irqthread);
777 put_task_struct(irqthread);
778 }
779
632#ifdef CONFIG_DEBUG_SHIRQ 780#ifdef CONFIG_DEBUG_SHIRQ
633 /* 781 /*
634 * It's a shared IRQ -- the driver ought to be prepared for an IRQ 782 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -681,9 +829,12 @@ void free_irq(unsigned int irq, void *dev_id)
681EXPORT_SYMBOL(free_irq); 829EXPORT_SYMBOL(free_irq);
682 830
683/** 831/**
684 * request_irq - allocate an interrupt line 832 * request_threaded_irq - allocate an interrupt line
685 * @irq: Interrupt line to allocate 833 * @irq: Interrupt line to allocate
686 * @handler: Function to be called when the IRQ occurs 834 * @handler: Function to be called when the IRQ occurs.
835 * Primary handler for threaded interrupts
836 * @thread_fn: Function called from the irq handler thread
837 * If NULL, no irq thread is created
687 * @irqflags: Interrupt type flags 838 * @irqflags: Interrupt type flags
688 * @devname: An ascii name for the claiming device 839 * @devname: An ascii name for the claiming device
689 * @dev_id: A cookie passed back to the handler function 840 * @dev_id: A cookie passed back to the handler function
@@ -695,6 +846,15 @@ EXPORT_SYMBOL(free_irq);
695 * raises, you must take care both to initialise your hardware 846 * raises, you must take care both to initialise your hardware
696 * and to set up the interrupt handler in the right order. 847 * and to set up the interrupt handler in the right order.
697 * 848 *
849 * If you want to set up a threaded irq handler for your device
850 * then you need to supply @handler and @thread_fn. @handler is
851 * still called in hard interrupt context and has to check
852 * whether the interrupt originates from the device. If yes it
853 * needs to disable the interrupt on the device and return
854 * IRQ_WAKE_THREAD which will wake up the handler thread and run
855 * @thread_fn. This split handler design is necessary to support
856 * shared interrupts.
857 *
698 * Dev_id must be globally unique. Normally the address of the 858 * Dev_id must be globally unique. Normally the address of the
699 * device data structure is used as the cookie. Since the handler 859 * device data structure is used as the cookie. Since the handler
700 * receives this value it makes sense to use it. 860 * receives this value it makes sense to use it.
@@ -710,8 +870,9 @@ EXPORT_SYMBOL(free_irq);
710 * IRQF_TRIGGER_* Specify active edge(s) or level 870 * IRQF_TRIGGER_* Specify active edge(s) or level
711 * 871 *
712 */ 872 */
713int request_irq(unsigned int irq, irq_handler_t handler, 873int request_threaded_irq(unsigned int irq, irq_handler_t handler,
714 unsigned long irqflags, const char *devname, void *dev_id) 874 irq_handler_t thread_fn, unsigned long irqflags,
875 const char *devname, void *dev_id)
715{ 876{
716 struct irqaction *action; 877 struct irqaction *action;
717 struct irq_desc *desc; 878 struct irq_desc *desc;
@@ -759,6 +920,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
759 return -ENOMEM; 920 return -ENOMEM;
760 921
761 action->handler = handler; 922 action->handler = handler;
923 action->thread_fn = thread_fn;
762 action->flags = irqflags; 924 action->flags = irqflags;
763 action->name = devname; 925 action->name = devname;
764 action->dev_id = dev_id; 926 action->dev_id = dev_id;
@@ -788,4 +950,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
788#endif 950#endif
789 return retval; 951 return retval;
790} 952}
791EXPORT_SYMBOL(request_irq); 953EXPORT_SYMBOL(request_threaded_irq);
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 243d6121e50e..44bbdcbaf8d2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
54static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) 54static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
55{ 55{
56 free_kstat_irqs(old_desc, desc); 56 free_kstat_irqs(old_desc, desc);
57 free_desc_masks(old_desc, desc);
57 arch_free_chip_data(old_desc, desc); 58 arch_free_chip_data(old_desc, desc);
58} 59}
59 60