author		Thomas Gleixner <tglx@linutronix.de>	2009-03-23 13:28:15 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-03-24 07:15:23 -0400
commit		3aa551c9b4c40018f0e261a178e3d25478dc04a9
tree		2a696109273fcc421d774cc8fefa4180331a85ad
parent		80c5520811d3805adcb15c570ea5e2d489fa5d0b
genirq: add threaded interrupt handler support
Add support for threaded interrupt handlers:
A device driver can request that its main interrupt handler runs in a
thread. To achieve this the device driver requests the interrupt with
request_threaded_irq() and provides, in addition to the handler, a
thread function. The handler function is called in hard interrupt
context and needs to check whether the interrupt originated from the
device. If the interrupt originated from the device, the handler can
return either IRQ_HANDLED or IRQ_WAKE_THREAD. IRQ_HANDLED is returned
when no further action is required. IRQ_WAKE_THREAD causes the genirq
code to invoke the threaded (main) handler. When IRQ_WAKE_THREAD is
returned, the handler must have disabled the interrupt at the device
level. This is mandatory for shared interrupt handlers, but we need to
do it as well for obscure x86 hardware where disabling an interrupt at
the IO_APIC level redirects the interrupt to the legacy PIC interrupt
lines.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
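For illustration, here is a minimal driver-side sketch of the new API. The
foo_* device, its registers, and the bit masks are hypothetical, invented
for this example; only request_threaded_irq(), the IRQ_* return values, and
the split into hard-irq handler and thread function come from this patch:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	/* Hypothetical register offsets and bits -- not part of this patch */
	#define FOO_IRQ_STATUS	0x00
	#define FOO_IRQ_MASK	0x04
	#define FOO_IRQ_PENDING	0x01
	#define FOO_IRQ_ENABLE	0x01

	struct foo_dev {
		void __iomem	*regs;
		int		irq;
	};

	/* Hard interrupt context: only check the device and quiesce it */
	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		if (!(readl(foo->regs + FOO_IRQ_STATUS) & FOO_IRQ_PENDING))
			return IRQ_NONE;	/* not ours (shared line) */

		/* Disable the interrupt on the device before waking the thread */
		writel(0, foo->regs + FOO_IRQ_MASK);
		return IRQ_WAKE_THREAD;
	}

	/* Runs in the per-action handler thread, may sleep */
	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		/* ... long running work: take mutexes, talk to the device ... */

		/* Re-enable the interrupt on the device when done */
		writel(FOO_IRQ_ENABLE, foo->regs + FOO_IRQ_MASK);
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(struct foo_dev *foo)
	{
		return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
					    IRQF_SHARED, "foo", foo);
	}

The handler thread created by __setup_irq() runs at SCHED_FIFO priority
(MAX_USER_RT_PRIO/2) and shows up as "irq/<nr>-foo" in the process list.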
-rw-r--r--	include/linux/hardirq.h   |   2
-rw-r--r--	include/linux/interrupt.h |  37
-rw-r--r--	include/linux/irq.h       |   5
-rw-r--r--	include/linux/irqreturn.h |   2
-rw-r--r--	include/linux/sched.h     |   5
-rw-r--r--	kernel/exit.c             |   2
-rw-r--r--	kernel/irq/handle.c       |  31
-rw-r--r--	kernel/irq/manage.c       | 192
8 files changed, 259 insertions(+), 17 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..2dfaadbdb2ac 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -105,7 +105,7 @@
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
 extern void synchronize_irq(unsigned int irq);
 #else
 # define synchronize_irq(irq) barrier()
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0c9cb63e6895..6fc2b720c231 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,16 @@
 #define IRQF_NOBALANCING	0x00000800
 #define IRQF_IRQPOLL		0x00001000
 
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED      - handler thread died
+ */
+enum {
+	IRQTF_RUNTHREAD,
+	IRQTF_DIED,
+};
+
 typedef irqreturn_t (*irq_handler_t)(int, void *);
 
 /**
@@ -71,6 +81,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @next:	pointer to the next irqaction for shared interrupts
  * @irq:	interrupt number
  * @dir:	pointer to the proc/irq/NN/name entry
+ * @thread_fn:	interrupt handler function for threaded interrupts
+ * @thread:	thread pointer for threaded interrupts
+ * @thread_flags:	flags related to @thread
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -81,11 +94,31 @@ struct irqaction {
 	struct irqaction *next;
 	int irq;
 	struct proc_dir_entry *dir;
+	irq_handler_t thread_fn;
+	struct task_struct *thread;
+	unsigned long thread_flags;
 };
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int __must_check request_irq(unsigned int, irq_handler_t handler,
-		       unsigned long, const char *, void *);
+
+extern int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+		     irq_handler_t thread_fn,
+		     unsigned long flags, const char *name, void *dev);
+
+static inline int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+	    const char *name, void *dev)
+{
+	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void exit_irq_thread(void);
+#else
+static inline void exit_irq_thread(void) { }
+#endif
+
 extern void free_irq(unsigned int, void *);
 
 struct device;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 873e4ac11b81..8b1cf0630210 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -20,6 +20,7 @@
 #include <linux/irqreturn.h>
 #include <linux/irqnr.h>
 #include <linux/errno.h>
+#include <linux/wait.h>
 
 #include <asm/irq.h>
 #include <asm/ptrace.h>
@@ -155,6 +156,8 @@ struct irq_2_iommu;
  * @affinity:		IRQ affinity on SMP
  * @cpu:		cpu index useful for balancing
  * @pending_mask:	pending rebalanced interrupts
+ * @threads_active:	number of irqaction threads currently running
+ * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
  * @dir:		/proc/irq/ procfs entry
  * @name:		flow handler name for /proc/interrupts output
  */
@@ -186,6 +189,8 @@ struct irq_desc {
 	cpumask_var_t		pending_mask;
 #endif
 #endif
+	atomic_t		threads_active;
+	wait_queue_head_t	wait_for_threads;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index c5584ca5b8c9..819acaaac3f5 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -5,10 +5,12 @@
  * enum irqreturn
  * @IRQ_NONE		interrupt was not from this device
  * @IRQ_HANDLED		interrupt was handled by this device
+ * @IRQ_WAKE_THREAD	handler requests to wake the handler thread
  */
 enum irqreturn {
 	IRQ_NONE,
 	IRQ_HANDLED,
+	IRQ_WAKE_THREAD,
 };
 
 typedef enum irqreturn irqreturn_t;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46d680643f89..38b77b0f56e5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,6 +1292,11 @@ struct task_struct {
 	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	struct irqaction *irqaction;
+#endif
+
 	/* Protection of the PI data structures: */
 	spinlock_t pi_lock;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 167e1e3ad7c6..ca0b3488c4a9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1037,6 +1037,8 @@ NORET_TYPE void do_exit(long code)
 		schedule();
 	}
 
+	exit_irq_thread();
+
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..fe8f45374e86 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -357,8 +357,37 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
 	do {
 		ret = action->handler(irq, action->dev_id);
-		if (ret == IRQ_HANDLED)
+
+		switch (ret) {
+		case IRQ_WAKE_THREAD:
+			/*
+			 * Wake up the handler thread for this
+			 * action. In case the thread crashed and was
+			 * killed we just pretend that we handled the
+			 * interrupt. The hardirq handler above has
+			 * disabled the device interrupt, so no irq
+			 * storm is lurking.
+			 */
+			if (likely(!test_bit(IRQTF_DIED,
+					     &action->thread_flags))) {
+				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+				wake_up_process(action->thread);
+			}
+
+			/*
+			 * Set it to handled so the spurious check
+			 * does not trigger.
+			 */
+			ret = IRQ_HANDLED;
+			/* Fall through to add to randomness */
+		case IRQ_HANDLED:
 			status |= action->flags;
+			break;
+
+		default:
+			break;
+		}
+
 		retval |= ret;
 		action = action->next;
 	} while (action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6458e99984c0..a4c1ab86cd25 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
+
+	/*
+	 * We made sure that no hardirq handler is running. Now verify
+	 * that no threaded handlers are active.
+	 */
+	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+	struct irqaction *action = desc->action;
+
+	while (action) {
+		if (action->thread)
+			set_cpus_allowed_ptr(action->thread, cpumask);
+		action = action->next;
+	}
+}
+
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq: Interrupt to set affinity
@@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	irq_set_thread_affinity(desc, cpumask);
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
@@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq)
 
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
+	if (!ret)
+		irq_set_thread_affinity(desc, desc->affinity);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -384,6 +407,93 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+static inline int irq_thread_should_run(struct irqaction *action)
+{
+	return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+}
+
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (irq_thread_should_run(action)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		} else
+			schedule();
+	}
+	return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+	struct irqaction *action = data;
+	struct irq_desc *desc = irq_to_desc(action->irq);
+	int wake;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	current->irqaction = action;
+
+	while (!irq_wait_for_interrupt(action)) {
+
+		atomic_inc(&desc->threads_active);
+
+		spin_lock_irq(&desc->lock);
+		if (unlikely(desc->status & IRQ_DISABLED)) {
+			/*
+			 * CHECKME: We might need a dedicated
+			 * IRQ_THREAD_PENDING flag here, which
+			 * retriggers the thread in check_irq_resend()
+			 * but AFAICT IRQ_PENDING should be fine as it
+			 * retriggers the interrupt itself --- tglx
+			 */
+			desc->status |= IRQ_PENDING;
+			spin_unlock_irq(&desc->lock);
+		} else {
+			spin_unlock_irq(&desc->lock);
+
+			action->thread_fn(action->irq, action->dev_id);
+		}
+
+		wake = atomic_dec_and_test(&desc->threads_active);
+
+		if (wake && waitqueue_active(&desc->wait_for_threads))
+			wake_up(&desc->wait_for_threads);
+	}
+
+	/*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * fuzz about an active irq thread going into nirvana.
+	 */
+	current->irqaction = NULL;
+	return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+	struct task_struct *tsk = current;
+
+	if (!tsk->irqaction)
+		return;
+
+	printk(KERN_ERR
+	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+	/*
+	 * Set the THREAD DIED flag to prevent further wakeups of the
+	 * soon to be gone threaded handler.
+	 */
+	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -420,6 +530,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 	/*
+	 * Threaded handler ?
+	 */
+	if (new->thread_fn) {
+		struct task_struct *t;
+
+		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+				   new->name);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		/*
+		 * We keep the reference to the task struct even if
+		 * the thread dies to avoid that the interrupt code
+		 * references an already freed task_struct.
+		 */
+		get_task_struct(t);
+		new->thread = t;
+		wake_up_process(t);
+	}
+
+	/*
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
@@ -456,15 +586,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
+		init_waitqueue_head(&desc->wait_for_threads);
+
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc, irq,
 					new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret) {
-				spin_unlock_irqrestore(&desc->lock, flags);
-				return ret;
-			}
+			if (ret)
+				goto out_thread;
 		} else
 			compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -532,8 +662,19 @@ mismatch:
 		dump_stack();
 	}
 #endif
+	ret = -EBUSY;
+
+out_thread:
 	spin_unlock_irqrestore(&desc->lock, flags);
-	return -EBUSY;
+	if (new->thread) {
+		struct task_struct *t = new->thread;
+
+		new->thread = NULL;
+		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+			kthread_stop(t);
+		put_task_struct(t);
+	}
+	return ret;
 }
 
 /**
@@ -559,6 +700,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
+	struct task_struct *irqthread;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -605,6 +747,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		else
 			desc->chip->disable(irq);
 	}
+
+	irqthread = action->thread;
+	action->thread = NULL;
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -612,6 +758,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
+	if (irqthread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(irqthread);
+		put_task_struct(irqthread);
+	}
+
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -664,9 +816,12 @@ void free_irq(unsigned int irq, void *dev_id)
 EXPORT_SYMBOL(free_irq);
 
 /**
- *	request_irq - allocate an interrupt line
+ *	request_threaded_irq - allocate an interrupt line
  *	@irq: Interrupt line to allocate
- *	@handler: Function to be called when the IRQ occurs
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Primary handler for threaded interrupts
+ *	@thread_fn: Function called from the irq handler thread
+ *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
@@ -678,6 +833,15 @@ EXPORT_SYMBOL(free_irq);
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
+ *	If you want to set up a threaded irq handler for your device
+ *	then you need to supply @handler and @thread_fn. @handler is
+ *	still called in hard interrupt context and has to check
+ *	whether the interrupt originates from the device. If yes it
+ *	needs to disable the interrupt on the device and return
+ *	IRQ_WAKE_THREAD, which will wake up the handler thread and run
+ *	@thread_fn. This split handler design is necessary to support
+ *	shared interrupts.
+ *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
@@ -693,8 +857,9 @@ EXPORT_SYMBOL(free_irq);
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
-int request_irq(unsigned int irq, irq_handler_t handler,
-		unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+			 irq_handler_t thread_fn, unsigned long irqflags,
+			 const char *devname, void *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -742,6 +907,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 		return -ENOMEM;
 
 	action->handler = handler;
+	action->thread_fn = thread_fn;
 	action->flags = irqflags;
 	action->name = devname;
 	action->dev_id = dev_id;
@@ -771,4 +937,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
 	return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
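A matching teardown sketch, continuing the hypothetical foo_dev example from
above: with this patch, free_irq() implicitly waits for the threaded handler
as well, because synchronize_irq() now blocks until threads_active drops to
zero, after which __free_irq() stops and releases the handler thread.

	/* Continues the hypothetical foo_dev example -- not part of this patch */
	static void foo_teardown_irq(struct foo_dev *foo)
	{
		/*
		 * Quiesce the device first so the hard-irq handler stops
		 * returning IRQ_WAKE_THREAD for this line.
		 */
		writel(0, foo->regs + FOO_IRQ_MASK);

		/*
		 * free_irq() runs synchronize_irq(), which waits for both
		 * the hard-irq handler and the handler thread to finish,
		 * then kthread_stop()s the thread and drops the
		 * task_struct reference taken in __setup_irq().
		 */
		free_irq(foo->irq, foo);
	}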