diff options
| -rw-r--r-- | include/linux/hardirq.h | 2 | ||||
| -rw-r--r-- | include/linux/interrupt.h | 75 | ||||
| -rw-r--r-- | include/linux/irq.h | 5 | ||||
| -rw-r--r-- | include/linux/irqreturn.h | 2 | ||||
| -rw-r--r-- | include/linux/sched.h | 5 | ||||
| -rw-r--r-- | kernel/exit.c | 2 | ||||
| -rw-r--r-- | kernel/irq/devres.c | 16 | ||||
| -rw-r--r-- | kernel/irq/handle.c | 50 | ||||
| -rw-r--r-- | kernel/irq/manage.c | 189 |
9 files changed, 320 insertions, 26 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index faa1cf848bcd..45257475623c 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -116,7 +116,7 @@ | |||
| 116 | # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET | 116 | # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET |
| 117 | #endif | 117 | #endif |
| 118 | 118 | ||
| 119 | #ifdef CONFIG_SMP | 119 | #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) |
| 120 | extern void synchronize_irq(unsigned int irq); | 120 | extern void synchronize_irq(unsigned int irq); |
| 121 | #else | 121 | #else |
| 122 | # define synchronize_irq(irq) barrier() | 122 | # define synchronize_irq(irq) barrier() |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index ce2c07d99fc3..675727fb4b47 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -59,6 +59,18 @@ | |||
| 59 | #define IRQF_NOBALANCING 0x00000800 | 59 | #define IRQF_NOBALANCING 0x00000800 |
| 60 | #define IRQF_IRQPOLL 0x00001000 | 60 | #define IRQF_IRQPOLL 0x00001000 |
| 61 | 61 | ||
| 62 | /* | ||
| 63 | * Bits used by threaded handlers: | ||
| 64 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
| 65 | * IRQTF_DIED - handler thread died | ||
| 66 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
| 67 | */ | ||
| 68 | enum { | ||
| 69 | IRQTF_RUNTHREAD, | ||
| 70 | IRQTF_DIED, | ||
| 71 | IRQTF_WARNED, | ||
| 72 | }; | ||
| 73 | |||
| 62 | typedef irqreturn_t (*irq_handler_t)(int, void *); | 74 | typedef irqreturn_t (*irq_handler_t)(int, void *); |
| 63 | 75 | ||
| 64 | /** | 76 | /** |
| @@ -71,6 +83,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
| 71 | * @next: pointer to the next irqaction for shared interrupts | 83 | * @next: pointer to the next irqaction for shared interrupts |
| 72 | * @irq: interrupt number | 84 | * @irq: interrupt number |
| 73 | * @dir: pointer to the proc/irq/NN/name entry | 85 | * @dir: pointer to the proc/irq/NN/name entry |
| 86 | * @thread_fn: interrupt handler function for threaded interrupts | ||
| 87 | * @thread: thread pointer for threaded interrupts | ||
| 88 | * @thread_flags: flags related to @thread | ||
| 74 | */ | 89 | */ |
| 75 | struct irqaction { | 90 | struct irqaction { |
| 76 | irq_handler_t handler; | 91 | irq_handler_t handler; |
| @@ -81,18 +96,68 @@ struct irqaction { | |||
| 81 | struct irqaction *next; | 96 | struct irqaction *next; |
| 82 | int irq; | 97 | int irq; |
| 83 | struct proc_dir_entry *dir; | 98 | struct proc_dir_entry *dir; |
| 99 | irq_handler_t thread_fn; | ||
| 100 | struct task_struct *thread; | ||
| 101 | unsigned long thread_flags; | ||
| 84 | }; | 102 | }; |
| 85 | 103 | ||
| 86 | extern irqreturn_t no_action(int cpl, void *dev_id); | 104 | extern irqreturn_t no_action(int cpl, void *dev_id); |
| 87 | extern int __must_check request_irq(unsigned int, irq_handler_t handler, | 105 | |
| 88 | unsigned long, const char *, void *); | 106 | #ifdef CONFIG_GENERIC_HARDIRQS |
| 107 | extern int __must_check | ||
| 108 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | ||
| 109 | irq_handler_t thread_fn, | ||
| 110 | unsigned long flags, const char *name, void *dev); | ||
| 111 | |||
| 112 | static inline int __must_check | ||
| 113 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
| 114 | const char *name, void *dev) | ||
| 115 | { | ||
| 116 | return request_threaded_irq(irq, handler, NULL, flags, name, dev); | ||
| 117 | } | ||
| 118 | |||
| 119 | extern void exit_irq_thread(void); | ||
| 120 | #else | ||
| 121 | |||
| 122 | extern int __must_check | ||
| 123 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
| 124 | const char *name, void *dev); | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Special function to avoid ifdeffery in kernel/irq/devres.c which | ||
| 128 | * gets magically built by GENERIC_HARDIRQS=n architectures (sparc, | ||
| 129 | * m68k). I really love these $@%#!* obvious Makefile references: | ||
| 130 | * ../../../kernel/irq/devres.o | ||
| 131 | */ | ||
| 132 | static inline int __must_check | ||
| 133 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | ||
| 134 | irq_handler_t thread_fn, | ||
| 135 | unsigned long flags, const char *name, void *dev) | ||
| 136 | { | ||
| 137 | return request_irq(irq, handler, flags, name, dev); | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void exit_irq_thread(void) { } | ||
| 141 | #endif | ||
| 142 | |||
| 89 | extern void free_irq(unsigned int, void *); | 143 | extern void free_irq(unsigned int, void *); |
| 90 | 144 | ||
| 91 | struct device; | 145 | struct device; |
| 92 | 146 | ||
| 93 | extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, | 147 | extern int __must_check |
| 94 | irq_handler_t handler, unsigned long irqflags, | 148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, |
| 95 | const char *devname, void *dev_id); | 149 | irq_handler_t handler, irq_handler_t thread_fn, |
| 150 | unsigned long irqflags, const char *devname, | ||
| 151 | void *dev_id); | ||
| 152 | |||
| 153 | static inline int __must_check | ||
| 154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | ||
| 155 | unsigned long irqflags, const char *devname, void *dev_id) | ||
| 156 | { | ||
| 157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, | ||
| 158 | devname, dev_id); | ||
| 159 | } | ||
| 160 | |||
| 96 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | 161 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); |
| 97 | 162 | ||
| 98 | /* | 163 | /* |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 974890b3c52f..ca507c9426b0 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/irqnr.h> | 22 | #include <linux/irqnr.h> |
| 23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
| 24 | #include <linux/topology.h> | 24 | #include <linux/topology.h> |
| 25 | #include <linux/wait.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
| 27 | #include <asm/ptrace.h> | 28 | #include <asm/ptrace.h> |
| @@ -158,6 +159,8 @@ struct irq_2_iommu; | |||
| 158 | * @affinity: IRQ affinity on SMP | 159 | * @affinity: IRQ affinity on SMP |
| 159 | * @cpu: cpu index useful for balancing | 160 | * @cpu: cpu index useful for balancing |
| 160 | * @pending_mask: pending rebalanced interrupts | 161 | * @pending_mask: pending rebalanced interrupts |
| 162 | * @threads_active: number of irqaction threads currently running | ||
| 163 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
| 161 | * @dir: /proc/irq/ procfs entry | 164 | * @dir: /proc/irq/ procfs entry |
| 162 | * @name: flow handler name for /proc/interrupts output | 165 | * @name: flow handler name for /proc/interrupts output |
| 163 | */ | 166 | */ |
| @@ -189,6 +192,8 @@ struct irq_desc { | |||
| 189 | cpumask_var_t pending_mask; | 192 | cpumask_var_t pending_mask; |
| 190 | #endif | 193 | #endif |
| 191 | #endif | 194 | #endif |
| 195 | atomic_t threads_active; | ||
| 196 | wait_queue_head_t wait_for_threads; | ||
| 192 | #ifdef CONFIG_PROC_FS | 197 | #ifdef CONFIG_PROC_FS |
| 193 | struct proc_dir_entry *dir; | 198 | struct proc_dir_entry *dir; |
| 194 | #endif | 199 | #endif |
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index c5584ca5b8c9..819acaaac3f5 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h | |||
| @@ -5,10 +5,12 @@ | |||
| 5 | * enum irqreturn | 5 | * enum irqreturn |
| 6 | * @IRQ_NONE interrupt was not from this device | 6 | * @IRQ_NONE interrupt was not from this device |
| 7 | * @IRQ_HANDLED interrupt was handled by this device | 7 | * @IRQ_HANDLED interrupt was handled by this device |
| 8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread | ||
| 8 | */ | 9 | */ |
| 9 | enum irqreturn { | 10 | enum irqreturn { |
| 10 | IRQ_NONE, | 11 | IRQ_NONE, |
| 11 | IRQ_HANDLED, | 12 | IRQ_HANDLED, |
| 13 | IRQ_WAKE_THREAD, | ||
| 12 | }; | 14 | }; |
| 13 | 15 | ||
| 14 | typedef enum irqreturn irqreturn_t; | 16 | typedef enum irqreturn irqreturn_t; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index b94f3541f67b..c96140210d1c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1294,6 +1294,11 @@ struct task_struct { | |||
| 1294 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ | 1294 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ |
| 1295 | spinlock_t alloc_lock; | 1295 | spinlock_t alloc_lock; |
| 1296 | 1296 | ||
| 1297 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 1298 | /* IRQ handler threads */ | ||
| 1299 | struct irqaction *irqaction; | ||
| 1300 | #endif | ||
| 1301 | |||
| 1297 | /* Protection of the PI data structures: */ | 1302 | /* Protection of the PI data structures: */ |
| 1298 | spinlock_t pi_lock; | 1303 | spinlock_t pi_lock; |
| 1299 | 1304 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 6686ed1e4aa3..789b8862fe3b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -924,6 +924,8 @@ NORET_TYPE void do_exit(long code) | |||
| 924 | schedule(); | 924 | schedule(); |
| 925 | } | 925 | } |
| 926 | 926 | ||
| 927 | exit_irq_thread(); | ||
| 928 | |||
| 927 | exit_signals(tsk); /* sets PF_EXITING */ | 929 | exit_signals(tsk); /* sets PF_EXITING */ |
| 928 | /* | 930 | /* |
| 929 | * tsk->flags are checked in the futex code to protect against | 931 | * tsk->flags are checked in the futex code to protect against |
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index 38a25b8d8bff..d06df9c41cba 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c | |||
| @@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data) | |||
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | /** | 28 | /** |
| 29 | * devm_request_irq - allocate an interrupt line for a managed device | 29 | * devm_request_threaded_irq - allocate an interrupt line for a managed device |
| 30 | * @dev: device to request interrupt for | 30 | * @dev: device to request interrupt for |
| 31 | * @irq: Interrupt line to allocate | 31 | * @irq: Interrupt line to allocate |
| 32 | * @handler: Function to be called when the IRQ occurs | 32 | * @handler: Function to be called when the IRQ occurs |
| 33 | * @thread_fn: function to be called in a threaded interrupt context. NULL | ||
| 34 | * for devices which handle everything in @handler | ||
| 33 | * @irqflags: Interrupt type flags | 35 | * @irqflags: Interrupt type flags |
| 34 | * @devname: An ascii name for the claiming device | 36 | * @devname: An ascii name for the claiming device |
| 35 | * @dev_id: A cookie passed back to the handler function | 37 | * @dev_id: A cookie passed back to the handler function |
| @@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data) | |||
| 42 | * If an IRQ allocated with this function needs to be freed | 44 | * If an IRQ allocated with this function needs to be freed |
| 43 | * separately, dev_free_irq() must be used. | 45 | * separately, dev_free_irq() must be used. |
| 44 | */ | 46 | */ |
| 45 | int devm_request_irq(struct device *dev, unsigned int irq, | 47 | int devm_request_threaded_irq(struct device *dev, unsigned int irq, |
| 46 | irq_handler_t handler, unsigned long irqflags, | 48 | irq_handler_t handler, irq_handler_t thread_fn, |
| 47 | const char *devname, void *dev_id) | 49 | unsigned long irqflags, const char *devname, |
| 50 | void *dev_id) | ||
| 48 | { | 51 | { |
| 49 | struct irq_devres *dr; | 52 | struct irq_devres *dr; |
| 50 | int rc; | 53 | int rc; |
| @@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq, | |||
| 54 | if (!dr) | 57 | if (!dr) |
| 55 | return -ENOMEM; | 58 | return -ENOMEM; |
| 56 | 59 | ||
| 57 | rc = request_irq(irq, handler, irqflags, devname, dev_id); | 60 | rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname, |
| 61 | dev_id); | ||
| 58 | if (rc) { | 62 | if (rc) { |
| 59 | devres_free(dr); | 63 | devres_free(dr); |
| 60 | return rc; | 64 | return rc; |
| @@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq, | |||
| 66 | 70 | ||
| 67 | return 0; | 71 | return 0; |
| 68 | } | 72 | } |
| 69 | EXPORT_SYMBOL(devm_request_irq); | 73 | EXPORT_SYMBOL(devm_request_threaded_irq); |
| 70 | 74 | ||
| 71 | /** | 75 | /** |
| 72 | * devm_free_irq - free an interrupt | 76 | * devm_free_irq - free an interrupt |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 343acecae629..d82142be8dd2 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -339,6 +339,15 @@ irqreturn_t no_action(int cpl, void *dev_id) | |||
| 339 | return IRQ_NONE; | 339 | return IRQ_NONE; |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static void warn_no_thread(unsigned int irq, struct irqaction *action) | ||
| 343 | { | ||
| 344 | if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags)) | ||
| 345 | return; | ||
| 346 | |||
| 347 | printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD " | ||
| 348 | "but no thread function available.", irq, action->name); | ||
| 349 | } | ||
| 350 | |||
| 342 | DEFINE_TRACE(irq_handler_entry); | 351 | DEFINE_TRACE(irq_handler_entry); |
| 343 | DEFINE_TRACE(irq_handler_exit); | 352 | DEFINE_TRACE(irq_handler_exit); |
| 344 | 353 | ||
| @@ -363,8 +372,47 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | |||
| 363 | trace_irq_handler_entry(irq, action); | 372 | trace_irq_handler_entry(irq, action); |
| 364 | ret = action->handler(irq, action->dev_id); | 373 | ret = action->handler(irq, action->dev_id); |
| 365 | trace_irq_handler_exit(irq, action, ret); | 374 | trace_irq_handler_exit(irq, action, ret); |
| 366 | if (ret == IRQ_HANDLED) | 375 | |
| 376 | switch (ret) { | ||
| 377 | case IRQ_WAKE_THREAD: | ||
| 378 | /* | ||
| 379 | * Set result to handled so the spurious check | ||
| 380 | * does not trigger. | ||
| 381 | */ | ||
| 382 | ret = IRQ_HANDLED; | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Catch drivers which return WAKE_THREAD but | ||
| 386 | * did not set up a thread function | ||
| 387 | */ | ||
| 388 | if (unlikely(!action->thread_fn)) { | ||
| 389 | warn_no_thread(irq, action); | ||
| 390 | break; | ||
| 391 | } | ||
| 392 | |||
| 393 | /* | ||
| 394 | * Wake up the handler thread for this | ||
| 395 | * action. In case the thread crashed and was | ||
| 396 | * killed we just pretend that we handled the | ||
| 397 | * interrupt. The hardirq handler above has | ||
| 398 | * disabled the device interrupt, so no irq | ||
| 399 | * storm is lurking. | ||
| 400 | */ | ||
| 401 | if (likely(!test_bit(IRQTF_DIED, | ||
| 402 | &action->thread_flags))) { | ||
| 403 | set_bit(IRQTF_RUNTHREAD, &action->thread_flags); | ||
| 404 | wake_up_process(action->thread); | ||
| 405 | } | ||
| 406 | |||
| 407 | /* Fall through to add to randomness */ | ||
| 408 | case IRQ_HANDLED: | ||
| 367 | status |= action->flags; | 409 | status |= action->flags; |
| 410 | break; | ||
| 411 | |||
| 412 | default: | ||
| 413 | break; | ||
| 414 | } | ||
| 415 | |||
| 368 | retval |= ret; | 416 | retval |= ret; |
| 369 | action = action->next; | 417 | action = action->next; |
| 370 | } while (action); | 418 | } while (action); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 1516ab77355c..7e2e7dd4cd2f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -8,16 +8,15 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
| 11 | #include <linux/kthread.h> | ||
| 11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 12 | #include <linux/random.h> | 13 | #include <linux/random.h> |
| 13 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/sched.h> | ||
| 15 | 17 | ||
| 16 | #include "internals.h" | 18 | #include "internals.h" |
| 17 | 19 | ||
| 18 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
| 19 | cpumask_var_t irq_default_affinity; | ||
| 20 | |||
| 21 | /** | 20 | /** |
| 22 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
| 23 | * @irq: interrupt number to wait for | 22 | * @irq: interrupt number to wait for |
| @@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq) | |||
| 53 | 52 | ||
| 54 | /* Oops, that failed? */ | 53 | /* Oops, that failed? */ |
| 55 | } while (status & IRQ_INPROGRESS); | 54 | } while (status & IRQ_INPROGRESS); |
| 55 | |||
| 56 | /* | ||
| 57 | * We made sure that no hardirq handler is running. Now verify | ||
| 58 | * that no threaded handlers are active. | ||
| 59 | */ | ||
| 60 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | ||
| 56 | } | 61 | } |
| 57 | EXPORT_SYMBOL(synchronize_irq); | 62 | EXPORT_SYMBOL(synchronize_irq); |
| 58 | 63 | ||
| 64 | #ifdef CONFIG_SMP | ||
| 65 | cpumask_var_t irq_default_affinity; | ||
| 66 | |||
| 59 | /** | 67 | /** |
| 60 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | 68 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
| 61 | * @irq: Interrupt to check | 69 | * @irq: Interrupt to check |
| @@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq) | |||
| 72 | return 1; | 80 | return 1; |
| 73 | } | 81 | } |
| 74 | 82 | ||
| 83 | static void | ||
| 84 | irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) | ||
| 85 | { | ||
| 86 | struct irqaction *action = desc->action; | ||
| 87 | |||
| 88 | while (action) { | ||
| 89 | if (action->thread) | ||
| 90 | set_cpus_allowed_ptr(action->thread, cpumask); | ||
| 91 | action = action->next; | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
| 75 | /** | 95 | /** |
| 76 | * irq_set_affinity - Set the irq affinity of a given irq | 96 | * irq_set_affinity - Set the irq affinity of a given irq |
| 77 | * @irq: Interrupt to set affinity | 97 | * @irq: Interrupt to set affinity |
| @@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 100 | cpumask_copy(desc->affinity, cpumask); | 120 | cpumask_copy(desc->affinity, cpumask); |
| 101 | desc->chip->set_affinity(irq, cpumask); | 121 | desc->chip->set_affinity(irq, cpumask); |
| 102 | #endif | 122 | #endif |
| 123 | irq_set_thread_affinity(desc, cpumask); | ||
| 103 | desc->status |= IRQ_AFFINITY_SET; | 124 | desc->status |= IRQ_AFFINITY_SET; |
| 104 | spin_unlock_irqrestore(&desc->lock, flags); | 125 | spin_unlock_irqrestore(&desc->lock, flags); |
| 105 | return 0; | 126 | return 0; |
| @@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq) | |||
| 150 | 171 | ||
| 151 | spin_lock_irqsave(&desc->lock, flags); | 172 | spin_lock_irqsave(&desc->lock, flags); |
| 152 | ret = setup_affinity(irq, desc); | 173 | ret = setup_affinity(irq, desc); |
| 174 | if (!ret) | ||
| 175 | irq_set_thread_affinity(desc, desc->affinity); | ||
| 153 | spin_unlock_irqrestore(&desc->lock, flags); | 176 | spin_unlock_irqrestore(&desc->lock, flags); |
| 154 | 177 | ||
| 155 | return ret; | 178 | return ret; |
| @@ -401,6 +424,90 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 401 | return ret; | 424 | return ret; |
| 402 | } | 425 | } |
| 403 | 426 | ||
| 427 | static int irq_wait_for_interrupt(struct irqaction *action) | ||
| 428 | { | ||
| 429 | while (!kthread_should_stop()) { | ||
| 430 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 431 | |||
| 432 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | ||
| 433 | &action->thread_flags)) { | ||
| 434 | __set_current_state(TASK_RUNNING); | ||
| 435 | return 0; | ||
| 436 | } | ||
| 437 | schedule(); | ||
| 438 | } | ||
| 439 | return -1; | ||
| 440 | } | ||
| 441 | |||
| 442 | /* | ||
| 443 | * Interrupt handler thread | ||
| 444 | */ | ||
| 445 | static int irq_thread(void *data) | ||
| 446 | { | ||
| 447 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; | ||
| 448 | struct irqaction *action = data; | ||
| 449 | struct irq_desc *desc = irq_to_desc(action->irq); | ||
| 450 | int wake; | ||
| 451 | |||
| 452 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
| 453 | current->irqaction = action; | ||
| 454 | |||
| 455 | while (!irq_wait_for_interrupt(action)) { | ||
| 456 | |||
| 457 | atomic_inc(&desc->threads_active); | ||
| 458 | |||
| 459 | spin_lock_irq(&desc->lock); | ||
| 460 | if (unlikely(desc->status & IRQ_DISABLED)) { | ||
| 461 | /* | ||
| 462 | * CHECKME: We might need a dedicated | ||
| 463 | * IRQ_THREAD_PENDING flag here, which | ||
| 464 | * retriggers the thread in check_irq_resend() | ||
| 465 | * but AFAICT IRQ_PENDING should be fine as it | ||
| 466 | * retriggers the interrupt itself --- tglx | ||
| 467 | */ | ||
| 468 | desc->status |= IRQ_PENDING; | ||
| 469 | spin_unlock_irq(&desc->lock); | ||
| 470 | } else { | ||
| 471 | spin_unlock_irq(&desc->lock); | ||
| 472 | |||
| 473 | action->thread_fn(action->irq, action->dev_id); | ||
| 474 | } | ||
| 475 | |||
| 476 | wake = atomic_dec_and_test(&desc->threads_active); | ||
| 477 | |||
| 478 | if (wake && waitqueue_active(&desc->wait_for_threads)) | ||
| 479 | wake_up(&desc->wait_for_threads); | ||
| 480 | } | ||
| 481 | |||
| 482 | /* | ||
| 483 | * Clear irqaction. Otherwise exit_irq_thread() would make | ||
| 484 | * fuzz about an active irq thread going into nirvana. | ||
| 485 | */ | ||
| 486 | current->irqaction = NULL; | ||
| 487 | return 0; | ||
| 488 | } | ||
| 489 | |||
| 490 | /* | ||
| 491 | * Called from do_exit() | ||
| 492 | */ | ||
| 493 | void exit_irq_thread(void) | ||
| 494 | { | ||
| 495 | struct task_struct *tsk = current; | ||
| 496 | |||
| 497 | if (!tsk->irqaction) | ||
| 498 | return; | ||
| 499 | |||
| 500 | printk(KERN_ERR | ||
| 501 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | ||
| 502 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | ||
| 503 | |||
| 504 | /* | ||
| 505 | * Set the THREAD DIED flag to prevent further wakeups of the | ||
| 506 | * soon to be gone threaded handler. | ||
| 507 | */ | ||
| 508 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | ||
| 509 | } | ||
| 510 | |||
| 404 | /* | 511 | /* |
| 405 | * Internal function to register an irqaction - typically used to | 512 | * Internal function to register an irqaction - typically used to |
| 406 | * allocate special interrupts that are part of the architecture. | 513 | * allocate special interrupts that are part of the architecture. |
| @@ -437,6 +544,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 437 | } | 544 | } |
| 438 | 545 | ||
| 439 | /* | 546 | /* |
| 547 | * Threaded handler ? | ||
| 548 | */ | ||
| 549 | if (new->thread_fn) { | ||
| 550 | struct task_struct *t; | ||
| 551 | |||
| 552 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | ||
| 553 | new->name); | ||
| 554 | if (IS_ERR(t)) | ||
| 555 | return PTR_ERR(t); | ||
| 556 | /* | ||
| 557 | * We keep the reference to the task struct even if | ||
| 558 | * the thread dies to avoid that the interrupt code | ||
| 559 | * references an already freed task_struct. | ||
| 560 | */ | ||
| 561 | get_task_struct(t); | ||
| 562 | new->thread = t; | ||
| 563 | wake_up_process(t); | ||
| 564 | } | ||
| 565 | |||
| 566 | /* | ||
| 440 | * The following block of code has to be executed atomically | 567 | * The following block of code has to be executed atomically |
| 441 | */ | 568 | */ |
| 442 | spin_lock_irqsave(&desc->lock, flags); | 569 | spin_lock_irqsave(&desc->lock, flags); |
| @@ -473,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 473 | if (!shared) { | 600 | if (!shared) { |
| 474 | irq_chip_set_defaults(desc->chip); | 601 | irq_chip_set_defaults(desc->chip); |
| 475 | 602 | ||
| 603 | init_waitqueue_head(&desc->wait_for_threads); | ||
| 604 | |||
| 476 | /* Setup the type (level, edge polarity) if configured: */ | 605 | /* Setup the type (level, edge polarity) if configured: */ |
| 477 | if (new->flags & IRQF_TRIGGER_MASK) { | 606 | if (new->flags & IRQF_TRIGGER_MASK) { |
| 478 | ret = __irq_set_trigger(desc, irq, | 607 | ret = __irq_set_trigger(desc, irq, |
| 479 | new->flags & IRQF_TRIGGER_MASK); | 608 | new->flags & IRQF_TRIGGER_MASK); |
| 480 | 609 | ||
| 481 | if (ret) { | 610 | if (ret) |
| 482 | spin_unlock_irqrestore(&desc->lock, flags); | 611 | goto out_thread; |
| 483 | return ret; | ||
| 484 | } | ||
| 485 | } else | 612 | } else |
| 486 | compat_irq_chip_set_default_handler(desc); | 613 | compat_irq_chip_set_default_handler(desc); |
| 487 | #if defined(CONFIG_IRQ_PER_CPU) | 614 | #if defined(CONFIG_IRQ_PER_CPU) |
| @@ -549,8 +676,19 @@ mismatch: | |||
| 549 | dump_stack(); | 676 | dump_stack(); |
| 550 | } | 677 | } |
| 551 | #endif | 678 | #endif |
| 679 | ret = -EBUSY; | ||
| 680 | |||
| 681 | out_thread: | ||
| 552 | spin_unlock_irqrestore(&desc->lock, flags); | 682 | spin_unlock_irqrestore(&desc->lock, flags); |
| 553 | return -EBUSY; | 683 | if (new->thread) { |
| 684 | struct task_struct *t = new->thread; | ||
| 685 | |||
| 686 | new->thread = NULL; | ||
| 687 | if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) | ||
| 688 | kthread_stop(t); | ||
| 689 | put_task_struct(t); | ||
| 690 | } | ||
| 691 | return ret; | ||
| 554 | } | 692 | } |
| 555 | 693 | ||
| 556 | /** | 694 | /** |
| @@ -576,6 +714,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 576 | { | 714 | { |
| 577 | struct irq_desc *desc = irq_to_desc(irq); | 715 | struct irq_desc *desc = irq_to_desc(irq); |
| 578 | struct irqaction *action, **action_ptr; | 716 | struct irqaction *action, **action_ptr; |
| 717 | struct task_struct *irqthread; | ||
| 579 | unsigned long flags; | 718 | unsigned long flags; |
| 580 | 719 | ||
| 581 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 720 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
| @@ -622,6 +761,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 622 | else | 761 | else |
| 623 | desc->chip->disable(irq); | 762 | desc->chip->disable(irq); |
| 624 | } | 763 | } |
| 764 | |||
| 765 | irqthread = action->thread; | ||
| 766 | action->thread = NULL; | ||
| 767 | |||
| 625 | spin_unlock_irqrestore(&desc->lock, flags); | 768 | spin_unlock_irqrestore(&desc->lock, flags); |
| 626 | 769 | ||
| 627 | unregister_handler_proc(irq, action); | 770 | unregister_handler_proc(irq, action); |
| @@ -629,6 +772,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 629 | /* Make sure it's not being used on another CPU: */ | 772 | /* Make sure it's not being used on another CPU: */ |
| 630 | synchronize_irq(irq); | 773 | synchronize_irq(irq); |
| 631 | 774 | ||
| 775 | if (irqthread) { | ||
| 776 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
| 777 | kthread_stop(irqthread); | ||
| 778 | put_task_struct(irqthread); | ||
| 779 | } | ||
| 780 | |||
| 632 | #ifdef CONFIG_DEBUG_SHIRQ | 781 | #ifdef CONFIG_DEBUG_SHIRQ |
| 633 | /* | 782 | /* |
| 634 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | 783 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
| @@ -681,9 +830,12 @@ void free_irq(unsigned int irq, void *dev_id) | |||
| 681 | EXPORT_SYMBOL(free_irq); | 830 | EXPORT_SYMBOL(free_irq); |
| 682 | 831 | ||
| 683 | /** | 832 | /** |
| 684 | * request_irq - allocate an interrupt line | 833 | * request_threaded_irq - allocate an interrupt line |
| 685 | * @irq: Interrupt line to allocate | 834 | * @irq: Interrupt line to allocate |
| 686 | * @handler: Function to be called when the IRQ occurs | 835 | * @handler: Function to be called when the IRQ occurs. |
| 836 | * Primary handler for threaded interrupts | ||
| 837 | * @thread_fn: Function called from the irq handler thread | ||
| 838 | * If NULL, no irq thread is created | ||
| 687 | * @irqflags: Interrupt type flags | 839 | * @irqflags: Interrupt type flags |
| 688 | * @devname: An ascii name for the claiming device | 840 | * @devname: An ascii name for the claiming device |
| 689 | * @dev_id: A cookie passed back to the handler function | 841 | * @dev_id: A cookie passed back to the handler function |
| @@ -695,6 +847,15 @@ EXPORT_SYMBOL(free_irq); | |||
| 695 | * raises, you must take care both to initialise your hardware | 847 | * raises, you must take care both to initialise your hardware |
| 696 | * and to set up the interrupt handler in the right order. | 848 | * and to set up the interrupt handler in the right order. |
| 697 | * | 849 | * |
| 850 | * If you want to set up a threaded irq handler for your device | ||
| 851 | * then you need to supply @handler and @thread_fn. @handler is | ||
| 852 | * still called in hard interrupt context and has to check | ||
| 853 | * whether the interrupt originates from the device. If yes it | ||
| 854 | * needs to disable the interrupt on the device and return | ||
| 855 | * IRQ_WAKE_THREAD which will wake up the handler thread and run | ||
| 856 | * @thread_fn. This split handler design is necessary to support | ||
| 857 | * shared interrupts. | ||
| 858 | * | ||
| 698 | * Dev_id must be globally unique. Normally the address of the | 859 | * Dev_id must be globally unique. Normally the address of the |
| 699 | * device data structure is used as the cookie. Since the handler | 860 | * device data structure is used as the cookie. Since the handler |
| 700 | * receives this value it makes sense to use it. | 861 | * receives this value it makes sense to use it. |
| @@ -710,8 +871,9 @@ EXPORT_SYMBOL(free_irq); | |||
| 710 | * IRQF_TRIGGER_* Specify active edge(s) or level | 871 | * IRQF_TRIGGER_* Specify active edge(s) or level |
| 711 | * | 872 | * |
| 712 | */ | 873 | */ |
| 713 | int request_irq(unsigned int irq, irq_handler_t handler, | 874 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
| 714 | unsigned long irqflags, const char *devname, void *dev_id) | 875 | irq_handler_t thread_fn, unsigned long irqflags, |
| 876 | const char *devname, void *dev_id) | ||
| 715 | { | 877 | { |
| 716 | struct irqaction *action; | 878 | struct irqaction *action; |
| 717 | struct irq_desc *desc; | 879 | struct irq_desc *desc; |
| @@ -759,6 +921,7 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
| 759 | return -ENOMEM; | 921 | return -ENOMEM; |
| 760 | 922 | ||
| 761 | action->handler = handler; | 923 | action->handler = handler; |
| 924 | action->thread_fn = thread_fn; | ||
| 762 | action->flags = irqflags; | 925 | action->flags = irqflags; |
| 763 | action->name = devname; | 926 | action->name = devname; |
| 764 | action->dev_id = dev_id; | 927 | action->dev_id = dev_id; |
| @@ -788,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
| 788 | #endif | 951 | #endif |
| 789 | return retval; | 952 | return retval; |
| 790 | } | 953 | } |
| 791 | EXPORT_SYMBOL(request_irq); | 954 | EXPORT_SYMBOL(request_threaded_irq); |
