Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r-- | kernel/irq/manage.c | 424
1 file changed, 315 insertions, 109 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..7e2e7dd4cd2f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <linux/kthread.h> | ||
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/random.h> | 13 | #include <linux/random.h> |
13 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/sched.h> | ||
15 | 17 | ||
16 | #include "internals.h" | 18 | #include "internals.h" |
17 | 19 | ||
18 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
19 | cpumask_var_t irq_default_affinity; | ||
20 | |||
21 | /** | 20 | /** |
22 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
23 | * @irq: interrupt number to wait for | 22 | * @irq: interrupt number to wait for |
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq) | |||
53 | 52 | ||
54 | /* Oops, that failed? */ | 53 | /* Oops, that failed? */ |
55 | } while (status & IRQ_INPROGRESS); | 54 | } while (status & IRQ_INPROGRESS); |
55 | |||
56 | /* | ||
57 | * We made sure that no hardirq handler is running. Now verify | ||
58 | * that no threaded handlers are active. | ||
59 | */ | ||
60 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | ||
56 | } | 61 | } |
57 | EXPORT_SYMBOL(synchronize_irq); | 62 | EXPORT_SYMBOL(synchronize_irq); |
58 | 63 | ||
64 | #ifdef CONFIG_SMP | ||
65 | cpumask_var_t irq_default_affinity; | ||
66 | |||
59 | /** | 67 | /** |
60 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | 68 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
61 | * @irq: Interrupt to check | 69 | * @irq: Interrupt to check |
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq) | |||
72 | return 1; | 80 | return 1; |
73 | } | 81 | } |
74 | 82 | ||
83 | static void | ||
84 | irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) | ||
85 | { | ||
86 | struct irqaction *action = desc->action; | ||
87 | |||
88 | while (action) { | ||
89 | if (action->thread) | ||
90 | set_cpus_allowed_ptr(action->thread, cpumask); | ||
91 | action = action->next; | ||
92 | } | ||
93 | } | ||
94 | |||
75 | /** | 95 | /** |
76 | * irq_set_affinity - Set the irq affinity of a given irq | 96 | * irq_set_affinity - Set the irq affinity of a given irq |
77 | * @irq: Interrupt to set affinity | 97 | * @irq: Interrupt to set affinity |
@@ -90,16 +110,17 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
90 | 110 | ||
91 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 111 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
92 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { | 112 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
93 | cpumask_copy(&desc->affinity, cpumask); | 113 | cpumask_copy(desc->affinity, cpumask); |
94 | desc->chip->set_affinity(irq, cpumask); | 114 | desc->chip->set_affinity(irq, cpumask); |
95 | } else { | 115 | } else { |
96 | desc->status |= IRQ_MOVE_PENDING; | 116 | desc->status |= IRQ_MOVE_PENDING; |
97 | cpumask_copy(&desc->pending_mask, cpumask); | 117 | cpumask_copy(desc->pending_mask, cpumask); |
98 | } | 118 | } |
99 | #else | 119 | #else |
100 | cpumask_copy(&desc->affinity, cpumask); | 120 | cpumask_copy(desc->affinity, cpumask); |
101 | desc->chip->set_affinity(irq, cpumask); | 121 | desc->chip->set_affinity(irq, cpumask); |
102 | #endif | 122 | #endif |
123 | irq_set_thread_affinity(desc, cpumask); | ||
103 | desc->status |= IRQ_AFFINITY_SET; | 124 | desc->status |= IRQ_AFFINITY_SET; |
104 | spin_unlock_irqrestore(&desc->lock, flags); | 125 | spin_unlock_irqrestore(&desc->lock, flags); |
105 | return 0; | 126 | return 0; |
@@ -109,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
109 | /* | 130 | /* |
110 | * Generic version of the affinity autoselector. | 131 | * Generic version of the affinity autoselector. |
111 | */ | 132 | */ |
112 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | 133 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) |
113 | { | 134 | { |
114 | if (!irq_can_set_affinity(irq)) | 135 | if (!irq_can_set_affinity(irq)) |
115 | return 0; | 136 | return 0; |
@@ -119,21 +140,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | |||
119 | * one of the targets is online. | 140 | * one of the targets is online. |
120 | */ | 141 | */ |
121 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 142 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
122 | if (cpumask_any_and(&desc->affinity, cpu_online_mask) | 143 | if (cpumask_any_and(desc->affinity, cpu_online_mask) |
123 | < nr_cpu_ids) | 144 | < nr_cpu_ids) |
124 | goto set_affinity; | 145 | goto set_affinity; |
125 | else | 146 | else |
126 | desc->status &= ~IRQ_AFFINITY_SET; | 147 | desc->status &= ~IRQ_AFFINITY_SET; |
127 | } | 148 | } |
128 | 149 | ||
129 | cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); | 150 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); |
130 | set_affinity: | 151 | set_affinity: |
131 | desc->chip->set_affinity(irq, &desc->affinity); | 152 | desc->chip->set_affinity(irq, desc->affinity); |
132 | 153 | ||
133 | return 0; | 154 | return 0; |
134 | } | 155 | } |
135 | #else | 156 | #else |
136 | static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) | 157 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) |
137 | { | 158 | { |
138 | return irq_select_affinity(irq); | 159 | return irq_select_affinity(irq); |
139 | } | 160 | } |
@@ -149,19 +170,35 @@ int irq_select_affinity_usr(unsigned int irq) | |||
149 | int ret; | 170 | int ret; |
150 | 171 | ||
151 | spin_lock_irqsave(&desc->lock, flags); | 172 | spin_lock_irqsave(&desc->lock, flags); |
152 | ret = do_irq_select_affinity(irq, desc); | 173 | ret = setup_affinity(irq, desc); |
174 | if (!ret) | ||
175 | irq_set_thread_affinity(desc, desc->affinity); | ||
153 | spin_unlock_irqrestore(&desc->lock, flags); | 176 | spin_unlock_irqrestore(&desc->lock, flags); |
154 | 177 | ||
155 | return ret; | 178 | return ret; |
156 | } | 179 | } |
157 | 180 | ||
158 | #else | 181 | #else |
159 | static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) | 182 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) |
160 | { | 183 | { |
161 | return 0; | 184 | return 0; |
162 | } | 185 | } |
163 | #endif | 186 | #endif |
164 | 187 | ||
188 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | ||
189 | { | ||
190 | if (suspend) { | ||
191 | if (!desc->action || (desc->action->flags & IRQF_TIMER)) | ||
192 | return; | ||
193 | desc->status |= IRQ_SUSPENDED; | ||
194 | } | ||
195 | |||
196 | if (!desc->depth++) { | ||
197 | desc->status |= IRQ_DISABLED; | ||
198 | desc->chip->disable(irq); | ||
199 | } | ||
200 | } | ||
201 | |||
165 | /** | 202 | /** |
166 | * disable_irq_nosync - disable an irq without waiting | 203 | * disable_irq_nosync - disable an irq without waiting |
167 | * @irq: Interrupt to disable | 204 | * @irq: Interrupt to disable |
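Editor's note: the new 'suspend' argument in __disable_irq() above lets the power-management code mask every line while keeping the driver-visible disable depth intact. A minimal sketch of a caller, assuming a companion suspend helper (e.g. a suspend_device_irqs() in kernel/irq/pm.c) that is not part of this diff:

	/*
	 * Sketch only: suspend_device_irqs() and its placement in
	 * kernel/irq/pm.c are assumptions, not part of this patch.
	 */
	void suspend_device_irqs(void)
	{
		struct irq_desc *desc;
		int irq;

		for_each_irq_desc(irq, desc) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			/* Marks the descriptor IRQ_SUSPENDED and masks it once. */
			__disable_irq(desc, irq, true);
			spin_unlock_irqrestore(&desc->lock, flags);
		}

		/* Wait for handlers (hard and threaded) that are still running. */
		for_each_irq_desc(irq, desc)
			if (desc->status & IRQ_SUSPENDED)
				synchronize_irq(irq);
	}

Timer interrupts are skipped by __disable_irq() itself (the IRQF_TIMER check above), so such a caller does not have to special-case them.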
@@ -182,10 +219,7 @@ void disable_irq_nosync(unsigned int irq) | |||
182 | return; | 219 | return; |
183 | 220 | ||
184 | spin_lock_irqsave(&desc->lock, flags); | 221 | spin_lock_irqsave(&desc->lock, flags); |
185 | if (!desc->depth++) { | 222 | __disable_irq(desc, irq, false); |
186 | desc->status |= IRQ_DISABLED; | ||
187 | desc->chip->disable(irq); | ||
188 | } | ||
189 | spin_unlock_irqrestore(&desc->lock, flags); | 223 | spin_unlock_irqrestore(&desc->lock, flags); |
190 | } | 224 | } |
191 | EXPORT_SYMBOL(disable_irq_nosync); | 225 | EXPORT_SYMBOL(disable_irq_nosync); |
@@ -215,15 +249,21 @@ void disable_irq(unsigned int irq) | |||
215 | } | 249 | } |
216 | EXPORT_SYMBOL(disable_irq); | 250 | EXPORT_SYMBOL(disable_irq); |
217 | 251 | ||
218 | static void __enable_irq(struct irq_desc *desc, unsigned int irq) | 252 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
219 | { | 253 | { |
254 | if (resume) | ||
255 | desc->status &= ~IRQ_SUSPENDED; | ||
256 | |||
220 | switch (desc->depth) { | 257 | switch (desc->depth) { |
221 | case 0: | 258 | case 0: |
259 | err_out: | ||
222 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 260 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
223 | break; | 261 | break; |
224 | case 1: { | 262 | case 1: { |
225 | unsigned int status = desc->status & ~IRQ_DISABLED; | 263 | unsigned int status = desc->status & ~IRQ_DISABLED; |
226 | 264 | ||
265 | if (desc->status & IRQ_SUSPENDED) | ||
266 | goto err_out; | ||
227 | /* Prevent probing on this irq: */ | 267 | /* Prevent probing on this irq: */ |
228 | desc->status = status | IRQ_NOPROBE; | 268 | desc->status = status | IRQ_NOPROBE; |
229 | check_irq_resend(desc, irq); | 269 | check_irq_resend(desc, irq); |
@@ -253,7 +293,7 @@ void enable_irq(unsigned int irq) | |||
253 | return; | 293 | return; |
254 | 294 | ||
255 | spin_lock_irqsave(&desc->lock, flags); | 295 | spin_lock_irqsave(&desc->lock, flags); |
256 | __enable_irq(desc, irq); | 296 | __enable_irq(desc, irq, false); |
257 | spin_unlock_irqrestore(&desc->lock, flags); | 297 | spin_unlock_irqrestore(&desc->lock, flags); |
258 | } | 298 | } |
259 | EXPORT_SYMBOL(enable_irq); | 299 | EXPORT_SYMBOL(enable_irq); |
@@ -384,14 +424,98 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
384 | return ret; | 424 | return ret; |
385 | } | 425 | } |
386 | 426 | ||
427 | static int irq_wait_for_interrupt(struct irqaction *action) | ||
428 | { | ||
429 | while (!kthread_should_stop()) { | ||
430 | set_current_state(TASK_INTERRUPTIBLE); | ||
431 | |||
432 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | ||
433 | &action->thread_flags)) { | ||
434 | __set_current_state(TASK_RUNNING); | ||
435 | return 0; | ||
436 | } | ||
437 | schedule(); | ||
438 | } | ||
439 | return -1; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Interrupt handler thread | ||
444 | */ | ||
445 | static int irq_thread(void *data) | ||
446 | { | ||
447 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; | ||
448 | struct irqaction *action = data; | ||
449 | struct irq_desc *desc = irq_to_desc(action->irq); | ||
450 | int wake; | ||
451 | |||
452 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
453 | current->irqaction = action; | ||
454 | |||
455 | while (!irq_wait_for_interrupt(action)) { | ||
456 | |||
457 | atomic_inc(&desc->threads_active); | ||
458 | |||
459 | spin_lock_irq(&desc->lock); | ||
460 | if (unlikely(desc->status & IRQ_DISABLED)) { | ||
461 | /* | ||
462 | * CHECKME: We might need a dedicated | ||
463 | * IRQ_THREAD_PENDING flag here, which | ||
464 | * retriggers the thread in check_irq_resend() | ||
465 | * but AFAICT IRQ_PENDING should be fine as it | ||
466 | * retriggers the interrupt itself --- tglx | ||
467 | */ | ||
468 | desc->status |= IRQ_PENDING; | ||
469 | spin_unlock_irq(&desc->lock); | ||
470 | } else { | ||
471 | spin_unlock_irq(&desc->lock); | ||
472 | |||
473 | action->thread_fn(action->irq, action->dev_id); | ||
474 | } | ||
475 | |||
476 | wake = atomic_dec_and_test(&desc->threads_active); | ||
477 | |||
478 | if (wake && waitqueue_active(&desc->wait_for_threads)) | ||
479 | wake_up(&desc->wait_for_threads); | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * Clear irqaction. Otherwise exit_irq_thread() would make | ||
484 | * fuzz about an active irq thread going into nirvana. | ||
485 | */ | ||
486 | current->irqaction = NULL; | ||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * Called from do_exit() | ||
492 | */ | ||
493 | void exit_irq_thread(void) | ||
494 | { | ||
495 | struct task_struct *tsk = current; | ||
496 | |||
497 | if (!tsk->irqaction) | ||
498 | return; | ||
499 | |||
500 | printk(KERN_ERR | ||
501 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | ||
502 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | ||
503 | |||
504 | /* | ||
505 | * Set the THREAD DIED flag to prevent further wakeups of the | ||
506 | * soon to be gone threaded handler. | ||
507 | */ | ||
508 | set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags); | ||
509 | } | ||
510 | |||
387 | /* | 511 | /* |
388 | * Internal function to register an irqaction - typically used to | 512 | * Internal function to register an irqaction - typically used to |
389 | * allocate special interrupts that are part of the architecture. | 513 | * allocate special interrupts that are part of the architecture. |
390 | */ | 514 | */ |
391 | static int | 515 | static int |
392 | __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | 516 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
393 | { | 517 | { |
394 | struct irqaction *old, **p; | 518 | struct irqaction *old, **old_ptr; |
395 | const char *old_name = NULL; | 519 | const char *old_name = NULL; |
396 | unsigned long flags; | 520 | unsigned long flags; |
397 | int shared = 0; | 521 | int shared = 0; |
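Editor's note: irq_wait_for_interrupt() above parks the handler thread until something sets IRQTF_RUNTHREAD and wakes it. That wake-up happens in the hard-interrupt flow, which is outside this file; a rough sketch of the counterpart, written here only to show the handshake:

	/*
	 * Sketch of the wake-up side (assumed to live in the hard-irq
	 * path, e.g. kernel/irq/handle.c); not part of this hunk.
	 */
	static void wake_irq_thread(struct irqaction *action)
	{
		/*
		 * IRQTF_DIED is set by exit_irq_thread() when the task
		 * exits; never wake a handler thread that is gone.
		 */
		if (test_bit(IRQTF_DIED, &action->thread_flags))
			return;

		set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
		wake_up_process(action->thread);
	}

The threads_active counter and desc->wait_for_threads queue used in irq_thread() are exactly what synchronize_irq() waits on in the first hunk of this patch.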
@@ -420,11 +544,31 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
420 | } | 544 | } |
421 | 545 | ||
422 | /* | 546 | /* |
547 | * Threaded handler ? | ||
548 | */ | ||
549 | if (new->thread_fn) { | ||
550 | struct task_struct *t; | ||
551 | |||
552 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | ||
553 | new->name); | ||
554 | if (IS_ERR(t)) | ||
555 | return PTR_ERR(t); | ||
556 | /* | ||
557 | * We keep the reference to the task struct even if | ||
558 | * the thread dies to avoid that the interrupt code | ||
559 | * references an already freed task_struct. | ||
560 | */ | ||
561 | get_task_struct(t); | ||
562 | new->thread = t; | ||
563 | wake_up_process(t); | ||
564 | } | ||
565 | |||
566 | /* | ||
423 | * The following block of code has to be executed atomically | 567 | * The following block of code has to be executed atomically |
424 | */ | 568 | */ |
425 | spin_lock_irqsave(&desc->lock, flags); | 569 | spin_lock_irqsave(&desc->lock, flags); |
426 | p = &desc->action; | 570 | old_ptr = &desc->action; |
427 | old = *p; | 571 | old = *old_ptr; |
428 | if (old) { | 572 | if (old) { |
429 | /* | 573 | /* |
430 | * Can't share interrupts unless both agree to and are | 574 | * Can't share interrupts unless both agree to and are |
@@ -447,8 +591,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
447 | 591 | ||
448 | /* add new interrupt at end of irq queue */ | 592 | /* add new interrupt at end of irq queue */ |
449 | do { | 593 | do { |
450 | p = &old->next; | 594 | old_ptr = &old->next; |
451 | old = *p; | 595 | old = *old_ptr; |
452 | } while (old); | 596 | } while (old); |
453 | shared = 1; | 597 | shared = 1; |
454 | } | 598 | } |
@@ -456,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
456 | if (!shared) { | 600 | if (!shared) { |
457 | irq_chip_set_defaults(desc->chip); | 601 | irq_chip_set_defaults(desc->chip); |
458 | 602 | ||
603 | init_waitqueue_head(&desc->wait_for_threads); | ||
604 | |||
459 | /* Setup the type (level, edge polarity) if configured: */ | 605 | /* Setup the type (level, edge polarity) if configured: */ |
460 | if (new->flags & IRQF_TRIGGER_MASK) { | 606 | if (new->flags & IRQF_TRIGGER_MASK) { |
461 | ret = __irq_set_trigger(desc, irq, | 607 | ret = __irq_set_trigger(desc, irq, |
462 | new->flags & IRQF_TRIGGER_MASK); | 608 | new->flags & IRQF_TRIGGER_MASK); |
463 | 609 | ||
464 | if (ret) { | 610 | if (ret) |
465 | spin_unlock_irqrestore(&desc->lock, flags); | 611 | goto out_thread; |
466 | return ret; | ||
467 | } | ||
468 | } else | 612 | } else |
469 | compat_irq_chip_set_default_handler(desc); | 613 | compat_irq_chip_set_default_handler(desc); |
470 | #if defined(CONFIG_IRQ_PER_CPU) | 614 | #if defined(CONFIG_IRQ_PER_CPU) |
@@ -488,7 +632,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
488 | desc->status |= IRQ_NO_BALANCING; | 632 | desc->status |= IRQ_NO_BALANCING; |
489 | 633 | ||
490 | /* Set default affinity mask once everything is setup */ | 634 | /* Set default affinity mask once everything is setup */ |
491 | do_irq_select_affinity(irq, desc); | 635 | setup_affinity(irq, desc); |
492 | 636 | ||
493 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 637 | } else if ((new->flags & IRQF_TRIGGER_MASK) |
494 | && (new->flags & IRQF_TRIGGER_MASK) | 638 | && (new->flags & IRQF_TRIGGER_MASK) |
@@ -499,7 +643,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
499 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 643 | (int)(new->flags & IRQF_TRIGGER_MASK)); |
500 | } | 644 | } |
501 | 645 | ||
502 | *p = new; | 646 | *old_ptr = new; |
503 | 647 | ||
504 | /* Reset broken irq detection when installing new handler */ | 648 | /* Reset broken irq detection when installing new handler */ |
505 | desc->irq_count = 0; | 649 | desc->irq_count = 0; |
@@ -511,7 +655,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
511 | */ | 655 | */ |
512 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 656 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { |
513 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 657 | desc->status &= ~IRQ_SPURIOUS_DISABLED; |
514 | __enable_irq(desc, irq); | 658 | __enable_irq(desc, irq, false); |
515 | } | 659 | } |
516 | 660 | ||
517 | spin_unlock_irqrestore(&desc->lock, flags); | 661 | spin_unlock_irqrestore(&desc->lock, flags); |
@@ -532,8 +676,19 @@ mismatch: | |||
532 | dump_stack(); | 676 | dump_stack(); |
533 | } | 677 | } |
534 | #endif | 678 | #endif |
679 | ret = -EBUSY; | ||
680 | |||
681 | out_thread: | ||
535 | spin_unlock_irqrestore(&desc->lock, flags); | 682 | spin_unlock_irqrestore(&desc->lock, flags); |
536 | return -EBUSY; | 683 | if (new->thread) { |
684 | struct task_struct *t = new->thread; | ||
685 | |||
686 | new->thread = NULL; | ||
687 | if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) | ||
688 | kthread_stop(t); | ||
689 | put_task_struct(t); | ||
690 | } | ||
691 | return ret; | ||
537 | } | 692 | } |
538 | 693 | ||
539 | /** | 694 | /** |
@@ -549,97 +704,138 @@ int setup_irq(unsigned int irq, struct irqaction *act) | |||
549 | 704 | ||
550 | return __setup_irq(irq, desc, act); | 705 | return __setup_irq(irq, desc, act); |
551 | } | 706 | } |
707 | EXPORT_SYMBOL_GPL(setup_irq); | ||
552 | 708 | ||
553 | /** | 709 | /* |
554 | * free_irq - free an interrupt | 710 | * Internal function to unregister an irqaction - used to free |
555 | * @irq: Interrupt line to free | 711 | * regular and special interrupts that are part of the architecture. |
556 | * @dev_id: Device identity to free | ||
557 | * | ||
558 | * Remove an interrupt handler. The handler is removed and if the | ||
559 | * interrupt line is no longer in use by any driver it is disabled. | ||
560 | * On a shared IRQ the caller must ensure the interrupt is disabled | ||
561 | * on the card it drives before calling this function. The function | ||
562 | * does not return until any executing interrupts for this IRQ | ||
563 | * have completed. | ||
564 | * | ||
565 | * This function must not be called from interrupt context. | ||
566 | */ | 712 | */ |
567 | void free_irq(unsigned int irq, void *dev_id) | 713 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
568 | { | 714 | { |
569 | struct irq_desc *desc = irq_to_desc(irq); | 715 | struct irq_desc *desc = irq_to_desc(irq); |
570 | struct irqaction **p; | 716 | struct irqaction *action, **action_ptr; |
717 | struct task_struct *irqthread; | ||
571 | unsigned long flags; | 718 | unsigned long flags; |
572 | 719 | ||
573 | WARN_ON(in_interrupt()); | 720 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
574 | 721 | ||
575 | if (!desc) | 722 | if (!desc) |
576 | return; | 723 | return NULL; |
577 | 724 | ||
578 | spin_lock_irqsave(&desc->lock, flags); | 725 | spin_lock_irqsave(&desc->lock, flags); |
579 | p = &desc->action; | 726 | |
727 | /* | ||
728 | * There can be multiple actions per IRQ descriptor, find the right | ||
729 | * one based on the dev_id: | ||
730 | */ | ||
731 | action_ptr = &desc->action; | ||
580 | for (;;) { | 732 | for (;;) { |
581 | struct irqaction *action = *p; | 733 | action = *action_ptr; |
734 | |||
735 | if (!action) { | ||
736 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | ||
737 | spin_unlock_irqrestore(&desc->lock, flags); | ||
582 | 738 | ||
583 | if (action) { | 739 | return NULL; |
584 | struct irqaction **pp = p; | 740 | } |
585 | 741 | ||
586 | p = &action->next; | 742 | if (action->dev_id == dev_id) |
587 | if (action->dev_id != dev_id) | 743 | break; |
588 | continue; | 744 | action_ptr = &action->next; |
745 | } | ||
589 | 746 | ||
590 | /* Found it - now remove it from the list of entries */ | 747 | /* Found it - now remove it from the list of entries: */ |
591 | *pp = action->next; | 748 | *action_ptr = action->next; |
592 | 749 | ||
593 | /* Currently used only by UML, might disappear one day.*/ | 750 | /* Currently used only by UML, might disappear one day: */ |
594 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 751 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
595 | if (desc->chip->release) | 752 | if (desc->chip->release) |
596 | desc->chip->release(irq, dev_id); | 753 | desc->chip->release(irq, dev_id); |
597 | #endif | 754 | #endif |
598 | 755 | ||
599 | if (!desc->action) { | 756 | /* If this was the last handler, shut down the IRQ line: */ |
600 | desc->status |= IRQ_DISABLED; | 757 | if (!desc->action) { |
601 | if (desc->chip->shutdown) | 758 | desc->status |= IRQ_DISABLED; |
602 | desc->chip->shutdown(irq); | 759 | if (desc->chip->shutdown) |
603 | else | 760 | desc->chip->shutdown(irq); |
604 | desc->chip->disable(irq); | 761 | else |
605 | } | 762 | desc->chip->disable(irq); |
606 | spin_unlock_irqrestore(&desc->lock, flags); | 763 | } |
607 | unregister_handler_proc(irq, action); | 764 | |
765 | irqthread = action->thread; | ||
766 | action->thread = NULL; | ||
767 | |||
768 | spin_unlock_irqrestore(&desc->lock, flags); | ||
769 | |||
770 | unregister_handler_proc(irq, action); | ||
771 | |||
772 | /* Make sure it's not being used on another CPU: */ | ||
773 | synchronize_irq(irq); | ||
774 | |||
775 | if (irqthread) { | ||
776 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
777 | kthread_stop(irqthread); | ||
778 | put_task_struct(irqthread); | ||
779 | } | ||
608 | 780 | ||
609 | /* Make sure it's not being used on another CPU */ | ||
610 | synchronize_irq(irq); | ||
611 | #ifdef CONFIG_DEBUG_SHIRQ | ||
612 | /* | ||
613 | * It's a shared IRQ -- the driver ought to be | ||
614 | * prepared for it to happen even now it's | ||
615 | * being freed, so let's make sure.... We do | ||
616 | * this after actually deregistering it, to | ||
617 | * make sure that a 'real' IRQ doesn't run in | ||
618 | * parallel with our fake | ||
619 | */ | ||
620 | if (action->flags & IRQF_SHARED) { | ||
621 | local_irq_save(flags); | ||
622 | action->handler(irq, dev_id); | ||
623 | local_irq_restore(flags); | ||
624 | } | ||
625 | #endif | ||
626 | kfree(action); | ||
627 | return; | ||
628 | } | ||
629 | printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq); | ||
630 | #ifdef CONFIG_DEBUG_SHIRQ | 781 | #ifdef CONFIG_DEBUG_SHIRQ |
631 | dump_stack(); | 782 | /* |
632 | #endif | 783 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
633 | spin_unlock_irqrestore(&desc->lock, flags); | 784 | * event to happen even now it's being freed, so let's make sure that |
634 | return; | 785 | * is so by doing an extra call to the handler .... |
786 | * | ||
787 | * ( We do this after actually deregistering it, to make sure that a | ||
788 | *   'real' IRQ doesn't run in parallel with our fake. ) | ||
789 | */ | ||
790 | if (action->flags & IRQF_SHARED) { | ||
791 | local_irq_save(flags); | ||
792 | action->handler(irq, dev_id); | ||
793 | local_irq_restore(flags); | ||
793 | local_irq_restore(flags); | ||
635 | } | 794 | } |
795 | #endif | ||
796 | return action; | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * remove_irq - free an interrupt | ||
801 | * @irq: Interrupt line to free | ||
802 | * @act: irqaction for the interrupt | ||
803 | * | ||
804 | * Used to remove interrupts statically setup by the early boot process. | ||
805 | */ | ||
806 | void remove_irq(unsigned int irq, struct irqaction *act) | ||
807 | { | ||
808 | __free_irq(irq, act->dev_id); | ||
809 | } | ||
810 | EXPORT_SYMBOL_GPL(remove_irq); | ||
811 | |||
812 | /** | ||
813 | * free_irq - free an interrupt allocated with request_irq | ||
814 | * @irq: Interrupt line to free | ||
815 | * @dev_id: Device identity to free | ||
816 | * | ||
817 | * Remove an interrupt handler. The handler is removed and if the | ||
818 | * interrupt line is no longer in use by any driver it is disabled. | ||
819 | * On a shared IRQ the caller must ensure the interrupt is disabled | ||
820 | * on the card it drives before calling this function. The function | ||
821 | * does not return until any executing interrupts for this IRQ | ||
822 | * have completed. | ||
823 | * | ||
824 | * This function must not be called from interrupt context. | ||
825 | */ | ||
826 | void free_irq(unsigned int irq, void *dev_id) | ||
827 | { | ||
828 | kfree(__free_irq(irq, dev_id)); | ||
636 | } | 829 | } |
637 | EXPORT_SYMBOL(free_irq); | 830 | EXPORT_SYMBOL(free_irq); |
638 | 831 | ||
639 | /** | 832 | /** |
640 | * request_irq - allocate an interrupt line | 833 | * request_threaded_irq - allocate an interrupt line |
641 | * @irq: Interrupt line to allocate | 834 | * @irq: Interrupt line to allocate |
642 | * @handler: Function to be called when the IRQ occurs | 835 | * @handler: Function to be called when the IRQ occurs. |
836 | * Primary handler for threaded interrupts | ||
837 | * @thread_fn: Function called from the irq handler thread | ||
838 | * If NULL, no irq thread is created | ||
643 | * @irqflags: Interrupt type flags | 839 | * @irqflags: Interrupt type flags |
644 | * @devname: An ascii name for the claiming device | 840 | * @devname: An ascii name for the claiming device |
645 | * @dev_id: A cookie passed back to the handler function | 841 | * @dev_id: A cookie passed back to the handler function |
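Editor's note: remove_irq() is the teardown counterpart of setup_irq() for statically allocated irqaction structures, where the kfree() done by free_irq() would be wrong. A hedged example of that pattern; the handler, IRQ number and flags below are made up for illustration:

	/* Illustrative only: names, irq number and flags are hypothetical. */
	static irqreturn_t board_timer_interrupt(int irq, void *dev_id)
	{
		/* acknowledge the timer and run the periodic tick */
		return IRQ_HANDLED;
	}

	static struct irqaction board_timer_irqaction = {
		.handler = board_timer_interrupt,
		.flags   = IRQF_DISABLED | IRQF_TIMER,
		.name    = "board-timer",
	};

	static void __init board_time_init(void)
	{
		/* static irqaction: no allocation, so no kfree() on teardown */
		setup_irq(16, &board_timer_irqaction);
	}

	static void board_time_exit(void)
	{
		remove_irq(16, &board_timer_irqaction);
	}

free_irq() keeps its old external behaviour: it looks the action up by dev_id, tears it down via __free_irq() and frees the memory that request_irq()/request_threaded_irq() allocated.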
@@ -651,6 +847,15 @@ EXPORT_SYMBOL(free_irq); | |||
651 | * raises, you must take care both to initialise your hardware | 847 | * raises, you must take care both to initialise your hardware |
652 | * and to set up the interrupt handler in the right order. | 848 | * and to set up the interrupt handler in the right order. |
653 | * | 849 | * |
850 | * If you want to set up a threaded irq handler for your device | ||
851 | * then you need to supply @handler and @thread_fn. @handler is | ||
852 | * still called in hard interrupt context and has to check | ||
853 | * whether the interrupt originates from the device. If yes it | ||
854 | * needs to disable the interrupt on the device and return | ||
855 | * IRQ_THREAD_WAKE which will wake up the handler thread and run | ||
856 | * @thread_fn. This split handler design is necessary to support | ||
857 | * shared interrupts. | ||
858 | * | ||
654 | * Dev_id must be globally unique. Normally the address of the | 859 | * Dev_id must be globally unique. Normally the address of the |
655 | * device data structure is used as the cookie. Since the handler | 860 | * device data structure is used as the cookie. Since the handler |
656 | * receives this value it makes sense to use it. | 861 | * receives this value it makes sense to use it. |
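Editor's note: a minimal driver-side sketch of the split primary/threaded handler that the comment above describes. The my_dev_*() helpers are invented, and the wake-up return value is spelled IRQ_WAKE_THREAD here, which is an assumption about the irqreturn_t naming outside this file:

	/* Sketch only: struct my_dev and the my_dev_*() helpers are hypothetical. */
	static irqreturn_t my_dev_primary_handler(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		if (!my_dev_irq_pending(dev))
			return IRQ_NONE;	/* not ours: the line is shared */

		my_dev_mask_irq(dev);		/* quiet the device ... */
		return IRQ_WAKE_THREAD;		/* ... and run the thread */
	}

	static irqreturn_t my_dev_thread_fn(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		my_dev_handle_work(dev);	/* may sleep in here */
		my_dev_unmask_irq(dev);
		return IRQ_HANDLED;
	}

	static int my_dev_setup_irq(struct my_dev *dev)
	{
		return request_threaded_irq(dev->irq, my_dev_primary_handler,
					    my_dev_thread_fn, IRQF_SHARED,
					    "my_dev", dev);
	}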
@@ -666,8 +871,9 @@ EXPORT_SYMBOL(free_irq); | |||
666 | * IRQF_TRIGGER_* Specify active edge(s) or level | 871 | * IRQF_TRIGGER_* Specify active edge(s) or level |
667 | * | 872 | * |
668 | */ | 873 | */ |
669 | int request_irq(unsigned int irq, irq_handler_t handler, | 874 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
670 | unsigned long irqflags, const char *devname, void *dev_id) | 875 | irq_handler_t thread_fn, unsigned long irqflags, |
876 | const char *devname, void *dev_id) | ||
671 | { | 877 | { |
672 | struct irqaction *action; | 878 | struct irqaction *action; |
673 | struct irq_desc *desc; | 879 | struct irq_desc *desc; |
@@ -679,11 +885,12 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
679 | * the behavior is classified as "will not fix" so we need to | 885 | * the behavior is classified as "will not fix" so we need to |
680 | * start nudging drivers away from using that idiom. | 886 | * start nudging drivers away from using that idiom. |
681 | */ | 887 | */ |
682 | if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) | 888 | if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) == |
683 | == (IRQF_SHARED|IRQF_DISABLED)) | 889 | (IRQF_SHARED|IRQF_DISABLED)) { |
684 | pr_warning("IRQ %d/%s: IRQF_DISABLED is not " | 890 | pr_warning( |
685 | "guaranteed on shared IRQs\n", | 891 | "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n", |
686 | irq, devname); | 892 | irq, devname); |
893 | } | ||
687 | 894 | ||
688 | #ifdef CONFIG_LOCKDEP | 895 | #ifdef CONFIG_LOCKDEP |
689 | /* | 896 | /* |
@@ -709,15 +916,14 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
709 | if (!handler) | 916 | if (!handler) |
710 | return -EINVAL; | 917 | return -EINVAL; |
711 | 918 | ||
712 | action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); | 919 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
713 | if (!action) | 920 | if (!action) |
714 | return -ENOMEM; | 921 | return -ENOMEM; |
715 | 922 | ||
716 | action->handler = handler; | 923 | action->handler = handler; |
924 | action->thread_fn = thread_fn; | ||
717 | action->flags = irqflags; | 925 | action->flags = irqflags; |
718 | cpus_clear(action->mask); | ||
719 | action->name = devname; | 926 | action->name = devname; |
720 | action->next = NULL; | ||
721 | action->dev_id = dev_id; | 927 | action->dev_id = dev_id; |
722 | 928 | ||
723 | retval = __setup_irq(irq, desc, action); | 929 | retval = __setup_irq(irq, desc, action); |
@@ -745,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
745 | #endif | 951 | #endif |
746 | return retval; | 952 | return retval; |
747 | } | 953 | } |
748 | EXPORT_SYMBOL(request_irq); | 954 | EXPORT_SYMBOL(request_threaded_irq); |
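Editor's note: with this rename, the classic request_irq() entry point is expected to survive only as a thin wrapper that passes a NULL thread_fn. The matching include/linux/interrupt.h change is not part of this file, so the sketch below is an assumption about that companion patch:

	/* Assumed companion change in <linux/interrupt.h>; not in this diff. */
	static inline int
	request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
		    const char *name, void *dev)
	{
		return request_threaded_irq(irq, handler, NULL, flags, name, dev);
	}

Callers that never supply a thread_fn therefore keep their current behaviour: __setup_irq() only creates an "irq/%d-%s" handler thread when thread_fn is non-NULL.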