Diffstat (limited to 'kernel/irq/manage.c')

 -rw-r--r--  kernel/irq/manage.c | 482
 1 file changed, 369 insertions, 113 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..0ec9ed831737 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
+
+	/*
+	 * We made sure that no hardirq handler is running. Now verify
+	 * that no threaded handlers are active.
+	 */
+	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
  *	@irq:		Interrupt to check
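The wait_event() added above is what lets drivers keep using synchronize_irq() as a full quiesce point: it now covers threaded handlers as well as hardirq handlers. A minimal teardown sketch, assuming a made-up foo_device with the usual irq/buffer fields (none of these names come from the patch):

static void foo_shutdown(struct foo_device *foo)
{
	/* Stop the hardware from raising further interrupts. */
	foo_hw_mask_irq(foo);

	/*
	 * Waits until neither the hardirq handler nor the threaded
	 * handler of this line can still be running.
	 */
	synchronize_irq(foo->irq);

	/* Now the handlers cannot touch this buffer anymore. */
	kfree(foo->rx_buf);
	foo->rx_buf = NULL;
}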
@@ -73,6 +81,26 @@ int irq_can_set_affinity(unsigned int irq)
 }
 
 /**
+ *	irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *	@desc:		irq descriptor which has affinity changed
+ *
+ *	We just set IRQTF_AFFINITY and delegate the affinity setting
+ *	to the interrupt thread itself. We can not call
+ *	set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *	code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
+{
+	struct irqaction *action = desc->action;
+
+	while (action) {
+		if (action->thread)
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		action = action->next;
+	}
+}
+
+/**
  *	irq_set_affinity - Set the irq affinity of a given irq
  *	@irq:		Interrupt to set affinity
  *	@cpumask:	cpumask
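irq_set_thread_affinity() only flags the threads because set_cpus_allowed_ptr() may sleep, while desc->lock is held here, possibly in hardirq context. The same defer-to-process-context pattern, stripped of the irq specifics, looks roughly like this (all my_* names are invented for illustration):

static unsigned long my_flags;
#define MY_REBIND	0			/* bit number, like IRQTF_AFFINITY */

static void my_request_rebind(void)		/* safe in atomic context */
{
	set_bit(MY_REBIND, &my_flags);		/* never sleeps */
}

static int my_worker(void *unused)		/* kthread, process context */
{
	while (!kthread_should_stop()) {
		if (test_and_clear_bit(MY_REBIND, &my_flags))
			set_cpus_allowed_ptr(current, cpu_online_mask);
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}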
@@ -89,16 +117,21 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
-		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		if (!desc->chip->set_affinity(irq, cpumask)) {
+			cpumask_copy(desc->affinity, cpumask);
+			irq_set_thread_affinity(desc);
+		}
+	}
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
-	desc->chip->set_affinity(irq, cpumask);
+	if (!desc->chip->set_affinity(irq, cpumask)) {
+		cpumask_copy(desc->affinity, cpumask);
+		irq_set_thread_affinity(desc);
+	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
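Note the behavioural change: desc->affinity is copied and the irq thread notified only when chip->set_affinity() reports success. From a caller's point of view nothing changes; a hedged sketch of pinning an interrupt to one CPU (foo_* and the error code choice are made up):

static int foo_pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -ENOSYS;

	/* Updates desc->affinity and nudges the irq thread on success. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}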
@@ -109,7 +142,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;
@@ -119,21 +152,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }
@@ -149,19 +182,35 @@ int irq_select_affinity_usr(unsigned int irq)
 	int ret;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	ret = do_irq_select_affinity(irq, desc);
+	ret = setup_affinity(irq, desc);
+	if (!ret)
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
 
 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
 #endif
 
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+	if (suspend) {
+		if (!desc->action || (desc->action->flags & IRQF_TIMER))
+			return;
+		desc->status |= IRQ_SUSPENDED;
+	}
+
+	if (!desc->depth++) {
+		desc->status |= IRQ_DISABLED;
+		desc->chip->disable(irq);
+	}
+}
+
 /**
  *	disable_irq_nosync - disable an irq without waiting
  *	@irq: Interrupt to disable
@@ -182,10 +231,7 @@ void disable_irq_nosync(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->chip->disable(irq);
-	}
+	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +261,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+	if (resume)
+		desc->status &= ~IRQ_SUSPENDED;
+
 	switch (desc->depth) {
 	case 0:
+ err_out:
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;
 
+		if (desc->status & IRQ_SUSPENDED)
+			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
 		check_irq_resend(desc, irq);
@@ -253,7 +305,7 @@ void enable_irq(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	__enable_irq(desc, irq);
+	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
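The factored-out helpers keep the existing depth semantics: disable calls nest, and the line is only unmasked when the count returns to zero. The new suspend/resume arguments add two twists visible above: timer interrupts (IRQF_TIMER) are never disabled on suspend, and a suspended line refuses a plain enable_irq() (it hits the err_out warning) until it is resumed. The nesting itself, as a driver sees it (foo_* is an invented example):

/* Illustrative only: the depth counter makes disable/enable nestable. */
static void foo_poke_hw_quietly(struct foo_device *foo)
{
	disable_irq(foo->irq);		/* depth 0 -> 1: masked, handlers done */
	disable_irq(foo->irq);		/* depth 1 -> 2: still masked */

	foo_hw_reprogram(foo);		/* no handler can run here */

	enable_irq(foo->irq);		/* depth 2 -> 1: still masked */
	enable_irq(foo->irq);		/* depth 1 -> 0: unmasked again */
}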
@@ -384,14 +436,133 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (test_and_clear_bit(IRQTF_RUNTHREAD,
+				       &action->thread_flags)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		}
+		schedule();
+	}
+	return -1;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+	struct irqaction *action = data;
+	struct irq_desc *desc = irq_to_desc(action->irq);
+	int wake;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	current->irqaction = action;
+
+	while (!irq_wait_for_interrupt(action)) {
+
+		irq_thread_check_affinity(desc, action);
+
+		atomic_inc(&desc->threads_active);
+
+		spin_lock_irq(&desc->lock);
+		if (unlikely(desc->status & IRQ_DISABLED)) {
+			/*
+			 * CHECKME: We might need a dedicated
+			 * IRQ_THREAD_PENDING flag here, which
+			 * retriggers the thread in check_irq_resend()
+			 * but AFAICT IRQ_PENDING should be fine as it
+			 * retriggers the interrupt itself --- tglx
+			 */
+			desc->status |= IRQ_PENDING;
+			spin_unlock_irq(&desc->lock);
+		} else {
+			spin_unlock_irq(&desc->lock);
+
+			action->thread_fn(action->irq, action->dev_id);
+		}
+
+		wake = atomic_dec_and_test(&desc->threads_active);
+
+		if (wake && waitqueue_active(&desc->wait_for_threads))
+			wake_up(&desc->wait_for_threads);
+	}
+
+	/*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * fuzz about an active irq thread going into nirvana.
+	 */
+	current->irqaction = NULL;
+	return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+	struct task_struct *tsk = current;
+
+	if (!tsk->irqaction)
+		return;
+
+	printk(KERN_ERR
+	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+	/*
+	 * Set the THREAD DIED flag to prevent further wakeups of the
+	 * soon to be gone threaded handler.
+	 */
+	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
 static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
-	struct irqaction *old, **p;
+	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
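From a driver's perspective the machinery above reduces to a simple contract: the primary handler runs in hardirq context, checks and quiets the device, and returns IRQ_WAKE_THREAD; the core then wakes the "irq/<nr>-<name>" kthread, which runs the sleepable thread function while threads_active keeps synchronize_irq() honest. A sketch of such a handler pair (the foo_* device and helpers are invented):

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_hw_irq_pending(foo))	/* shared line: not our device */
		return IRQ_NONE;

	foo_hw_mask_irq(foo);		/* stop the device from re-raising */
	return IRQ_WAKE_THREAD;		/* ask the core to run the thread fn */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_process_events(foo);	/* process context: may sleep */
	foo_hw_unmask_irq(foo);
	return IRQ_HANDLED;
}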
@@ -420,11 +591,30 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	}
 
 	/*
+	 * Threaded handler ?
+	 */
+	if (new->thread_fn) {
+		struct task_struct *t;
+
+		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+				   new->name);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		/*
+		 * We keep the reference to the task struct even if
+		 * the thread dies to avoid that the interrupt code
+		 * references an already freed task_struct.
+		 */
+		get_task_struct(t);
+		new->thread = t;
+	}
+
+	/*
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
-	old = *p;
+	old_ptr = &desc->action;
+	old = *old_ptr;
 	if (old) {
 		/*
 		 * Can't share interrupts unless both agree to and are
@@ -447,8 +637,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
-			p = &old->next;
-			old = *p;
+			old_ptr = &old->next;
+			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
@@ -456,15 +646,15 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
+		init_waitqueue_head(&desc->wait_for_threads);
+
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc, irq,
 					new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret) {
-				spin_unlock_irqrestore(&desc->lock, flags);
-				return ret;
-			}
+			if (ret)
+				goto out_thread;
 		} else
 			compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -488,7 +678,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		do_irq_select_affinity(irq, desc);
+		setup_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +689,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
-	*p = new;
+	new->irq = irq;
+	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -511,12 +702,18 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	 */
 	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
 		desc->status &= ~IRQ_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq);
+		__enable_irq(desc, irq, false);
 	}
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
@@ -532,8 +729,19 @@ mismatch:
 		dump_stack();
 	}
 #endif
+	ret = -EBUSY;
+
+out_thread:
 	spin_unlock_irqrestore(&desc->lock, flags);
-	return -EBUSY;
+	if (new->thread) {
+		struct task_struct *t = new->thread;
+
+		new->thread = NULL;
+		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+			kthread_stop(t);
+		put_task_struct(t);
+	}
+	return ret;
 }
 
 /**
@@ -549,97 +757,135 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
 	return __setup_irq(irq, desc, act);
 }
+EXPORT_SYMBOL_GPL(setup_irq);
 
-/**
- *	free_irq - free an interrupt
- *	@irq: Interrupt line to free
- *	@dev_id: Device identity to free
- *
- *	Remove an interrupt handler. The handler is removed and if the
- *	interrupt line is no longer in use by any driver it is disabled.
- *	On a shared IRQ the caller must ensure the interrupt is disabled
- *	on the card it drives before calling this function. The function
- *	does not return until any executing interrupts for this IRQ
- *	have completed.
- *
- *	This function must not be called from interrupt context.
+/*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
  */
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction **p;
+	struct irqaction *action, **action_ptr;
 	unsigned long flags;
 
-	WARN_ON(in_interrupt());
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
 	if (!desc)
-		return;
+		return NULL;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
+
+	/*
+	 * There can be multiple actions per IRQ descriptor, find the right
+	 * one based on the dev_id:
+	 */
+	action_ptr = &desc->action;
 	for (;;) {
-		struct irqaction *action = *p;
+		action = *action_ptr;
 
-		if (action) {
-			struct irqaction **pp = p;
+		if (!action) {
+			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+			spin_unlock_irqrestore(&desc->lock, flags);
+
+			return NULL;
+		}
 
-			p = &action->next;
-			if (action->dev_id != dev_id)
-				continue;
+		if (action->dev_id == dev_id)
+			break;
+		action_ptr = &action->next;
+	}
 
-			/* Found it - now remove it from the list of entries */
-			*pp = action->next;
+	/* Found it - now remove it from the list of entries: */
+	*action_ptr = action->next;
 
-			/* Currently used only by UML, might disappear one day.*/
+	/* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	if (desc->chip->release)
 		desc->chip->release(irq, dev_id);
 #endif
 
-			if (!desc->action) {
-				desc->status |= IRQ_DISABLED;
-				if (desc->chip->shutdown)
-					desc->chip->shutdown(irq);
-				else
-					desc->chip->disable(irq);
-			}
-			spin_unlock_irqrestore(&desc->lock, flags);
-			unregister_handler_proc(irq, action);
+	/* If this was the last handler, shut down the IRQ line: */
+	if (!desc->action) {
+		desc->status |= IRQ_DISABLED;
+		if (desc->chip->shutdown)
+			desc->chip->shutdown(irq);
+		else
+			desc->chip->disable(irq);
+	}
+
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	/* Make sure it's not being used on another CPU: */
+	synchronize_irq(irq);
 
-			/* Make sure it's not being used on another CPU */
-			synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-			/*
-			 * It's a shared IRQ -- the driver ought to be
-			 * prepared for it to happen even now it's
-			 * being freed, so let's make sure.... We do
-			 * this after actually deregistering it, to
-			 * make sure that a 'real' IRQ doesn't run in
-			 * parallel with our fake
-			 */
-			if (action->flags & IRQF_SHARED) {
-				local_irq_save(flags);
-				action->handler(irq, dev_id);
-				local_irq_restore(flags);
-			}
-#endif
-			kfree(action);
-			return;
-		}
-		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
 #ifdef CONFIG_DEBUG_SHIRQ
-		dump_stack();
+	/*
+	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+	 * event to happen even now it's being freed, so let's make sure that
+	 * is so by doing an extra call to the handler ....
+	 *
+	 * ( We do this after actually deregistering it, to make sure that a
+	 *   'real' IRQ doesn't run in parallel with our fake. )
+	 */
+	if (action->flags & IRQF_SHARED) {
+		local_irq_save(flags);
+		action->handler(irq, dev_id);
+		local_irq_restore(flags);
+	}
 #endif
-		spin_unlock_irqrestore(&desc->lock, flags);
-		return;
+
+	if (action->thread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(action->thread);
+		put_task_struct(action->thread);
 	}
+
+	return action;
+}
+
+/**
+ *	remove_irq - free an interrupt
+ *	@irq: Interrupt line to free
+ *	@act: irqaction for the interrupt
+ *
+ *	Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+	__free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ *	free_irq - free an interrupt allocated with request_irq
+ *	@irq: Interrupt line to free
+ *	@dev_id: Device identity to free
+ *
+ *	Remove an interrupt handler. The handler is removed and if the
+ *	interrupt line is no longer in use by any driver it is disabled.
+ *	On a shared IRQ the caller must ensure the interrupt is disabled
+ *	on the card it drives before calling this function. The function
+ *	does not return until any executing interrupts for this IRQ
+ *	have completed.
+ *
+ *	This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+	kfree(__free_irq(irq, dev_id));
 }
 EXPORT_SYMBOL(free_irq);
 
 /**
- *	request_irq - allocate an interrupt line
+ *	request_threaded_irq - allocate an interrupt line
 	*	@irq: Interrupt line to allocate
- *	@handler: Function to be called when the IRQ occurs
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Primary handler for threaded interrupts
+ *	@thread_fn: Function called from the irq handler thread
+ *		  If NULL, no irq thread is created
 	*	@irqflags: Interrupt type flags
 	*	@devname: An ascii name for the claiming device
 	*	@dev_id: A cookie passed back to the handler function
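__free_irq() hands the removed irqaction back to its caller, which is what lets free_irq() kfree() a dynamically allocated action while remove_irq() simply detaches one that was set up statically at early boot via setup_irq(). The static flavour looks roughly like this (a hypothetical board timer; the irq number, flags, and helpers are illustrative, not from the patch):

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	foo_timer_ack();
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
	.handler = foo_timer_interrupt,
	.flags	 = IRQF_DISABLED | IRQF_TIMER,
	.name	 = "foo-timer",
};

void __init foo_time_init(void)
{
	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}

/* Later, e.g. when switching clock hardware: nothing is kfree()d here. */
void foo_time_exit(void)
{
	remove_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}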
@@ -651,6 +897,15 @@ EXPORT_SYMBOL(free_irq);
  *	raises, you must take care both to initialise your hardware
  *	and to set up the interrupt handler in the right order.
  *
+ *	If you want to set up a threaded irq handler for your device
+ *	then you need to supply @handler and @thread_fn. @handler is
+ *	still called in hard interrupt context and has to check
+ *	whether the interrupt originates from the device. If yes it
+ *	needs to disable the interrupt on the device and return
+ *	IRQ_WAKE_THREAD which will wake up the handler thread and run
+ *	@thread_fn. This split handler design is necessary to support
+ *	shared interrupts.
+ *
  *	Dev_id must be globally unique. Normally the address of the
  *	device data structure is used as the cookie. Since the handler
  *	receives this value it makes sense to use it.
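Putting the pieces together, a driver opts into a threaded handler simply by passing both functions; with a NULL @thread_fn no irq thread is created and the call behaves like the old request_irq(). A probe-time sketch reusing the invented foo handlers from earlier:

static int foo_request_irq(struct foo_device *foo)
{
	int ret;

	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				   IRQF_SHARED, "foo", foo);
	if (ret)
		return ret;

	/* Teardown counterpart: free_irq(foo->irq, foo); */
	return 0;
}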
@@ -666,8 +921,9 @@ EXPORT_SYMBOL(free_irq);
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
  */
-int request_irq(unsigned int irq, irq_handler_t handler,
-		unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+			 irq_handler_t thread_fn, unsigned long irqflags,
+			 const char *devname, void *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -679,11 +935,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 * the behavior is classified as "will not fix" so we need to
 	 * start nudging drivers away from using that idiom.
 	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
-					== (IRQF_SHARED|IRQF_DISABLED))
-		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
-				"guaranteed on shared IRQs\n",
-				irq, devname);
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+					(IRQF_SHARED|IRQF_DISABLED)) {
+		pr_warning(
+		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+			irq, devname);
+	}
 
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -709,15 +966,14 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	if (!handler)
 		return -EINVAL;
 
-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
 	action->handler = handler;
+	action->thread_fn = thread_fn;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
-	action->next = NULL;
 	action->dev_id = dev_id;
 
 	retval = __setup_irq(irq, desc, action);
@@ -745,4 +1001,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
 	return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
