Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--  kernel/irq/manage.c | 189
 1 file changed, 176 insertions(+), 13 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1516ab77355c..7e2e7dd4cd2f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
                /* Oops, that failed? */
        } while (status & IRQ_INPROGRESS);
+
+       /*
+        * We made sure that no hardirq handler is running. Now verify
+        * that no threaded handlers are active.
+        */
+       wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq: Interrupt to check
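The wait_event() above pairs with a counting handshake added further down in this
patch: irq_thread() bumps desc->threads_active before invoking the handler and
wakes desc->wait_for_threads when the count drops back to zero. Condensed into
standalone helpers, the idiom looks roughly like this (all names invented; a
sketch of the pattern, not kernel API):

#include <asm/atomic.h>
#include <linux/wait.h>

struct quiesce {
        atomic_t                active;         /* handlers in flight */
        wait_queue_head_t       waiters;        /* where waiters sleep */
};

static void quiesce_init(struct quiesce *q)
{
        atomic_set(&q->active, 0);
        init_waitqueue_head(&q->waiters);
}

static void quiesce_enter(struct quiesce *q)
{
        atomic_inc(&q->active);
}

static void quiesce_exit(struct quiesce *q)
{
        /* Last one out wakes anybody blocked in quiesce_wait(). */
        if (atomic_dec_and_test(&q->active) && waitqueue_active(&q->waiters))
                wake_up(&q->waiters);
}

static void quiesce_wait(struct quiesce *q)
{
        wait_event(q->waiters, !atomic_read(&q->active));
}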
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
        return 1;
 }
 
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+       struct irqaction *action = desc->action;
+
+       while (action) {
+               if (action->thread)
+                       set_cpus_allowed_ptr(action->thread, cpumask);
+               action = action->next;
+       }
+}
+
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq: Interrupt to set affinity
@@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
+       irq_set_thread_affinity(desc, cpumask);
        desc->status |= IRQ_AFFINITY_SET;
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
@@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq)
 
        spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc);
+       if (!ret)
+               irq_set_thread_affinity(desc, desc->affinity);
        spin_unlock_irqrestore(&desc->lock, flags);
 
        return ret;
@@ -401,6 +424,90 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
        return ret;
 }
 
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               if (test_and_clear_bit(IRQTF_RUNTHREAD,
+                                      &action->thread_flags)) {
+                       __set_current_state(TASK_RUNNING);
+                       return 0;
+               }
+               schedule();
+       }
+       return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+       struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+       struct irqaction *action = data;
+       struct irq_desc *desc = irq_to_desc(action->irq);
+       int wake;
+
+       sched_setscheduler(current, SCHED_FIFO, &param);
+       current->irqaction = action;
+
+       while (!irq_wait_for_interrupt(action)) {
+
+               atomic_inc(&desc->threads_active);
+
+               spin_lock_irq(&desc->lock);
+               if (unlikely(desc->status & IRQ_DISABLED)) {
+                       /*
+                        * CHECKME: We might need a dedicated
+                        * IRQ_THREAD_PENDING flag here, which
+                        * retriggers the thread in check_irq_resend()
+                        * but AFAICT IRQ_PENDING should be fine as it
+                        * retriggers the interrupt itself --- tglx
+                        */
+                       desc->status |= IRQ_PENDING;
+                       spin_unlock_irq(&desc->lock);
+               } else {
+                       spin_unlock_irq(&desc->lock);
+
+                       action->thread_fn(action->irq, action->dev_id);
+               }
+
+               wake = atomic_dec_and_test(&desc->threads_active);
+
+               if (wake && waitqueue_active(&desc->wait_for_threads))
+                       wake_up(&desc->wait_for_threads);
+       }
+
+       /*
+        * Clear irqaction. Otherwise exit_irq_thread() would make
+        * fuzz about an active irq thread going into nirvana.
+        */
+       current->irqaction = NULL;
+       return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+       struct task_struct *tsk = current;
+
+       if (!tsk->irqaction)
+               return;
+
+       printk(KERN_ERR
+              "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+              tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+       /*
+        * Set the THREAD DIED flag to prevent further wakeups of the
+        * soon to be gone threaded handler.
+        */
+       set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
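Note that nothing in this hunk wakes the thread: the wake-up side lives in the
hardirq flow (handle_IRQ_event() in kernel/irq/handle.c, part of the same
series but outside this diff). Assuming the IRQTF_RUNTHREAD protocol that
irq_wait_for_interrupt() implements, the waker presumably looks roughly like
this (a sketch, not the exact code from the other file):

static void wake_irq_thread(struct irqaction *action)
{
        /*
         * If the thread crashed and was killed (IRQTF_DIED, see
         * exit_irq_thread() above) there is nobody left to wake.
         */
        if (unlikely(!action->thread ||
                     test_bit(IRQTF_DIED, &action->thread_flags)))
                return;

        /*
         * Set RUNTHREAD before the wakeup: irq_wait_for_interrupt()
         * tests and clears the bit after setting TASK_INTERRUPTIBLE,
         * so the wakeup cannot be lost even when the thread is not
         * sleeping yet.
         */
        set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
        wake_up_process(action->thread);
}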
@@ -437,6 +544,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        /*
+        * Threaded handler ?
+        */
+       if (new->thread_fn) {
+               struct task_struct *t;
+
+               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+                                  new->name);
+               if (IS_ERR(t))
+                       return PTR_ERR(t);
+               /*
+                * We keep the reference to the task struct even if
+                * the thread dies to avoid that the interrupt code
+                * references an already freed task_struct.
+                */
+               get_task_struct(t);
+               new->thread = t;
+               wake_up_process(t);
+       }
+
+       /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
@@ -473,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        if (!shared) {
                irq_chip_set_defaults(desc->chip);
 
+               init_waitqueue_head(&desc->wait_for_threads);
+
                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);
 
-                       if (ret) {
-                               spin_unlock_irqrestore(&desc->lock, flags);
-                               return ret;
-                       }
+                       if (ret)
+                               goto out_thread;
                } else
                        compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -549,8 +676,19 @@ mismatch:
                dump_stack();
        }
 #endif
+       ret = -EBUSY;
+
+out_thread:
        spin_unlock_irqrestore(&desc->lock, flags);
-       return -EBUSY;
+       if (new->thread) {
+               struct task_struct *t = new->thread;
+
+               new->thread = NULL;
+               if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+                       kthread_stop(t);
+               put_task_struct(t);
+       }
+       return ret;
 }
 
 /**
@@ -576,6 +714,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
+       struct task_struct *irqthread;
        unsigned long flags;
 
        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -622,6 +761,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                else
                        desc->chip->disable(irq);
        }
+
+       irqthread = action->thread;
+       action->thread = NULL;
+
        spin_unlock_irqrestore(&desc->lock, flags);
 
        unregister_handler_proc(irq, action);
@@ -629,6 +772,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);
 
+       if (irqthread) {
+               if (!test_bit(IRQTF_DIED, &action->thread_flags))
+                       kthread_stop(irqthread);
+               put_task_struct(irqthread);
+       }
+
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -681,9 +830,12 @@ void free_irq(unsigned int irq, void *dev_id)
 EXPORT_SYMBOL(free_irq);
 
 /**
- *     request_irq - allocate an interrupt line
+ *     request_threaded_irq - allocate an interrupt line
  *     @irq: Interrupt line to allocate
- *     @handler: Function to be called when the IRQ occurs
+ *     @handler: Function to be called when the IRQ occurs.
+ *               Primary handler for threaded interrupts
+ *     @thread_fn: Function called from the irq handler thread
+ *                 If NULL, no irq thread is created
  *     @irqflags: Interrupt type flags
  *     @devname: An ascii name for the claiming device
  *     @dev_id: A cookie passed back to the handler function
@@ -695,6 +847,15 @@ EXPORT_SYMBOL(free_irq);
  *     raises, you must take care both to initialise your hardware
  *     and to set up the interrupt handler in the right order.
  *
+ *     If you want to set up a threaded irq handler for your device
+ *     then you need to supply @handler and @thread_fn. @handler is
+ *     still called in hard interrupt context and has to check
+ *     whether the interrupt originates from the device. If yes it
+ *     needs to disable the interrupt on the device and return
+ *     IRQ_THREAD_WAKE which will wake up the handler thread and run
+ *     @thread_fn. This split handler design is necessary to support
+ *     shared interrupts.
+ *
  *     Dev_id must be globally unique. Normally the address of the
  *     device data structure is used as the cookie. Since the handler
  *     receives this value it makes sense to use it.
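From a driver's point of view, the kerneldoc above translates into a split like
the following. Everything foo_* is a made-up device; the sketch uses the
IRQ_THREAD_WAKE return value as named in the comment above:

static irqreturn_t foo_primary_handler(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        /* Shared line: first check whether our device raised it. */
        if (!foo_irq_pending(foo))
                return IRQ_NONE;

        /*
         * Quiet the device so the line does not fire again, then
         * defer the sleepable work to the handler thread.
         */
        foo_mask_device_irq(foo);
        return IRQ_THREAD_WAKE;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        foo_process_events(foo);        /* may sleep, take mutexes, ... */
        foo_unmask_device_irq(foo);
        return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *foo)
{
        return request_threaded_irq(foo->irq, foo_primary_handler,
                                    foo_thread_fn, IRQF_SHARED,
                                    "foo", foo);
}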
@@ -710,8 +871,9 @@ EXPORT_SYMBOL(free_irq);
  *     IRQF_TRIGGER_*          Specify active edge(s) or level
  *
  */
-int request_irq(unsigned int irq, irq_handler_t handler,
-               unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+                        irq_handler_t thread_fn, unsigned long irqflags,
+                        const char *devname, void *dev_id)
 {
        struct irqaction *action;
        struct irq_desc *desc;
@@ -759,6 +921,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
                return -ENOMEM;
 
        action->handler = handler;
+       action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;
@@ -788,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
        return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
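The rename does not break existing callers: request_irq() presumably survives
as a thin inline wrapper in include/linux/interrupt.h (changed in the same
series, not shown in this diff) that passes a NULL thread_fn, along the lines
of:

/* Sketch of the compatibility wrapper; lives in the header, not this file. */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev)
{
        return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}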
