Diffstat (limited to 'kernel/irq/manage.c')

 kernel/irq/manage.c (-rw-r--r--) | 132
 1 file changed, 107 insertions(+), 25 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..2486a4c1a710 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-/**
- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- * @irq: interrupt number to wait for
- *
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
+static void __synchronize_hardirq(struct irq_desc *desc)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	bool inprogress;
 
-	if (!desc)
-		return;
-
 	do {
 		unsigned long flags;
 
@@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (inprogress);
+}
 
-	/*
-	 * We made sure that no hardirq handler is running. Now verify
-	 * that no threaded handlers are active.
-	 */
-	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
+/**
+ * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending hard IRQ handlers for this
+ * interrupt to complete before returning. If you use this
+ * function while holding a resource the IRQ handler may need you
+ * will deadlock. It does not take associated threaded handlers
+ * into account.
+ *
+ * Do not use this for shutdown scenarios where you must be sure
+ * that all parts (hardirq and threaded handler) have completed.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_hardirq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc)
+		__synchronize_hardirq(desc);
+}
+EXPORT_SYMBOL(synchronize_hardirq);
+
+/**
+ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc) {
+		__synchronize_hardirq(desc);
+		/*
+		 * We made sure that no hardirq handler is
+		 * running. Now verify that no threaded handlers are
+		 * active.
+		 */
+		wait_event(desc->wait_for_threads,
+			   !atomic_read(&desc->threads_active));
+	}
 }
 EXPORT_SYMBOL(synchronize_irq);
 
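Note: the split above gives drivers a way to quiesce only the hard IRQ part. A minimal usage sketch, assuming a hypothetical driver (struct my_dev and the my_dev_* helpers are illustrative, not part of this commit):

#include <linux/interrupt.h>

struct my_dev {
	int irq;
};

static void my_dev_stop_fast_path(struct my_dev *dev)
{
	/*
	 * Waits only for the hard IRQ handler. Per the kerneldoc above,
	 * this must not be used for shutdown: the threaded handler may
	 * still be running afterwards.
	 */
	synchronize_hardirq(dev->irq);
}

static void my_dev_shutdown(struct my_dev *dev)
{
	/* Waits for both the hard IRQ handler and the threaded handler. */
	synchronize_irq(dev->irq);
}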
@@ -718,7 +748,7 @@ again:
 
 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 	    irqd_irq_masked(&desc->irq_data))
-		unmask_irq(desc);
+		unmask_threaded_irq(desc);
 
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
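Note: unmask_threaded_irq() appears to be a helper added in the companion genirq change in kernel/irq/chip.c (an assumption from the surrounding series, not visible in this diff); instead of a plain unmask, it lets chips flagged IRQCHIP_EOI_THREADED issue their EOI from the irq thread once oneshot handling is finalized.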
@@ -727,7 +757,7 @@ out_unlock:
 
 #ifdef CONFIG_SMP
 /*
- * Check whether we need to chasnge the affinity of the interrupt thread.
+ * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -802,8 +832,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-	if (atomic_dec_and_test(&desc->threads_active) &&
-	    waitqueue_active(&desc->wait_for_threads))
+	if (atomic_dec_and_test(&desc->threads_active))
 		wake_up(&desc->wait_for_threads);
 }
 
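Note: dropping the waitqueue_active() check closes a lost-wakeup window. waitqueue_active() peeks at the queue without taking its lock, so the waker's unlocked load can race with a waiter that is in the middle of adding itself inside wait_event(). One possible interleaving, schematically:

  synchronize_irq()                      wake_threads_waitq()
  -----------------                      --------------------
  wait_event() begins
                                         atomic_dec_and_test() -> true
                                         waitqueue_active() -> false
                                             (waiter not visible yet)
                                         /* wake_up() skipped */
  waiter enqueued, goes to sleep
  -> never woken

wake_up() takes the waitqueue lock, so calling it unconditionally is safe; the check was only a (broken) optimization.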
@@ -881,6 +910,33 @@ static int irq_thread(void *data)
 	return 0;
 }
 
+/**
+ * irq_wake_thread - wake the irq thread for the action identified by dev_id
+ * @irq:	Interrupt line
+ * @dev_id:	Device identity for which the thread should be woken
+ *
+ */
+void irq_wake_thread(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	for (action = desc->action; action; action = action->next) {
+		if (action->dev_id == dev_id) {
+			if (action->thread)
+				__irq_wake_thread(desc, action);
+			break;
+		}
+	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(irq_wake_thread);
+
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
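Note: irq_wake_thread() lets a driver run its threaded handler on demand, matched by the dev_id cookie it passed to request_threaded_irq(); the WARN_ON() refuses per-CPU interrupts. A minimal sketch (the my_* names are illustrative):

#include <linux/interrupt.h>

struct my_dev {
	int irq;
};

/*
 * Run our threaded handler as if the hard IRQ had fired, e.g. after
 * detecting missed work during error recovery. @dev must be the same
 * dev_id that was passed to request_threaded_irq().
 */
static void my_dev_kick_thread(struct my_dev *dev)
{
	irq_wake_thread(dev->irq, dev);
}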
@@ -897,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new)
 	}
 }
 
+static int irq_request_resources(struct irq_desc *desc)
+{
+	struct irq_data *d = &desc->irq_data;
+	struct irq_chip *c = d->chip;
+
+	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
+}
+
+static void irq_release_resources(struct irq_desc *desc)
+{
+	struct irq_data *d = &desc->irq_data;
+	struct irq_chip *c = d->chip;
+
+	if (c->irq_release_resources)
+		c->irq_release_resources(d);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
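Note: the new hooks give an irq_chip a veto point before an interrupt gets its first handler. A sketch of a GPIO irqchip wiring them up (the my_gpio_* names are illustrative; claiming the pin via gpiochip_lock_as_irq() is a typical use, assumed here rather than taken from this commit):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int my_gpio_irq_request_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* Claim the pin as an interrupt so GPIO users cannot reconfigure it. */
	return gpiochip_lock_as_irq(gc, d->hwirq);
}

static void my_gpio_irq_release_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(gc, d->hwirq);
}

static struct irq_chip my_gpio_irq_chip = {
	.name			= "my-gpio",
	.irq_request_resources	= my_gpio_irq_request_resources,
	.irq_release_resources	= my_gpio_irq_release_resources,
	/* .irq_mask, .irq_unmask, .irq_set_type, ... */
};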
@@ -1092,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 	if (!shared) {
+		ret = irq_request_resources(desc);
+		if (ret) {
+			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
+			       new->name, irq, desc->irq_data.chip->name);
+			goto out_mask;
+		}
+
 		init_waitqueue_head(&desc->wait_for_threads);
 
 		/* Setup the type (level, edge polarity) if configured: */
@@ -1262,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	*action_ptr = action->next;
 
 	/* If this was the last handler, shut down the IRQ line: */
-	if (!desc->action)
+	if (!desc->action) {
 		irq_shutdown(desc);
+		irq_release_resources(desc);
+	}
 
 #ifdef CONFIG_SMP
 	/* make sure affinity_hint is cleaned up */
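Note: together, the last two hunks bracket the resource lifetime around the action list: irq_request_resources() runs in __setup_irq() only for the first handler (the !shared case), before the interrupt is started up, and irq_release_resources() runs in __free_irq() only once the last handler is gone and irq_shutdown() has completed.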
