Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--  kernel/irq/manage.c | 101
 1 file changed, 81 insertions(+), 20 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c7..3164ba7ce151 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -138,6 +138,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	return 0;
 }
 
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->affinity_hint = m;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
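irq_set_affinity_hint() only stores the caller's pointer under desc->lock; it does not copy the mask, so the cpumask storage must outlive the hint. A minimal driver-side sketch of publishing a hint (illustrative only; the foo_* names are not part of this patch):

	static cpumask_var_t foo_irq_mask;

	static int foo_setup_irq_hint(unsigned int irq)
	{
		if (!zalloc_cpumask_var(&foo_irq_mask, GFP_KERNEL))
			return -ENOMEM;

		/* Suggest CPU0 as the preferred CPU for this line. */
		cpumask_set_cpu(0, foo_irq_mask);
		return irq_set_affinity_hint(irq, foo_irq_mask);
	}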
@@ -382,6 +398,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (!desc)
 		return 0;
@@ -389,11 +406,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
 
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
 	return !action;
 }
 
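Taking desc->lock around the desc->action walk closes a race with a concurrent request_irq() or free_irq() on another CPU. A hypothetical probe-time caller (foo_handler is illustrative):

	/* Bail out early if the line is taken and not joinable as shared. */
	if (!can_request_irq(irq, IRQF_SHARED))
		return -EBUSY;

	return request_irq(irq, foo_handler, IRQF_SHARED, "foo", dev_id);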
@@ -483,8 +503,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
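The scenario above arises for oneshot lines, where the hard interrupt context masks the line and the handler thread is responsible for unmasking it on completion. A sketch of a request that exercises this path (illustrative names, not part of the patch):

	/*
	 * No primary handler: the line stays masked from the moment the
	 * interrupt fires until foo_thread_fn() returns and
	 * irq_finalize_oneshot() unmasks it.
	 */
	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
				   IRQF_ONESHOT, "foo", dev_id);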
@@ -884,6 +922,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
+#ifdef CONFIG_SMP
+	/* make sure affinity_hint is cleaned up */
+	if (WARN_ON_ONCE(desc->affinity_hint))
+		desc->affinity_hint = NULL;
+#endif
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
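The WARN_ON_ONCE() turns a stale hint into a loud driver bug instead of a dangling pointer left behind in the descriptor. Teardown should therefore clear the hint before the line is freed, e.g. (hypothetical sketch, continuing the foo_* example above):

	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, dev_id);
	free_cpumask_var(foo_irq_mask);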
@@ -995,7 +1039,6 @@ EXPORT_SYMBOL(free_irq);
  *	Flags:
  *
  *	IRQF_SHARED		Interrupt is shared
- *	IRQF_DISABLED	Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
@@ -1009,25 +1052,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	int retval;
 
 	/*
-	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
-	 * the _first_ irqaction (sigh).  That can cause oopsing, but
-	 * the behavior is classified as "will not fix" so we need to
-	 * start nudging drivers away from using that idiom.
-	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
-					(IRQF_SHARED|IRQF_DISABLED)) {
-		pr_warning(
-		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
-			irq, devname);
-	}
-
-#ifdef CONFIG_LOCKDEP
-	/*
-	 * Lockdep wants atomic interrupt handlers:
-	 */
-	irqflags |= IRQF_DISABLED;
-#endif
-	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
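With the warning and the lockdep override gone, a driver that used to combine the two flags would now simply drop IRQF_DISABLED (illustrative; assumes a plain shared handler):

	/* Previously: IRQF_SHARED | IRQF_DISABLED. */
	ret = request_irq(irq, foo_handler, IRQF_SHARED, "foo", dev_id);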
@@ -1088,3 +1112,40 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	return retval;
 }
 EXPORT_SYMBOL(request_threaded_irq);
+
+/**
+ *	request_any_context_irq - allocate an interrupt line
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Threaded handler for threaded interrupts.
+ *	@flags: Interrupt type flags
+ *	@name: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources and enables the
+ *	interrupt line and IRQ handling. It selects either a
+ *	hardirq or threaded handling method depending on the
+ *	context.
+ *
+ *	On failure, it returns a negative value. On success,
+ *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			    unsigned long flags, const char *name, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->status & IRQ_NESTED_THREAD) {
+		ret = request_threaded_irq(irq, NULL, handler,
+					   flags, name, dev_id);
+		return !ret ? IRQC_IS_NESTED : ret;
+	}
+
+	ret = request_irq(irq, handler, flags, name, dev_id);
+	return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
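A hypothetical caller, showing how the tri-state return value is consumed (the foo_* identifiers are not part of the patch):

	ret = request_any_context_irq(irq, foo_handler, 0, "foo", dev_id);
	if (ret < 0)
		return ret;

	/* On success, ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */
	foo->irq_is_nested = (ret == IRQC_IS_NESTED);
	return 0;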