diff options
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r-- | kernel/irq/manage.c | 184 |
1 file changed, 158 insertions, 26 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 50da67672901..bde4c667d24d 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq) | |||
80 | return 1; | 80 | return 1; |
81 | } | 81 | } |
82 | 82 | ||
83 | void | 83 | /** |
84 | irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) | 84 | * irq_set_thread_affinity - Notify irq threads to adjust affinity |
85 | * @desc: irq descriptor which has affinity changed | ||
86 | * | ||
87 | * We just set IRQTF_AFFINITY and delegate the affinity setting | ||
88 | * to the interrupt thread itself. We can not call | ||
89 | * set_cpus_allowed_ptr() here as we hold desc->lock and this | ||
90 | * code can be called from hard interrupt context. | ||
91 | */ | ||
92 | void irq_set_thread_affinity(struct irq_desc *desc) | ||
85 | { | 93 | { |
86 | struct irqaction *action = desc->action; | 94 | struct irqaction *action = desc->action; |
87 | 95 | ||
88 | while (action) { | 96 | while (action) { |
89 | if (action->thread) | 97 | if (action->thread) |
90 | set_cpus_allowed_ptr(action->thread, cpumask); | 98 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
91 | action = action->next; | 99 | action = action->next; |
92 | } | 100 | } |
93 | } | 101 | } |
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
112 | if (desc->status & IRQ_MOVE_PCNTXT) { | 120 | if (desc->status & IRQ_MOVE_PCNTXT) { |
113 | if (!desc->chip->set_affinity(irq, cpumask)) { | 121 | if (!desc->chip->set_affinity(irq, cpumask)) { |
114 | cpumask_copy(desc->affinity, cpumask); | 122 | cpumask_copy(desc->affinity, cpumask); |
115 | irq_set_thread_affinity(desc, cpumask); | 123 | irq_set_thread_affinity(desc); |
116 | } | 124 | } |
117 | } | 125 | } |
118 | else { | 126 | else { |
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
122 | #else | 130 | #else |
123 | if (!desc->chip->set_affinity(irq, cpumask)) { | 131 | if (!desc->chip->set_affinity(irq, cpumask)) { |
124 | cpumask_copy(desc->affinity, cpumask); | 132 | cpumask_copy(desc->affinity, cpumask); |
125 | irq_set_thread_affinity(desc, cpumask); | 133 | irq_set_thread_affinity(desc); |
126 | } | 134 | } |
127 | #endif | 135 | #endif |
128 | desc->status |= IRQ_AFFINITY_SET; | 136 | desc->status |= IRQ_AFFINITY_SET; |
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq) | |||
176 | spin_lock_irqsave(&desc->lock, flags); | 184 | spin_lock_irqsave(&desc->lock, flags); |
177 | ret = setup_affinity(irq, desc); | 185 | ret = setup_affinity(irq, desc); |
178 | if (!ret) | 186 | if (!ret) |
179 | irq_set_thread_affinity(desc, desc->affinity); | 187 | irq_set_thread_affinity(desc); |
180 | spin_unlock_irqrestore(&desc->lock, flags); | 188 | spin_unlock_irqrestore(&desc->lock, flags); |
181 | 189 | ||
182 | return ret; | 190 | return ret; |
@@ -222,9 +230,11 @@ void disable_irq_nosync(unsigned int irq) | |||
222 | if (!desc) | 230 | if (!desc) |
223 | return; | 231 | return; |
224 | 232 | ||
233 | chip_bus_lock(irq, desc); | ||
225 | spin_lock_irqsave(&desc->lock, flags); | 234 | spin_lock_irqsave(&desc->lock, flags); |
226 | __disable_irq(desc, irq, false); | 235 | __disable_irq(desc, irq, false); |
227 | spin_unlock_irqrestore(&desc->lock, flags); | 236 | spin_unlock_irqrestore(&desc->lock, flags); |
237 | chip_bus_sync_unlock(irq, desc); | ||
228 | } | 238 | } |
229 | EXPORT_SYMBOL(disable_irq_nosync); | 239 | EXPORT_SYMBOL(disable_irq_nosync); |
230 | 240 | ||
@@ -286,7 +296,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
286 | * matches the last disable, processing of interrupts on this | 296 | * matches the last disable, processing of interrupts on this |
287 | * IRQ line is re-enabled. | 297 | * IRQ line is re-enabled. |
288 | * | 298 | * |
289 | * This function may be called from IRQ context. | 299 | * This function may be called from IRQ context only when |
300 | * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | ||
290 | */ | 301 | */ |
291 | void enable_irq(unsigned int irq) | 302 | void enable_irq(unsigned int irq) |
292 | { | 303 | { |
@@ -296,9 +307,11 @@ void enable_irq(unsigned int irq) | |||
296 | if (!desc) | 307 | if (!desc) |
297 | return; | 308 | return; |
298 | 309 | ||
310 | chip_bus_lock(irq, desc); | ||
299 | spin_lock_irqsave(&desc->lock, flags); | 311 | spin_lock_irqsave(&desc->lock, flags); |
300 | __enable_irq(desc, irq, false); | 312 | __enable_irq(desc, irq, false); |
301 | spin_unlock_irqrestore(&desc->lock, flags); | 313 | spin_unlock_irqrestore(&desc->lock, flags); |
314 | chip_bus_sync_unlock(irq, desc); | ||
302 | } | 315 | } |
303 | EXPORT_SYMBOL(enable_irq); | 316 | EXPORT_SYMBOL(enable_irq); |
304 | 317 | ||
@@ -428,6 +441,26 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
428 | return ret; | 441 | return ret; |
429 | } | 442 | } |
430 | 443 | ||
444 | /* | ||
445 | * Default primary interrupt handler for threaded interrupts. Is | ||
446 | * assigned as primary handler when request_threaded_irq is called | ||
447 | * with handler == NULL. Useful for oneshot interrupts. | ||
448 | */ | ||
449 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | ||
450 | { | ||
451 | return IRQ_WAKE_THREAD; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * Primary handler for nested threaded interrupts. Should never be | ||
456 | * called. | ||
457 | */ | ||
458 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | ||
459 | { | ||
460 | WARN(1, "Primary handler called for nested irq %d\n", irq); | ||
461 | return IRQ_NONE; | ||
462 | } | ||
463 | |||
431 | static int irq_wait_for_interrupt(struct irqaction *action) | 464 | static int irq_wait_for_interrupt(struct irqaction *action) |
432 | { | 465 | { |
433 | while (!kthread_should_stop()) { | 466 | while (!kthread_should_stop()) { |
@@ -444,6 +477,56 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
444 | } | 477 | } |
445 | 478 | ||
446 | /* | 479 | /* |
480 | * Oneshot interrupts keep the irq line masked until the threaded | ||
481 | * handler finished. unmask if the interrupt has not been disabled and | ||
482 | * is marked MASKED. | ||
483 | */ | ||
484 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | ||
485 | { | ||
486 | chip_bus_lock(irq, desc); | ||
487 | spin_lock_irq(&desc->lock); | ||
488 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | ||
489 | desc->status &= ~IRQ_MASKED; | ||
490 | desc->chip->unmask(irq); | ||
491 | } | ||
492 | spin_unlock_irq(&desc->lock); | ||
493 | chip_bus_sync_unlock(irq, desc); | ||
494 | } | ||
495 | |||
496 | #ifdef CONFIG_SMP | ||
497 | /* | ||
498 | * Check whether we need to change the affinity of the interrupt thread. | ||
499 | */ | ||
500 | static void | ||
501 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | ||
502 | { | ||
503 | cpumask_var_t mask; | ||
504 | |||
505 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) | ||
506 | return; | ||
507 | |||
508 | /* | ||
509 | * In case we are out of memory we set IRQTF_AFFINITY again and | ||
510 | * try again next time | ||
511 | */ | ||
512 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
513 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | ||
514 | return; | ||
515 | } | ||
516 | |||
517 | spin_lock_irq(&desc->lock); | ||
518 | cpumask_copy(mask, desc->affinity); | ||
519 | spin_unlock_irq(&desc->lock); | ||
520 | |||
521 | set_cpus_allowed_ptr(current, mask); | ||
522 | free_cpumask_var(mask); | ||
523 | } | ||
524 | #else | ||
525 | static inline void | ||
526 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | ||
527 | #endif | ||
528 | |||
529 | /* | ||
447 | * Interrupt handler thread | 530 | * Interrupt handler thread |
448 | */ | 531 | */ |
449 | static int irq_thread(void *data) | 532 | static int irq_thread(void *data) |
@@ -451,13 +534,15 @@ static int irq_thread(void *data) | |||
451 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; | 534 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; |
452 | struct irqaction *action = data; | 535 | struct irqaction *action = data; |
453 | struct irq_desc *desc = irq_to_desc(action->irq); | 536 | struct irq_desc *desc = irq_to_desc(action->irq); |
454 | int wake; | 537 | int wake, oneshot = desc->status & IRQ_ONESHOT; |
455 | 538 | ||
456 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 539 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
457 | current->irqaction = action; | 540 | current->irqaction = action; |
458 | 541 | ||
459 | while (!irq_wait_for_interrupt(action)) { | 542 | while (!irq_wait_for_interrupt(action)) { |
460 | 543 | ||
544 | irq_thread_check_affinity(desc, action); | ||
545 | |||
461 | atomic_inc(&desc->threads_active); | 546 | atomic_inc(&desc->threads_active); |
462 | 547 | ||
463 | spin_lock_irq(&desc->lock); | 548 | spin_lock_irq(&desc->lock); |
@@ -475,6 +560,9 @@ static int irq_thread(void *data) | |||
475 | spin_unlock_irq(&desc->lock); | 560 | spin_unlock_irq(&desc->lock); |
476 | 561 | ||
477 | action->thread_fn(action->irq, action->dev_id); | 562 | action->thread_fn(action->irq, action->dev_id); |
563 | |||
564 | if (oneshot) | ||
565 | irq_finalize_oneshot(action->irq, desc); | ||
478 | } | 566 | } |
479 | 567 | ||
480 | wake = atomic_dec_and_test(&desc->threads_active); | 568 | wake = atomic_dec_and_test(&desc->threads_active); |
@@ -522,7 +610,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
522 | struct irqaction *old, **old_ptr; | 610 | struct irqaction *old, **old_ptr; |
523 | const char *old_name = NULL; | 611 | const char *old_name = NULL; |
524 | unsigned long flags; | 612 | unsigned long flags; |
525 | int shared = 0; | 613 | int nested, shared = 0; |
526 | int ret; | 614 | int ret; |
527 | 615 | ||
528 | if (!desc) | 616 | if (!desc) |
@@ -547,10 +635,32 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
547 | rand_initialize_irq(irq); | 635 | rand_initialize_irq(irq); |
548 | } | 636 | } |
549 | 637 | ||
638 | /* Oneshot interrupts are not allowed with shared */ | ||
639 | if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) | ||
640 | return -EINVAL; | ||
641 | |||
642 | /* | ||
643 | * Check whether the interrupt nests into another interrupt | ||
644 | * thread. | ||
645 | */ | ||
646 | nested = desc->status & IRQ_NESTED_THREAD; | ||
647 | if (nested) { | ||
648 | if (!new->thread_fn) | ||
649 | return -EINVAL; | ||
650 | /* | ||
651 | * Replace the primary handler which was provided from | ||
652 | * the driver for non nested interrupt handling by the | ||
653 | * dummy function which warns when called. | ||
654 | */ | ||
655 | new->handler = irq_nested_primary_handler; | ||
656 | } | ||
657 | |||
550 | /* | 658 | /* |
551 | * Threaded handler ? | 659 | * Create a handler thread when a thread function is supplied |
660 | * and the interrupt does not nest into another interrupt | ||
661 | * thread. | ||
552 | */ | 662 | */ |
553 | if (new->thread_fn) { | 663 | if (new->thread_fn && !nested) { |
554 | struct task_struct *t; | 664 | struct task_struct *t; |
555 | 665 | ||
556 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | 666 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
@@ -564,7 +674,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
564 | */ | 674 | */ |
565 | get_task_struct(t); | 675 | get_task_struct(t); |
566 | new->thread = t; | 676 | new->thread = t; |
567 | wake_up_process(t); | ||
568 | } | 677 | } |
569 | 678 | ||
570 | /* | 679 | /* |
@@ -620,9 +729,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
620 | desc->status |= IRQ_PER_CPU; | 729 | desc->status |= IRQ_PER_CPU; |
621 | #endif | 730 | #endif |
622 | 731 | ||
623 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | | 732 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | |
624 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 733 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); |
625 | 734 | ||
735 | if (new->flags & IRQF_ONESHOT) | ||
736 | desc->status |= IRQ_ONESHOT; | ||
737 | |||
626 | if (!(desc->status & IRQ_NOAUTOEN)) { | 738 | if (!(desc->status & IRQ_NOAUTOEN)) { |
627 | desc->depth = 0; | 739 | desc->depth = 0; |
628 | desc->status &= ~IRQ_DISABLED; | 740 | desc->status &= ~IRQ_DISABLED; |
@@ -647,6 +759,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
647 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 759 | (int)(new->flags & IRQF_TRIGGER_MASK)); |
648 | } | 760 | } |
649 | 761 | ||
762 | new->irq = irq; | ||
650 | *old_ptr = new; | 763 | *old_ptr = new; |
651 | 764 | ||
652 | /* Reset broken irq detection when installing new handler */ | 765 | /* Reset broken irq detection when installing new handler */ |
@@ -664,7 +777,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
664 | 777 | ||
665 | spin_unlock_irqrestore(&desc->lock, flags); | 778 | spin_unlock_irqrestore(&desc->lock, flags); |
666 | 779 | ||
667 | new->irq = irq; | 780 | /* |
781 | * Strictly no need to wake it up, but hung_task complains | ||
782 | * when no hard interrupt wakes the thread up. | ||
783 | */ | ||
784 | if (new->thread) | ||
785 | wake_up_process(new->thread); | ||
786 | |||
668 | register_irq_proc(irq, desc); | 787 | register_irq_proc(irq, desc); |
669 | new->dir = NULL; | 788 | new->dir = NULL; |
670 | register_handler_proc(irq, new); | 789 | register_handler_proc(irq, new); |
@@ -718,7 +837,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
718 | { | 837 | { |
719 | struct irq_desc *desc = irq_to_desc(irq); | 838 | struct irq_desc *desc = irq_to_desc(irq); |
720 | struct irqaction *action, **action_ptr; | 839 | struct irqaction *action, **action_ptr; |
721 | struct task_struct *irqthread; | ||
722 | unsigned long flags; | 840 | unsigned long flags; |
723 | 841 | ||
724 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 842 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
@@ -766,9 +884,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
766 | desc->chip->disable(irq); | 884 | desc->chip->disable(irq); |
767 | } | 885 | } |
768 | 886 | ||
769 | irqthread = action->thread; | ||
770 | action->thread = NULL; | ||
771 | |||
772 | spin_unlock_irqrestore(&desc->lock, flags); | 887 | spin_unlock_irqrestore(&desc->lock, flags); |
773 | 888 | ||
774 | unregister_handler_proc(irq, action); | 889 | unregister_handler_proc(irq, action); |
@@ -776,12 +891,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
776 | /* Make sure it's not being used on another CPU: */ | 891 | /* Make sure it's not being used on another CPU: */ |
777 | synchronize_irq(irq); | 892 | synchronize_irq(irq); |
778 | 893 | ||
779 | if (irqthread) { | ||
780 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
781 | kthread_stop(irqthread); | ||
782 | put_task_struct(irqthread); | ||
783 | } | ||
784 | |||
785 | #ifdef CONFIG_DEBUG_SHIRQ | 894 | #ifdef CONFIG_DEBUG_SHIRQ |
786 | /* | 895 | /* |
787 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | 896 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
@@ -797,6 +906,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
797 | local_irq_restore(flags); | 906 | local_irq_restore(flags); |
798 | } | 907 | } |
799 | #endif | 908 | #endif |
909 | |||
910 | if (action->thread) { | ||
911 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
912 | kthread_stop(action->thread); | ||
913 | put_task_struct(action->thread); | ||
914 | } | ||
915 | |||
800 | return action; | 916 | return action; |
801 | } | 917 | } |
802 | 918 | ||
@@ -829,7 +945,14 @@ EXPORT_SYMBOL_GPL(remove_irq); | |||
829 | */ | 945 | */ |
830 | void free_irq(unsigned int irq, void *dev_id) | 946 | void free_irq(unsigned int irq, void *dev_id) |
831 | { | 947 | { |
948 | struct irq_desc *desc = irq_to_desc(irq); | ||
949 | |||
950 | if (!desc) | ||
951 | return; | ||
952 | |||
953 | chip_bus_lock(irq, desc); | ||
832 | kfree(__free_irq(irq, dev_id)); | 954 | kfree(__free_irq(irq, dev_id)); |
955 | chip_bus_sync_unlock(irq, desc); | ||
833 | } | 956 | } |
834 | EXPORT_SYMBOL(free_irq); | 957 | EXPORT_SYMBOL(free_irq); |
835 | 958 | ||
@@ -838,6 +961,8 @@ EXPORT_SYMBOL(free_irq); | |||
838 | * @irq: Interrupt line to allocate | 961 | * @irq: Interrupt line to allocate |
839 | * @handler: Function to be called when the IRQ occurs. | 962 | * @handler: Function to be called when the IRQ occurs. |
840 | * Primary handler for threaded interrupts | 963 | * Primary handler for threaded interrupts |
964 | * If NULL and thread_fn != NULL the default | ||
965 | * primary handler is installed | ||
841 | * @thread_fn: Function called from the irq handler thread | 966 | * @thread_fn: Function called from the irq handler thread |
842 | * If NULL, no irq thread is created | 967 | * If NULL, no irq thread is created |
843 | * @irqflags: Interrupt type flags | 968 | * @irqflags: Interrupt type flags |
@@ -917,8 +1042,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
917 | 1042 | ||
918 | if (desc->status & IRQ_NOREQUEST) | 1043 | if (desc->status & IRQ_NOREQUEST) |
919 | return -EINVAL; | 1044 | return -EINVAL; |
920 | if (!handler) | 1045 | |
921 | return -EINVAL; | 1046 | if (!handler) { |
1047 | if (!thread_fn) | ||
1048 | return -EINVAL; | ||
1049 | handler = irq_default_primary_handler; | ||
1050 | } | ||
922 | 1051 | ||
923 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1052 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
924 | if (!action) | 1053 | if (!action) |
@@ -930,7 +1059,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
930 | action->name = devname; | 1059 | action->name = devname; |
931 | action->dev_id = dev_id; | 1060 | action->dev_id = dev_id; |
932 | 1061 | ||
1062 | chip_bus_lock(irq, desc); | ||
933 | retval = __setup_irq(irq, desc, action); | 1063 | retval = __setup_irq(irq, desc, action); |
1064 | chip_bus_sync_unlock(irq, desc); | ||
1065 | |||
934 | if (retval) | 1066 | if (retval) |
935 | kfree(action); | 1067 | kfree(action); |
936 | 1068 | ||