Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--  kernel/irq/manage.c  112
 1 files changed, 102 insertions, 10 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d222515a5a06..bde4c667d24d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -230,9 +230,11 @@ void disable_irq_nosync(unsigned int irq)
 	if (!desc)
 		return;
 
+	chip_bus_lock(irq, desc);
 	spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -294,7 +296,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  *	matches the last disable, processing of interrupts on this
  *	IRQ line is re-enabled.
  *
- *	This function may be called from IRQ context.
+ *	This function may be called from IRQ context only when
+ *	desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
  */
 void enable_irq(unsigned int irq)
 {
@@ -304,9 +307,11 @@ void enable_irq(unsigned int irq)
 	if (!desc)
 		return;
 
+	chip_bus_lock(irq, desc);
 	spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
 
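
The chip_bus_lock()/chip_bus_sync_unlock() pair wrapped around the hunks above exists for irq chips that sit behind a slow bus (an I2C or SPI GPIO expander, for example), whose register accesses may sleep and therefore cannot happen under the desc->lock spinlock. A rough sketch of what such a chip might register is shown below; it is not part of this diff. Only the bus_lock/bus_sync_unlock member names are confirmed by the kerneldoc change above; the expander names, the cached-mask scheme, the use of get_irq_chip_data(), and the (unsigned int irq) callback signature are assumptions about this kernel generation.

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>

/* hypothetical per-chip state for an interrupt expander behind a slow bus */
struct expander_chip {
	struct mutex	lock;		/* serializes slow-bus register access */
	bool		mask_dirty;	/* mask changes cached under desc->lock */
};

/* runs before desc->lock is taken, in a context that may sleep */
static void expander_bus_lock(unsigned int irq)
{
	struct expander_chip *exp = get_irq_chip_data(irq);

	mutex_lock(&exp->lock);
}

/* runs after desc->lock is dropped; cached state is flushed over the bus here */
static void expander_bus_sync_unlock(unsigned int irq)
{
	struct expander_chip *exp = get_irq_chip_data(irq);

	if (exp->mask_dirty) {
		/* slow (sleeping) bus transfer would go here */
		exp->mask_dirty = false;
	}
	mutex_unlock(&exp->lock);
}

static struct irq_chip expander_irq_chip = {
	.name		 = "expander",
	.bus_lock	 = expander_bus_lock,
	.bus_sync_unlock = expander_bus_sync_unlock,
	/* .mask/.unmask/.ack would only update the cached state under desc->lock */
};

Because the core takes bus_lock() before desc->lock and calls bus_sync_unlock() only after dropping it, the sleeping bus transfer happens outside the spinlock, which is also why enable_irq()/disable_irq_nosync() may no longer be called from hard interrupt context when a chip provides these callbacks.
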
@@ -436,6 +441,26 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+/*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
+ */
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
+{
+	WARN(1, "Primary handler called for nested irq %d\n", irq);
+	return IRQ_NONE;
+}
+
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
 	while (!kthread_should_stop()) {
@@ -451,6 +476,23 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 	return -1;
 }
 
+/*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler finished. unmask if the interrupt has not been disabled and
+ * is marked MASKED.
+ */
+static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+{
+	chip_bus_lock(irq, desc);
+	spin_lock_irq(&desc->lock);
+	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
+		desc->status &= ~IRQ_MASKED;
+		desc->chip->unmask(irq);
+	}
+	spin_unlock_irq(&desc->lock);
+	chip_bus_sync_unlock(irq, desc);
+}
+
 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
@@ -492,7 +534,7 @@ static int irq_thread(void *data)
 	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	int wake;
+	int wake, oneshot = desc->status & IRQ_ONESHOT;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
@@ -518,6 +560,9 @@ static int irq_thread(void *data)
 			spin_unlock_irq(&desc->lock);
 
 			action->thread_fn(action->irq, action->dev_id);
+
+			if (oneshot)
+				irq_finalize_oneshot(action->irq, desc);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -565,7 +610,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
-	int shared = 0;
+	int nested, shared = 0;
 	int ret;
 
 	if (!desc)
@@ -590,10 +635,32 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		rand_initialize_irq(irq);
 	}
 
+	/* Oneshot interrupts are not allowed with shared */
+	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
+		return -EINVAL;
+
 	/*
-	 * Threaded handler ?
+	 * Check whether the interrupt nests into another interrupt
+	 * thread.
 	 */
-	if (new->thread_fn) {
+	nested = desc->status & IRQ_NESTED_THREAD;
+	if (nested) {
+		if (!new->thread_fn)
+			return -EINVAL;
+		/*
+		 * Replace the primary handler which was provided from
+		 * the driver for non nested interrupt handling by the
+		 * dummy function which warns when called.
+		 */
+		new->handler = irq_nested_primary_handler;
+	}
+
+	/*
+	 * Create a handler thread when a thread function is supplied
+	 * and the interrupt does not nest into another interrupt
+	 * thread.
+	 */
+	if (new->thread_fn && !nested) {
 		struct task_struct *t;
 
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -607,7 +674,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
-		wake_up_process(t);
 	}
 
 	/*
@@ -663,9 +729,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->status |= IRQ_PER_CPU;
 #endif
 
-		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
 				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
 
+		if (new->flags & IRQF_ONESHOT)
+			desc->status |= IRQ_ONESHOT;
+
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
@@ -690,6 +759,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
+	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
@@ -707,7 +777,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
@@ -869,7 +945,14 @@ EXPORT_SYMBOL_GPL(remove_irq);
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return;
+
+	chip_bus_lock(irq, desc);
 	kfree(__free_irq(irq, dev_id));
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -878,6 +961,8 @@ EXPORT_SYMBOL(free_irq);
  *	@irq: Interrupt line to allocate
  *	@handler: Function to be called when the IRQ occurs.
  *		  Primary handler for threaded interrupts
+ *		  If NULL and thread_fn != NULL the default
+ *		  primary handler is installed
  *	@thread_fn: Function called from the irq handler thread
  *		    If NULL, no irq thread is created
  *	@irqflags: Interrupt type flags
@@ -957,8 +1042,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 
 	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
-	if (!handler)
-		return -EINVAL;
+
+	if (!handler) {
+		if (!thread_fn)
+			return -EINVAL;
+		handler = irq_default_primary_handler;
+	}
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
@@ -970,7 +1059,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->dev_id = dev_id;
 
+	chip_bus_lock(irq, desc);
 	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(irq, desc);
+
 	if (retval)
 		kfree(action);
 
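
With the handler == NULL handling added above, a driver whose interrupt source requires sleeping bus accesses can request a purely threaded, oneshot interrupt and let the core supply the primary handler. A minimal sketch of such a request follows; it is not from this diff. request_threaded_irq(), IRQF_ONESHOT and the handler == NULL semantics are confirmed by the hunks above, while the mydev names and the probe-time helper are hypothetical.

#include <linux/interrupt.h>

/* hypothetical driver state */
struct mydev {
	unsigned int irq;
	/* device specific members omitted */
};

/*
 * Threaded handler: runs in the irq/<nr>-mydev kernel thread and may
 * sleep, e.g. to read and clear the interrupt status over I2C/SPI.
 */
static irqreturn_t mydev_irq_thread(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;

	/* slow-bus I/O to handle and acknowledge the event would go here */
	(void)dev;
	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *dev)
{
	/*
	 * handler == NULL: the core installs irq_default_primary_handler,
	 * which just returns IRQ_WAKE_THREAD. IRQF_ONESHOT keeps the line
	 * masked until mydev_irq_thread() returns, after which
	 * irq_finalize_oneshot() unmasks it. Combining IRQF_ONESHOT with
	 * IRQF_SHARED is rejected with -EINVAL by __setup_irq().
	 */
	return request_threaded_irq(dev->irq, NULL, mydev_irq_thread,
				    IRQF_ONESHOT, "mydev", dev);
}
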