Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 119
1 file changed, 108 insertions(+), 11 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0b23ff71b9b0..b75d3d2d71f8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -280,7 +280,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
                         goto err_out;
                 /* Prevent probing on this irq: */
                 desc->status = status | IRQ_NOPROBE;
-                check_irq_resend(desc, irq);
+                if (!desc->forced_threads_active)
+                        check_irq_resend(desc, irq);
                 /* fall-through */
         }
         default:
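With forced threading a oneshot line stays masked until the last handler thread for it has finished, so the hunk above makes __enable_irq() retrigger a pending interrupt only when no forced thread is still active; otherwise the unmask performed by the last finishing thread (preempt_hardirq_thread_done(), added further down) is expected to let a still-asserted level interrupt fire again by itself. A toy user-space model of that decision — all names are illustrative, none of this is kernel API:

#include <stdio.h>

/* Toy model: one bit per forced handler thread that has not finished yet. */
static unsigned long forced_threads_active;

static void enable_line(void)
{
    if (!forced_threads_active)
        printf("retrigger the pending interrupt now\n");
    else
        printf("defer: the last handler thread will unmask the line\n");
}

int main(void)
{
    forced_threads_active = 0x1;    /* one thread still running */
    enable_line();                  /* deferred */
    forced_threads_active = 0;      /* thread finished, line unmasked */
    enable_line();                  /* a retrigger would happen here */
    return 0;
}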
@@ -461,7 +462,88 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
         return IRQ_NONE;
 }
 
-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_PREEMPT_HARDIRQS
+/*
+ * If the caller does not request irq threading then the handler
+ * becomes the thread function and we use the above handler as the
+ * primary hardirq context handler.
+ */
+static void preempt_hardirq_setup(struct irqaction *new)
+{
+        if (new->thread_fn || (new->flags & (IRQF_NODELAY | IRQF_PERCPU)))
+                return;
+
+        new->flags |= IRQF_ONESHOT;
+        new->thread_fn = new->handler;
+        new->handler = irq_default_primary_handler;
+}
+
+#else
+static inline void preempt_hardirq_setup(struct irqaction *new) { }
+#endif
+
+/*
+ * forced threaded interrupts need to unmask the interrupt line
+ */
+static int preempt_hardirq_thread_done(struct irq_desc *desc,
+                                       struct irqaction *action)
+{
+        unsigned long masked;
+
+        if (!(desc->status & IRQ_ONESHOT))
+                return 0;
+again:
+        raw_spin_lock_irq(&desc->lock);
+        /*
+         * Be careful. The hardirq handler might be running on the
+         * other CPU.
+         */
+        if (desc->status & IRQ_INPROGRESS) {
+                raw_spin_unlock_irq(&desc->lock);
+                cpu_relax();
+                goto again;
+        }
+
+        /*
+         * Now check again, whether the thread should run. Otherwise
+         * we would clear the forced_threads_active bit which was just
+         * set.
+         */
+        if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) {
+                raw_spin_unlock_irq(&desc->lock);
+                return 1;
+        }
+
+        masked = desc->forced_threads_active;
+        desc->forced_threads_active &= ~action->thread_mask;
+
+        /*
+         * Unmask the interrupt line when this is the last active
+         * thread and the interrupt is not disabled.
+         */
+        if (masked && !desc->forced_threads_active &&
+            !(desc->status & IRQ_DISABLED)) {
+                if (desc->chip->unmask)
+                        desc->chip->unmask(action->irq);
+                /*
+                 * Do we need to call check_irq_resend() here ?
+                 * No. check_irq_resend needs only to be checked when
+                 * we go from IRQ_DISABLED to IRQ_ENABLED state.
+                 */
+        }
+        raw_spin_unlock_irq(&desc->lock);
+        return 0;
+}
+
+static inline void
+preempt_hardirq_cleanup(struct irq_desc *desc, struct irqaction *action)
+{
+        clear_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+        preempt_hardirq_thread_done(desc, action);
+}
+
+static int
+irq_wait_for_interrupt(struct irq_desc *desc, struct irqaction *action)
 {
         while (!kthread_should_stop()) {
                 set_current_state(TASK_INTERRUPTIBLE);
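The core of the patch is the block above: when a driver passes only a primary handler (and neither IRQF_NODELAY nor IRQF_PERCPU), preempt_hardirq_setup() turns that handler into the thread function, installs irq_default_primary_handler as the hardirq-context handler and marks the action IRQF_ONESHOT so the line stays masked until the thread has run; preempt_hardirq_thread_done() later clears this action's bit in desc->forced_threads_active and unmasks the line once the last thread is done. A compilable user-space sketch of the setup step — struct layout, flag values and the default primary handler are made-up stand-ins for the kernel definitions:

#include <stdio.h>

typedef int (*handler_t)(int irq, void *dev_id);

struct action {
    handler_t handler;      /* runs in hard interrupt context */
    handler_t thread_fn;    /* runs in the per-action handler thread */
    unsigned long flags;
};

#define F_NODELAY   0x1     /* must run in hardirq context */
#define F_PERCPU    0x2     /* per-cpu interrupt, never threaded */
#define F_ONESHOT   0x4     /* keep the line masked until the thread is done */

static int default_primary(int irq, void *dev_id)
{
    /* the real handler masks the line and returns IRQ_WAKE_THREAD */
    return 1;
}

static int driver_handler(int irq, void *dev_id)
{
    return 0;
}

static void force_thread(struct action *new)
{
    /* already threaded by the driver, or not allowed to be delayed */
    if (new->thread_fn || (new->flags & (F_NODELAY | F_PERCPU)))
        return;

    new->flags |= F_ONESHOT;
    new->thread_fn = new->handler;  /* old handler becomes the thread body */
    new->handler = default_primary; /* hardirq part only masks and wakes */
}

int main(void)
{
    struct action a = { .handler = driver_handler };

    force_thread(&a);
    printf("thread_fn is the driver handler: %d, oneshot: %d\n",
           a.thread_fn == driver_handler, !!(a.flags & F_ONESHOT));
    return 0;
}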
@@ -481,10 +563,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc,
+                                 struct irqaction *action)
 {
 again:
         chip_bus_lock(irq, desc);
+#ifndef CONFIG_PREEMPT_RT
         raw_spin_lock_irq(&desc->lock);
 
         /*
@@ -508,6 +592,9 @@ again:
                 desc->chip->unmask(irq);
         }
         raw_spin_unlock_irq(&desc->lock);
+#else
+        preempt_hardirq_thread_done(desc, action);
+#endif
         chip_bus_sync_unlock(irq, desc);
 }
 
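Under PREEMPT_RT, irq_finalize_oneshot() now hands the unmasking over to preempt_hardirq_thread_done(), which has to tolerate the primary handler still running on another CPU: it takes desc->lock, and while IRQ_INPROGRESS is set it drops the lock, calls cpu_relax() and retries, before clearing this action's bit and, if it was the last one and the line is not disabled, unmasking. The same lock-drop-and-retry shape as an ordinary user-space sketch, with pthreads standing in for the raw spinlock and cpu_relax() — nothing here is kernel API:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool hardirq_in_progress;        /* stands in for IRQ_INPROGRESS */
static unsigned long active_bits = 0x1; /* stands in for forced_threads_active */

static void thread_done(unsigned long my_bit)
{
again:
    pthread_mutex_lock(&lock);
    if (hardirq_in_progress) {
        /* primary handler may still be running elsewhere: back off, retry */
        pthread_mutex_unlock(&lock);
        sched_yield();                  /* cpu_relax() equivalent */
        goto again;
    }
    active_bits &= ~my_bit;
    if (!active_bits)
        printf("last thread done, unmask the line\n");
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    thread_done(0x1);
    return 0;
}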
@@ -552,12 +639,13 @@ static int irq_thread(void *data)
         struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
         struct irqaction *action = data;
         struct irq_desc *desc = irq_to_desc(action->irq);
-        int wake, oneshot = desc->status & IRQ_ONESHOT;
+        int wake;
 
         sched_setscheduler(current, SCHED_FIFO, &param);
+        current->extra_flags |= PFE_HARDIRQ;
         current->irqaction = action;
 
-        while (!irq_wait_for_interrupt(action)) {
+        while (!irq_wait_for_interrupt(desc, action)) {
 
                 irq_thread_check_affinity(desc, action);
 
@@ -579,8 +667,8 @@ static int irq_thread(void *data)
 
                 action->thread_fn(action->irq, action->dev_id);
 
-                if (oneshot)
-                        irq_finalize_oneshot(action->irq, desc);
+                if (desc->status & IRQ_ONESHOT)
+                        irq_finalize_oneshot(action->irq, desc, action);
         }
 
         wake = atomic_dec_and_test(&desc->threads_active);
@@ -589,6 +677,8 @@ static int irq_thread(void *data)
                 wake_up(&desc->wait_for_threads);
         }
 
+        preempt_hardirq_cleanup(desc, action);
+
         /*
          * Clear irqaction. Otherwise exit_irq_thread() would make
          * fuzz about an active irq thread going into nirvana.
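Taken together, the thread side now works as follows: irq_wait_for_interrupt() sleeps until the primary handler marks the action runnable, irq_thread() runs thread_fn() and finalizes the oneshot masking, and on the way out preempt_hardirq_cleanup() clears a possibly pending IRQTF_RUNTHREAD and drops this action's mask bit. A rough, compilable user-space outline of that loop, with a condition variable in place of the kthread machinery and the IRQTF_RUNTHREAD flag — an illustrative model, not the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sim_action {
    pthread_mutex_t lock;
    pthread_cond_t  wake;
    bool run_thread;            /* the primary handler sets this */
    bool should_stop;           /* kthread_should_stop() stand-in */
    void (*thread_fn)(void);
    bool oneshot;
};

static bool wait_for_interrupt(struct sim_action *a)
{
    bool stop;

    pthread_mutex_lock(&a->lock);
    while (!a->run_thread && !a->should_stop)
        pthread_cond_wait(&a->wake, &a->lock);
    stop = a->should_stop && !a->run_thread;    /* finish pending work first */
    a->run_thread = false;
    pthread_mutex_unlock(&a->lock);
    return stop;
}

static void *irq_thread_sim(void *data)
{
    struct sim_action *a = data;

    while (!wait_for_interrupt(a)) {
        a->thread_fn();                          /* the forced-threaded handler */
        if (a->oneshot)
            puts("finalize: clear our bit, unmask if we were the last");
    }
    puts("exiting: run the cleanup path");       /* preempt_hardirq_cleanup() */
    return NULL;
}

static void fake_handler(void) { puts("handler body runs in thread context"); }

int main(void)
{
    struct sim_action a = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .wake = PTHREAD_COND_INITIALIZER,
        .thread_fn = fake_handler,
        .oneshot = true,
    };
    pthread_t t;

    pthread_create(&t, NULL, irq_thread_sim, &a);

    pthread_mutex_lock(&a.lock);        /* simulate one interrupt */
    a.run_thread = true;
    pthread_cond_signal(&a.wake);
    pthread_mutex_unlock(&a.lock);

    pthread_mutex_lock(&a.lock);        /* then ask the thread to stop */
    a.should_stop = true;
    pthread_cond_signal(&a.wake);
    pthread_mutex_unlock(&a.lock);

    pthread_join(t, NULL);
    return 0;
}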
@@ -627,7 +717,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
         struct irqaction *old, **old_ptr;
         const char *old_name = NULL;
-        unsigned long flags;
+        unsigned long flags, thread_mask = 0;
         int nested, shared = 0;
         int ret;
 
@@ -653,9 +743,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 rand_initialize_irq(irq);
         }
 
-        /* Oneshot interrupts are not allowed with shared */
-        if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-                return -EINVAL;
+        /* Preempt-RT setup for forced threading */
+        preempt_hardirq_setup(new);
 
         /*
          * Check whether the interrupt nests into another interrupt
@@ -722,12 +811,20 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                 /* add new interrupt at end of irq queue */
                 do {
+                        thread_mask |= old->thread_mask;
                         old_ptr = &old->next;
                         old = *old_ptr;
                 } while (old);
                 shared = 1;
         }
 
+        /*
+         * Setup the thread mask for this irqaction. No risk that ffz
+         * will fail. If we have 32 resp. 64 devices sharing one irq
+         * then .....
+         */
+        new->thread_mask = 1 << ffz(thread_mask);
+
         if (!shared) {
                 irq_chip_set_defaults(desc->chip);
 
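Every action sharing the line is handed the first free bit of the accumulated thread_mask, which is what lets desc->forced_threads_active record per-action completion and makes oneshot handling compatible with shared interrupts; since a long only has 32 resp. 64 bits, the scheme can distinguish at most that many forced-threaded actions on one line, which is presumably what the unfinished comment above alludes to. A small user-space demo of the bit allocation — ffz_demo() uses a compiler builtin as a stand-in for the kernel's ffz():

#include <stdio.h>

/* first zero bit of word; word must not be all ones */
static unsigned long ffz_demo(unsigned long word)
{
    return __builtin_ctzl(~word);
}

int main(void)
{
    unsigned long thread_mask = 0;  /* OR of the masks of already installed actions,
                                       as accumulated by the do {} while (old) loop */
    unsigned long new_mask;
    int i;

    for (i = 0; i < 4; i++) {
        new_mask = 1UL << ffz_demo(thread_mask);
        printf("action %d gets thread_mask %#lx\n", i, new_mask);
        thread_mask |= new_mask;
    }
    return 0;
}

With every installed action owning a distinct bit, preempt_hardirq_thread_done() can tell exactly when the last outstanding thread for the line has finished and the hardware may be unmasked again.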