diff options
-rw-r--r-- | Documentation/kernel-parameters.txt | 4 | ||||
-rw-r--r-- | include/linux/interrupt.h | 7 | ||||
-rw-r--r-- | kernel/irq/Kconfig | 3 | ||||
-rw-r--r-- | kernel/irq/internals.h | 2 | ||||
-rw-r--r-- | kernel/irq/manage.c | 67 | ||||
-rw-r--r-- | kernel/softirq.c | 16 |
6 files changed, 93 insertions, 6 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 89835a4766a6..cac6cf9a588c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2436,6 +2436,10 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2436 | <deci-seconds>: poll all this frequency | 2436 | <deci-seconds>: poll all this frequency |
2437 | 0: no polling (default) | 2437 | 0: no polling (default) |
2438 | 2438 | ||
2439 | threadirqs [KNL] | ||
2440 | Force threading of all interrupt handlers except those | ||
2441 | marked explicitly IRQF_NO_THREAD. ||
2442 | |||
2439 | topology= [S390] | 2443 | topology= [S390] |
2440 | Format: {off | on} | 2444 | Format: {off | on} |
2441 | Specify if the kernel should make use of the cpu | 2445 | Specify if the kernel should make use of the cpu |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0fc3eb9397b4..f8a8af108e0c 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -383,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq) | |||
383 | } | 383 | } |
384 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 384 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
385 | 385 | ||
386 | |||
387 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
388 | extern bool force_irqthreads; | ||
389 | #else | ||
390 | #define force_irqthreads (0) | ||
391 | #endif | ||
392 | |||
386 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | 393 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
387 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | 394 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) |
388 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 395 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 355b8c7957f5..144db9dcfcde 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -38,6 +38,9 @@ config HARDIRQS_SW_RESEND | |||
38 | config IRQ_PREFLOW_FASTEOI | 38 | config IRQ_PREFLOW_FASTEOI |
39 | bool | 39 | bool |
40 | 40 | ||
41 | config IRQ_FORCED_THREADING | ||
42 | bool | ||
43 | |||
41 | config SPARSE_IRQ | 44 | config SPARSE_IRQ |
42 | bool "Support sparse irq numbering" | 45 | bool "Support sparse irq numbering" |
43 | depends on HAVE_SPARSE_IRQ | 46 | depends on HAVE_SPARSE_IRQ |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 935bec4bfa87..6c6ec9a49027 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -27,12 +27,14 @@ extern int noirqdebug; | |||
27 | * IRQTF_DIED - handler thread died | 27 | * IRQTF_DIED - handler thread died |
28 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | 28 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed |
29 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | 29 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity |
30 | * IRQTF_FORCED_THREAD - irq action is force threaded | ||
30 | */ | 31 | */ |
31 | enum { | 32 | enum { |
32 | IRQTF_RUNTHREAD, | 33 | IRQTF_RUNTHREAD, |
33 | IRQTF_DIED, | 34 | IRQTF_DIED, |
34 | IRQTF_WARNED, | 35 | IRQTF_WARNED, |
35 | IRQTF_AFFINITY, | 36 | IRQTF_AFFINITY, |
37 | IRQTF_FORCED_THREAD, | ||
36 | }; | 38 | }; |
37 | 39 | ||
38 | /* | 40 | /* |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 58c861367300..acd599a43bfb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -17,6 +17,17 @@ | |||
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
21 | __read_mostly bool force_irqthreads; | ||
22 | |||
23 | static int __init setup_forced_irqthreads(char *arg) | ||
24 | { | ||
25 | force_irqthreads = true; | ||
26 | return 0; | ||
27 | } | ||
28 | early_param("threadirqs", setup_forced_irqthreads); | ||
29 | #endif | ||
30 | |||
20 | /** | 31 | /** |
21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
22 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
@@ -702,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |||
702 | #endif | 713 | #endif |
703 | 714 | ||
704 | /* | 715 | /* |
716 | * Interrupts which are not explicitly requested as threaded ||
717 | * interrupts rely on the implicit bh/preempt disable of the hard irq | ||
718 | * context. So we need to disable bh here to avoid deadlocks and other | ||
719 | * side effects. | ||
720 | */ | ||
721 | static void | ||
722 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
723 | { | ||
724 | local_bh_disable(); | ||
725 | action->thread_fn(action->irq, action->dev_id); | ||
726 | irq_finalize_oneshot(desc, action, false); | ||
727 | local_bh_enable(); | ||
728 | } | ||
729 | |||
730 | /* | ||
731 | * Interrupts explicitly requested as threaded interrupts want to be ||
732 | * preemptible - many of them need to sleep and wait for slow busses to ||
733 | * complete. | ||
734 | */ | ||
735 | static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
736 | { | ||
737 | action->thread_fn(action->irq, action->dev_id); | ||
738 | irq_finalize_oneshot(desc, action, false); | ||
739 | } | ||
740 | |||
741 | /* | ||
705 | * Interrupt handler thread | 742 | * Interrupt handler thread |
706 | */ | 743 | */ |
707 | static int irq_thread(void *data) | 744 | static int irq_thread(void *data) |
@@ -711,8 +748,15 @@ static int irq_thread(void *data) | |||
711 | }; | 748 | }; |
712 | struct irqaction *action = data; | 749 | struct irqaction *action = data; |
713 | struct irq_desc *desc = irq_to_desc(action->irq); | 750 | struct irq_desc *desc = irq_to_desc(action->irq); |
751 | void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); | ||
714 | int wake; | 752 | int wake; |
715 | 753 | ||
754 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, ||
755 | &action->thread_flags)) | ||
756 | handler_fn = irq_forced_thread_fn; | ||
757 | else | ||
758 | handler_fn = irq_thread_fn; | ||
759 | |||
716 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 760 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
717 | current->irqaction = action; | 761 | current->irqaction = action; |
718 | 762 | ||
@@ -736,10 +780,7 @@ static int irq_thread(void *data) | |||
736 | raw_spin_unlock_irq(&desc->lock); | 780 | raw_spin_unlock_irq(&desc->lock); |
737 | } else { | 781 | } else { |
738 | raw_spin_unlock_irq(&desc->lock); | 782 | raw_spin_unlock_irq(&desc->lock); |
739 | 783 | handler_fn(desc, action); | |
740 | action->thread_fn(action->irq, action->dev_id); | ||
741 | |||
742 | irq_finalize_oneshot(desc, action, false); | ||
743 | } | 784 | } |
744 | 785 | ||
745 | wake = atomic_dec_and_test(&desc->threads_active); | 786 | wake = atomic_dec_and_test(&desc->threads_active); |
@@ -789,6 +830,22 @@ void exit_irq_thread(void) | |||
789 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | 830 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); |
790 | } | 831 | } |
791 | 832 | ||
833 | static void irq_setup_forced_threading(struct irqaction *new) | ||
834 | { | ||
835 | if (!force_irqthreads) | ||
836 | return; | ||
837 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | ||
838 | return; | ||
839 | |||
840 | new->flags |= IRQF_ONESHOT; | ||
841 | |||
842 | if (!new->thread_fn) { | ||
843 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
844 | new->thread_fn = new->handler; | ||
845 | new->handler = irq_default_primary_handler; | ||
846 | } | ||
847 | } | ||
848 | |||
792 | /* | 849 | /* |
793 | * Internal function to register an irqaction - typically used to | 850 | * Internal function to register an irqaction - typically used to |
794 | * allocate special interrupts that are part of the architecture. | 851 | * allocate special interrupts that are part of the architecture. |
@@ -838,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
838 | * dummy function which warns when called. | 895 | * dummy function which warns when called. |
839 | */ | 896 | */ |
840 | new->handler = irq_nested_primary_handler; | 897 | new->handler = irq_nested_primary_handler; |
898 | } else { | ||
899 | irq_setup_forced_threading(new); | ||
841 | } | 900 | } |
842 | 901 | ||
843 | /* | 902 | /* |
diff --git a/kernel/softirq.c b/kernel/softirq.c index c0490464e92f..a33fb2911248 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -311,9 +311,21 @@ void irq_enter(void) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED | 313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
314 | # define invoke_softirq() __do_softirq() | 314 | static inline void invoke_softirq(void) |
315 | { | ||
316 | if (!force_irqthreads) | ||
317 | __do_softirq(); | ||
318 | else | ||
319 | wakeup_softirqd(); | ||
320 | } | ||
315 | #else | 321 | #else |
316 | # define invoke_softirq() do_softirq() | 322 | static inline void invoke_softirq(void) |
323 | { | ||
324 | if (!force_irqthreads) | ||
325 | do_softirq(); | ||
326 | else | ||
327 | wakeup_softirqd(); | ||
328 | } | ||
317 | #endif | 329 | #endif |
318 | 330 | ||
319 | /* | 331 | /* |