author     Thomas Gleixner <tglx@linutronix.de>  2011-02-23 18:52:23 -0500
committer  Thomas Gleixner <tglx@linutronix.de>  2011-02-26 05:57:18 -0500
commit     8d32a307e4faa8b123dc8a9cd56d1a7525f69ad3
tree       50141928db9f0ac1377dd8bb355d600232d6b38c /kernel/irq
parent     8eb90c30e0e815a1308828352eabd03ca04229dd
genirq: Provide forced interrupt threading
Add a commandline parameter "threadirqs" which forces all interrupts except
those marked IRQF_NO_THREAD to run threaded. That's mostly a debug option to
allow retrieving better debug data from crashing interrupt handlers. If
"threadirqs" is not enabled on the kernel command line, then there is no
impact in the interrupt hotpath.

Architecture code needs to select CONFIG_IRQ_FORCED_THREADING after marking
the interrupts which can't be threaded IRQF_NO_THREAD. All interrupts which
have IRQF_TIMER set are implicitly marked IRQF_NO_THREAD. Also all PER_CPU
interrupts are excluded.

Forced threading of hard interrupts also forces all soft interrupt handling
into thread context. When enabled it might slow down things a bit, but for
debugging problems in interrupt code it's a reasonable penalty as it does not
immediately crash and burn the machine when an interrupt handler is buggy.

Some test results on a Core2Duo machine:

Cache cold run of:
# time git grep irq_desc

           non-threaded    threaded
real       1m18.741s       1m19.061s
user       0m1.874s        0m1.757s
sys        0m5.843s        0m5.427s

# iperf -c server
non-threaded
[ 3]  0.0-10.0 sec  1.09 GBytes  933 Mbits/sec
[ 3]  0.0-10.0 sec  1.09 GBytes  934 Mbits/sec
[ 3]  0.0-10.0 sec  1.09 GBytes  933 Mbits/sec

threaded
[ 3]  0.0-10.0 sec  1.09 GBytes  939 Mbits/sec
[ 3]  0.0-10.0 sec  1.09 GBytes  934 Mbits/sec
[ 3]  0.0-10.0 sec  1.09 GBytes  937 Mbits/sec

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20110223234956.772668648@linutronix.de>
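As an illustration of the driver-side contract described above (not part of the
patch): a handler that must never be pushed into a thread is requested with
IRQF_NO_THREAD, and everything else is left alone. In the sketch below the
foo_* names, the IRQ number and the device cookie are made up; only
request_irq(), irqreturn_t, IRQ_HANDLED and the IRQF_NO_THREAD flag are the
existing kernel interfaces this relies on.

#include <linux/interrupt.h>

/* Hypothetical low-level handler that must stay in hard irq context. */
static irqreturn_t foo_hardirq_handler(int irq, void *dev_id)
{
	/* Acknowledge the hardware; no sleeping is allowed here. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *dev)
{
	/*
	 * IRQF_NO_THREAD keeps this handler in hard interrupt context
	 * even when the kernel is booted with "threadirqs". IRQF_TIMER
	 * and per-CPU interrupts are excluded from forced threading
	 * automatically, so they do not need the flag.
	 */
	return request_irq(irq, foo_hardirq_handler, IRQF_NO_THREAD,
			   "foo", dev);
}

All other handlers keep using plain request_irq() as before; when "threadirqs"
is on the command line they are converted by irq_setup_forced_threading() in
the patch below.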
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Kconfig       3
-rw-r--r--  kernel/irq/internals.h   2
-rw-r--r--  kernel/irq/manage.c     67
3 files changed, 68 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 355b8c7957f5..144db9dcfcde 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -38,6 +38,9 @@ config HARDIRQS_SW_RESEND
 config IRQ_PREFLOW_FASTEOI
 	bool
 
+config IRQ_FORCED_THREADING
+	bool
+
 config SPARSE_IRQ
 	bool "Support sparse irq numbering"
 	depends on HAVE_SPARSE_IRQ
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 935bec4bfa87..6c6ec9a49027 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -27,12 +27,14 @@ extern int noirqdebug;
  * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
  * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
+ * IRQTF_FORCED_THREAD - irq action is force threaded
  */
 enum {
 	IRQTF_RUNTHREAD,
 	IRQTF_DIED,
 	IRQTF_WARNED,
 	IRQTF_AFFINITY,
+	IRQTF_FORCED_THREAD,
 };
 
 /*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 58c861367300..acd599a43bfb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+	force_irqthreads = true;
+	return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
@@ -702,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
 /*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	local_bh_disable();
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+	local_bh_enable();
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
@@ -711,8 +748,15 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
+	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
 	int wake;
 
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+					 &action->thread_flags))
+		handler_fn = irq_forced_thread_fn;
+	else
+		handler_fn = irq_thread_fn;
+
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
 
@@ -736,10 +780,7 @@ static int irq_thread(void *data)
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
 			raw_spin_unlock_irq(&desc->lock);
-
-			action->thread_fn(action->irq, action->dev_id);
-
-			irq_finalize_oneshot(desc, action, false);
+			handler_fn(desc, action);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -789,6 +830,22 @@ void exit_irq_thread(void)
 	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+	if (!force_irqthreads)
+		return;
+	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+		return;
+
+	new->flags |= IRQF_ONESHOT;
+
+	if (!new->thread_fn) {
+		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+		new->thread_fn = new->handler;
+		new->handler = irq_default_primary_handler;
+	}
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -838,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * dummy function which warns when called.
 		 */
 		new->handler = irq_nested_primary_handler;
+	} else {
+		irq_setup_forced_threading(new);
 	}
 
 	/*