author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:28:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:28:56 -0400
commit	0bbfcaff9b2a69c71a95e6902253487ab30cb498 (patch)
tree	f422bbfb8ba57909bf6e43f1d560b3f056cc3c61
parent	5928a2b60cfdbad730f93696acab142d0b607280 (diff)
parent	e04268b0effc0ceea366c50b3107baad9edadafa (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq/core changes for v3.4 from Ingo Molnar.

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Remove paranoid warnons and bogus fixups
  genirq: Flush the irq thread on synchronization
  genirq: Get rid of unnecessary IRQTF_DIED flag
  genirq: No need to check IRQTF_DIED before stopping a thread handler
  genirq: Get rid of unnecessary irqaction field in task_struct
  genirq: Fix incorrect check for forced IRQ thread handler
  softirq: Reduce invoke_softirq() code duplication
  genirq: Fix long-term regression in genirq irq_set_irq_type() handling
  x86-32/irq: Don't switch to irq stack for a user-mode irq
-rw-r--r--	arch/x86/kernel/irq_32.c	11
-rw-r--r--	include/linux/sched.h	10
-rw-r--r--	kernel/exit.c	4
-rw-r--r--	kernel/irq/chip.c	3
-rw-r--r--	kernel/irq/handle.c	14
-rw-r--r--	kernel/irq/internals.h	2
-rw-r--r--	kernel/irq/manage.c	86
-rw-r--r--	kernel/softirq.c	18
8 files changed, 66 insertions(+), 82 deletions(-)
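
For orientation: everything this series touches hangs off the threaded-interrupt support behind request_threaded_irq(). A minimal driver sketch of that API (the device name and handler bodies are invented for illustration, not taken from this diff):

	#include <linux/interrupt.h>

	/* Hard half: hard-irq context; quiesce the device, defer the work. */
	static irqreturn_t mydev_hardirq(int irq, void *dev_id)
	{
		/* ... mask/ack the device interrupt ... */
		return IRQ_WAKE_THREAD;	/* this return value drives irq_wake_thread() */
	}

	/* Threaded half: runs in the "irq/<nr>-mydev" kernel thread. */
	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
	{
		/* ... slow, possibly sleeping work ... */
		return IRQ_HANDLED;
	}

	/* In probe(): */
	err = request_threaded_irq(irq, mydev_hardirq, mydev_thread_fn,
				   IRQF_ONESHOT, "mydev", dev);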
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 40fc86161d92..58b7f27cb3e9 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -100,13 +100,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
-	/*
-	 * Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context.
-	 */
-	irqctx->tinfo.preempt_count =
-		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -196,7 +191,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	if (unlikely(!desc))
 		return false;
 
-	if (!execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		desc->handle_irq(irq, desc);
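
The handle_irq() change skips the irq-stack switch when the interrupt arrived from user mode: the kernel stack then holds only the pt_regs frame, so there is no depth worth saving. For reference, the 32-bit user_mode_vm() test is roughly the following (paraphrased from asm/ptrace.h of this era; a sketch, not part of this diff):

	/* True if regs were pushed from user space, treating vm86 mode as user. */
	static inline int user_mode_vm(struct pt_regs *regs)
	{
		return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK))
			>= USER_RPL;
	}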
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f58889b8a608..e345163da657 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1319,6 +1319,11 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	unsigned irq_thread:1;
+#endif
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1427,11 +1432,6 @@ struct task_struct {
  * mempolicy */
 	spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	struct irqaction *irqaction;
-#endif
-
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
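
The replacement flag is placed beside the other one-bit members so it packs into a bitfield word task_struct already pays for, whereas the removed irqaction pointer cost a full word per task. A toy illustration of the packing effect (not kernel code):

	struct packed_flags {		/* adjacent 1-bit fields share one word */
		unsigned a:1;
		unsigned b:1;
		unsigned irq_thread:1;
	};

	struct with_pointer {		/* the pointer always costs sizeof(void *) */
		unsigned a:1;
		struct irqaction *irqaction;
	};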
diff --git a/kernel/exit.c b/kernel/exit.c
index 4b4042f9bc6a..752d2c0abd19 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -935,8 +935,6 @@ void do_exit(long code)
 		schedule();
 	}
 
-	exit_irq_thread();
-
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
@@ -945,6 +943,8 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
+	exit_irq_thread();
+
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, task_pid_nr(current),
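
The call moves below exit_signals() because the wake side now keys off PF_EXITING instead of the removed IRQTF_DIED flag: once PF_EXITING is set, irq_wake_thread() (see the kernel/irq/handle.c hunk below) refuses to wake the dying thread, so exit_irq_thread() can flush the accounting without racing a late wakeup. The guard it relies on, quoted from that hunk:

	/* kernel/irq/handle.c after this series: never wake an exiting thread. */
	if ((action->thread->flags & PF_EXITING) ||
	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;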
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fb7db75ee0c8..25784d630a12 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -61,8 +61,7 @@ int irq_set_irq_type(unsigned int irq, unsigned int type)
 		return -EINVAL;
 
 	type &= IRQ_TYPE_SENSE_MASK;
-	if (type != IRQ_TYPE_NONE)
-		ret = __irq_set_trigger(desc, irq, type);
+	ret = __irq_set_trigger(desc, irq, type);
 	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
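
Dropping the IRQ_TYPE_NONE early-out is the "long-term regression" fix from the shortlog: bailing out meant a caller could never restore a line's default/none trigger, because the chip's irq_set_type() callback was never reached. A hedged usage sketch (the irq number is hypothetical):

	/* Program an edge trigger, then hand the line back to its default
	 * sense. Before this fix the second call returned 0 without ever
	 * reaching __irq_set_trigger(); now IRQ_TYPE_NONE passes through. */
	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
	/* ... later ... */
	ret = irq_set_irq_type(irq, IRQ_TYPE_NONE);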
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 470d08c82bbe..6ff84e6a954c 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -60,7 +60,7 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	 * device interrupt, so no irq storm is lurking. If the
 	 * RUNTHREAD bit is already set, nothing to do.
 	 */
-	if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+	if ((action->thread->flags & PF_EXITING) ||
 	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		return;
 
@@ -110,6 +110,18 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	 * threads_oneshot untouched and runs the thread another time.
 	 */
 	desc->threads_oneshot |= action->thread_mask;
+
+	/*
+	 * We increment the threads_active counter in case we wake up
+	 * the irq thread. The irq thread decrements the counter when
+	 * it returns from the handler or in the exit path and wakes
+	 * up waiters which are stuck in synchronize_irq() when the
+	 * active count becomes zero. synchronize_irq() is serialized
+	 * against this code (hard irq handler) via IRQS_INPROGRESS
+	 * like the finalize_oneshot() code. See comment above.
+	 */
+	atomic_inc(&desc->threads_active);
+
 	wake_up_process(action->thread);
 }
 
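
The counter incremented above is what "genirq: Flush the irq thread on synchronization" builds on: synchronize_irq() sleeps until threads_active drains to zero, and wake_threads_waitq() (added in the kernel/irq/manage.c hunk below) is the matching wakeup. The waiting tail looks roughly like this (a trimmed sketch of the v3.4-era kernel/irq/manage.c, not part of this diff):

	void synchronize_irq(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		if (!desc)
			return;

		/* ... first spin until no hard-irq handler is in progress ... */

		/* Then sleep until every woken handler thread has finished. */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}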
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 40378ff877e7..8e5c56b3b7d9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -20,14 +20,12 @@ extern bool noirqdebug;
 /*
  * Bits used by threaded handlers:
  * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
  * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
  * IRQTF_FORCED_THREAD - irq action is force threaded
  */
 enum {
 	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
 	IRQTF_WARNED,
 	IRQTF_AFFINITY,
 	IRQTF_FORCED_THREAD,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0f0d4704ddd8..b0ccd1ac2d6a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -759,6 +759,13 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 	return ret;
 }
 
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+	if (atomic_dec_and_test(&desc->threads_active) &&
+	    waitqueue_active(&desc->wait_for_threads))
+		wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -771,57 +778,41 @@ static int irq_thread(void *data)
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
-	int wake;
 
-	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 					&action->thread_flags))
 		handler_fn = irq_forced_thread_fn;
 	else
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irqaction = action;
+	current->irq_thread = 1;
 
 	while (!irq_wait_for_interrupt(action)) {
+		irqreturn_t action_ret;
 
 		irq_thread_check_affinity(desc, action);
 
-		atomic_inc(&desc->threads_active);
+		action_ret = handler_fn(desc, action);
+		if (!noirqdebug)
+			note_interrupt(action->irq, desc, action_ret);
 
-		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-			/*
-			 * CHECKME: We might need a dedicated
-			 * IRQ_THREAD_PENDING flag here, which
-			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQS_PENDING should be fine as it
-			 * retriggers the interrupt itself --- tglx
-			 */
-			desc->istate |= IRQS_PENDING;
-			raw_spin_unlock_irq(&desc->lock);
-		} else {
-			irqreturn_t action_ret;
-
-			raw_spin_unlock_irq(&desc->lock);
-			action_ret = handler_fn(desc, action);
-			if (!noirqdebug)
-				note_interrupt(action->irq, desc, action_ret);
-		}
-
-		wake = atomic_dec_and_test(&desc->threads_active);
-
-		if (wake && waitqueue_active(&desc->wait_for_threads))
-			wake_up(&desc->wait_for_threads);
+		wake_threads_waitq(desc);
 	}
 
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
-
 	/*
-	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * This is the regular exit path. __free_irq() is stopping the
+	 * thread via kthread_stop() after calling
+	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+	 * oneshot mask bit can be set. We cannot verify that as we
+	 * cannot touch the oneshot mask at this point anymore as
+	 * __setup_irq() might have given out currents thread_mask
+	 * again.
+	 *
+	 * Clear irq_thread. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irqaction = NULL;
+	current->irq_thread = 0;
 	return 0;
 }
 
@@ -832,27 +823,28 @@ void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
 	struct irq_desc *desc;
+	struct irqaction *action;
 
-	if (!tsk->irqaction)
+	if (!tsk->irq_thread)
 		return;
 
+	action = kthread_data(tsk);
+
 	printk(KERN_ERR
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
 
-	desc = irq_to_desc(tsk->irqaction->irq);
+	desc = irq_to_desc(action->irq);
 
 	/*
-	 * Prevent a stale desc->threads_oneshot. Must be called
-	 * before setting the IRQTF_DIED flag.
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
 	 */
-	irq_finalize_oneshot(desc, tsk->irqaction, true);
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
 
-	/*
-	 * Set the THREAD DIED flag to prevent further wakeups of the
-	 * soon to be gone threaded handler.
-	 */
-	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
 }
 
 static void irq_setup_forced_threading(struct irqaction *new)
@@ -1135,8 +1127,7 @@ out_thread:
 		struct task_struct *t = new->thread;
 
 		new->thread = NULL;
-		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
-			kthread_stop(t);
+		kthread_stop(t);
 		put_task_struct(t);
 	}
 out_mput:
@@ -1246,8 +1237,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
 	if (action->thread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(action->thread);
+		kthread_stop(action->thread);
 		put_task_struct(action->thread);
 	}
 
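
With task_struct down to a one-bit irq_thread flag, exit_irq_thread() recovers the irqaction through kthread_data(), which returns the data pointer the kthread was created with. That works because genirq spawns the handler thread with its action as payload; roughly (a sketch of the v3.4-era __setup_irq(), from memory):

	/* __setup_irq(): the action becomes the thread's kthread data ... */
	t = kthread_create(irq_thread, new, "irq/%d-%s", irq, new->name);

	/* ... so from inside that thread (including do_exit() running in it): */
	struct irqaction *action = kthread_data(current);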
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4eb3a0fa351e..c82d95a022ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -310,31 +310,21 @@ void irq_enter(void)
 	__irq_enter();
 }
 
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
+	if (!force_irqthreads) {
+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 		__do_softirq();
-	else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
-		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
-}
 #else
-static inline void invoke_softirq(void)
-{
-	if (!force_irqthreads)
 		do_softirq();
-	else {
+#endif
+	} else {
 		__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
 		__local_bh_enable(SOFTIRQ_OFFSET);
 	}
 }
-#endif
 
 /*
  * Exit an interrupt context. Process softirqs if needed and possible: