about summary refs log tree commit diff stats
path: root/kernel/softirq.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-07-24 04:22:02 -0400
committerThomas Gleixner <tglx@linutronix.de>2009-07-24 05:41:26 -0400
commit514e0e295511c6a4a54eb0228ccbb519162cc088 (patch)
tree8787cdc7e2fc3e19335b3c29bbb4cdceae549f97 /kernel/softirq.c
parent030dc4adec445a7a670ef7a4ef664f83f3943c30 (diff)
sched: Debug missed preemption checks
Developers use preempt_enable_no_resched() in places where the code calls schedule() immediately, which is correct. But there are places where preempt_enable_no_resched() is not followed by schedule(). Add debug infrastructure to find the offending code. The identified correct users are converted to use __preempt_enable_no_resched(). For the ever repeating "preempt_enable_no_resched(); schedule();" sequences a convenience macro preempt_enable_and_schedule() is introduced. Based on a previous patch from Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--kernel/softirq.c7
1 file changed, 3 insertions, 4 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 5965d9f48fe9..07d2eba650b1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -308,7 +308,7 @@ void irq_exit(void)
308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) 308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
309 tick_nohz_stop_sched_tick(0); 309 tick_nohz_stop_sched_tick(0);
310#endif 310#endif
311 preempt_enable_no_resched(); 311 __preempt_enable_no_resched();
312} 312}
313 313
314/* 314/*
@@ -642,8 +642,7 @@ static int ksoftirqd(void * __bind_cpu)
642 while (!kthread_should_stop()) { 642 while (!kthread_should_stop()) {
643 preempt_disable(); 643 preempt_disable();
644 if (!local_softirq_pending()) { 644 if (!local_softirq_pending()) {
645 preempt_enable_no_resched(); 645 preempt_enable_and_schedule();
646 schedule();
647 preempt_disable(); 646 preempt_disable();
648 } 647 }
649 648
@@ -656,7 +655,7 @@ static int ksoftirqd(void * __bind_cpu)
656 if (cpu_is_offline((long)__bind_cpu)) 655 if (cpu_is_offline((long)__bind_cpu))
657 goto wait_to_die; 656 goto wait_to_die;
658 do_softirq(); 657 do_softirq();
659 preempt_enable_no_resched(); 658 __preempt_enable_no_resched();
660 cond_resched(); 659 cond_resched();
661 preempt_disable(); 660 preempt_disable();
662 rcu_qsctr_inc((long)__bind_cpu); 661 rcu_qsctr_inc((long)__bind_cpu);