aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-12-29 19:48:13 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2006-12-30 13:56:41 -0500
commit9414232fa0cc28e2f51b8c76d260f2748f7953fc (patch)
tree71717c2907f4fe2f50aba355e8cf03f3a2a8b385 /kernel/sched.c
parentd449db98d5d7d90f29f9f6e091b0e1d996184df1 (diff)
[PATCH] sched: fix cond_resched_softirq() offset
Remove the __resched_legal() check: it is conceptually broken. The biggest problem it had is that it can mask buggy cond_resched() calls. A cond_resched() call is only legal if we are not in an atomic context, with two narrow exceptions: - if the system is booting - a reacquire_kernel_lock() down() done while PREEMPT_ACTIVE is set But __resched_legal() hid this and just silently returned whenever these primitives were called from invalid contexts. (Same goes for cond_resched_locked() and cond_resched_softirq()). Furthermore, the __legal_resched(0) call was buggy in that it caused unnecessarily long softirq latencies via cond_resched_softirq(). (which is only called from softirq-off sections, hence the code did nothing.) The fix is to resurrect the efficiency of the might_sleep checks and to only allow the narrow exceptions. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c18
1 file changed, 4 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b515e3caad7f..3df33da0dafc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4617,17 +4617,6 @@ asmlinkage long sys_sched_yield(void)
4617 return 0; 4617 return 0;
4618} 4618}
4619 4619
4620static inline int __resched_legal(int expected_preempt_count)
4621{
4622#ifdef CONFIG_PREEMPT
4623 if (unlikely(preempt_count() != expected_preempt_count))
4624 return 0;
4625#endif
4626 if (unlikely(system_state != SYSTEM_RUNNING))
4627 return 0;
4628 return 1;
4629}
4630
4631static void __cond_resched(void) 4620static void __cond_resched(void)
4632{ 4621{
4633#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 4622#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -4647,7 +4636,8 @@ static void __cond_resched(void)
4647 4636
4648int __sched cond_resched(void) 4637int __sched cond_resched(void)
4649{ 4638{
4650 if (need_resched() && __resched_legal(0)) { 4639 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
4640 system_state == SYSTEM_RUNNING) {
4651 __cond_resched(); 4641 __cond_resched();
4652 return 1; 4642 return 1;
4653 } 4643 }
@@ -4673,7 +4663,7 @@ int cond_resched_lock(spinlock_t *lock)
4673 ret = 1; 4663 ret = 1;
4674 spin_lock(lock); 4664 spin_lock(lock);
4675 } 4665 }
4676 if (need_resched() && __resched_legal(1)) { 4666 if (need_resched() && system_state == SYSTEM_RUNNING) {
4677 spin_release(&lock->dep_map, 1, _THIS_IP_); 4667 spin_release(&lock->dep_map, 1, _THIS_IP_);
4678 _raw_spin_unlock(lock); 4668 _raw_spin_unlock(lock);
4679 preempt_enable_no_resched(); 4669 preempt_enable_no_resched();
@@ -4689,7 +4679,7 @@ int __sched cond_resched_softirq(void)
4689{ 4679{
4690 BUG_ON(!in_softirq()); 4680 BUG_ON(!in_softirq());
4691 4681
4692 if (need_resched() && __resched_legal(0)) { 4682 if (need_resched() && system_state == SYSTEM_RUNNING) {
4693 raw_local_irq_disable(); 4683 raw_local_irq_disable();
4694 _local_bh_enable(); 4684 _local_bh_enable();
4695 raw_local_irq_enable(); 4685 raw_local_irq_enable();