author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-10 08:57:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-10 17:24:05 -0400
commit	d86ee4809d0329d4aa0d0f2c76c2295a16862799 (patch)
tree	32b3eaf766c1127ef2298c2e48b5441b7cf9b843 /kernel/sched.c
parent	c99e6efe1ba04561e7d93a81f0be07e37427e835 (diff)
sched: optimize cond_resched()
Optimize cond_resched() by removing one conditional.

Currently cond_resched() checks system_state == SYSTEM_RUNNING in order
to avoid scheduling before the scheduler is running. We can, however, as
per Matt's suggestion, use PREEMPT_ACTIVE to accomplish that very same
thing.

Suggested-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
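For readers wanting a feel for the new guard: the idea is that the early-boot
preempt_count already carries PREEMPT_ACTIVE (set up by the parent commit in
this series), so should_resched() stays false until the scheduler is up,
without consulting system_state. The userspace sketch below models that with
hypothetical stand-ins (preempt_count_val, need_resched_flag, and an
illustrative PREEMPT_ACTIVE bit value); it is not kernel code, just a toy
demonstration of the mechanism.

/* Toy model of the PREEMPT_ACTIVE guard; names and bit value are illustrative. */
#include <stdio.h>

#define PREEMPT_ACTIVE 0x10000000                        /* illustrative bit, not any arch's real value */

static unsigned int preempt_count_val = PREEMPT_ACTIVE;  /* early-boot count carries PREEMPT_ACTIVE */
static int need_resched_flag = 1;                        /* pretend a reschedule is pending */

static int should_resched(void)
{
	/* Same shape as the new kernel helper: only resched when the bit is clear. */
	return need_resched_flag && !(preempt_count_val & PREEMPT_ACTIVE);
}

int main(void)
{
	printf("before scheduler init: %d\n", should_resched()); /* 0: guarded by PREEMPT_ACTIVE */
	preempt_count_val &= ~PREEMPT_ACTIVE;                    /* scheduler is up: drop the bit */
	printf("after scheduler init:  %d\n", should_resched()); /* 1: cond_resched() may reschedule */
	return 0;
}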
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d186e6..01f55ada3598 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6541,6 +6541,11 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
+static inline int should_resched(void)
+{
+	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+}
+
 static void __cond_resched(void)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -6560,8 +6565,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
-			system_state == SYSTEM_RUNNING) {
+	if (should_resched()) {
 		__cond_resched();
 		return 1;
 	}
@@ -6579,12 +6583,12 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
-	int resched = need_resched() && system_state == SYSTEM_RUNNING;
+	int resched = should_resched();
 	int ret = 0;
 
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		if (resched && need_resched())
+		if (resched)
 			__cond_resched();
 		else
 			cpu_relax();
@@ -6599,7 +6603,7 @@ int __sched cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
+	if (should_resched()) {
 		local_bh_enable();
 		__cond_resched();
 		local_bh_disable();