about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- kernel/sched.c | 25
1 files changed, 13 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 2629c1711fd6..d5e37072ea54 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,7 +4386,16 @@ asmlinkage long sys_sched_yield(void)
4386 return 0; 4386 return 0;
4387} 4387}
4388 4388
4389static inline void __cond_resched(void) 4389static inline int __resched_legal(void)
4390{
4391 if (unlikely(preempt_count()))
4392 return 0;
4393 if (unlikely(system_state != SYSTEM_RUNNING))
4394 return 0;
4395 return 1;
4396}
4397
4398static void __cond_resched(void)
4390{ 4399{
4391#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 4400#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4392 __might_sleep(__FILE__, __LINE__); 4401 __might_sleep(__FILE__, __LINE__);
@@ -4396,10 +4405,6 @@ static inline void __cond_resched(void)
4396 * PREEMPT_ACTIVE, which could trigger a second 4405 * PREEMPT_ACTIVE, which could trigger a second
4397 * cond_resched() call. 4406 * cond_resched() call.
4398 */ 4407 */
4399 if (unlikely(preempt_count()))
4400 return;
4401 if (unlikely(system_state != SYSTEM_RUNNING))
4402 return;
4403 do { 4408 do {
4404 add_preempt_count(PREEMPT_ACTIVE); 4409 add_preempt_count(PREEMPT_ACTIVE);
4405 schedule(); 4410 schedule();
@@ -4409,13 +4414,12 @@ static inline void __cond_resched(void)
4409 4414
4410int __sched cond_resched(void) 4415int __sched cond_resched(void)
4411{ 4416{
4412 if (need_resched()) { 4417 if (need_resched() && __resched_legal()) {
4413 __cond_resched(); 4418 __cond_resched();
4414 return 1; 4419 return 1;
4415 } 4420 }
4416 return 0; 4421 return 0;
4417} 4422}
4418
4419EXPORT_SYMBOL(cond_resched); 4423EXPORT_SYMBOL(cond_resched);
4420 4424
4421/* 4425/*
@@ -4436,7 +4440,7 @@ int cond_resched_lock(spinlock_t *lock)
4436 ret = 1; 4440 ret = 1;
4437 spin_lock(lock); 4441 spin_lock(lock);
4438 } 4442 }
4439 if (need_resched()) { 4443 if (need_resched() && __resched_legal()) {
4440 _raw_spin_unlock(lock); 4444 _raw_spin_unlock(lock);
4441 preempt_enable_no_resched(); 4445 preempt_enable_no_resched();
4442 __cond_resched(); 4446 __cond_resched();
@@ -4445,14 +4449,13 @@ int cond_resched_lock(spinlock_t *lock)
4445 } 4449 }
4446 return ret; 4450 return ret;
4447} 4451}
4448
4449EXPORT_SYMBOL(cond_resched_lock); 4452EXPORT_SYMBOL(cond_resched_lock);
4450 4453
4451int __sched cond_resched_softirq(void) 4454int __sched cond_resched_softirq(void)
4452{ 4455{
4453 BUG_ON(!in_softirq()); 4456 BUG_ON(!in_softirq());
4454 4457
4455 if (need_resched()) { 4458 if (need_resched() && __resched_legal()) {
4456 __local_bh_enable(); 4459 __local_bh_enable();
4457 __cond_resched(); 4460 __cond_resched();
4458 local_bh_disable(); 4461 local_bh_disable();
@@ -4460,10 +4463,8 @@ int __sched cond_resched_softirq(void)
4460 } 4463 }
4461 return 0; 4464 return 0;
4462} 4465}
4463
4464EXPORT_SYMBOL(cond_resched_softirq); 4466EXPORT_SYMBOL(cond_resched_softirq);
4465 4467
4466
4467/** 4468/**
4468 * yield - yield the current processor to other threads. 4469 * yield - yield the current processor to other threads.
4469 * 4470 *