diff options
author | Andrew Morton <akpm@osdl.org> | 2006-06-30 04:56:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-30 14:25:38 -0400 |
commit | e7b384043e27bed4f23b108481b99c518dd01a01 (patch) | |
tree | 52f944bf39d3a7b329f4e38d619d7949e35510a0 | |
parent | 92fe15a3d24fa53e7e961c549c488d0bb642d895 (diff) |
[PATCH] cond_resched() fix
Fix a bug identified by Zou Nan hai <nanhai.zou@intel.com>:
If the system is in state SYSTEM_BOOTING, and need_resched() is true,
cond_resched() returns true even though it didn't reschedule. Consequently,
need_resched() remains true and JBD locks up.
Fix that by teaching cond_resched() to only return true if it really did call
schedule().
cond_resched_lock() and cond_resched_softirq() have a problem too. If we're
in SYSTEM_BOOTING state and need_resched() is true, these functions will drop
the lock and will then try to call schedule(), but the SYSTEM_BOOTING state
will prevent schedule() from being called. So on return, need_resched() will
still be true, but cond_resched_lock() has to return 1 to tell the caller that
the lock was dropped. The caller will probably lock up.
Bottom line: if these functions dropped the lock, they _must_ call schedule()
to clear need_resched(). Make it so.
Also, uninline __cond_resched(). It's largish, and it's a slow path.
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | kernel/sched.c | 25 |
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c index 2629c1711fd6..d5e37072ea54 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4386,7 +4386,16 @@ asmlinkage long sys_sched_yield(void) | |||
4386 | return 0; | 4386 | return 0; |
4387 | } | 4387 | } |
4388 | 4388 | ||
4389 | static inline void __cond_resched(void) | 4389 | static inline int __resched_legal(void) |
4390 | { | ||
4391 | if (unlikely(preempt_count())) | ||
4392 | return 0; | ||
4393 | if (unlikely(system_state != SYSTEM_RUNNING)) | ||
4394 | return 0; | ||
4395 | return 1; | ||
4396 | } | ||
4397 | |||
4398 | static void __cond_resched(void) | ||
4390 | { | 4399 | { |
4391 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP | 4400 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
4392 | __might_sleep(__FILE__, __LINE__); | 4401 | __might_sleep(__FILE__, __LINE__); |
@@ -4396,10 +4405,6 @@ static inline void __cond_resched(void) | |||
4396 | * PREEMPT_ACTIVE, which could trigger a second | 4405 | * PREEMPT_ACTIVE, which could trigger a second |
4397 | * cond_resched() call. | 4406 | * cond_resched() call. |
4398 | */ | 4407 | */ |
4399 | if (unlikely(preempt_count())) | ||
4400 | return; | ||
4401 | if (unlikely(system_state != SYSTEM_RUNNING)) | ||
4402 | return; | ||
4403 | do { | 4408 | do { |
4404 | add_preempt_count(PREEMPT_ACTIVE); | 4409 | add_preempt_count(PREEMPT_ACTIVE); |
4405 | schedule(); | 4410 | schedule(); |
@@ -4409,13 +4414,12 @@ static inline void __cond_resched(void) | |||
4409 | 4414 | ||
4410 | int __sched cond_resched(void) | 4415 | int __sched cond_resched(void) |
4411 | { | 4416 | { |
4412 | if (need_resched()) { | 4417 | if (need_resched() && __resched_legal()) { |
4413 | __cond_resched(); | 4418 | __cond_resched(); |
4414 | return 1; | 4419 | return 1; |
4415 | } | 4420 | } |
4416 | return 0; | 4421 | return 0; |
4417 | } | 4422 | } |
4418 | |||
4419 | EXPORT_SYMBOL(cond_resched); | 4423 | EXPORT_SYMBOL(cond_resched); |
4420 | 4424 | ||
4421 | /* | 4425 | /* |
@@ -4436,7 +4440,7 @@ int cond_resched_lock(spinlock_t *lock) | |||
4436 | ret = 1; | 4440 | ret = 1; |
4437 | spin_lock(lock); | 4441 | spin_lock(lock); |
4438 | } | 4442 | } |
4439 | if (need_resched()) { | 4443 | if (need_resched() && __resched_legal()) { |
4440 | _raw_spin_unlock(lock); | 4444 | _raw_spin_unlock(lock); |
4441 | preempt_enable_no_resched(); | 4445 | preempt_enable_no_resched(); |
4442 | __cond_resched(); | 4446 | __cond_resched(); |
@@ -4445,14 +4449,13 @@ int cond_resched_lock(spinlock_t *lock) | |||
4445 | } | 4449 | } |
4446 | return ret; | 4450 | return ret; |
4447 | } | 4451 | } |
4448 | |||
4449 | EXPORT_SYMBOL(cond_resched_lock); | 4452 | EXPORT_SYMBOL(cond_resched_lock); |
4450 | 4453 | ||
4451 | int __sched cond_resched_softirq(void) | 4454 | int __sched cond_resched_softirq(void) |
4452 | { | 4455 | { |
4453 | BUG_ON(!in_softirq()); | 4456 | BUG_ON(!in_softirq()); |
4454 | 4457 | ||
4455 | if (need_resched()) { | 4458 | if (need_resched() && __resched_legal()) { |
4456 | __local_bh_enable(); | 4459 | __local_bh_enable(); |
4457 | __cond_resched(); | 4460 | __cond_resched(); |
4458 | local_bh_disable(); | 4461 | local_bh_disable(); |
@@ -4460,10 +4463,8 @@ int __sched cond_resched_softirq(void) | |||
4460 | } | 4463 | } |
4461 | return 0; | 4464 | return 0; |
4462 | } | 4465 | } |
4463 | |||
4464 | EXPORT_SYMBOL(cond_resched_softirq); | 4466 | EXPORT_SYMBOL(cond_resched_softirq); |
4465 | 4467 | ||
4466 | |||
4467 | /** | 4468 | /** |
4468 | * yield - yield the current processor to other threads. | 4469 | * yield - yield the current processor to other threads. |
4469 | * | 4470 | * |