diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-07-16 09:44:29 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-07-18 09:51:44 -0400 |
commit | 613afbf83298efaead05ebcac23d2285609d7160 (patch) | |
tree | 15fa2025d74cee5f6805ab974c532c6b6a603333 /kernel/sched.c | |
parent | 6f80bd985fe242c2e6a8b6209ed20b0495d3d63b (diff) |
sched: Pull up the might_sleep() check into cond_resched()
might_sleep() is called late-ish in cond_resched(), after the
need_resched()/preempt enabled/system running tests are
checked.
It's better to check for sleeping-while-atomic earlier, rather than
depend on environment data that reduces the chances to
detect a problem.
Also define cond_resched_*() helpers as macros, so that the
__FILE__/__LINE__ reported in the sleeping-while-atomic warning
display the real origin and not sched.h.
Changes in v2:
- Call __might_sleep() directly instead of might_sleep() which
may call cond_resched()
- Turn cond_resched() into a macro so that the file:line
couple reported refers to the caller of cond_resched() and
not __cond_resched() itself.
Changes in v3:
- Also propagate this __might_sleep() pull up to
cond_resched_lock() and cond_resched_softirq()
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1247725694-6082-6-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 12 |
1 files changed, 5 insertions, 7 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 3ff4d004bd95..1f7919add8ae 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -6610,8 +6610,6 @@ static inline int should_resched(void) | |||
6610 | 6610 | ||
6611 | static void __cond_resched(void) | 6611 | static void __cond_resched(void) |
6612 | { | 6612 | { |
6613 | __might_sleep(__FILE__, __LINE__, 0); | ||
6614 | |||
6615 | add_preempt_count(PREEMPT_ACTIVE); | 6613 | add_preempt_count(PREEMPT_ACTIVE); |
6616 | schedule(); | 6614 | schedule(); |
6617 | sub_preempt_count(PREEMPT_ACTIVE); | 6615 | sub_preempt_count(PREEMPT_ACTIVE); |
@@ -6628,14 +6626,14 @@ int __sched _cond_resched(void) | |||
6628 | EXPORT_SYMBOL(_cond_resched); | 6626 | EXPORT_SYMBOL(_cond_resched); |
6629 | 6627 | ||
6630 | /* | 6628 | /* |
6631 | * cond_resched_lock() - if a reschedule is pending, drop the given lock, | 6629 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
6632 | * call schedule, and on return reacquire the lock. | 6630 | * call schedule, and on return reacquire the lock. |
6633 | * | 6631 | * |
6634 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level | 6632 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
6635 | * operations here to prevent schedule() from being called twice (once via | 6633 | * operations here to prevent schedule() from being called twice (once via |
6636 | * spin_unlock(), once by hand). | 6634 | * spin_unlock(), once by hand). |
6637 | */ | 6635 | */ |
6638 | int cond_resched_lock(spinlock_t *lock) | 6636 | int __cond_resched_lock(spinlock_t *lock) |
6639 | { | 6637 | { |
6640 | int resched = should_resched(); | 6638 | int resched = should_resched(); |
6641 | int ret = 0; | 6639 | int ret = 0; |
@@ -6651,9 +6649,9 @@ int cond_resched_lock(spinlock_t *lock) | |||
6651 | } | 6649 | } |
6652 | return ret; | 6650 | return ret; |
6653 | } | 6651 | } |
6654 | EXPORT_SYMBOL(cond_resched_lock); | 6652 | EXPORT_SYMBOL(__cond_resched_lock); |
6655 | 6653 | ||
6656 | int __sched cond_resched_softirq(void) | 6654 | int __sched __cond_resched_softirq(void) |
6657 | { | 6655 | { |
6658 | BUG_ON(!in_softirq()); | 6656 | BUG_ON(!in_softirq()); |
6659 | 6657 | ||
@@ -6665,7 +6663,7 @@ int __sched cond_resched_softirq(void) | |||
6665 | } | 6663 | } |
6666 | return 0; | 6664 | return 0; |
6667 | } | 6665 | } |
6668 | EXPORT_SYMBOL(cond_resched_softirq); | 6666 | EXPORT_SYMBOL(__cond_resched_softirq); |
6669 | 6667 | ||
6670 | /** | 6668 | /** |
6671 | * yield - yield the current processor to other threads. | 6669 | * yield - yield the current processor to other threads. |