author		Linus Torvalds <torvalds@linux-foundation.org>	2008-05-11 19:04:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-11 19:04:48 -0400
commit		c3921ab71507b108d51a0f1ee960f80cd668a93d
tree		b1408b898a8b50f15ad4a0cf1f29e17cc0138485 /include/linux/sched.h
parent		9662369786b9d07fd46d65b0f9e3938a3e01a5d9
Add new 'cond_resched_bkl()' helper function
It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.
Normal kernel code is already preemptible in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do
cond_resched() when CONFIG_PREEMPT").
But when wanting to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and the new function is the BKL
equivalent of that (a hypothetical usage sketch follows below).
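Purely as an illustration (not part of this commit), a minimal sketch of the intended caller pattern, assuming made-up names walk_big_table() and do_one_step() for a long-running loop executed under the Big Kernel Lock:

#include <linux/smp_lock.h>	/* lock_kernel() / unlock_kernel() */
#include <linux/sched.h>	/* cond_resched_bkl() */

/* Made-up work item; stands in for whatever the real loop body does. */
static void do_one_step(int i)
{
	(void)i;
}

/* Hypothetical long-running scan performed while holding the BKL. */
static void walk_big_table(void)
{
	int i;

	lock_kernel();
	for (i = 0; i < 100000; i++) {
		do_one_step(i);
		/*
		 * Voluntary preemption point.  A plain cond_resched()
		 * would compile away when CONFIG_PREEMPT=y, silently
		 * removing this break; cond_resched_bkl() always stays.
		 */
		cond_resched_bkl();
	}
	unlock_kernel();
}

Sleeping here is fine because the scheduler releases the kernel lock around the context switch and reacquires it afterwards, which is what makes an explicit reschedule point useful under the BKL even when CONFIG_PREEMPT would otherwise elide cond_resched().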
Also make fs/locks.c use it.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0c35b0343a76..4ab9f32f9238 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another