author	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-11 19:04:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-11 19:04:48 -0400
commit	c3921ab71507b108d51a0f1ee960f80cd668a93d (patch)
tree	b1408b898a8b50f15ad4a0cf1f29e17cc0138485
parent	9662369786b9d07fd46d65b0f9e3938a3e01a5d9 (diff)
Add new 'cond_resched_bkl()' helper function
It acts exactly like a regular 'cond_resched()', but will not get optimized away when CONFIG_PREEMPT is set.

Normal kernel code is already preemptible in the presence of CONFIG_PREEMPT, so cond_resched() is optimized away (see commit 02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do cond_resched() when CONFIG_PREEMPT").

But when you want to conditionally reschedule while holding a lock, you need to use "cond_resched_lock(lock)", and the new function is the BKL equivalent of that.

Also make fs/locks.c use it.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
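As a usage sketch (not part of this patch; example_bkl_batch and process_item are hypothetical names), a long-running loop under the Big Kernel Lock can now yield the CPU even on CONFIG_PREEMPT kernels, where a plain cond_resched() compiles to a no-op. Note that, unlike cond_resched_lock(), no lock argument is needed: schedule() already drops and reacquires the BKL across a context switch.

#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */
#include <linux/sched.h>	/* cond_resched_bkl() */

/* Process a batch of items while holding the BKL, yielding between
 * items whenever a reschedule is pending. */
static void example_bkl_batch(int nr_items)
{
	int i;

	lock_kernel();
	for (i = 0; i < nr_items; i++) {
		process_item(i);	/* hypothetical per-item work */
		cond_resched_bkl();	/* BKL handed off by schedule() */
	}
	unlock_kernel();
}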
-rw-r--r--	fs/locks.c	2
-rw-r--r--	include/linux/sched.h	6
-rw-r--r--	kernel/sched.c	2
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 0ac6b92cb0b6..11dbf08651b7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -773,7 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	 * give it the opportunity to lock the file.
 	 */
 	if (found)
-		cond_resched();
+		cond_resched_bkl();
 
 find_conflict:
 	for_each_lock(inode, before) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0c35b0343a76..4ab9f32f9238 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
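For contrast, a minimal sketch of the spinlock variant the commit message refers to (example_locked_scan and scan_one are hypothetical names): cond_resched_lock() drops the given spinlock before scheduling and retakes it before returning, so any state the lock protects must be treated as stale after the call.

#include <linux/spinlock.h>
#include <linux/sched.h>	/* cond_resched_lock() */

/* Scan entries under a spinlock, yielding periodically.  The return
 * value (nonzero if we actually slept) is ignored here, as it often
 * is in callers that can tolerate the protected state changing. */
static void example_locked_scan(spinlock_t *lock, int nr)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < nr; i++) {
		scan_one(i);		/* hypothetical per-entry work */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}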
diff --git a/kernel/sched.c b/kernel/sched.c
index c51b6565e07c..8841a915545d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5525,7 +5525,6 @@ static void __cond_resched(void)
 	} while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
 	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5536,7 +5535,6 @@ int __sched _cond_resched(void)
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,