about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-07-16 09:44:29 -0400
committerIngo Molnar <mingo@elte.hu>2009-07-18 09:51:44 -0400
commit613afbf83298efaead05ebcac23d2285609d7160 (patch)
tree15fa2025d74cee5f6805ab974c532c6b6a603333
parent6f80bd985fe242c2e6a8b6209ed20b0495d3d63b (diff)
sched: Pull up the might_sleep() check into cond_resched()
might_sleep() is called late-ish in cond_resched(), after the need_resched()/preempt enabled/system running tests are checked. It's better to check the sleeping-while-atomic condition earlier and not depend on environment data that reduce the chances to detect a problem.

Also define the cond_resched_*() helpers as macros, so that the __FILE__/__LINE__ reported in the sleeping-while-atomic warning displays the real origin and not sched.h.

Changes in v2:
- Call __might_sleep() directly instead of might_sleep() which may call cond_resched()
- Turn cond_resched() into a macro so that the file:line couple reported refers to the caller of cond_resched() and not __cond_resched() itself.

Changes in v3:
- Also propagate this __might_sleep() pull-up to cond_resched_lock() and cond_resched_softirq()

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1247725694-6082-6-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--fs/dcache.c1
-rw-r--r--include/linux/sched.h29
-rw-r--r--kernel/sched.c12
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 9e5cd3c3a6ba..a100fa35a48f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,6 +32,7 @@
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/fs_struct.h> 34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
35#include "internal.h" 36#include "internal.h"
36 37
37int sysctl_vfs_cache_pressure __read_mostly = 100; 38int sysctl_vfs_cache_pressure __read_mostly = 100;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e2bdf18e05c4..c41d424db887 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2286,17 +2286,26 @@ static inline int need_resched(void)
2286 */ 2286 */
2287extern int _cond_resched(void); 2287extern int _cond_resched(void);
2288 2288
2289static inline int cond_resched(void) 2289#define cond_resched() ({ \
2290{ 2290 __might_sleep(__FILE__, __LINE__, 0); \
2291 return _cond_resched(); 2291 _cond_resched(); \
2292} 2292})
2293 2293
2294extern int cond_resched_lock(spinlock_t * lock); 2294extern int __cond_resched_lock(spinlock_t *lock);
2295extern int cond_resched_softirq(void); 2295
2296static inline int cond_resched_bkl(void) 2296#define cond_resched_lock(lock) ({ \
2297{ 2297 __might_sleep(__FILE__, __LINE__, PREEMPT_OFFSET); \
2298 return _cond_resched(); 2298 __cond_resched_lock(lock); \
2299} 2299})
2300
2301extern int __cond_resched_softirq(void);
2302
2303#define cond_resched_softirq() ({ \
2304 __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
2305 __cond_resched_softirq(); \
2306})
2307
2308#define cond_resched_bkl() cond_resched()
2300 2309
2301/* 2310/*
2302 * Does a critical section need to be broken due to another 2311 * Does a critical section need to be broken due to another
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ff4d004bd95..1f7919add8ae 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6610,8 +6610,6 @@ static inline int should_resched(void)
6610 6610
6611static void __cond_resched(void) 6611static void __cond_resched(void)
6612{ 6612{
6613 __might_sleep(__FILE__, __LINE__, 0);
6614
6615 add_preempt_count(PREEMPT_ACTIVE); 6613 add_preempt_count(PREEMPT_ACTIVE);
6616 schedule(); 6614 schedule();
6617 sub_preempt_count(PREEMPT_ACTIVE); 6615 sub_preempt_count(PREEMPT_ACTIVE);
@@ -6628,14 +6626,14 @@ int __sched _cond_resched(void)
6628EXPORT_SYMBOL(_cond_resched); 6626EXPORT_SYMBOL(_cond_resched);
6629 6627
6630/* 6628/*
6631 * cond_resched_lock() - if a reschedule is pending, drop the given lock, 6629 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
6632 * call schedule, and on return reacquire the lock. 6630 * call schedule, and on return reacquire the lock.
6633 * 6631 *
6634 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 6632 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
6635 * operations here to prevent schedule() from being called twice (once via 6633 * operations here to prevent schedule() from being called twice (once via
6636 * spin_unlock(), once by hand). 6634 * spin_unlock(), once by hand).
6637 */ 6635 */
6638int cond_resched_lock(spinlock_t *lock) 6636int __cond_resched_lock(spinlock_t *lock)
6639{ 6637{
6640 int resched = should_resched(); 6638 int resched = should_resched();
6641 int ret = 0; 6639 int ret = 0;
@@ -6651,9 +6649,9 @@ int cond_resched_lock(spinlock_t *lock)
6651 } 6649 }
6652 return ret; 6650 return ret;
6653} 6651}
6654EXPORT_SYMBOL(cond_resched_lock); 6652EXPORT_SYMBOL(__cond_resched_lock);
6655 6653
6656int __sched cond_resched_softirq(void) 6654int __sched __cond_resched_softirq(void)
6657{ 6655{
6658 BUG_ON(!in_softirq()); 6656 BUG_ON(!in_softirq());
6659 6657
@@ -6665,7 +6663,7 @@ int __sched cond_resched_softirq(void)
6665 } 6663 }
6666 return 0; 6664 return 0;
6667} 6665}
6668EXPORT_SYMBOL(cond_resched_softirq); 6666EXPORT_SYMBOL(__cond_resched_softirq);
6669 6667
6670/** 6668/**
6671 * yield - yield the current processor to other threads. 6669 * yield - yield the current processor to other threads.