author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2014-03-17 00:36:25 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2014-05-14 12:46:11 -0400
commit     ac1bea85781e9004da9b3e8a4b097c18492d857c
tree       e28ea65bf56d1624371885954a46ab64cab1524b /kernel/sched
parent     0e980234c97f98be6619b9281d83777f725b94ff
sched,rcu: Make cond_resched() report RCU quiescent states
Given a CPU running a loop containing cond_resched(), with no
other tasks runnable on that CPU, RCU will eventually report RCU
CPU stall warnings due to lack of quiescent states. Fortunately,
every call to cond_resched() is a perfectly good quiescent state.
Unfortunately, invoking rcu_note_context_switch() is a bit heavyweight
for cond_resched(), especially given the need to disable preemption,
and, for RCU-preempt, interrupts as well.
This commit therefore maintains a per-CPU counter that causes
cond_resched(), cond_resched_lock(), and cond_resched_softirq() to call
rcu_note_context_switch(), but only about once per 256 invocations.
This ratio was chosen in keeping with the relative time constants of
RCU grace periods.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
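[The rcu_should_resched()/rcu_resched()/rcu_cond_resched() helpers used in the
hunks below are defined outside kernel/sched, so they do not appear in this
diffstat. As a rough sketch of the per-CPU counter scheme the commit message
describes (illustrative only; the real helper definitions are not part of the
diff shown here):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

/* Report a quiescent state roughly once per 256 cond_resched() calls. */
#define RCU_COND_RESCHED_LIM 256

DEFINE_PER_CPU(int, rcu_cond_resched_count);

/* Cheap, preempt-safe check: is this CPU overdue for reporting a QS? */
static inline bool rcu_should_resched(void)
{
        return raw_cpu_inc_return(rcu_cond_resched_count) >=
               RCU_COND_RESCHED_LIM;
}

/* Slow path: reset the counter and report the quiescent state. */
void rcu_resched(void)
{
        preempt_disable();
        __this_cpu_write(rcu_cond_resched_count, 0);
        rcu_note_context_switch(smp_processor_id());
        preempt_enable();
}

/* Fast path used from _cond_resched() and __cond_resched_softirq(). */
static inline void rcu_cond_resched(void)
{
        if (unlikely(rcu_should_resched()))
                rcu_resched();
}

The counter can tolerate unsynchronized access: a stray cross-CPU increment or
a lost reset merely shifts when the next quiescent state is reported, which is
harmless because the goal is amortizing rcu_note_context_switch() overhead,
not precision.]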
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..9f530c9ed911 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4051,6 +4051,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
+        rcu_cond_resched();
         if (should_resched()) {
                 __cond_resched();
                 return 1;
@@ -4069,15 +4070,18 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
+        bool need_rcu_resched = rcu_should_resched();
         int resched = should_resched();
         int ret = 0;
 
         lockdep_assert_held(lock);
 
-        if (spin_needbreak(lock) || resched) {
+        if (spin_needbreak(lock) || resched || need_rcu_resched) {
                 spin_unlock(lock);
                 if (resched)
                         __cond_resched();
+                else if (unlikely(need_rcu_resched))
+                        rcu_resched();
                 else
                         cpu_relax();
                 ret = 1;
@@ -4091,6 +4095,7 @@ int __sched __cond_resched_softirq(void)
 {
         BUG_ON(!in_softirq());
 
+        rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
         if (should_resched()) {
                 local_bh_enable();
                 __cond_resched();