aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-03-17 00:36:25 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-05-14 12:46:11 -0400
commitac1bea85781e9004da9b3e8a4b097c18492d857c (patch)
treee28ea65bf56d1624371885954a46ab64cab1524b /kernel
parent0e980234c97f98be6619b9281d83777f725b94ff (diff)
sched,rcu: Make cond_resched() report RCU quiescent states
Given a CPU running a loop containing cond_resched(), with no other tasks runnable on that CPU, RCU will eventually report RCU CPU stall warnings due to lack of quiescent states. Fortunately, every call to cond_resched() is a perfectly good quiescent state. Unfortunately, invoking rcu_note_context_switch() is a bit heavyweight for cond_resched(), especially given the need to disable preemption, and, for RCU-preempt, interrupts as well. This commit therefore maintains a per-CPU counter that causes cond_resched(), cond_resched_lock(), and cond_resched_softirq() to call rcu_note_context_switch(), but only about once per 256 invocations. This ratio was chosen in keeping with the relative time constants of RCU grace periods. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcu/update.c18
-rw-r--r--kernel/sched/core.c7
2 files changed, 24 insertions, 1 deletion
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c0a9b0af469..ed7a0d72562c 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -338,3 +338,21 @@ static int __init check_cpu_stall_init(void)
338early_initcall(check_cpu_stall_init); 338early_initcall(check_cpu_stall_init);
339 339
340#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 340#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
341
342/*
343 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
344 */
345
346DEFINE_PER_CPU(int, rcu_cond_resched_count);
347
348/*
349 * Report a set of RCU quiescent states, for use by cond_resched()
350 * and friends. Out of line due to being called infrequently.
351 */
352void rcu_resched(void)
353{
354 preempt_disable();
355 __this_cpu_write(rcu_cond_resched_count, 0);
356 rcu_note_context_switch(smp_processor_id());
357 preempt_enable();
358}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..9f530c9ed911 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4051,6 +4051,7 @@ static void __cond_resched(void)
4051 4051
4052int __sched _cond_resched(void) 4052int __sched _cond_resched(void)
4053{ 4053{
4054 rcu_cond_resched();
4054 if (should_resched()) { 4055 if (should_resched()) {
4055 __cond_resched(); 4056 __cond_resched();
4056 return 1; 4057 return 1;
@@ -4069,15 +4070,18 @@ EXPORT_SYMBOL(_cond_resched);
4069 */ 4070 */
4070int __cond_resched_lock(spinlock_t *lock) 4071int __cond_resched_lock(spinlock_t *lock)
4071{ 4072{
4073 bool need_rcu_resched = rcu_should_resched();
4072 int resched = should_resched(); 4074 int resched = should_resched();
4073 int ret = 0; 4075 int ret = 0;
4074 4076
4075 lockdep_assert_held(lock); 4077 lockdep_assert_held(lock);
4076 4078
4077 if (spin_needbreak(lock) || resched) { 4079 if (spin_needbreak(lock) || resched || need_rcu_resched) {
4078 spin_unlock(lock); 4080 spin_unlock(lock);
4079 if (resched) 4081 if (resched)
4080 __cond_resched(); 4082 __cond_resched();
4083 else if (unlikely(need_rcu_resched))
4084 rcu_resched();
4081 else 4085 else
4082 cpu_relax(); 4086 cpu_relax();
4083 ret = 1; 4087 ret = 1;
@@ -4091,6 +4095,7 @@ int __sched __cond_resched_softirq(void)
4091{ 4095{
4092 BUG_ON(!in_softirq()); 4096 BUG_ON(!in_softirq());
4093 4097
4098 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4094 if (should_resched()) { 4099 if (should_resched()) {
4095 local_bh_enable(); 4100 local_bh_enable();
4096 __cond_resched(); 4101 __cond_resched();