author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-03-17 00:36:25 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-05-14 12:46:11 -0400
commit    ac1bea85781e9004da9b3e8a4b097c18492d857c (patch)
tree      e28ea65bf56d1624371885954a46ab64cab1524b /include/linux/rcupdate.h
parent    0e980234c97f98be6619b9281d83777f725b94ff (diff)
sched,rcu: Make cond_resched() report RCU quiescent states
Given a CPU running a loop containing cond_resched(), with no other tasks runnable on that CPU, RCU will eventually report RCU CPU stall warnings due to lack of quiescent states. Fortunately, every call to cond_resched() is a perfectly good quiescent state. Unfortunately, invoking rcu_note_context_switch() is a bit heavyweight for cond_resched(), especially given the need to disable preemption, and, for RCU-preempt, interrupts as well.

This commit therefore maintains a per-CPU counter that causes cond_resched(), cond_resched_lock(), and cond_resched_softirq() to call rcu_note_context_switch(), but only about once per 256 invocations. This ratio was chosen in keeping with the relative time constants of RCU grace periods.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
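The header half of the change is shown in the diff below; the out-of-line slow path and the cond_resched() hookup live in other files touched by this patch. As a rough, illustrative sketch reconstructed from the commit message (not quoted from the patch; the bodies here are assumptions), those pieces would look something like this:

/*
 * Illustrative sketch only: the out-of-line slow path that zeroes the
 * per-CPU counter and reports the quiescent state. The real definition
 * lives elsewhere in this patch; these details are assumptions.
 */
void rcu_resched(void)
{
	preempt_disable();
	__this_cpu_write(rcu_cond_resched_count, 0);
	rcu_note_context_switch(smp_processor_id());
	preempt_enable();
}

/*
 * Illustrative sketch only: cond_resched() gains a single cheap hook
 * call on its fast path; cond_resched_lock() and cond_resched_softirq()
 * would be hooked the same way.
 */
int __sched _cond_resched(void)
{
	rcu_cond_resched();	/* Quiescent state about once per 256 calls. */
	if (should_resched()) {
		__cond_resched();
		return 1;
	}
	return 0;
}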
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--  include/linux/rcupdate.h  36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 82973738125b..97cc8d6679b4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,6 +44,7 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -287,6 +288,41 @@ bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
+ * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
+ */
+
+#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
+DECLARE_PER_CPU(int, rcu_cond_resched_count);
+void rcu_resched(void);
+
+/*
+ * Is it time to report RCU quiescent states?
+ *
+ * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
+ * increment some random CPU's count, and possibly also load the result from
+ * yet another CPU's count. We might even clobber some other CPU's attempt
+ * to zero its counter. This is all OK because the goal is not precision,
+ * but rather reasonable amortization of rcu_note_context_switch() overhead
+ * and extremely high probability of avoiding RCU CPU stall warnings.
+ * Note that this function has to be preempted in just the wrong place,
+ * many thousands of times in a row, for anything bad to happen.
+ */
+static inline bool rcu_should_resched(void)
+{
+	return raw_cpu_inc_return(rcu_cond_resched_count) >=
+	       RCU_COND_RESCHED_LIM;
+}
+
+/*
+ * Report quiescent states to RCU if it is time to do so.
+ */
+static inline void rcu_cond_resched(void)
+{
+	if (unlikely(rcu_should_resched()))
+		rcu_resched();
+}
+
+/*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
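For readers outside the kernel tree, the amortization pattern above can be demonstrated with a self-contained userspace program: a cheap counter increment on every call, with the expensive "report" operation running only about once per 256 calls. Everything here (the demo_* names, the single-threaded counter) is illustrative; only the once-per-256 ratio comes from the patch:

#include <stdio.h>

#define DEMO_RESCHED_LIM 256	/* Mirrors RCU_COND_RESCHED_LIM. */

static int demo_count;		/* Stands in for the per-CPU counter. */
static int demo_reports;	/* How many times the slow path ran. */

static void demo_report(void)	/* Stands in for rcu_resched(). */
{
	demo_count = 0;
	demo_reports++;
}

static void demo_cond_resched(void)
{
	if (++demo_count >= DEMO_RESCHED_LIM)	/* Cheap fast path. */
		demo_report();			/* Rare slow path. */
}

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		demo_cond_resched();
	/* Expect 1000000 / 256 = 3906 reports. */
	printf("slow path ran %d times in 1000000 calls\n", demo_reports);
	return 0;
}

In the kernel version the counter is per-CPU and deliberately updated without synchronization; as the comment in the diff explains, occasional lost or misattributed increments merely shift when the next report fires, which is harmless given that the goal is amortization rather than an exact count.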