path: root/kernel/rcu/tree.c
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-05-30 13:11:24 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-07-22 18:26:58 -0400
commit    24560056de61d86153cecb84d04e4237437f5888 (patch)
tree      4674113128e446afb740fcc1d45d7fbd64dcff1f /kernel/rcu/tree.c
parent    f765d1130700878c2275bc1ea09eed428f870a2a (diff)
rcu: Add RCU-sched flavors of get-state and cond-sync
The get_state_synchronize_rcu() and cond_synchronize_rcu() functions allow polling for grace-period completion, with an actual wait for a grace period occurring only when cond_synchronize_rcu() is called too soon after the corresponding get_state_synchronize_rcu(). However, these functions work only for vanilla RCU. This commit adds get_state_synchronize_sched() and cond_synchronize_sched(), which provide the same capability for RCU-sched.

Reported-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
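A minimal sketch of the calling pattern these functions enable (not part of the commit; example_updater() and the do_other_work()/reclaim_old_data() helpers are hypothetical, and in-kernel context is assumed):

#include <linux/rcupdate.h>	/* declares the RCU-sched polling API */

/*
 * Hypothetical updater: snapshot the grace-period state, do unrelated
 * (ideally time-consuming) work, then wait only if a full RCU-sched
 * grace period has not already elapsed in the meantime.
 */
static void example_updater(void)
{
	unsigned long gp_state = get_state_synchronize_sched();

	do_other_work();		/* hypothetical helper */

	/* Invokes synchronize_sched() only if no full GP has elapsed. */
	cond_synchronize_sched(gp_state);

	reclaim_old_data();		/* hypothetical: safe after a full GP */
}

The benefit over calling synchronize_sched() unconditionally is that the grace period can complete concurrently with do_other_work(), in which case cond_synchronize_sched() returns immediately.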
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8b5dd8ba9495..9629298eea24 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3253,6 +3253,58 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
+/**
+ * get_state_synchronize_sched - Snapshot current RCU-sched state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_sched()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_sched(void)
+{
+	/*
+	 * Any prior manipulation of RCU-protected data must happen
+	 * before the load from ->gpnum.
+	 */
+	smp_mb();  /* ^^^ */
+
+	/*
+	 * Make sure this load happens before the purportedly
+	 * time-consuming work between get_state_synchronize_sched()
+	 * and cond_synchronize_sched().
+	 */
+	return smp_load_acquire(&rcu_sched_state.gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
+
+/**
+ * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_sched()
+ *
+ * If a full RCU-sched grace period has elapsed since the earlier call to
+ * get_state_synchronize_sched(), just return.  Otherwise, invoke
+ * synchronize_sched() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.  But
+ * counter wrap is harmless.  If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_sched(unsigned long oldstate)
+{
+	unsigned long newstate;
+
+	/*
+	 * Ensure that this load happens before any RCU-destructive
+	 * actions the caller might carry out after we return.
+	 */
+	newstate = smp_load_acquire(&rcu_sched_state.completed);
+	if (ULONG_CMP_GE(oldstate, newstate))
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_sched);
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	/*
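The "counter wrap is harmless" comment above depends on the grace-period comparison being modular rather than a plain >=. A standalone userspace sketch of that idea, assuming ULONG_CMP_GE() carries its usual kernel definition, ULONG_MAX / 2 >= (a) - (b) (the variable names are illustrative):

#include <limits.h>
#include <stdio.h>

/* Reproduced from the kernel's RCU headers so the sketch is self-contained. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long before = ULONG_MAX - 1;	/* counter just before wrapping */
	unsigned long after = 2;		/* counter shortly after wrapping */

	/* A plain comparison is fooled by the wrap... */
	printf("plain:   %d\n", after >= before);		/* prints 0 */

	/* ...but the modular comparison still sees "after" as later,
	 * because the unsigned difference after - before is small. */
	printf("modular: %d\n", ULONG_CMP_GE(after, before));	/* prints 1 */

	return 0;
}

In cond_synchronize_sched(), this means ULONG_CMP_GE(oldstate, newstate) keeps answering "has ->completed not yet passed the snapshotted ->gpnum?" correctly across a wrap, so the worst case is one unnecessary synchronize_sched() after billions of grace periods.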