author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-06-25 18:00:58 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-07-17 17:58:51 -0400
commit	28f00767e3db933cacc3030f4d9736acd037be2c (patch)
tree	cd49629a604b727bc8d871870e3bb27000907210 /kernel/rcu/tree.c
parent	3a6d7c64d78a78d279851524d39999637a549363 (diff)
rcu: Abstract sequence counting from synchronize_sched_expedited()
This commit creates rcu_exp_gp_seq_start() and rcu_exp_gp_seq_end() to
bracket an expedited grace period, rcu_exp_gp_seq_snap() to snapshot the
sequence counter, and rcu_exp_gp_seq_done() to check whether a full
expedited grace period has elapsed since the snapshot.  These will be
applied to synchronize_rcu_expedited().  They are defined in terms of the
underlying rcu_seq_start(), rcu_seq_end(), rcu_seq_snap(), and
rcu_seq_done(), which will be applied to _rcu_barrier().

One reason that this commit does not use the seqcount primitives themselves
is that the smp_wmb() in those primitives is insufficient, because expedited
grace periods do reads as well as writes.  In addition, the read-side
seqcount primitives detect a potentially partial change, where the expedited
primitives instead need a guaranteed full change.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
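For readers unfamiliar with this counting scheme, the following is a minimal
single-threaded user-space sketch, not the kernel code, with invented names
(seq, seq_start(), seq_snap(), and so on) used purely for illustration.  The
counter is odd while an update is in flight and even otherwise, and rounding
the snapshot up via "+ 3" ensures that an update already in flight at
snapshot time cannot satisfy the caller's need for a full update.  The real
primitives also use smp_mb() for memory ordering and ULONG_CMP_GE() for
counter wraparound, both omitted in this sketch.

/*
 * Illustrative sketch of the sequence-count pattern described above.
 * Single-threaded, so memory barriers and wraparound handling are elided.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long seq;	/* stands in for rsp->expedited_sequence */

static void seq_start(unsigned long *sp) { (*sp)++; }	/* counter now odd */
static void seq_end(unsigned long *sp) { (*sp)++; }	/* counter now even */

/* Smallest even value that guarantees one full update after this call. */
static unsigned long seq_snap(unsigned long *sp) { return (*sp + 3) & ~0x1UL; }

/* Has the counter reached the snapshotted goal? */
static bool seq_done(unsigned long *sp, unsigned long s) { return *sp >= s; }

int main(void)
{
	unsigned long s = seq_snap(&seq);	/* goal: one full update from now */

	if (!seq_done(&seq, s)) {	/* nobody has done the work for us yet */
		seq_start(&seq);
		/* ... update-side work would happen here ... */
		seq_end(&seq);
	}
	printf("goal=%lu sequence=%lu done=%d\n", s, seq, seq_done(&seq, s));
	return 0;
}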
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	68
1 file changed, 58 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c5c8509054ef..67fe75725486 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3255,6 +3255,60 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
+/* Adjust sequence number for start of update-side operation. */
+static void rcu_seq_start(unsigned long *sp)
+{
+	WRITE_ONCE(*sp, *sp + 1);
+	smp_mb(); /* Ensure update-side operation after counter increment. */
+	WARN_ON_ONCE(!(*sp & 0x1));
+}
+
+/* Adjust sequence number for end of update-side operation. */
+static void rcu_seq_end(unsigned long *sp)
+{
+	smp_mb(); /* Ensure update-side operation before counter increment. */
+	WRITE_ONCE(*sp, *sp + 1);
+	WARN_ON_ONCE(*sp & 0x1);
+}
+
+/* Take a snapshot of the update side's sequence number. */
+static unsigned long rcu_seq_snap(unsigned long *sp)
+{
+	unsigned long s;
+
+	smp_mb(); /* Caller's modifications seen first by other CPUs. */
+	s = (READ_ONCE(*sp) + 3) & ~0x1;
+	smp_mb(); /* Above access must not bleed into critical section. */
+	return s;
+}
+
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not a
+ * full update-side operation has occurred.
+ */
+static bool rcu_seq_done(unsigned long *sp, unsigned long s)
+{
+	return ULONG_CMP_GE(READ_ONCE(*sp), s);
+}
+
+/* Wrapper functions for expedited grace periods. */
+static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+{
+	rcu_seq_start(&rsp->expedited_sequence);
+}
+static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+{
+	rcu_seq_end(&rsp->expedited_sequence);
+}
+static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+{
+	return rcu_seq_snap(&rsp->expedited_sequence);
+}
+static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+{
+	return rcu_seq_done(&rsp->expedited_sequence, s);
+}
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	struct rcu_state *rsp = data;
@@ -3269,7 +3323,7 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
 			      atomic_long_t *stat, unsigned long s)
 {
-	if (ULONG_CMP_GE(READ_ONCE(rsp->expedited_sequence), s)) {
+	if (rcu_exp_gp_seq_done(rsp, s)) {
 		if (rnp)
 			mutex_unlock(&rnp->exp_funnel_mutex);
 		/* Ensure test happens before caller kfree(). */
@@ -3306,9 +3360,7 @@ void synchronize_sched_expedited(void)
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* Take a snapshot of the sequence number. */
-	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	s = (READ_ONCE(rsp->expedited_sequence) + 3) & ~0x1;
-	smp_mb(); /* Above access must not bleed into critical section. */
+	s = rcu_exp_gp_seq_snap(rsp);
 
 	if (!try_get_online_cpus()) {
 		/* CPU hotplug operation in flight, fall back to normal GP. */
@@ -3339,9 +3391,7 @@ void synchronize_sched_expedited(void)
 	if (sync_sched_exp_wd(rsp, rnp0, &rsp->expedited_workdone2, s))
 		return;
 
-	WRITE_ONCE(rsp->expedited_sequence, rsp->expedited_sequence + 1);
-	smp_mb(); /* Ensure expedited GP seen after counter increment. */
-	WARN_ON_ONCE(!(rsp->expedited_sequence & 0x1));
+	rcu_exp_gp_seq_start(rsp);
 
 	/* Stop each CPU that is online, non-idle, and not us. */
 	init_waitqueue_head(&rsp->expedited_wq);
@@ -3364,9 +3414,7 @@ void synchronize_sched_expedited(void)
 	wait_event(rsp->expedited_wq,
 		   !atomic_read(&rsp->expedited_need_qs));
 
-	smp_mb(); /* Ensure expedited GP seen before counter increment. */
-	WRITE_ONCE(rsp->expedited_sequence, rsp->expedited_sequence + 1);
-	WARN_ON_ONCE(rsp->expedited_sequence & 0x1);
+	rcu_exp_gp_seq_end(rsp);
 	mutex_unlock(&rnp0->exp_funnel_mutex);
 	smp_mb(); /* ensure subsequent action seen after grace period. */
 