author:    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-04-27 18:16:50 -0400
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-12 17:27:49 -0400
commit:    e4be81a2ed3a7356a2c22c7571af622ceb57eb2b
tree:      38cbee1c366f001f058b327acc7fda176e08d793 /kernel/rcu/tree.c
parent:    c9a24e2d0c7d33b141167f5fa13f95cf6d35cb1e
rcu: Convert conditional grace-period primitives to ->gp_seq
This commit converts get_state_synchronize_rcu(), cond_synchronize_rcu(), get_state_synchronize_sched(), and cond_synchronize_sched() from ->gpnum and ->completed to ->gp_seq.

Note that this also introduces a full memory barrier in the already-done paths of cond_synchronize_rcu() and cond_synchronize_sched(): work with LKMM indicates that the earlier smp_load_acquire() calls were insufficiently strong in some situations where these two functions were called just as the grace period ended. In such cases, these two functions would not gain the benefit of memory ordering at the end of the grace period.

Note also that the performance impact is negligible, as neither function should be used anywhere near a fastpath in any case.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
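For context, these four functions exist so that an updater can snapshot the grace-period state, do unrelated time-consuming work, and then pay for a full synchronize_rcu() only if a grace period has not already elapsed in the meantime. The sketch below illustrates the intended calling pattern; it is not code from this patch, and struct foo, do_time_consuming_work(), and foo_free_old() are hypothetical:

	/*
	 * Hypothetical caller (illustration only): defer freeing "old"
	 * until all pre-existing RCU readers are guaranteed to be done.
	 */
	static void foo_free_old(struct foo *old)
	{
		unsigned long gp_snap;

		gp_snap = get_state_synchronize_rcu(); /* Snapshot ->gp_seq. */
		do_time_consuming_work();	/* Hypothetical helper. */

		/*
		 * If a full grace period has already elapsed, this is a
		 * no-op (plus, after this patch, an smp_mb()); otherwise
		 * it blocks in synchronize_rcu().
		 */
		cond_synchronize_rcu(gp_snap);
		kfree(old);
	}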
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 42
1 file changed, 10 insertions(+), 32 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a54587dc13f0..fd2f582a6db0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3183,16 +3183,10 @@ unsigned long get_state_synchronize_rcu(void)
 {
 	/*
 	 * Any prior manipulation of RCU-protected data must happen
-	 * before the load from ->gpnum.
+	 * before the load from ->gp_seq.
 	 */
 	smp_mb(); /* ^^^ */
-
-	/*
-	 * Make sure this load happens before the purportedly
-	 * time-consuming work between get_state_synchronize_rcu()
-	 * and cond_synchronize_rcu().
-	 */
-	return smp_load_acquire(&rcu_state_p->gpnum);
+	return rcu_seq_snap(&rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
 
@@ -3212,15 +3206,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  */
 void cond_synchronize_rcu(unsigned long oldstate)
 {
-	unsigned long newstate;
-
-	/*
-	 * Ensure that this load happens before any RCU-destructive
-	 * actions the caller might carry out after we return.
-	 */
-	newstate = smp_load_acquire(&rcu_state_p->completed);
-	if (ULONG_CMP_GE(oldstate, newstate))
+	if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
 		synchronize_rcu();
+	else
+		smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
@@ -3235,16 +3224,10 @@ unsigned long get_state_synchronize_sched(void)
 {
 	/*
 	 * Any prior manipulation of RCU-protected data must happen
-	 * before the load from ->gpnum.
+	 * before the load from ->gp_seq.
 	 */
 	smp_mb(); /* ^^^ */
-
-	/*
-	 * Make sure this load happens before the purportedly
-	 * time-consuming work between get_state_synchronize_sched()
-	 * and cond_synchronize_sched().
-	 */
-	return smp_load_acquire(&rcu_sched_state.gpnum);
+	return rcu_seq_snap(&rcu_sched_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
 
@@ -3264,15 +3247,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  */
 void cond_synchronize_sched(unsigned long oldstate)
 {
-	unsigned long newstate;
-
-	/*
-	 * Ensure that this load happens before any RCU-destructive
-	 * actions the caller might carry out after we return.
-	 */
-	newstate = smp_load_acquire(&rcu_sched_state.completed);
-	if (ULONG_CMP_GE(oldstate, newstate))
+	if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
 		synchronize_sched();
+	else
+		smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
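For reference, the rcu_seq_snap() and rcu_seq_done() primitives that this patch switches to are defined in kernel/rcu/rcu.h. The sketch below is an approximation of that file as of this series, not part of this patch: the low RCU_SEQ_STATE_MASK bits of ->gp_seq encode the phase of the current grace period and the upper bits count grace periods, so rcu_seq_snap() rounds up to the counter value at which a full new grace period will have completed, and rcu_seq_done() is a wraparound-safe comparison against that snapshot.

	/* Approximation of kernel/rcu/rcu.h circa this series. */
	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/*
	 * Return the ->gp_seq value at which a full grace period will
	 * have elapsed after the call: skip past any grace period now
	 * in progress, then add one more complete grace period.
	 */
	static inline unsigned long rcu_seq_snap(unsigned long *sp)
	{
		unsigned long s;

		s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) &
		    ~RCU_SEQ_STATE_MASK;
		smp_mb(); /* Above access must not bleed into critical section. */
		return s;
	}

	/* Has a full grace period elapsed since the snapshot "s"? */
	static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
	{
		return ULONG_CMP_GE(READ_ONCE(*sp), s);
	}

Note that rcu_seq_done() is a bare READ_ONCE() with no acquire semantics, which is exactly why the already-done paths in the patch above now execute an explicit smp_mb().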