author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-06-02 17:54:34 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-07-09 12:14:49 -0400
commit    a792563bd47632d85158c72e2acf4484eed0ec32 (patch)
tree      bbe0ccd1e3966c92f978347e9e32cbe9cd8a86bd /kernel/rcu/tree.c
parent    4da117cfa72e6cde3d9e8f5ed932381863cdeec9 (diff)
rcu: Eliminate read-modify-write ACCESS_ONCE() calls
RCU contains code of the following forms:

	ACCESS_ONCE(x)++;
	ACCESS_ONCE(x) += y;
	ACCESS_ONCE(x) -= y;

Now these constructs do operate correctly, but they really result in a pair of volatile accesses, one to do the load and another to do the store. This can be confusing, as the casual reader might well assume that (for example) gcc would generate a memory-to-memory add instruction for each of these three cases. In fact, gcc will do no such thing. Also, there is a good chance that the kernel will move to separate load and store variants of ACCESS_ONCE(), and constructs like the above could easily confuse both people and scripts attempting to make that sort of change. Finally, most of RCU's read-modify-write uses of ACCESS_ONCE() really only need the store to be volatile, so the read-modify-write form can be misleading.

This commit therefore changes the above forms in RCU so that each instance of ACCESS_ONCE() either does a load or a store, but not both. In a few cases, ACCESS_ONCE() was not critical, for example, for maintaining statistics. In these cases, ACCESS_ONCE() has been dispensed with entirely.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
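For illustration, here is a minimal stand-alone sketch of the two forms. The ACCESS_ONCE() definition mirrors the one in include/linux/compiler.h of this era; the qlen variable and the two helper functions are hypothetical stand-ins (e.g. for rdp->qlen), not part of this patch:

	/* ACCESS_ONCE() as defined in include/linux/compiler.h at the time. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	static unsigned long qlen;	/* hypothetical counter, stands in for rdp->qlen */

	static void dec_rmw(unsigned long count)
	{
		/* Read-modify-write form: the volatile cast covers both the
		 * load and the store, so the compiler emits a pair of
		 * volatile accesses, not a memory-to-memory subtract. */
		ACCESS_ONCE(qlen) -= count;
	}

	static void dec_split(unsigned long count)
	{
		/* Split form used by this commit: the load of qlen is an
		 * ordinary access; only the store goes through the
		 * volatile cast. */
		ACCESS_ONCE(qlen) = qlen - count;
	}

Either way the compiler performs a separate load and store; the split form simply makes explicit that only the store needs to be volatile, which is the property these counters actually rely on.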
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ebd99af2214e..6bf7daebcc6b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			ACCESS_ONCE(rsp->n_force_qs_lh)++;
+			rsp->n_force_qs_lh++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2504,7 +2504,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		ACCESS_ONCE(rsp->n_force_qs_lh)++;
+		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return; /* Someone beat us to it. */
 	}
@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */