Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
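The theme of this patch: ACCESS_ONCE() applied to a read-modify-write such as
ACCESS_ONCE(x)++ produces *two* volatile accesses, a load and a store, even
though it reads like a single operation. Each site below is rewritten so that
ACCESS_ONCE() wraps either the load or the store, never both; where the counter
is only a statistic (->n_force_qs_lh), ACCESS_ONCE() is dropped entirely. A
minimal userspace sketch of the expansion, assuming the classic ACCESS_ONCE()
definition from include/linux/compiler.h of this era:

#include <stdio.h>

/* Classic definition, as in include/linux/compiler.h. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static long qlen;

int main(void)
{
	/* Old form: ++ on a volatile lvalue forces a volatile load
	 * followed by a volatile store -- two "once" accesses. */
	ACCESS_ONCE(qlen)++;

	/* New form: plain load, then one volatile store.  Equivalent
	 * when only a single context can update qlen at a time. */
	ACCESS_ONCE(qlen) = qlen + 1;

	printf("qlen = %ld\n", qlen);	/* prints 2 */
	return 0;
}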
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ebd99af2214e..6bf7daebcc6b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
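Note on the hunk above: ->qlen appears to be updated only by its owning CPU
(rcu_do_batch() seems to run this accounting with interrupts disabled), so the
plain read of rdp->qlen cannot race with another writer. The volatile store is
kept so that lockless readers on other CPUs, sampling the queue length with a
plain ACCESS_ONCE(rdp->qlen) load, observe a single untorn update rather than
some intermediate value.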
@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			ACCESS_ONCE(rsp->n_force_qs_lh)++;
+			rsp->n_force_qs_lh++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2504,7 +2504,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		ACCESS_ONCE(rsp->n_force_qs_lh)++;
+		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return; /* Someone beat us to it. */
 	}
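In the two force_quiescent_state() hunks, ->n_force_qs_lh is a contention
statistic (roughly, how often the funnel lock was found held), so a racy plain
increment is acceptable: a lost count merely under-reports contention in
diagnostics. That is presumably why ACCESS_ONCE() is removed outright here
rather than being split into separate load and store forms.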
@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
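Same single-writer reasoning as in rcu_do_batch(): __call_rcu() appears to
perform this enqueue accounting with interrupts disabled (note the
local_irq_restore() in the context above), so nothing can update the per-CPU
->qlen between the plain read and the volatile store.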
@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
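For ->n_barrier_done the volatile store is kept: per the comment preserved in
the Inc1 hunk, it stops the compiler from speculating the increment above the
early-exit check. The surrounding WARN_ON_ONCE() parity tests encode the
counter's even/odd phasing, roughly as follows (hypothetical helper for
illustration, not kernel code):

/* ->n_barrier_done is odd while an rcu_barrier() is in flight and
 * even when idle: Inc1 makes it odd on entry, Inc2 even on exit. */
static inline int rcu_barrier_in_progress(unsigned long n_barrier_done)
{
	return n_barrier_done & 0x1;
}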