 kernel/rcu/srcu.c |  4 ++--
 kernel/rcu/tree.c | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index c639556f3fa0..e037f3eb2f7b 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
 
 	idx = ACCESS_ONCE(sp->completed) & 0x1;
 	preempt_disable();
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->seq[idx]);
 	preempt_enable();
 	return idx;
 }
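In __srcu_read_lock() the counters are per-CPU and the increments already run under preempt_disable(), so each read-modify-write can become a single per-CPU increment. A minimal sketch of the before/after shapes (simplified; the real __this_cpu_inc() expands to arch-specific code, e.g. possibly a single segment-prefixed inc on x86):

	/* Before: volatile load of this CPU's counter, add, volatile store. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;

	/* After: one per-CPU increment.  The caller's preempt_disable()
	 * keeps us on this CPU, which is what __this_cpu_inc() requires. */
	__this_cpu_inc(sp->per_cpu_ref->c[idx]);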
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ebd99af2214e..6bf7daebcc6b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
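The tree.c sites keep ACCESS_ONCE() on the store but drop it from the load. ACCESS_ONCE() is essentially a volatile cast, so the compound-assignment form was a pair of volatile accesses, while the rewritten form is a plain load followed by a volatile store; neither form is atomic. A standalone user-space sketch of the difference (the macro is modeled on the kernel's definition; the variable names are illustrative only):

	#include <stdio.h>

	/* ACCESS_ONCE(): a volatile cast of the lvalue. */
	#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	static unsigned long qlen = 10;

	int main(void)
	{
		unsigned long count = 3;

		/* Old form: volatile load + add + volatile store
		 * (still not atomic). */
		ACCESS_ONCE(qlen) -= count;

		/* New form: plain load, then a single volatile store.
		 * Only the store is shielded from compiler optimization. */
		ACCESS_ONCE(qlen) = qlen + count;

		printf("qlen = %lu\n", qlen);	/* back to 10 */
		return 0;
	}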
@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			ACCESS_ONCE(rsp->n_force_qs_lh)++;
+			rsp->n_force_qs_lh++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2504,7 +2504,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		ACCESS_ONCE(rsp->n_force_qs_lh)++;
+		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return;  /* Someone beat us to it. */
 	}
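Both force_quiescent_state() hunks drop ACCESS_ONCE() altogether. ->n_force_qs_lh appears to be a diagnostic counter, so a plain, possibly racy increment is presumably good enough here: the worst case is a lost count, as the illustrative interleaving below shows.

	/* Plain ++ compiles to load, add, store.  If two CPUs race:
	 *
	 *	CPU 0				CPU 1
	 *	load  n_force_qs_lh (5)
	 *					load  n_force_qs_lh (5)
	 *	store n_force_qs_lh = 6
	 *					store n_force_qs_lh = 6
	 *
	 * One increment is lost -- tolerable for statistics, not for
	 * anything correctness-critical. */
	rsp->n_force_qs_lh++;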
@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
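Taken together, the patch replaces each read-modify-write ACCESS_ONCE() with whichever of three idioms fits the call site: __this_cpu_inc() where the variable is per-CPU and preemption is disabled, a plain increment where the counter is apparently only statistical, and ACCESS_ONCE(x) = x + delta where the volatile store still matters, as the retained comment about the early-exit check in _rcu_barrier() indicates. None of the original forms were atomic, so no atomicity is lost; the rewrite just makes the single intended volatile access explicit.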