author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-12-03 12:24:02 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-02-17 18:01:10 -0500
commit     3660c2813fb6d0ba48ee44bcbf9feddf7218c11d
tree       db632e8ff7302aae520dfab42e6b2fce4615fb7f
parent     6d0abeca3242a88cab8232e4acd7e2bf088f3bc2
rcu: Add ACCESS_ONCE() to ->n_force_qs_lh accesses
The ->n_force_qs_lh field is accessed without the benefit of any
synchronization, so this commit adds the needed ACCESS_ONCE() wrappers.
Yes, increments to ->n_force_qs_lh can be lost, but contention should
be low and the field is strictly statistical in nature, so this is not
a problem.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
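For context, ACCESS_ONCE() was at the time defined in include/linux/compiler.h as a volatile cast. The cast forces the compiler to emit exactly one load or store for the access (no tearing, fusing, or re-reading), but it provides no atomicity across a read-modify-write, which is why the commit message above concedes that increments can be lost:

/* Historical definition from include/linux/compiler.h: the volatile
 * cast makes the compiler perform exactly one access to x. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* ACCESS_ONCE(rsp->n_force_qs_lh)++ therefore compiles to a volatile
 * load, an increment, and a volatile store -- three separate steps.
 * Two CPUs racing here can both load the same old value, so one
 * increment is lost; the commit accepts this trade-off because the
 * field is strictly statistical. */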
Diffstat (limited to 'kernel/rcu')
 kernel/rcu/tree.c       | 4 ++--
 kernel/rcu/tree_trace.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b3d116cd072d..e64157798624 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2304,7 +2304,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	if (rnp_old != NULL)
 		raw_spin_unlock(&rnp_old->fqslock);
 	if (ret) {
-		rsp->n_force_qs_lh++;
+		ACCESS_ONCE(rsp->n_force_qs_lh)++;
 		return;
 	}
 	rnp_old = rnp;
@@ -2316,7 +2316,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		rsp->n_force_qs_lh++;
+		ACCESS_ONCE(rsp->n_force_qs_lh)++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return; /* Someone beat us to it. */
 	}
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 4def475336d4..d1f1e64a6d72 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -273,7 +273,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 	seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
 		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
+		   ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
 	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
 		if (rnp->level != level) {
 			seq_puts(m, "\n");
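To see the lost-increment behavior in isolation, here is a hypothetical user-space sketch (plain C with pthreads, not kernel code; the names n_stat and bump are invented for illustration). Two threads increment a shared counter through the same volatile-cast macro; the final total typically falls short of the full count, which is exactly the loss the commit tolerates for a statistics-only field:

#include <pthread.h>
#include <stdio.h>

/* Same shape as the kernel's historical ACCESS_ONCE(). */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long n_stat; /* stands in for ->n_force_qs_lh */

static void *bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		ACCESS_ONCE(n_stat)++; /* one load + one store, not atomic */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, bump, NULL);
	pthread_create(&b, NULL, bump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Usually prints less than 2000000: concurrent read-modify-writes
	 * race and some increments are lost. ACCESS_ONCE() only guarantees
	 * that each individual load and store happens exactly once, which
	 * is all a statistical counter (and its tree_trace.c reader) needs. */
	printf("n_stat = %lu (2000000 if no increments were lost)\n", n_stat);
	return 0;
}

Compile with something like `gcc -pthread demo.c` and run it a few times; the shortfall varies with scheduling and core count, mirroring the low-contention argument in the commit message.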