author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-02-18 12:47:13 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-04-29 11:42:31 -0400
commit     91dc95427a0d30ac2c58d6e943c7f40a3f25d908
tree       546418f74ad6d0fae8a736290139475fd34dafa1
parent     c9eaa447e77efe77b7fa4c953bd62de8297fd6c5
rcu: Protect ->gp_flags accesses with ACCESS_ONCE()
A number of ->gp_flags accesses don't have ACCESS_ONCE(), but all of
them can race against other loads or stores.  This commit therefore
applies ACCESS_ONCE() to the unprotected ->gp_flags accesses.
Reported-by: Alexey Roytman <alexey.roytman@oracle.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
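
For background, ACCESS_ONCE() constrains only the compiler: by routing the access through a volatile-qualified lvalue it guarantees that exactly the accesses written in the source are emitted, so a racing reader cannot see a torn, re-fetched, or compiler-invented intermediate value of ->gp_flags. The sketch below is a minimal standalone userspace illustration, not kernel code; the ACCESS_ONCE() macro follows the era's include/linux/compiler.h definition, while struct fake_rcu_state and the flag values are stand-ins chosen for the example. Ordering against other CPUs still comes from the locks and smp_mb__after_unlock_lock() calls visible in the hunks below.

#include <stdio.h>

/* Same shape as the then-current kernel definition. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Stand-in for struct rcu_state; only the field this patch touches. */
struct fake_rcu_state {
        unsigned long gp_flags;
};

/* Flag values chosen for illustration. */
#define RCU_GP_FLAG_INIT 0x1
#define RCU_GP_FLAG_FQS  0x2

int main(void)
{
        struct fake_rcu_state rsp = { .gp_flags = 0 };

        /* Marked accesses are emitted as written and never merged,
         * torn, or re-fetched by the compiler. */
        ACCESS_ONCE(rsp.gp_flags) = RCU_GP_FLAG_INIT;
        ACCESS_ONCE(rsp.gp_flags) |= RCU_GP_FLAG_FQS;

        if (!ACCESS_ONCE(rsp.gp_flags))
                printf("no flags set: spurious-wakeup path\n");
        else
                printf("gp_flags = %#lx\n", ACCESS_ONCE(rsp.gp_flags));

        return 0;
}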
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0c47e300210a..2c53ac924cab 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1403,12 +1403,12 @@ static int rcu_gp_init(struct rcu_state *rsp)
         rcu_bind_gp_kthread();
         raw_spin_lock_irq(&rnp->lock);
         smp_mb__after_unlock_lock();
-        if (rsp->gp_flags == 0) {
+        if (!ACCESS_ONCE(rsp->gp_flags)) {
                 /* Spurious wakeup, tell caller to go back to sleep. */
                 raw_spin_unlock_irq(&rnp->lock);
                 return 0;
         }
-        rsp->gp_flags = 0; /* Clear all flags: New grace period. */
+        ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
 
         if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
                 /*
@@ -1501,7 +1501,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
         if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                 raw_spin_lock_irq(&rnp->lock);
                 smp_mb__after_unlock_lock();
-                rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
+                ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
                 raw_spin_unlock_irq(&rnp->lock);
         }
         return fqs_state;
@@ -1566,7 +1566,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         rdp = this_cpu_ptr(rsp->rda);
         rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
         if (cpu_needs_another_gp(rsp, rdp)) {
-                rsp->gp_flags = RCU_GP_FLAG_INIT;
+                ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
                 trace_rcu_grace_period(rsp->name,
                                        ACCESS_ONCE(rsp->gpnum),
                                        TPS("newreq"));
@@ -1695,7 +1695,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                  */
                 return;
         }
-        rsp->gp_flags = RCU_GP_FLAG_INIT;
+        ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
         trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
                                TPS("newreq"));
 
@@ -2320,7 +2320,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
                 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                 return; /* Someone beat us to it. */
         }
-        rsp->gp_flags |= RCU_GP_FLAG_FQS;
+        ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
         raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
         wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
 }