author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-09-24 18:04:06 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-12-03 13:10:18 -0500
commit     78e4bc34e5d966cfd95f1238565afc399d56225c
tree       e430291c48ec41b22e31865bf5aa13c4db87a3cb /kernel/rcu/torture.c
parent     6193c76aba8ec3cc5f083c35efbab9ed924125f6
rcu: Fix and comment ordering around wait_event()
It is all too easy to forget that wait_event() does not necessarily
imply a full memory barrier. The case where it does not is where the
condition transitions to true just as wait_event() starts execution.
This is actually a feature: The standard use of wait_event() involves
locking, in which case the locks provide the needed ordering (you hold a
lock across the wake_up() and acquire that same lock after wait_event()
returns).
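
To make that lock-based pattern concrete, here is a minimal sketch (not from this patch; `my_lock`, `my_wq`, and `cond` are invented names) of a waker that updates the condition under a lock and a waiter that acquires that same lock after wait_event() returns:

```c
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool cond;

/* Waker: set the condition while holding the lock, then wake. */
static void waker(void)
{
	spin_lock(&my_lock);
	cond = true;
	wake_up(&my_wq);
	spin_unlock(&my_lock);
}

/*
 * Waiter: wait_event() by itself is not guaranteed to be a full
 * memory barrier, but acquiring the lock that the waker held across
 * wake_up() provides the needed ordering.
 */
static void waiter(void)
{
	wait_event(my_wq, cond);
	spin_lock(&my_lock);
	/* The waker's stores prior to wake_up() are now visible. */
	spin_unlock(&my_lock);
}
```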
Given that I did forget that wait_event() does not necessarily imply a
full memory barrier in one case, this commit fixes that case. This commit
also adds comments calling out the placement of existing memory barriers
relied on by wait_event() calls.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/torture.c')

 kernel/rcu/torture.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
index 3929cd451511..69a4ec80a788 100644
--- a/kernel/rcu/torture.c
+++ b/kernel/rcu/torture.c
@@ -1578,6 +1578,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 {
 	long myid = (long)arg;
 	bool lastphase = 0;
+	bool newphase;
 	struct rcu_head rcu;
 
 	init_rcu_head_on_stack(&rcu);
@@ -1585,10 +1586,11 @@ static int rcu_torture_barrier_cbs(void *arg)
 	set_user_nice(current, 19);
 	do {
 		wait_event(barrier_cbs_wq[myid],
-			   barrier_phase != lastphase ||
+			   (newphase =
+			    ACCESS_ONCE(barrier_phase)) != lastphase ||
 			   kthread_should_stop() ||
 			   fullstop != FULLSTOP_DONTSTOP);
-		lastphase = barrier_phase;
+		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
@@ -1625,7 +1627,7 @@ static int rcu_torture_barrier(void *arg)
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
 		n_barrier_attempts++;
-		cur_ops->cb_barrier();
+		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 			n_rcu_torture_barrier_error++;
 			WARN_ON_ONCE(1);
```
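
In the lockless case patched above there is no such lock, so the ordering must be supplied explicitly. A minimal sketch of that pattern (again with invented names, `phase` and `phase_wq`), mirroring the ACCESS_ONCE() plus smp_mb() usage in the diff:

```c
#include <linux/compiler.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(phase_wq);
static bool phase;

/* Waker: order prior stores before the phase flip, then wake. */
static void flip_phase(void)
{
	smp_mb();  /* Prior work must be visible before the flip. */
	phase = !phase;
	wake_up(&phase_wq);
}

/*
 * Waiter: snapshot the phase exactly once with ACCESS_ONCE(), then
 * execute smp_mb() so the phase load is ordered before any later
 * accesses, as in the patched rcu_torture_barrier_cbs().
 */
static bool wait_for_flip(bool lastphase)
{
	bool newphase;

	wait_event(phase_wq,
		   (newphase = ACCESS_ONCE(phase)) != lastphase);
	smp_mb();  /* Order the phase load before subsequent work. */
	return newphase;
}
```

The ACCESS_ONCE() in the condition matters because wait_event() may evaluate the condition several times: capturing the value into `newphase` guarantees that the value tested is the same value later assigned to `lastphase`, rather than two separate loads that a racing phase flip could make disagree.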