author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-09-24 18:04:06 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-12-03 13:10:18 -0500
commit     78e4bc34e5d966cfd95f1238565afc399d56225c
tree       e430291c48ec41b22e31865bf5aa13c4db87a3cb  /kernel/rcu/tree.c
parent     6193c76aba8ec3cc5f083c35efbab9ed924125f6
rcu: Fix and comment ordering around wait_event()
It is all too easy to forget that wait_event() does not necessarily
imply a full memory barrier. The case where it does not is where the
condition transitions to true just as wait_event() starts execution.
This is actually a feature: The standard use of wait_event() involves
locking, in which case the locks provide the needed ordering (you hold a
lock across the wake_up() and acquire that same lock after wait_event()
returns).
Given that I did forget that wait_event() does not necessarily imply a
full memory barrier in one case, this commit fixes that case. This commit
also adds comments calling out the placement of existing memory barriers
relied on by wait_event() calls.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
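For reference, the barrier-free fast path described above follows from the definition of wait_event() itself, which in kernels of this era reduces to roughly the following (a simplified sketch of include/linux/wait.h; the sleeping slow path is elided):

/*
 * Simplified sketch of wait_event() (see include/linux/wait.h).
 * If the condition is already true, the macro returns immediately:
 * no sleep, no wakeup handshake, and no implied full memory barrier.
 */
#define wait_event(wq, condition)		\
do {						\
	if (condition)				\
		break;	/* barrier-free fast path */	\
	__wait_event(wq, condition);		\
} while (0)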
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5243ebea0fc1..abef9c358d47 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1533,6 +1533,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
+		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1577,6 +1578,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			wait_event_interruptible(rsp->gp_wq,
 						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
+			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
@@ -1606,6 +1608,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
+			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!ACCESS_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))