about summary refs log tree commit diff stats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-03-16 19:47:55 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-03-31 16:34:11 -0400
commit3b5f668e715bc19610ad967ef97a7e8c55a186ec (patch)
tree1adb1e9d9d1aa5220163ae2cefccc27bd93e3db4 /kernel/rcu/tree.c
parentaff12cdf86e6fa891d1c30c0fad112d138bd7b10 (diff)
rcu: Overlap wakeups with next expedited grace period
The current expedited grace-period implementation makes subsequent grace periods wait on wakeups for the prior grace period. This does not fit the dictionary definition of "expedited", so this commit allows these two phases to overlap. Doing this requires four waitqueues rather than two because tasks can now be waiting on the previous, current, and next grace periods. The fourth waitqueue makes the bit masking work out nicely. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	| 17 +++++++++++++----
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e8fff14e417b..1df100cb7a62 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -103,6 +103,7 @@ struct rcu_state sname##_state = { \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
 	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
+	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -3637,7 +3638,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
 						  rnp->grplo, rnp->grphi,
 						  TPS("wait"));
-			wait_event(rnp->exp_wq[(s >> 1) & 0x1],
+			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
 				   sync_exp_work_done(rsp,
 						      &rdp->exp_workdone2, s));
 			return true;
@@ -3857,6 +3858,14 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	synchronize_sched_expedited_wait(rsp);
 	rcu_exp_gp_seq_end(rsp);
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+
+	/*
+	 * Switch over to wakeup mode, allowing the next GP, but -only- the
+	 * next GP, to proceed.
+	 */
+	mutex_lock(&rsp->exp_wake_mutex);
+	mutex_unlock(&rsp->exp_mutex);
+
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 			spin_lock(&rnp->exp_lock);
@@ -3865,10 +3874,10 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 				rnp->exp_seq_rq = s;
 			spin_unlock(&rnp->exp_lock);
 		}
-		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x1]);
+		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
 	}
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
-	mutex_unlock(&rsp->exp_mutex);
+	mutex_unlock(&rsp->exp_wake_mutex);
 }
 
 /**
@@ -4530,6 +4539,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		rcu_init_one_nocb(rnp);
 		init_waitqueue_head(&rnp->exp_wq[0]);
 		init_waitqueue_head(&rnp->exp_wq[1]);
+		init_waitqueue_head(&rnp->exp_wq[2]);
+		init_waitqueue_head(&rnp->exp_wq[3]);
 		spin_lock_init(&rnp->exp_lock);
 	}
 }