aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-04-12 10:20:30 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-05-15 13:29:28 -0400
commitfb31340f8a43a6f2e871164822ef4979b36232ae (patch)
treee5ad7915dac6fee31e38e9925292a13c53c5eef1 /kernel/rcu/tree.c
parent5fe0a56298e674358ff2740a6288bf21509d895d (diff)
rcu: Make rcu_gp_cleanup() more accurately predict need for new GP
Currently, rcu_gp_cleanup() scans the rcu_node tree in order to reset state
to reflect the end of the grace period.  It also checks to see whether a new
grace period is needed, but in a number of cases, rather than directly cause
the new grace period to be immediately started, it instead leaves the
grace-period-needed state where various fail-safes can find it.  This works
fine, but results in higher contention on the root rcu_node structure's
->lock, which is undesirable, and contention on that lock has recently
become noticeable.

This commit therefore makes rcu_gp_cleanup() immediately start a new grace
period if there is any need for one.

It is quite possible that it will later be necessary to throttle the
grace-period rate, but that can be dealt with when and if.

Reported-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 497f139056c7..afc5e32f0da4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1763,14 +1763,14 @@ out:
 /*
  * Clean up any old requests for the just-ended grace period.  Also return
  * whether any additional grace periods have been requested.
  */
-static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	int c = rnp->completed;
-	int needmore;
+	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
 	need_future_gp_element(rnp, c) = 0;
-	needmore = need_future_gp_element(rnp, c + 1);
+	needmore = need_any_future_gp(rnp);
 	trace_rcu_future_gp(rnp, rdp, c,
 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
@@ -2113,7 +2113,6 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
 	unsigned long gp_duration;
 	bool needgp = false;
-	int nocb = 0;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	struct swait_queue_head *sq;
@@ -2152,7 +2151,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
-		nocb += rcu_future_gp_cleanup(rsp, rnp);
+		needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
@@ -2162,13 +2161,18 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
-	rcu_nocb_gp_set(rnp, nocb);
 
 	/* Declare grace period done. */
 	WRITE_ONCE(rsp->completed, rsp->gpnum);
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->gp_state = RCU_GP_IDLE;
+	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(rsp->rda);
+	if (need_any_future_gp(rnp)) {
+		trace_rcu_future_gp(rnp, rdp, rsp->completed - 1,
+				    TPS("CleanupMore"));
+		needgp = true;
+	}
 	/* Advance CBs to reduce false positives below. */
 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {