summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.ibm.com>2019-07-15 09:06:40 -0400
committerPaul E. McKenney <paulmck@linux.ibm.com>2019-08-13 17:38:24 -0400
commit296181d78df9892e08e794f2a9a4d2c38f9acedb (patch)
treeebec54a8019bec324e60c76ac6980f80d4823f0f
parent1d5a81c18dc68fc38a52e8dab1992a043a358927 (diff)
rcu/nocb: Reduce __call_rcu_nocb_wake() leaf rcu_node ->lock contention
Currently, __call_rcu_nocb_wake() advances callbacks each time that it detects excessive numbers of callbacks, though only if it succeeds in conditionally acquiring its leaf rcu_node structure's ->lock. Despite the conditional acquisition of ->lock, this does increase contention. This commit therefore avoids advancing callbacks unless there are callbacks in ->cblist whose grace period has completed and advancing has not yet been done during this jiffy. Note that this decision does not take the presence of new callbacks into account. That is because on this code path, there will always be at least one new callback, namely the one we just enqueued. Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
-rw-r--r--  kernel/rcu/tree_plugin.h  |  13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f6f23a16bd64..f56fb4e97a8e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1872,6 +1872,8 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 				 unsigned long flags)
 				 __releases(rdp->nocb_lock)
 {
+	unsigned long cur_gp_seq;
+	unsigned long j;
 	long len;
 	struct task_struct *t;
 
@@ -1900,12 +1902,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		/* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = len;
-		if (rdp->nocb_cb_sleep ||
-		    !rcu_segcblist_ready_cbs(&rdp->cblist)) {
+		j = jiffies;
+		if (j != rdp->nocb_gp_adv_time &&
+		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
+			rdp->nocb_gp_adv_time = j;
+		}
+		if (rdp->nocb_cb_sleep ||
+		    !rcu_segcblist_ready_cbs(&rdp->cblist))
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
 					   TPS("WakeOvfIsDeferred"));
-		}
 		rcu_nocb_unlock_irqrestore(rdp, flags);
 	} else {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));