about summary refs log tree commit diff stats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-04-11 17:33:18 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-05-15 13:29:57 -0400
commita6058d85a2b24fa40ce7f0d7683989ec47b603b9 (patch)
tree5e721a38d4ef5bf23e618fc98b1c230546321bb5 /kernel/rcu/tree.c
parentec4eaccef4af28376345554580606a43d7392ed8 (diff)
rcu: Avoid __call_rcu_core() root rcu_node ->lock acquisition
When __call_rcu_core() notices excessive numbers of callbacks pending on the current CPU, we know that at least one of them is not yet classified, namely the one that was just now queued. Therefore, it is not necessary to invoke rcu_start_gp() and thus not necessary to acquire the root rcu_node structure's ->lock. This commit therefore replaces the rcu_start_gp() with rcu_accelerate_cbs(), thus replacing an acquisition of the root rcu_node structure's ->lock with that of this CPU's leaf rcu_node structure. This decreases contention on the root rcu_node structure's ->lock.

Reported-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f75eb5174021..6396a3d10be9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2988,11 +2988,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 	/* Start a new grace period if one not already started. */
 	if (!rcu_gp_in_progress(rsp)) {
-		struct rcu_node *rnp_root = rcu_get_root(rsp);
+		struct rcu_node *rnp = rdp->mynode;
 
-		raw_spin_lock_rcu_node(rnp_root);
-		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_rcu_node(rnp_root);
+		raw_spin_lock_rcu_node(rnp);
+		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		raw_spin_unlock_rcu_node(rnp);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {