author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-02 17:46:43 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 18:38:59 -0400
commit	8d672fa6bf68ffc36a0c5e4868499f86bbea2308 (patch)
tree	6ca1c30d5d329b35398a74752b4fea310acbc177 /kernel/rcu/tree.c
parent	c50cbe535c972150c2caf923239ef77e85c5ad60 (diff)
rcu: Make rcu_init_new_rnp() stop upon already-set bit
Currently, rcu_init_new_rnp() walks up the rcu_node combining tree, setting bits in the ->qsmaskinit fields on the way up. It walks up unconditionally, regardless of the initial state of these bits. This is OK because only the corresponding RCU grace-period kthread ever tests or sets these bits during runtime. However, it is also pointless, and it increases both memory and lock contention (albeit only slightly), so this commit stops the walk as soon as an already-set bit is encountered.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
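For illustration only, here is a minimal user-space sketch of the walk this patch changes: a toy node type standing in for the kernel's struct rcu_node, with the new early return as soon as a parent's already-nonzero qsmaskinit-style field is found. All names here (struct node, init_new_leaf) are hypothetical analogues for demonstration; the authoritative change is the diff that follows.

/*
 * Simplified user-space model of the rcu_init_new_rnp() walk after this
 * patch: propagate a leaf's bit up the combining tree, but stop as soon
 * as a parent already has a qsmaskinit bit set (hypothetical analogue,
 * not the kernel's data structures).
 */
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long grpmask;    /* bit this node occupies in its parent */
	unsigned long qsmaskinit; /* bits for children known to this node */
};

static void init_new_leaf(struct node *leaf)
{
	unsigned long mask;
	unsigned long oldmask;
	struct node *np = leaf;

	for (;;) {
		mask = np->grpmask;
		np = np->parent;
		if (np == NULL)
			return;
		oldmask = np->qsmaskinit;
		np->qsmaskinit |= mask;
		if (oldmask)	/* Ancestors already initialized: stop early. */
			return;
	}
}

int main(void)
{
	struct node root  = { .parent = NULL,   .grpmask = 0,   .qsmaskinit = 0 };
	struct node inner = { .parent = &root,  .grpmask = 0x1, .qsmaskinit = 0 };
	struct node leaf0 = { .parent = &inner, .grpmask = 0x1, .qsmaskinit = 0 };
	struct node leaf1 = { .parent = &inner, .grpmask = 0x2, .qsmaskinit = 0 };

	init_new_leaf(&leaf0);	/* Walks all the way to the root. */
	init_new_leaf(&leaf1);	/* Stops at inner: its qsmaskinit was already nonzero. */

	printf("inner.qsmaskinit = %#lx, root.qsmaskinit = %#lx\n",
	       inner.qsmaskinit, root.qsmaskinit);
	return 0;
}

In this sketch the second leaf's walk stops at the inner node, so the root keeps only the bit set by the first walk, which is the contention-reducing behavior the patch introduces.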
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 05f69b787a57..3fe854a15d82 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3479,9 +3479,10 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 {
 	long mask;
+	long oldmask;
 	struct rcu_node *rnp = rnp_leaf;
 
-	raw_lockdep_assert_held_rcu_node(rnp);
+	raw_lockdep_assert_held_rcu_node(rnp_leaf);
 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
 	for (;;) {
 		mask = rnp->grpmask;
@@ -3489,8 +3490,11 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 		if (rnp == NULL)
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+		oldmask = rnp->qsmaskinit;
 		rnp->qsmaskinit |= mask;
 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+		if (oldmask)
+			return;
 	}
 }
 