author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-04-22 11:49:24 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-15 13:29:51 -0400
commit	ec4eaccef4af28376345554580606a43d7392ed8 (patch)
tree	a5aec9ba6b05c9f6ffdfd95bdf4ac891c904a368
parent	6f576e281690316270275bbef17c79ea304ad511 (diff)
rcu: Make rcu_migrate_callbacks wake GP kthread when needed
The rcu_migrate_callbacks() function invokes rcu_advance_cbs() twice,
ignoring the return value. This is OK at present because of failsafe
code that does the wakeup when needed. However, this failsafe code
acquires the root rcu_node structure's lock frequently, while
rcu_migrate_callbacks() does so only once per CPU-offline operation.

This commit therefore makes rcu_migrate_callbacks() wake up the RCU GP
kthread when either call to rcu_advance_cbs() returns true, thus
removing the need for the failsafe code.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
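[Editor's illustration] A minimal user-space sketch of the pattern this commit applies, not the kernel implementation: each advance operation reports whether the grace-period kthread now has work, the results are OR'd into a single flag, and one wakeup is issued after the lock is dropped. All names here (struct gp_state, advance_cbs(), gp_kthread_wake(), migrate_callbacks()) are hypothetical stand-ins, and pthread primitives model the rcu_node lock and the GP kthread's waitqueue. Note that the upstream code combines the two rcu_advance_cbs() calls directly with "||"; this toy model evaluates both explicitly so neither advance is short-circuited away.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for per-CPU callback/grace-period state. */
struct gp_state {
	pthread_mutex_t lock;    /* models the root rcu_node lock */
	pthread_cond_t  wake;    /* models the GP kthread's waitqueue */
	int             pending; /* callbacks not yet assigned a grace period */
};

/*
 * Assign pending callbacks to a grace period and report whether the
 * GP kthread needs waking -- the boolean return that the old failsafe
 * code allowed rcu_migrate_callbacks() to ignore.
 */
static bool advance_cbs(struct gp_state *gs)
{
	bool had_pending = gs->pending > 0;

	gs->pending = 0;
	return had_pending;
}

static void gp_kthread_wake(struct gp_state *gs)
{
	pthread_cond_signal(&gs->wake); /* models rcu_gp_kthread_wake() */
}

static void migrate_callbacks(struct gp_state *dying, struct gp_state *mine)
{
	bool needwake;

	pthread_mutex_lock(&mine->lock); /* one acquisition per offline op */
	needwake = advance_cbs(dying);
	needwake = advance_cbs(mine) || needwake;
	pthread_mutex_unlock(&mine->lock);
	if (needwake)			 /* wake once, after dropping the lock */
		gp_kthread_wake(mine);
}

int main(void)
{
	struct gp_state dying = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_COND_INITIALIZER, 3 };
	struct gp_state mine  = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_COND_INITIALIZER, 0 };

	migrate_callbacks(&dying, &mine);
	printf("pending on dying CPU after migration: %d\n", dying.pending);
	return 0;
}

As in the patch below, the wakeup is deferred until after the lock is released, so a single check of the combined flag replaces the frequent lock acquisitions the failsafe code performed.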
-rw-r--r--	kernel/rcu/tree.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6ef1f2b4a6d3..f75eb5174021 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3876,6 +3876,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+	bool needwake;
 
 	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
 		return;  /* No callbacks to migrate. */
@@ -3887,12 +3888,15 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 		return;
 	}
 	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
-	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
-	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
+	/* Leverage recent GPs and set GP for new callbacks. */
+	needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
+		   rcu_advance_cbs(rsp, rnp_root, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
+	if (needwake)
+		rcu_gp_kthread_wake(rsp);
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
 		  !rcu_segcblist_empty(&rdp->cblist),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",