path: root/kernel/rcu/tree.c
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-06-26 18:43:27 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-07-25 16:04:47 -0400
commit    9fa46fb8c9c6dfad30487fb3d905c2ff04b379b7 (patch)
tree      5ee89ccbbac6ff2418ebaf4e2a2c4dab9ae49f00 /kernel/rcu/tree.c
parent    b1a2d79fe7d210c114003362d93d529912d244df (diff)
rcu: Advance outgoing CPU's callbacks before migrating them
It is possible that the outgoing CPU is unaware of recent grace periods, and so it is also possible that some of its pending callbacks are actually ready to be invoked. The current callback-migration code would needlessly force these callbacks to pass through another grace period. This commit therefore invokes rcu_advance_cbs() on the outgoing CPU's callbacks in order to give them full credit for having passed through any recent grace periods.

This also fixes an odd theoretical bug where there are no callbacks in the system except for those on the outgoing CPU, none of those callbacks have yet been associated with a grace-period number, there is never again another callback registered, and the surviving CPU never again takes a scheduling-clock interrupt, never goes idle, and never enters nohz_full userspace execution. Yes, this is (just barely) possible. It requires that the surviving CPU be a nohz_full CPU, that its scheduler-clock interrupt be shut off, and that it loop forever in the kernel. You get bonus points if you can make this one happen! ;-)

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
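To make the effect of rcu_advance_cbs() concrete, here is a toy, self-contained model of the underlying idea; it is not kernel code, and toy_cb and toy_advance_cbs() are invented names. Each callback records the grace-period number after which it is safe to invoke, and "advancing" compares that number against the completed-GP count known at the root rcu_node, so callbacks whose grace period already elapsed get credit for it rather than waiting out another full grace period after migration.

/*
 * Toy model of advancing callbacks against the completed-GP count.
 * Not kernel code: toy_cb and toy_advance_cbs() are invented names.
 */
#include <stdio.h>

struct toy_cb {
	unsigned long gp_seq;	/* GP after which this callback is ready. */
	const char *name;
};

/* Report which callbacks are already invocable given the completed GP. */
static void toy_advance_cbs(struct toy_cb *cbs, int n,
			    unsigned long completed_gp)
{
	int i;

	for (i = 0; i < n; i++) {
		if (cbs[i].gp_seq <= completed_gp)
			printf("%s: ready now (gp %lu <= %lu)\n",
			       cbs[i].name, cbs[i].gp_seq, completed_gp);
		else
			printf("%s: must wait for gp %lu\n",
			       cbs[i].name, cbs[i].gp_seq);
	}
}

int main(void)
{
	/*
	 * The outgoing CPU queued cb_a behind GP 4 and cb_b behind GP 7,
	 * but has not yet noticed that GP 5 completed.  Advancing against
	 * the root node's count makes cb_a immediately invocable instead
	 * of forcing it through another grace period after migration.
	 */
	struct toy_cb cbs[] = {
		{ 4, "cb_a" },
		{ 7, "cb_b" },
	};

	toy_advance_cbs(cbs, 2, 5);
	return 0;
}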
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4ea28e820f4a..c080c6ed66af 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3926,6 +3926,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
 	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
 		return;  /* No callbacks to migrate. */
@@ -3936,7 +3937,11 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 		local_irq_restore(flags);
 		return;
 	}
-	raw_spin_lock(&rsp->orphan_lock); /* irqs already disabled. */
+	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
+	raw_spin_unlock_rcu_node(rnp_root);
+
+	raw_spin_lock(&rsp->orphan_lock);
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
 	rcu_adopt_orphan_cbs(rsp, flags);
 	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
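For reference, the tail of rcu_migrate_callbacks() as it reads after this patch, reconstructed from the hunks above: the root rcu_node lock is taken first (rcu_advance_cbs() runs under an rcu_node lock) and released before orphan_lock is acquired, so the two locks are never held together.

	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
	raw_spin_unlock_rcu_node(rnp_root);

	raw_spin_lock(&rsp->orphan_lock);
	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
	rcu_adopt_orphan_cbs(rsp, flags);
	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);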