path: root/kernel/rcu/tree.c
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-06-27 10:44:06 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-07-25 16:04:49 -0400
commit	f2dbe4a562d4f17cc1bad3e36a9d1ccb19c86604 (patch)
tree	ad3875857f1d0e0bdc890ad04afd53259e8e2a05 /kernel/rcu/tree.c
parent	21cc248384aeb0375b3cac164c276c78c503291a (diff)
rcu: Localize rcu_state ->orphan_pend and ->orphan_done
Given that the rcu_state structure's ->orphan_pend and ->orphan_done fields are used only during migration of callbacks from the recently offlined CPU to a surviving CPU, if rcu_send_cbs_to_orphanage() and rcu_adopt_orphan_cbs() are combined, these fields can become local variables in the combined function. This commit therefore combines rcu_send_cbs_to_orphanage() and rcu_adopt_orphan_cbs() into a new rcu_segcblist_merge() function and removes the ->orphan_pend and ->orphan_done fields.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
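The body of rcu_segcblist_merge() lands in kernel/rcu/rcu_segcblist.c and so is outside this diffstat, but a rough sketch of the combined function, built only from the extract/insert primitives visible in the removed tree.c code below (the parameter names here are illustrative, not taken from the patch), could look like:

void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp)
{
	struct rcu_cblist donecbs;	/* Replaces rsp->orphan_done. */
	struct rcu_cblist pendcbs;	/* Replaces rsp->orphan_pend. */

	/* Runtime init replaces the RCU_CBLIST_INITIALIZER() uses removed below. */
	rcu_cblist_init(&donecbs);
	rcu_cblist_init(&pendcbs);

	/* Extract counts, still-pending callbacks, and done callbacks... */
	rcu_segcblist_extract_count(src_rsclp, &donecbs);
	rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);
	rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);

	/* ...and splice them into the surviving CPU's list. */
	rcu_segcblist_insert_count(dst_rsclp, &donecbs);
	rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
	rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);
}

Because donecbs and pendcbs live on the stack of the single task doing the migration, no other CPU can observe callbacks in transit, which is what allows the global ->orphan_pend and ->orphan_done fields to be dropped along with their compile-time initializers.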
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	73
1 file changed, 4 insertions(+), 69 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f9f01aeb5add..d330c17c8df4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,8 +97,6 @@ struct rcu_state sname##_state = { \
 	.gp_state = RCU_GP_IDLE, \
 	.gpnum = 0UL - 300UL, \
 	.completed = 0UL - 300UL, \
-	.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
-	.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
@@ -3850,76 +3848,12 @@ void rcu_report_dead(unsigned int cpu)
 	rcu_cleanup_dying_idle_cpu(cpu, rsp);
 }
 
-/*
- * Send the specified CPU's RCU callbacks to the orphanage.  The
- * specified CPU must be offline.
- */
-static void
-rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
-			  struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	/*
-	 * Orphan the callbacks.  First adjust the counts.  This is safe
-	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
-	 * cannot be running now.  Thus no memory barrier is required.
-	 */
-	rcu_segcblist_extract_count(&rdp->cblist, &rsp->orphan_done);
-
-	/*
-	 * Next, move those callbacks still needing a grace period to
-	 * the orphanage, where some other CPU will pick them up.
-	 * Some of the callbacks might have gone partway through a grace
-	 * period, but that is too bad.  They get to start over because we
-	 * cannot assume that grace periods are synchronized across CPUs.
-	 */
-	rcu_segcblist_extract_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
-
-	/*
-	 * Then move the ready-to-invoke callbacks to the orphanage,
-	 * where some other CPU will pick them up.  These will not be
-	 * required to pass though another grace period: They are done.
-	 */
-	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rsp->orphan_done);
-
-	/* Finally, disallow further callbacks on this CPU.  */
-	rcu_segcblist_disable(&rdp->cblist);
-}
-
-/*
- * Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage.
- */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
-{
-	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-
-	/* Do the accounting first. */
-	if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
-		rcu_idle_count_callbacks_posted();
-	rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
-
-	/*
-	 * We do not need a memory barrier here because the only way we
-	 * can get here if there is an rcu_barrier() in flight is if
-	 * we are the task doing the rcu_barrier().
-	 */
-
-	/* First adopt the ready-to-invoke callbacks, then the done ones. */
-	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
-	WARN_ON_ONCE(rsp->orphan_done.head);
-	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
-	WARN_ON_ONCE(rsp->orphan_pend.head);
-	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
-		     !rcu_segcblist_n_cbs(&rdp->cblist));
-}
-
-/* Orphan the dead CPU's callbacks, and then adopt them. */
+/* Migrate the dead CPU's callbacks to the current CPU. */
 static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
 	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
@@ -3933,15 +3867,16 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	}
 	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
-	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-	rcu_adopt_orphan_cbs(rsp, flags);
 	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
+	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
 	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
 		  !rcu_segcblist_empty(&rdp->cblist),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
 		  rcu_segcblist_first_cb(&rdp->cblist));
+	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
+		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
 }
 
 /*