aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.ibm.com>2019-05-21 11:28:41 -0400
committerPaul E. McKenney <paulmck@linux.ibm.com>2019-08-13 17:35:49 -0400
commitc035280f1761b3336f4dad336906c19735d7ba5f (patch)
tree85ad343275c09506e944585ea78b5f4056aa3d9f
parente7f4c5b3998a3cf1bd8dbf110948075b47ac9b78 (diff)
rcu/nocb: Remove obsolete nocb_q_count and nocb_q_count_lazy fields
This commit removes the obsolete nocb_q_count and nocb_q_count_lazy fields, also removing rcu_get_n_cbs_nocb_cpu(), adjusting rcu_get_n_cbs_cpu(), and making rcutree_migrate_callbacks() once again disable the ->cblist fields of offline CPUs. Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
-rw-r--r--kernel/rcu/tree.c6
-rw-r--r--kernel/rcu/tree.h3
-rw-r--r--kernel/rcu/tree_plugin.h14
3 files changed, 3 insertions, 20 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 054418d2d960..e5f30b364276 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -210,10 +210,9 @@ static long rcu_get_n_cbs_cpu(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
-	if (rcu_segcblist_is_enabled(&rdp->cblist) &&
-	    !rcu_segcblist_is_offloaded(&rdp->cblist)) /* Online normal CPU? */
+	if (rcu_segcblist_is_enabled(&rdp->cblist))
 		return rcu_segcblist_n_cbs(&rdp->cblist);
-	return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
+	return 0;
 }
 
 void rcu_softirq_qs(void)
@@ -3181,6 +3180,7 @@ void rcutree_migrate_callbacks(int cpu)
 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
 		   rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
+	rcu_segcblist_disable(&rdp->cblist);
 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 74e3a4ab8095..d1df192272fb 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -194,8 +194,6 @@ struct rcu_data {
 
 	/* 5) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
-	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
-	atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
 	struct rcu_head *nocb_cb_head;	/* CBs ready to invoke. */
 	struct rcu_head **nocb_cb_tail;
 	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
@@ -437,7 +435,6 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 838e0caaf53a..458838c63a6c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2040,15 +2040,6 @@ void rcu_bind_current_to_nocb(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
 
-/*
- * Return the number of RCU callbacks still queued from the specified
- * CPU, which must be a nocbs CPU.
- */
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
-{
-	return atomic_long_read(&rdp->nocb_q_count);
-}
-
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 /* No ->nocb_lock to acquire. */
@@ -2108,11 +2099,6 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
-{
-	return 0;
-}
-
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /*