commit     cc99a310caf811aebbd0986f433d824e4a5e7ce5
tree       2ab92ffc88d89991bd6b18216cdf60830445a777
parent     8eb74b2b291e7bf6aa59fcb4e59f236382f00bf5
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-02-23 11:59:29 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-03-12 18:19:36 -0400

rcu: Move rcu_report_unblock_qs_rnp() to common code
The rcu_report_unblock_qs_rnp() function is invoked when the
last task blocking the current grace period exits its outermost
RCU read-side critical section. Previously, this was called only
from rcu_read_unlock_special(), and was therefore defined only when
CONFIG_RCU_PREEMPT=y. However, this function will be invoked even when
CONFIG_RCU_PREEMPT=n once CPU-hotplug operations are processed only at
the beginnings of RCU grace periods. The reason for this change is that
the last task on a given leaf rcu_node structure's ->blkd_tasks list
might well exit its RCU read-side critical section between the time that
recent CPU-hotplug operations were applied and the time that the new grace
period was initialized. This situation could result in RCU waiting forever on
that leaf rcu_node structure, because if all that structure's CPUs were
already offline, there would be no quiescent-state events to drive that
structure's part of the grace period.
This commit therefore moves rcu_report_unblock_qs_rnp() to common code
that is built unconditionally so that the quiescent-state-forcing code
can clean up after this situation, avoiding the grace-period stall.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
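
For orientation, the "outermost RCU read-side critical section" mentioned
above is the last nested rcu_read_unlock(): only that outermost unlock can
enter rcu_read_unlock_special() and thereby reach rcu_report_unblock_qs_rnp().
A minimal reader-side sketch using the standard RCU API (illustrative only;
not part of this patch):

    rcu_read_lock();
    rcu_read_lock();            /* Nested read-side critical section. */
    /* Reader may be preempted here under CONFIG_RCU_PREEMPT=y. */
    rcu_read_unlock();          /* Inner unlock: no quiescent-state report. */
    rcu_read_unlock();          /* Outermost unlock: may report the unblock QS. */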
 kernel/rcu/tree.c        | 39 +++++++++++++++++++++++++++++++++++++++
 kernel/rcu/tree_plugin.h | 40 ++--------------------------------------
 2 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a7151d26b940..5b5cb1ff73ed 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2127,6 +2127,45 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 }
 
 /*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period.  The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void __maybe_unused rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+					    struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
+{
+	unsigned long mask;
+	struct rcu_node *rnp_p;
+
+	WARN_ON_ONCE(rsp == &rcu_bh_state || rsp == &rcu_sched_state);
+	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		return;  /* Still need more quiescent states! */
+	}
+
+	rnp_p = rnp->parent;
+	if (rnp_p == NULL) {
+		/*
+		 * Either there is only one rcu_node in the tree,
+		 * or tasks were kicked up to root rcu_node due to
+		 * CPUs going offline.
+		 */
+		rcu_report_qs_rsp(rsp, flags);
+		return;
+	}
+
+	/* Report up the rest of the hierarchy. */
+	mask = rnp->grpmask;
+	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
+	smp_mb__after_unlock_lock();
+	rcu_report_qs_rnp(mask, rsp, rnp_p, flags);
+}
+
+/*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
  * structure.  This must be either called from the specified CPU, or
  * called when the specified CPU is known to be offline (and when it is
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a22721547442..ec6c2efb28cd 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -233,43 +233,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 }
 
 /*
- * Record a quiescent state for all tasks that were previously queued
- * on the specified rcu_node structure and that were blocking the current
- * RCU grace period.  The caller must hold the specified rnp->lock with
- * irqs disabled, and this lock is released upon return, but irqs remain
- * disabled.
- */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-	__releases(rnp->lock)
-{
-	unsigned long mask;
-	struct rcu_node *rnp_p;
-
-	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		return;  /* Still need more quiescent states! */
-	}
-
-	rnp_p = rnp->parent;
-	if (rnp_p == NULL) {
-		/*
-		 * Either there is only one rcu_node in the tree,
-		 * or tasks were kicked up to root rcu_node due to
-		 * CPUs going offline.
-		 */
-		rcu_report_qs_rsp(&rcu_preempt_state, flags);
-		return;
-	}
-
-	/* Report up the rest of the hierarchy. */
-	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
-	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
-	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
-}
-
-/*
  * Advance a ->blkd_tasks-list pointer to the next entry, instead
  * returning NULL if at the end of the list.
  */
@@ -399,7 +362,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 							 rnp->grplo,
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
-			rcu_report_unblock_qs_rnp(rnp, flags);
+			rcu_report_unblock_qs_rnp(&rcu_preempt_state,
+						  rnp, flags);
 		} else {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		}
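
The moved function keeps its original locking contract: it must be entered
with the leaf rcu_node's ->lock held and irqs disabled, and it releases that
lock (restoring the caller's saved flags) on every return path. A minimal
sketch of a hypothetical common-code caller, along the lines of the
quiescent-state-forcing cleanup the commit message describes; the function
name example_cleanup_unblocked_qs() is invented for illustration and is not
part of this commit:

    /* Hypothetical caller; illustrative only. */
    static void example_cleanup_unblocked_qs(struct rcu_state *rsp,
                                             struct rcu_node *rnp)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&rnp->lock, flags);
            smp_mb__after_unlock_lock();    /* Order against prior unlock. */
            /*
             * rcu_report_unblock_qs_rnp() rechecks ->qsmask and the
             * blocked-readers state itself, and releases rnp->lock on
             * every return path, so the caller must not touch the lock
             * afterward.
             */
            rcu_report_unblock_qs_rnp(rsp, rnp, flags);
    }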