author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-10-31 14:22:37 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-01-06 14:02:41 -0500
commit		8af3a5e78cfb63abe8813743946b7bd5a8a3134c (patch)
tree		e8648ab2a77ef746ce609ee943e2e97917954dce /kernel/rcu/tree.c
parent		74e871ac6cb17f67cbefb569d98a8d05de666e07 (diff)
rcu: Abstract rcu_cleanup_dead_rnp() from rcu_cleanup_dead_cpu()
This commit abstracts rcu_cleanup_dead_rnp() from rcu_cleanup_dead_cpu() in preparation for the rework of RCU priority boosting. This new function will be invoked from rcu_read_unlock_special() in the reworked scheme, which is why rcu_cleanup_dead_rnp() assumes that the leaf rcu_node structure's ->qsmaskinit field has already been updated.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
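The walk that the new function performs is a plain upward traversal of the rcu_node tree, clearing this node's bit in each ancestor's ->qsmaskinit until it finds an ancestor that still has online children. Below is a minimal user-space C sketch of that traversal, under stated assumptions: the toy_rnp struct, toy_has_blocked_tasks() (a stand-in for rcu_preempt_has_tasks()), and the two-leaf tree in main() are hypothetical illustrations rather than kernel code, and the real function's locking and memory barriers are omitted.

/* toy_rnp_walk.c -- minimal sketch of the ->qsmaskinit walk; NOT kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rnp {
	unsigned long qsmaskinit;	/* One bit per still-online child. */
	unsigned long grpmask;		/* This node's bit in parent->qsmaskinit. */
	struct toy_rnp *parent;		/* NULL at the root. */
};

/* Stand-in for rcu_preempt_has_tasks(); always false in this model. */
static bool toy_has_blocked_tasks(struct toy_rnp *rnp)
{
	(void)rnp;
	return false;
}

/* Model of the walk: clear our bit in each ancestor, stopping at the
 * first ancestor that still has other online children. */
static void toy_cleanup_dead_rnp(struct toy_rnp *rnp_leaf)
{
	unsigned long mask;
	struct toy_rnp *rnp = rnp_leaf;

	if (rnp->qsmaskinit || toy_has_blocked_tasks(rnp))
		return;			/* Leaf not yet empty: nothing to propagate. */
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;		/* Cleared all the way to the root. */
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit)
			return;		/* Ancestor still has online children. */
	}
}

int main(void)
{
	struct toy_rnp root  = { .qsmaskinit = 0x3, .grpmask = 0x0, .parent = NULL };
	struct toy_rnp leaf0 = { .qsmaskinit = 0x0, .grpmask = 0x1, .parent = &root };
	struct toy_rnp leaf1 = { .qsmaskinit = 0x1, .grpmask = 0x2, .parent = &root };

	toy_cleanup_dead_rnp(&leaf0);	/* leaf0 empty: clears bit 0x1 in root. */
	toy_cleanup_dead_rnp(&leaf1);	/* leaf1 still populated: no-op. */
	printf("root.qsmaskinit = 0x%lx\n", root.qsmaskinit);	/* Prints 0x2. */
	return 0;
}

Running the sketch prints root.qsmaskinit = 0x2: the walk for leaf0 clears that leaf's bit in the root and stops there, while the walk for leaf1 returns immediately because leaf1 still has an online child, mirroring how rcu_cleanup_dead_rnp() stops as soon as it finds a non-empty leaf or a still-populated ancestor.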
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	67
1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4c106fcc0d54..75c6b3301abb 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2227,6 +2227,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 }
 
 /*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section. Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields. Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely. That said, invoking it after the fact will cost you
+ * a needless lock acquisition. So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+	long mask;
+	struct rcu_node *rnp = rnp_leaf;
+
+	if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+		return;
+	for (;;) {
+		mask = rnp->grpmask;
+		rnp = rnp->parent;
+		if (!rnp)
+			break;
+		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		smp_mb__after_unlock_lock(); /* GP memory ordering. */
+		rnp->qsmaskinit &= ~mask;
+		if (rnp->qsmaskinit) {
+			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			return;
+		}
+		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+	}
+}
+
+/*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context. Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2236,7 +2276,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
-	unsigned long mask;
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;	/* Outgoing CPU's rdp & rnp. */
@@ -2252,24 +2291,14 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
 	rcu_adopt_orphan_cbs(rsp, flags);
 
-	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	mask = rdp->grpmask;	/* rnp->grplo is constant. */
-	do {
-		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
-		smp_mb__after_unlock_lock();
-		rnp->qsmaskinit &= ~mask;
-		if (rnp->qsmaskinit != 0) {
-			if (rnp != rdp->mynode)
-				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-			break;
-		}
-		if (rnp == rdp->mynode)
-			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-		else
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-		mask = rnp->grpmask;
-		rnp = rnp->parent;
-	} while (rnp != NULL);
+	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+	raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
+	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
+	rnp->qsmaskinit &= ~rdp->grpmask;
+	if (rnp->qsmaskinit == 0) {
+		need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
+		rcu_cleanup_dead_rnp(rnp);
+	}
 
 	/*
 	 * We still hold the leaf rcu_node structure lock here, and