author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-02-23 11:59:29 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-03-12 18:19:36 -0400
commit    cc99a310caf811aebbd0986f433d824e4a5e7ce5 (patch)
tree      2ab92ffc88d89991bd6b18216cdf60830445a777 /kernel/rcu/tree.c
parent    8eb74b2b291e7bf6aa59fcb4e59f236382f00bf5 (diff)
rcu: Move rcu_report_unblock_qs_rnp() to common code
The rcu_report_unblock_qs_rnp() function is invoked when the last task blocking the current grace period exits its outermost RCU read-side critical section. Previously, this was called only from rcu_read_unlock_special(), and was therefore defined only when CONFIG_RCU_PREEMPT=y. However, this function will be invoked even when CONFIG_RCU_PREEMPT=n once CPU-hotplug operations are processed only at the beginnings of RCU grace periods.

The reason for this change is that the last task on a given leaf rcu_node structure's ->blkd_tasks list might well exit its RCU read-side critical section between the time that recent CPU-hotplug operations were applied and the time that the new grace period was initialized. This situation could result in RCU waiting forever on that leaf rcu_node structure, because if all of that structure's CPUs were already offline, there would be no quiescent-state events to drive that structure's part of the grace period.

This commit therefore moves rcu_report_unblock_qs_rnp() to common code that is built unconditionally, so that the quiescent-state-forcing code can clean up after this situation, avoiding the grace-period stall.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
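As a reading aid, and not part of the patch itself, the following sketch illustrates the calling convention described in the new function's header comment: the caller acquires rnp->lock with interrupts disabled and then hands both the lock and the saved flags to rcu_report_unblock_qs_rnp(), which releases that lock on every return path. The rsp and rnp variables here are assumed to reference the rcu_state and leaf rcu_node of interest; this is a hypothetical caller, not code from the patch.

    unsigned long flags;

    /* Hypothetical caller, for illustration only. */
    raw_spin_lock_irqsave(&rnp->lock, flags);
    smp_mb__after_unlock_lock();    /* Order against prior release of rnp->lock. */
    rcu_report_unblock_qs_rnp(rsp, rnp, flags);    /* Consumes flags, drops rnp->lock. */
    /* rnp->lock must not be touched here: it is no longer held. */

The __releases(rnp->lock) annotation in the patch documents this asymmetric locking for sparse, and it is the reason a caller must not reference rnp->lock after the call returns.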
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a7151d26b940..5b5cb1ff73ed 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2127,6 +2127,45 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 }
 
 /*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period.  The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void __maybe_unused rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+					    struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
+{
+	unsigned long mask;
+	struct rcu_node *rnp_p;
+
+	WARN_ON_ONCE(rsp == &rcu_bh_state || rsp == &rcu_sched_state);
+	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		return;  /* Still need more quiescent states! */
+	}
+
+	rnp_p = rnp->parent;
+	if (rnp_p == NULL) {
+		/*
+		 * Either there is only one rcu_node in the tree,
+		 * or tasks were kicked up to root rcu_node due to
+		 * CPUs going offline.
+		 */
+		rcu_report_qs_rsp(rsp, flags);
+		return;
+	}
+
+	/* Report up the rest of the hierarchy. */
+	mask = rnp->grpmask;
+	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
+	smp_mb__after_unlock_lock();
+	rcu_report_qs_rnp(mask, rsp, rnp_p, flags);
+}
+
+/*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
  * structure.  This must be either called from the specified CPU, or
  * called when the specified CPU is known to be offline (and when it is
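The added function delegates the actual upward propagation to rcu_report_qs_rnp(), which this hunk does not show. As a purely illustrative aid, here is a self-contained toy model of that propagation pattern; the names are hypothetical, there is no locking, and this is not kernel code. Each level clears the reporter's bit in its mask, and the walk continues upward only when a level's mask drains to zero.

    /*
     * Toy model (not kernel code) of upward quiescent-state reporting
     * in an rcu_node-style tree: clearing the last bit at one level
     * propagates a single report to the next level up; any remaining
     * bit stops the walk.
     */
    #include <assert.h>

    struct toy_node {
    	struct toy_node *parent;
    	unsigned long qsmask;	/* Children/CPUs still owing a report. */
    	unsigned long grpmask;	/* This node's bit in parent->qsmask. */
    };

    static void toy_report_qs(struct toy_node *np, unsigned long mask)
    {
    	for (;;) {
    		np->qsmask &= ~mask;
    		if (np->qsmask != 0 || np->parent == NULL)
    			return;	/* Others still pending, or root drained. */
    		mask = np->grpmask;	/* Report this whole node upward. */
    		np = np->parent;
    	}
    }

    int main(void)
    {
    	struct toy_node root  = { .parent = 0,     .qsmask = 0x3 };
    	struct toy_node leaf0 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x1 };
    	struct toy_node leaf1 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x2 };

    	toy_report_qs(&leaf0, 0x1);	/* Clears leaf0, then root bit 0. */
    	assert(root.qsmask == 0x2);	/* leaf1 still owes a report. */
    	toy_report_qs(&leaf1, 0x1);	/* Last report: root mask drains. */
    	assert(root.qsmask == 0);	/* Grace period may now end. */
    	return 0;
    }

In the real code, each step of this walk runs under the corresponding rnp->lock, which is why rcu_report_unblock_qs_rnp() releases the child's lock and acquires the parent's (irqs staying disabled throughout) rather than dropping out of the locked region entirely.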