author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2009-09-18 12:50:17 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2009-09-19 02:53:19 -0400
commit     28ecd58020409be8eb176c716f957fc3386fa2fa
tree       820dd14bbfc5b69384c3b88611c6173c06ba5e08  /kernel/rcutree_plugin.h
parent     16e3081191837a6a04733de5cd5d1d1b303140d4
rcu: Add WARN_ON_ONCE() consistency checks covering state transitions
o	Verify that qsmask bits stay clear through grace-period (GP)
	initialization.

o	Verify that cpu_quiet_msk_finish() is never invoked unless there
	actually is an RCU grace period in progress.

o	Verify that all internal-node rcu_node structures have empty
	blocked_tasks[] lists.

o	Verify that a child rcu_node structure's bits remain clear after
	acquiring the parent's lock.
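[Editor's note] All four checks use WARN_ON_ONCE(), which complains once per
call site and returns the condition's truth value without otherwise
disturbing the state machine. Below is a minimal user-space sketch of that
fire-once idiom, not the kernel's implementation; the qsmask value is made
up for illustration:

	#include <stdio.h>

	/* Fire at most once per call site, so a hot path that trips an
	 * invariant does not flood the log (sketch of the kernel idiom,
	 * using a GNU C statement expression). */
	#define WARN_ON_ONCE(cond)					\
		({							\
			static int warned;				\
			int ret = !!(cond);				\
			if (ret && !warned) {				\
				warned = 1;				\
				fprintf(stderr, "WARNING at %s:%d: %s\n", \
					__FILE__, __LINE__, #cond);	\
			}						\
			ret;						\
		})

	int main(void)
	{
		unsigned long qsmask = 0x4; /* pretend a child bit is stuck */

		for (int i = 0; i < 3; i++)
			WARN_ON_ONCE(qsmask); /* warns only on first pass */
		return 0;
	}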
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12532926191947-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c9616e48379b..5f94619450af 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -206,7 +206,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		 */
 		if (!empty && rnp->qsmask == 0 &&
 		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+			struct rcu_node *rnp_p;
+
 			if (rnp->parent == NULL) {
 				/* Only one rcu_node in the tree. */
 				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
@@ -215,9 +216,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			/* Report up the rest of the hierarchy. */
 			mask = rnp->grpmask;
 			spin_unlock_irqrestore(&rnp->lock, flags);
-			rnp = rnp->parent;
-			spin_lock_irqsave(&rnp->lock, flags);
-			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
+			rnp_p = rnp->parent;
+			spin_lock_irqsave(&rnp_p->lock, flags);
+			WARN_ON_ONCE(rnp->qsmask);
+			cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
 			return;
 		}
 		spin_unlock(&rnp->lock);
@@ -278,6 +280,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rnp->qsmask);
 }
 
 /*
@@ -302,7 +305,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * The caller must hold rnp->lock with irqs disabled.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
@@ -314,6 +318,9 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
 		return; /* Shouldn't happen: at least one CPU online. */
 	}
+	WARN_ON_ONCE(rnp != rdp->mynode &&
+		     (!list_empty(&rnp->blocked_tasks[0]) ||
+		      !list_empty(&rnp->blocked_tasks[1])));
 
 	/*
 	 * Move tasks up to root rcu_node. Rely on the fact that the
@@ -489,7 +496,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * tasks that were blocked within RCU read-side critical sections.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 }
 
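[Editor's note] On the first two hunks: rnp is needed again after its lock has
been dropped and the parent's lock acquired, so that the child's qsmask bits
can be checked for staying clear; overwriting rnp with rnp->parent, as the old
code did, made that check impossible, hence the new rnp_p temporary. Below is
a standalone sketch of this hand-over-hand pattern, using pthreads mutexes in
place of kernel spinlocks and assert() in place of WARN_ON_ONCE(); the fields
mirror the kernel's names, but this is not the kernel code:

	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	struct rcu_node {
		pthread_mutex_t lock;
		unsigned long qsmask;	/* children still owing a QS */
		unsigned long grpmask;	/* this node's bit in parent->qsmask */
		struct rcu_node *parent;
	};

	/* Report a quiescent state one level up the tree. */
	static void report_up(struct rcu_node *rnp)
	{
		struct rcu_node *rnp_p;
		unsigned long mask;

		pthread_mutex_lock(&rnp->lock);
		mask = rnp->grpmask;
		pthread_mutex_unlock(&rnp->lock); /* drop child's lock... */

		rnp_p = rnp->parent;		/* ...but keep the child pointer */
		pthread_mutex_lock(&rnp_p->lock);
		assert(rnp->qsmask == 0);	/* child's bits stayed clear */
		rnp_p->qsmask &= ~mask;		/* clear child's bit in parent */
		pthread_mutex_unlock(&rnp_p->lock);
	}

	int main(void)
	{
		struct rcu_node root  = { PTHREAD_MUTEX_INITIALIZER, 0x1, 0x0, NULL };
		struct rcu_node child = { PTHREAD_MUTEX_INITIALIZER, 0x0, 0x1, &root };

		report_up(&child);
		printf("root qsmask now %#lx\n", root.qsmask); /* prints 0 */
		return 0;
	}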