about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--kernel/rcu/tree.c2
-rw-r--r--kernel/rcu/tree_plugin.h11
2 files changed, 11 insertions, 2 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2b37f1a8e235..ac2617d857a3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2410,6 +2410,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		return;
 	}
 	WARN_ON_ONCE(oldmask);	/* Any child must be all zeroed! */
+	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
+		     rcu_preempt_blocked_readers_cgp(rnp));
 	rnp->qsmask &= ~mask;
 	trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
 					 mask, rnp->qsmask, rnp->level,
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3e3f92e981a1..eadf8b95b5e9 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -180,6 +180,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 	struct task_struct *t = current;
 
 	lockdep_assert_held(&rnp->lock);
+	WARN_ON_ONCE(rdp->mynode != rnp);
+	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
 	/*
 	 * Decide where to queue the newly blocked task.  In theory,
@@ -261,6 +263,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
+	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
+		     !(rnp->qsmask & rdp->grpmask));
+	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
+		     !(rnp->expmask & rdp->grpmask));
 	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
 	/*
@@ -482,6 +488,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		rnp = t->rcu_blocked_node;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
+		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = sync_rcu_preempt_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -495,10 +502,10 @@ void rcu_read_unlock_special(struct task_struct *t)
 		if (&t->rcu_node_entry == rnp->exp_tasks)
 			rnp->exp_tasks = np;
 		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
-			if (&t->rcu_node_entry == rnp->boost_tasks)
-				rnp->boost_tasks = np;
 			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+			if (&t->rcu_node_entry == rnp->boost_tasks)
+				rnp->boost_tasks = np;
 		}
 
 		/*