Diffstat (limited to 'kernel')
 kernel/rcutree.c        | 13 +++++++++----
 kernel/rcutree_plugin.h | 20 ++++++++++++++------
 2 files changed, 23 insertions(+), 10 deletions(-)
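
This is the diff view of the patch; its commit message is not shown here, so the following summary is inferred from the hunks themselves. The patch adds WARN_ON_ONCE() consistency checks around TREE_PREEMPT_RCU state transitions: rcu_start_gp() now performs its blocked-tasks sanity check before re-initializing ->qsmask, cpu_quiet_msk() and rcu_read_unlock_special() verify that a child rcu_node is quiet before reporting a quiescent state up to its parent, and rcu_preempt_offline_tasks() gains an rcu_data argument used only by a new check. Short notes on the individual hunks are interspersed below.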
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6c99553e9f15..e8624ebf2320 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -628,8 +628,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
-		rnp->qsmask = rnp->qsmaskinit;
 		rcu_preempt_check_blocked_tasks(rnp);
+		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -662,8 +662,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rnp_end = &rsp->node[NUM_RCU_NODES];
 	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
 		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
 		rcu_preempt_check_blocked_tasks(rnp);
+		rnp_cur->qsmask = rnp_cur->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
 		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
 	}
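
Both hunks make the same ordering change: rcu_preempt_check_blocked_tasks() now runs before ->qsmask is re-initialized. This matters because the check (see the rcutree_plugin.h hunk further down) also warns when ->qsmask is nonzero; invoked after the assignment, it would fire on every grace period involving a node with online CPUs. One side observation, outside the scope of this patch: inside the per-node loop, rcu_preempt_check_blocked_tasks(rnp) and rnp->gpnum = rsp->gpnum still operate on rnp (which here points at the root rcu_node) rather than on rnp_cur, the node actually being locked and initialized. That looks like a pre-existing inconsistency; if it is indeed unintended, the corrected loop body would presumably read as in this sketch (hypothetical, not part of the patch):

	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp_cur); /* check the locked node */
		rnp_cur->qsmask = rnp_cur->qsmaskinit;
		rnp_cur->gpnum = rsp->gpnum;	/* advance this node's GP number */
		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
	}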
@@ -708,6 +708,7 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 	__releases(rnp->lock)
 {
+	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
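
The new warning documents the invariant that cpu_quiet_msk_finish() is reached only while a grace period is actually in flight: ->completed == ->gpnum would mean the current grace period had already been marked complete, making the assignment on the next line a no-op and the call itself evidence of a state-machine bug.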
@@ -725,6 +726,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 	      unsigned long flags)
 	__releases(rnp->lock)
 {
+	struct rcu_node *rnp_c;
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask)) {
@@ -748,8 +751,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
+		rnp_c = rnp;
 		rnp = rnp->parent;
 		spin_lock_irqsave(&rnp->lock, flags);
+		WARN_ON_ONCE(rnp_c->qsmask);
 	}
 
 	/*
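
Here rnp_c preserves a pointer to the child rcu_node across the step up the hierarchy, so that once the parent's lock is held the walk can assert that the child no longer has CPUs owing a quiescent state; propagating a quiescent-state report upward while the child's ->qsmask is still nonzero would indicate broken bookkeeping. Combining the declaration from the previous hunk with the lines above, the patched walk reduces to the following skeleton (a sketch only; the mask-clearing and exit logic of cpu_quiet_msk() is elided):

	struct rcu_node *rnp_c;

	for (;;) {
		/* ... clear reported bits in rnp->qsmask, break when done ... */
		spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;			/* remember the child node */
		rnp = rnp->parent;
		spin_lock_irqsave(&rnp->lock, flags);
		WARN_ON_ONCE(rnp_c->qsmask);	/* child must now be quiet */
	}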
@@ -858,7 +863,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	spin_lock_irqsave(&rsp->onofflock, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	rnp = rdp->mynode;
+	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
@@ -867,7 +872,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		break;
 	}
-	rcu_preempt_offline_tasks(rsp, rnp);
+	rcu_preempt_offline_tasks(rsp, rnp, rdp);
 	mask = rnp->grpmask;
 	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	rnp = rnp->parent;
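
Two small changes in __rcu_offline_cpu(): a clarifying comment on the rnp = rdp->mynode assignment, and an extra rdp argument threaded into rcu_preempt_offline_tasks(), used only by the new consistency check in rcutree_plugin.h below.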
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c9616e48379b..5f94619450af 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -206,7 +206,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		 */
 		if (!empty && rnp->qsmask == 0 &&
 		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+			struct rcu_node *rnp_p;
+
 			if (rnp->parent == NULL) {
 				/* Only one rcu_node in the tree. */
 				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
@@ -215,9 +216,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			/* Report up the rest of the hierarchy. */
 			mask = rnp->grpmask;
 			spin_unlock_irqrestore(&rnp->lock, flags);
-			rnp = rnp->parent;
-			spin_lock_irqsave(&rnp->lock, flags);
-			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
+			rnp_p = rnp->parent;
+			spin_lock_irqsave(&rnp_p->lock, flags);
+			WARN_ON_ONCE(rnp->qsmask);
+			cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
 			return;
 		}
 		spin_unlock(&rnp->lock);
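
Dropping the RCU_READ_UNLOCK_NEED_QS clearing appears to remove a redundancy (NEED_QS is presumably already handled earlier in rcu_read_unlock_special(); this is inferred, since the surrounding code is not shown here). The functional point of the two hunks is rnp_p: stepping up via a separate parent pointer keeps rnp aimed at the child node, so WARN_ON_ONCE(rnp->qsmask) can verify under the parent's lock that the child is quiet before its bit is reported upward through cpu_quiet_msk(), mirroring the rnp_c check added to cpu_quiet_msk() itself.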
@@ -278,6 +280,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rnp->qsmask);
 }
 
 /*
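
Together with the reordering in rcu_start_gp() above, this helper now asserts that a node entering a new grace period is fully idle: no tasks blocked in the current grace period's list, and no CPUs still owing a quiescent state. Reconstructed from this hunk (CONFIG_TREE_PREEMPT_RCU build), the resulting function reads:

static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
	WARN_ON_ONCE(rnp->qsmask);	/* no CPU may still owe a QS */
}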
@@ -302,7 +305,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * The caller must hold rnp->lock with irqs disabled.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
@@ -314,6 +318,9 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
 		return;  /* Shouldn't happen: at least one CPU online. */
 	}
+	WARN_ON_ONCE(rnp != rdp->mynode &&
+		     (!list_empty(&rnp->blocked_tasks[0]) ||
+		      !list_empty(&rnp->blocked_tasks[1])));
 
 	/*
 	 * Move tasks up to root rcu_node. Rely on the fact that the
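
The new rdp parameter exists only for this assertion: as __rcu_offline_cpu() walks upward from the outgoing CPU's leaf node (rdp->mynode), any higher-level rcu_node passed to this function is expected to have empty blocked-task lists, since readers queue only on their leaf rcu_node.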
@@ -489,7 +496,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * tasks that were blocked within RCU read-side critical sections.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 }
 
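The empty stub used by non-preemptible (non-CONFIG_TREE_PREEMPT_RCU) builds receives the matching signature change, so the updated call in __rcu_offline_cpu() compiles in either configuration.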