Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 kernel/rcu/tree_plugin.h | 13 +++++++++++++
 1 file changed, 13 insertions(+), 0 deletions(-)
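Every hunk in this patch makes the same one-line change: immediately after acquiring an rcu_node structure's ->lock, it inserts smp_mb__after_unlock_lock(). RCU's grace-period machinery assumes that releasing one rcu_node ->lock and then acquiring another (or the same one, possibly on a different CPU) orders memory accesses like a full barrier, so that all rcu_node critical sections behave as if serialized by a single global lock. On most architectures an unlock followed by a lock already provides that ordering and smp_mb__after_unlock_lock() compiles to nothing; where it does not (notably powerpc), the macro expands to smp_mb(). A minimal sketch of the resulting pattern, with a hypothetical do_update() standing in for the real critical-section work:

	/*
	 * Sketch only: the unlock+lock full-barrier pattern this patch
	 * applies at each rcu_node ->lock acquisition.
	 */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();	/* Pairs with any prior unlock to give smp_mb(). */
	do_update(rnp);			/* Hypothetical stand-in for the protected work. */
	raw_spin_unlock_irqrestore(&rnp->lock, flags);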
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 08a765232432..506a7a97a2e2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -204,6 +204,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
 		t->rcu_blocked_node = rnp;
 
@@ -312,6 +313,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	mask = rnp->grpmask;
 	raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
+	smp_mb__after_unlock_lock();
 	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 }
 
@@ -381,6 +383,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		for (;;) {
 			rnp = t->rcu_blocked_node;
 			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+			smp_mb__after_unlock_lock();
 			if (rnp == t->rcu_blocked_node)
 				break;
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -605,6 +608,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	while (!list_empty(lp)) {
 		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
 		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+		smp_mb__after_unlock_lock();
 		list_del(&t->rcu_node_entry);
 		t->rcu_blocked_node = rnp_root;
 		list_add(&t->rcu_node_entry, lp_root);
@@ -629,6 +633,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	 * in this case.
 	 */
 	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+	smp_mb__after_unlock_lock();
 	if (rnp_root->boost_tasks != NULL &&
 	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
 	    rnp_root->boost_tasks != rnp_root->exp_tasks)
@@ -772,6 +777,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 	unsigned long mask;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	for (;;) {
 		if (!sync_rcu_preempt_exp_done(rnp)) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -787,6 +793,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
+		smp_mb__after_unlock_lock();
 		rnp->expmask &= ~mask;
 	}
 }
@@ -806,6 +813,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	int must_wait = 0;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	if (list_empty(&rnp->blkd_tasks)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
@@ -886,6 +894,7 @@ void synchronize_rcu_expedited(void)
 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		rnp->expmask = rnp->qsmaskinit;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
@@ -1191,6 +1200,7 @@ static int rcu_boost(struct rcu_node *rnp)
 		return 0;  /* Nothing left to boost. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 
 	/*
 	 * Recheck under the lock: all tasks in need of boosting
@@ -1377,6 +1387,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = RCU_BOOST_PRIO;
@@ -1769,6 +1780,7 @@ static void rcu_prepare_for_idle(int cpu)
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		smp_mb__after_unlock_lock();
 		rcu_accelerate_cbs(rsp, rnp, rdp);
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
@@ -2209,6 +2221,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	c = rcu_start_future_gp(rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
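For reference, the barrier is intentionally free where it is not needed. Its definition reduces to roughly the following (a sketch; the exact header hosting the #define has varied across kernel versions):

	/* Generic fallback: unlock+lock is already a full barrier (e.g. x86). */
	#ifndef smp_mb__after_unlock_lock
	#define smp_mb__after_unlock_lock()	do { } while (0)
	#endif

	/* powerpc override: unlock+lock is not a full barrier there, so force one. */
	#define smp_mb__after_unlock_lock()	smp_mb()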