Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 78 ++++++++++++++++++++++++--------------------------------------------------
 1 file changed, 26 insertions(+), 52 deletions(-)
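This commit replaces each open-coded acquisition of an rcu_node structure's ->lock, which RCU must always follow with smp_mb__after_unlock_lock(), with a single _rcu_node() helper that folds the barrier into the acquisition so that no caller can forget it. The barrier upgrades the preceding unlock plus this lock to a full memory barrier on architectures (such as PowerPC) where an unlock/lock pair is not otherwise fully ordered, which RCU's grace-period guarantee requires. As a rough sketch (assumed, not verbatim; the real definitions would live in kernel/rcu/tree.h), the helpers look like this:

/* Acquire an rcu_node lock with full grace-period memory ordering. */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&rnp->lock);
	smp_mb__after_unlock_lock();
}

/* As above, but also disable interrupts. */
static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock();
}

/* Must be a macro because "flags" is assigned, not passed by value. */
#define raw_spin_lock_irqsave_rcu_node(rnp, flags)		\
do {								\
	raw_spin_lock_irqsave(&(rnp)->lock, flags);		\
	smp_mb__after_unlock_lock();				\
} while (0)

/* Trylock variant: the barrier applies only if the lock was acquired. */
static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&rnp->lock);

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}

The trylock variant only issues the barrier when the lock was actually taken, which is why the note_gp_changes() hunk below can drop its separate smp_mb__after_unlock_lock() call.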
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f07343b54fe5..daf17e248757 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1534,10 +1534,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * hold it, acquire the root rcu_node structure's lock in order to
 	 * start one (if needed).
 	 */
-	if (rnp != rnp_root) {
-		raw_spin_lock(&rnp_root->lock);
-		smp_mb__after_unlock_lock();
-	}
+	if (rnp != rnp_root)
+		raw_spin_lock_rcu_node(rnp_root);
 
 	/*
 	 * Get a new grace-period number. If there really is no grace
@@ -1786,11 +1784,10 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
 	     rdp->completed == READ_ONCE(rnp->completed) &&
 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
-	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
+	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
 	}
-	smp_mb__after_unlock_lock();
 	needwake = __note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (needwake)
@@ -1814,8 +1811,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
 		raw_spin_unlock_irq(&rnp->lock);
@@ -1847,8 +1843,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_gp_slow(rsp, gp_preinit_delay);
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
@@ -1904,8 +1899,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		rcu_gp_slow(rsp, gp_init_delay);
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
@@ -1973,8 +1967,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
 		raw_spin_unlock_irq(&rnp->lock);
@@ -1993,8 +1986,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irq_rcu_node(rnp);
 	gp_duration = jiffies - rsp->gp_start;
 	if (gp_duration > rsp->gp_max)
 		rsp->gp_max = gp_duration;
@@ -2019,8 +2011,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 * grace period is recorded in any of the rcu_node structures.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
 		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->completed, rsp->gpnum);
@@ -2035,8 +2026,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
+	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
 	rcu_nocb_gp_set(rnp, nocb);
 
 	/* Declare grace period done. */
@@ -2284,8 +2274,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		oldmask = rnp_c->qsmask;
 	}
 
@@ -2332,8 +2321,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
 	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
-	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
@@ -2355,8 +2343,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 	struct rcu_node *rnp;
 
 	rnp = rdp->mynode;
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if ((rdp->cpu_no_qs.b.norm &&
 	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
 	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
@@ -2582,8 +2569,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 		rnp = rnp->parent;
 		if (!rnp)
 			break;
-		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-		smp_mb__after_unlock_lock(); /* GP memory ordering. */
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
@@ -2611,8 +2597,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
+	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -2809,8 +2794,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 	rcu_for_each_leaf_node(rsp, rnp) {
 		cond_resched_rcu_qs();
 		mask = 0;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
 			if (rcu_state_p == &rcu_sched_state ||
 			    rsp != rcu_state_p ||
@@ -2881,8 +2865,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
 
 	/* Reached the root of the rcu_node tree, acquire lock. */
-	raw_spin_lock_irqsave(&rnp_old->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
@@ -3005,8 +2988,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	if (!rcu_gp_in_progress(rsp)) {
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
-		raw_spin_lock(&rnp_root->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_rcu_node(rnp_root);
 		needwake = rcu_start_gp(rsp);
 		raw_spin_unlock(&rnp_root->lock);
 		if (needwake)
@@ -3426,8 +3408,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 	 * CPUs for the current rcu_node structure up the rcu_node tree.
 	 */
 	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;  /* No new CPUs, nothing to do. */
@@ -3447,8 +3428,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 		rnp_up = rnp->parent;
 		done = false;
 		while (rnp_up) {
-			raw_spin_lock_irqsave(&rnp_up->lock, flags);
-			smp_mb__after_unlock_lock();
+			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
@@ -3472,8 +3452,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 
 	sync_exp_reset_tree_hotplug(rsp);
 	rcu_for_each_node_breadth_first(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -3531,8 +3510,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		mask = rnp->grpmask;
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 		rnp = rnp->parent;
-		raw_spin_lock(&rnp->lock); /* irqs already disabled */
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
 		rnp->expmask &= ~mask;
 	}
@@ -3549,8 +3527,7 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
 }
 
@@ -3564,8 +3541,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
@@ -3708,8 +3684,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 
 	sync_exp_reset_tree(rsp);
 	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
 		/* Each pass checks a CPU for identity, offline, and idle. */
 		mask_ofl_test = 0;
@@ -4198,8 +4173,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	 */
 	rnp = rdp->mynode;
 	mask = rdp->grpmask;
-	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
 	rnp->qsmaskinitnext |= mask;
 	rnp->expmaskinitnext |= mask;
 	if (!rdp->beenonline)
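Every hunk above applies the same mechanical transformation. Distilled to a single representative call site (illustrative only, not a hunk from this patch):

/* Before: lock and barrier open-coded at every call site. */
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();

/* After: one call, barrier implied. */
raw_spin_lock_irqsave_rcu_node(rnp, flags);

/* Unlock paths are untouched; no barrier is needed at release time. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);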