path: root/kernel/rcu/tree.c
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-08 17:18:57 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 18:39:09 -0400
commit	577389423187d8b51dfe6199297e579a3419b72b (patch)
tree	8cb47b71dc71d38c6f36f698281bf8a05b929393 /kernel/rcu/tree.c
parent	ff3cee39088b1931a432587059d66cd505f785dc (diff)
rcu: Add CPU online/offline state to dump_blkd_tasks()
Interactions between CPU-hotplug operations and grace-period initialization can result in dump_blkd_tasks() being invoked. One of the first debugging actions in this case is to search back in dmesg to work out which of the affected rcu_node structure's CPUs are online and to determine the last CPU-hotplug operation affecting any of those CPUs. This can be laborious and error-prone, especially when console output is lost.

This commit therefore causes dump_blkd_tasks() to dump the state of the affected rcu_node structure's CPUs and the last grace period during which the last offline and online operation affected each of these CPUs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
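The consumer side of this change, dump_blkd_tasks() itself, lives in kernel/rcu/tree_plugin.h and is not part of the tree.c diff shown below. As a minimal sketch only, the added per-CPU dump loop could look roughly like the following, assuming the rcu_onl_gp_seq/rcu_ofl_gp_seq fields introduced below and the existing rcu_rnp_online_cpus() helper; the exact format string is illustrative:

	/*
	 * Sketch: walk the rcu_node structure's CPU span and print, for
	 * each CPU, whether it is currently online ('o') or offline ('.')
	 * plus the grace-period sequence number and flags captured at its
	 * last online and offline transitions.
	 */
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
	}

One pr_info() line per CPU then replaces the dmesg archaeology the commit message describes: online/offline state plus the grace-period state at each CPU's last hotplug transitions, all emitted at the moment the warning fires.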
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5f1a11f1f7bc..a2503ef1bbe2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1954,7 +1954,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	rcu_gp_slow(rsp, gp_init_delay);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp = this_cpu_ptr(rsp->rda);
-	rcu_preempt_check_blocked_tasks(rnp);
+	rcu_preempt_check_blocked_tasks(rsp, rnp);
 	rnp->qsmask = rnp->qsmaskinit;
 	WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 	if (rnp == rdp->mynode)
@@ -2063,7 +2063,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-			dump_blkd_tasks(rnp, 10);
+			dump_blkd_tasks(rsp, rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(rsp->rda);
@@ -3516,6 +3516,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+	rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+	rdp->rcu_onl_gp_seq = rsp->gp_seq;
+	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
@@ -3711,6 +3715,8 @@ void rcu_cpu_starting(unsigned int cpu)
 	/* Allow lockless access for expedited grace periods. */
 	smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+	rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+	rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 		/* Report QS -after- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
@@ -3738,6 +3744,8 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	spin_lock(&rsp->ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
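The four fields written above are per-CPU state that would be declared in struct rcu_data in kernel/rcu/tree.h, outside this file's diff. A sketch of plausible declarations follows; the short type for the flags fields is an assumption chosen to match rsp->gp_flags in this era, and the unsigned long type matches rsp->gp_seq:

	/* Sketch: assumed declarations in struct rcu_data (kernel/rcu/tree.h). */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */

Note the asymmetry between the initialization sites: rcu_boot_init_percpu_data() stores RCU_GP_CLEANED directly, presumably because no grace period can be in flight that early in boot, while the hotplug paths snapshot rsp->gp_flags with READ_ONCE() because the grace-period kthread may be updating that field concurrently.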