path: root/kernel/rcu/tree_plugin.h
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-08 17:18:57 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 18:39:09 -0400
commit	577389423187d8b51dfe6199297e579a3419b72b (patch)
tree	8cb47b71dc71d38c6f36f698281bf8a05b929393 /kernel/rcu/tree_plugin.h
parent	ff3cee39088b1931a432587059d66cd505f785dc (diff)
rcu: Add CPU online/offline state to dump_blkd_tasks()
Interactions between CPU-hotplug operations and grace-period initialization can result in dump_blkd_tasks() being invoked. One of the first debugging actions in this case is to search back in dmesg to work out which of the affected rcu_node structure's CPUs are online and to determine the last CPU-hotplug operation affecting any of those CPUs. This can be laborious and error-prone, especially when console output is lost.

This commit therefore causes dump_blkd_tasks() to dump the state of the affected rcu_node structure's CPUs and the last grace period during which the last offline and online operation affected each of these CPUs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
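For reference, the pr_info() call added by this patch prints one line per CPU covered by the affected rcu_node structure. The CPU numbers, grace-period sequence numbers, and flag values in this sample are purely illustrative:

	0: o online: 4056(1) offline: 4048(0)
	1: . online: 4040(1) offline: 4052(0)

Here 'o' indicates that the CPU's bit is currently set in the rcu_node structure's online mask and '.' that it is not, and each seq(flags) pair is the grace-period sequence number and flags recorded at that CPU's most recent online or offline transition.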
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f45ff97b0d51..613372246a07 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -699,13 +699,14 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	struct task_struct *t;
 
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-		dump_blkd_tasks(rnp, 10);
+		dump_blkd_tasks(rsp, rnp, 10);
 	if (rcu_preempt_has_tasks(rnp) &&
 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
 		rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -854,10 +855,14 @@ void exit_rcu(void)
  * Dump the blocked-tasks state, but limit the list dump to the
  * specified number of elements.
  */
-static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 {
+	int cpu;
 	int i;
 	struct list_head *lhp;
+	bool onl;
+	struct rcu_data *rdp;
 	struct rcu_node *rnp1;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
@@ -877,6 +882,14 @@ static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 		break;
 	}
 	pr_cont("\n");
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+			cpu, ".o"[onl],
+			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+	}
 }
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -949,7 +962,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks. So check only for
  * bogus qsmask values.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rnp->qsmask);
 }
@@ -990,7 +1004,8 @@ void exit_rcu(void)
 /*
  * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
  */
-static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
 }
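
As an aside, the new per-CPU loop in this patch uses two compact C idioms: it tests whether the CPU's ->grpmask bit is set in the rcu_node structure's online mask, and it indexes the string literal ".o" with the resulting boolean so that offline CPUs print '.' and online CPUs print 'o'. The following self-contained user-space sketch shows the same pattern; it is not kernel code, and the mask value and CPU range are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long online_mask = 0x5UL;	/* pretend CPUs 0 and 2 are online */
	int grplo = 0, grphi = 3;		/* pretend this node covers CPUs 0-3 */
	int cpu;

	for (cpu = grplo; cpu <= grphi; cpu++) {
		/* !! collapses the masked bit, which may be any power of two, to 0 or 1 */
		bool onl = !!((1UL << cpu) & online_mask);

		/* ".o"[0] == '.', ".o"[1] == 'o' */
		printf("\t%d: %c\n", cpu, ".o"[onl]);
	}
	return 0;
}

The double negation matters because the masked value is generally not 0 or 1; forcing it to a boolean keeps the index within the two-character string literal.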