diff options
author | Paul E. McKenney <paulmck@linux.ibm.com> | 2019-01-11 23:51:49 -0500 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.ibm.com> | 2019-03-26 17:40:13 -0400 |
commit | d87cda5094585b7a0f62075de68266cb9c1b35ca (patch) | |
tree | d23e6a6f7143ee36505312db1bebee7e2a189e52 | |
parent | 21d0d79ab051bf9facb9960a30e58b93a31c75a5 (diff) |
rcu: Move rcu_print_task_exp_stall() to tree_exp.h
Because expedited CPU stall warnings are contained within the
kernel/rcu/tree_exp.h file, rcu_print_task_exp_stall() should live
there too. This commit carries out the required code motion.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
-rw-r--r-- | kernel/rcu/tree_exp.h | 32 | ||||
-rw-r--r-- | kernel/rcu/tree_plugin.h | 31 |
2 files changed, 32 insertions(+), 31 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 4c2a0189e748..7be3e085ddd6 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/lockdep.h> | 10 | #include <linux/lockdep.h> |
11 | 11 | ||
12 | static void rcu_exp_handler(void *unused); | 12 | static void rcu_exp_handler(void *unused); |
13 | static int rcu_print_task_exp_stall(struct rcu_node *rnp); | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * Record the start of an expedited grace period. | 16 | * Record the start of an expedited grace period. |
@@ -670,6 +671,27 @@ static void sync_sched_exp_online_cleanup(int cpu) | |||
670 | { | 671 | { |
671 | } | 672 | } |
672 | 673 | ||
674 | /* | ||
675 | * Scan the current list of tasks blocked within RCU read-side critical | ||
676 | * sections, printing out the tid of each that is blocking the current | ||
677 | * expedited grace period. | ||
678 | */ | ||
679 | static int rcu_print_task_exp_stall(struct rcu_node *rnp) | ||
680 | { | ||
681 | struct task_struct *t; | ||
682 | int ndetected = 0; | ||
683 | |||
684 | if (!rnp->exp_tasks) | ||
685 | return 0; | ||
686 | t = list_entry(rnp->exp_tasks->prev, | ||
687 | struct task_struct, rcu_node_entry); | ||
688 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { | ||
689 | pr_cont(" P%d", t->pid); | ||
690 | ndetected++; | ||
691 | } | ||
692 | return ndetected; | ||
693 | } | ||
694 | |||
673 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | 695 | #else /* #ifdef CONFIG_PREEMPT_RCU */ |
674 | 696 | ||
675 | /* Invoked on each online non-idle CPU for expedited quiescent state. */ | 697 | /* Invoked on each online non-idle CPU for expedited quiescent state. */ |
@@ -709,6 +731,16 @@ static void sync_sched_exp_online_cleanup(int cpu) | |||
709 | WARN_ON_ONCE(ret); | 731 | WARN_ON_ONCE(ret); |
710 | } | 732 | } |
711 | 733 | ||
734 | /* | ||
735 | * Because preemptible RCU does not exist, we never have to check for | ||
736 | * tasks blocked within RCU read-side critical sections that are | ||
737 | * blocking the current expedited grace period. | ||
738 | */ | ||
739 | static int rcu_print_task_exp_stall(struct rcu_node *rnp) | ||
740 | { | ||
741 | return 0; | ||
742 | } | ||
743 | |||
712 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | 744 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
713 | 745 | ||
714 | /** | 746 | /** |
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 7fa3bc4d481b..72519c57f656 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -643,27 +643,6 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
643 | } | 643 | } |
644 | 644 | ||
645 | /* | 645 | /* |
646 | * Scan the current list of tasks blocked within RCU read-side critical | ||
647 | * sections, printing out the tid of each that is blocking the current | ||
648 | * expedited grace period. | ||
649 | */ | ||
650 | static int rcu_print_task_exp_stall(struct rcu_node *rnp) | ||
651 | { | ||
652 | struct task_struct *t; | ||
653 | int ndetected = 0; | ||
654 | |||
655 | if (!rnp->exp_tasks) | ||
656 | return 0; | ||
657 | t = list_entry(rnp->exp_tasks->prev, | ||
658 | struct task_struct, rcu_node_entry); | ||
659 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { | ||
660 | pr_cont(" P%d", t->pid); | ||
661 | ndetected++; | ||
662 | } | ||
663 | return ndetected; | ||
664 | } | ||
665 | |||
666 | /* | ||
667 | * Check that the list of blocked tasks for the newly completed grace | 646 | * Check that the list of blocked tasks for the newly completed grace |
668 | * period is in fact empty. It is a serious bug to complete a grace | 647 | * period is in fact empty. It is a serious bug to complete a grace |
669 | * period that still has RCU readers blocked! This function must be | 648 | * period that still has RCU readers blocked! This function must be |
@@ -907,16 +886,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) | |||
907 | static void rcu_preempt_deferred_qs(struct task_struct *t) { } | 886 | static void rcu_preempt_deferred_qs(struct task_struct *t) { } |
908 | 887 | ||
909 | /* | 888 | /* |
910 | * Because preemptible RCU does not exist, we never have to check for | ||
911 | * tasks blocked within RCU read-side critical sections that are | ||
912 | * blocking the current expedited grace period. | ||
913 | */ | ||
914 | static int rcu_print_task_exp_stall(struct rcu_node *rnp) | ||
915 | { | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | /* | ||
920 | * Because there is no preemptible RCU, there can be no readers blocked, | 889 | * Because there is no preemptible RCU, there can be no readers blocked, |
921 | * so there is no need to check for blocked tasks. So check only for | 890 | * so there is no need to check for blocked tasks. So check only for |
922 | * bogus qsmask values. | 891 | * bogus qsmask values. |