Diffstat (limited to 'kernel/rcu/tree_exp.h')
-rw-r--r--	kernel/rcu/tree_exp.h	36
1 files changed, 34 insertions, 2 deletions
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 4c2a0189e748..9c990df880d1 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -10,6 +10,7 @@
 #include <linux/lockdep.h>
 
 static void rcu_exp_handler(void *unused);
+static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 
 /*
  * Record the start of an expedited grace period.
@@ -633,7 +634,7 @@ static void rcu_exp_handler(void *unused)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
 			rdp->deferred_qs = true;
-			WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+			t->rcu_read_unlock_special.b.exp_hint = true;
 		}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
@@ -648,7 +649,7 @@ static void rcu_exp_handler(void *unused)
 	 *
 	 * If the CPU is fully enabled (or if some buggy RCU-preempt
 	 * read-side critical section is being used from idle), just
-	 * invoke rcu_preempt_defer_qs() to immediately report the
+	 * invoke rcu_preempt_deferred_qs() to immediately report the
 	 * quiescent state.  We cannot use rcu_read_unlock_special()
 	 * because we are in an interrupt handler, which will cause that
 	 * function to take an early exit without doing anything.
@@ -670,6 +671,27 @@ static void sync_sched_exp_online_cleanup(int cpu)
 {
 }
 
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each that is blocking the current
+ * expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+	struct task_struct *t;
+	int ndetected = 0;
+
+	if (!rnp->exp_tasks)
+		return 0;
+	t = list_entry(rnp->exp_tasks->prev,
+		       struct task_struct, rcu_node_entry);
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		pr_cont(" P%d", t->pid);
+		ndetected++;
+	}
+	return ndetected;
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
@@ -709,6 +731,16 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	WARN_ON_ONCE(ret);
 }
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections that are
+ * blocking the current expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+	return 0;
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /**
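
A note on the list walk in the new rcu_print_task_exp_stall(): rnp->exp_tasks points into the middle of the rnp->blkd_tasks list, at the first task blocking the current expedited grace period. The function therefore steps back one element with list_entry(...->prev, ...) so that list_for_each_entry_continue() begins its walk exactly at ->exp_tasks and runs to the end of the list. The standalone C sketch below models that pattern with minimal userspace stand-ins for the kernel's list macros; struct task, print_exp_stall(), and the demo pids are hypothetical, invented here purely for illustration.

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's <linux/list.h> machinery. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Continue iterating from (but not including) the current position. */
#define list_for_each_entry_continue(pos, head, member)                  \
	for (pos = list_entry((pos)->member.next, typeof(*pos), member); \
	     &pos->member != (head);                                     \
	     pos = list_entry((pos)->member.next, typeof(*pos), member))

/* Hypothetical simplified task: only the fields the walk needs. */
struct task {
	int pid;
	struct list_head node;	/* models task_struct.rcu_node_entry */
};

/*
 * Model of the rcu_print_task_exp_stall() walk: "exp_first" plays the
 * role of rnp->exp_tasks, a pointer into the middle of blkd_tasks.
 */
static int print_exp_stall(struct list_head *blkd_tasks,
			   struct list_head *exp_first)
{
	struct task *t;
	int ndetected = 0;

	if (!exp_first)
		return 0;
	/* Rewind one entry so the _continue loop starts AT exp_first. */
	t = list_entry(exp_first->prev, struct task, node);
	list_for_each_entry_continue(t, blkd_tasks, node) {
		printf(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

int main(void)
{
	struct task a = { .pid = 101 }, b = { .pid = 102 }, c = { .pid = 103 };
	struct list_head blkd = { &a.node, &c.node };

	/* Hand-build the circular list: blkd -> a -> b -> c -> blkd. */
	a.node.prev = &blkd;   a.node.next = &b.node;
	b.node.prev = &a.node; b.node.next = &c.node;
	c.node.prev = &b.node; c.node.next = &blkd;

	/* Pretend tasks b and c block the expedited grace period. */
	int n = print_exp_stall(&blkd, &b.node);
	printf("\n%d tasks detected\n", n);	/* prints " P102 P103", then 2 */
	return 0;
}

The rewind-then-continue idiom avoids a special case for the first entry: list_for_each_entry_continue() always advances before testing, so starting one element before ->exp_tasks makes the loop visit ->exp_tasks itself first.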