Diffstat (limited to 'kernel/rcu/tree.c')
 -rw-r--r--  kernel/rcu/tree.c | 48 +++++++++++++++++++++++-------------------------
 1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 50fee7689e71..8b4d273331e4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -57,6 +57,7 @@
 #include <linux/random.h>
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
+#include <linux/ftrace.h>
 
 #include "tree.h"
 #include "rcu.h"
@@ -771,25 +772,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
+ * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
  *
- * If the new value of the ->dynticks_nesting counter now is zero,
- * we really have entered idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
+ * Enter idle, doing appropriate accounting.  The caller must have
+ * disabled interrupts.
  */
-static void rcu_eqs_enter_common(long long oldval, bool user)
+static void rcu_eqs_enter_common(bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
-	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 			idle_task(smp_processor_id());
 
-		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
+		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -800,7 +800,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	rcu_dynticks_eqs_enter();
+	stack_tracer_disable();
+	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
+	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
+	stack_tracer_enable();
 	rcu_dynticks_task_enter();
 
 	/*
@@ -821,19 +824,15 @@
  */
 static void rcu_eqs_enter(bool user)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
-		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(oldval, user);
-	} else {
+		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rcu_eqs_enter_common(user);
+	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	}
 }
 
 /**
@@ -892,19 +891,18 @@ void rcu_user_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
-	else
-		rcu_eqs_enter_common(oldval, true);
+		     rdtp->dynticks_nesting < 1);
+	if (rdtp->dynticks_nesting <= 1) {
+		rcu_eqs_enter_common(true);
+	} else {
+		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
+		rdtp->dynticks_nesting--;
+	}
 	rcu_sysidle_enter(1);
 }
 
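Likewise, rcu_irq_exit() with the hunk above applied, assembled from the + lines (expository comments added, not part of the patch):

void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting < 1);
	if (rdtp->dynticks_nesting <= 1) {
		/* Outermost irq level: rcu_eqs_enter_common() zeroes the counter. */
		rcu_eqs_enter_common(true);
	} else {
		/* Still nested: trace old/new values, then just decrement. */
		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
		rdtp->dynticks_nesting--;
	}
	rcu_sysidle_enter(1);
}

The debug threshold changes from < 0 to < 1 because the counter is no longer decremented before the check: the old code warned on a post-decrement value below zero, the new code on a pre-decrement value below one, which is the same condition.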
