author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-05 12:05:18 -0400
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-04-10 15:21:57 -0400
commit	a278d4718988a253a08d12e6d76c80979693d39e (patch)
tree	37918ca0a265f5811bc57617a38fd1c1123989b5	/kernel/rcu/tree.c
parent	8aaf1ee70e19ac74cbbb81098edfa328d1ab4bd7 (diff)
rcu: Fix dyntick-idle tracing
The tracing subsystem started using rcu_irq_entry() and rcu_irq_exit() (with my blessing) to allow the current _rcuidle alternative tracepoint name to be dispensed with while still maintaining good performance. Unfortunately, this causes RCU's dyntick-idle entry code's tracing to appear to RCU like an interrupt that occurs where RCU is not designed to handle interrupts.

This commit fixes this problem by moving the zeroing of ->dynticks_nesting to after the offending trace_rcu_dyntick() statement, which narrows the window of vulnerability to a pair of adjacent statements that are now marked with comments to that effect.

Link: http://lkml.kernel.org/r/20170405093207.404f8deb@gandalf.local.home
Link: http://lkml.kernel.org/r/20170405193928.GM1600@linux.vnet.ibm.com

Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
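For orientation, here is a minimal stand-alone sketch of the ordering the message describes, not the kernel code itself: trace(), dynticks_nesting, and dynticks_eqs are illustrative stand-ins for trace_rcu_dyntick(), rdtp->dynticks_nesting, and the effect of rcu_dynticks_eqs_enter(). The point is that the trace event fires while the nesting counter is still nonzero, and only then is the counter zeroed and the extended quiescent state entered, leaving just two adjacent statements during which tracing is unsafe.

/*
 * Illustrative model only; in the real code the two "unsafe" statements
 * are bracketed by stack_tracer_disable()/stack_tracer_enable().
 */
#include <stdio.h>

static long dynticks_nesting = 1;	/* nonzero: RCU is still watching   */
static int  dynticks_eqs;		/* nonzero: in extended quiescent state */

static void trace(const char *ev, long oldval, long newval)
{
	printf("rcu_dyntick: %s %ld -> %ld\n", ev, oldval, newval);
}

static void eqs_enter_common(void)
{
	trace("Start", dynticks_nesting, 0);	/* tracing still safe here  */
	dynticks_nesting = 0;			/* breaks tracing momentarily */
	dynticks_eqs = 1;			/* models rcu_dynticks_eqs_enter() */
}

int main(void)
{
	eqs_enter_common();
	printf("nesting=%ld eqs=%d\n", dynticks_nesting, dynticks_eqs);
	return 0;
}

The hunks below show the same ordering in rcu_eqs_enter_common(), and the corresponding simplification of rcu_eqs_enter() and rcu_irq_exit(), which now leave the counter update to rcu_eqs_enter_common().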
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	48
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 50fee7689e71..8b4d273331e4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -57,6 +57,7 @@
 #include <linux/random.h>
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
+#include <linux/ftrace.h>
 
 #include "tree.h"
 #include "rcu.h"
@@ -771,25 +772,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
+ * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
  *
- * If the new value of the ->dynticks_nesting counter now is zero,
- * we really have entered idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
+ * Enter idle, doing appropriate accounting.  The caller must have
+ * disabled interrupts.
  */
-static void rcu_eqs_enter_common(long long oldval, bool user)
+static void rcu_eqs_enter_common(bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
-	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 			idle_task(smp_processor_id());
 
-		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
+		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -800,7 +800,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	rcu_dynticks_eqs_enter();
+	stack_tracer_disable();
+	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
+	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
+	stack_tracer_enable();
 	rcu_dynticks_task_enter();
 
 	/*
@@ -821,19 +824,15 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
  */
 static void rcu_eqs_enter(bool user)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
-		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(oldval, user);
-	} else {
+		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rcu_eqs_enter_common(user);
+	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	}
 }
 
 /**
@@ -892,19 +891,18 @@ void rcu_user_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
-	else
-		rcu_eqs_enter_common(oldval, true);
+		     rdtp->dynticks_nesting < 1);
+	if (rdtp->dynticks_nesting <= 1) {
+		rcu_eqs_enter_common(true);
+	} else {
+		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
+		rdtp->dynticks_nesting--;
+	}
 	rcu_sysidle_enter(1);
 }
 