author     Joel Fernandes (Google) <joel@joelfernandes.org>   2018-08-05 23:40:49 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>      2018-08-06 21:55:24 -0400
commit     da5b3ebb4527733299661229a8d035d64a4f0b1a (patch)
tree       e422ae6632fd0d11e45e2f0b889b89a7dac0b09c
parent     da25a672cf0e2c143bffb40acb507a342e25b4f4 (diff)
tracing: irqsoff: Account for additional preempt_disable
Recently we tried to make the preemptirqsoff tracer use the irqsoff tracepoint probes. However, this causes issues as reported by Masami:

[2.271078] Testing tracer preemptirqsoff: .. no entries found ..FAILED!
[2.381015] WARNING: CPU: 0 PID: 1 at /home/mhiramat/ksrc/linux/kernel/
trace/trace.c:1512 run_tracer_selftest+0xf3/0x154

This is due to the tracepoint code increasing the preempt nesting count by calling an additional preempt_disable before calling into the preemptoff tracer, which messes up the preempt_count() check in tracer_hardirqs_off.

To fix this, make the irqsoff tracer probes balance the additional outer preempt_disable with a preempt_enable_notrace.

The other way to fix this would be to just use SRCU for all tracepoints. However, we can't do that because we can't use NMIs from RCU context.

Link: http://lkml.kernel.org/r/20180806034049.67949-1-joel@joelfernandes.org

Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage")
Fixes: e6753f23d961 ("tracepoint: Make rcuidle tracepoint callers use SRCU")
Reported-by: Masami Hiramatsu <mhiramat@kernel.org>
Tested-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
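For readers who want to see why the extra preempt_disable trips up the preempt_count() check, below is a minimal userspace sketch of the accounting. It is not kernel code: the counter, the helper bodies, and the dispatch function are illustrative stand-ins for the real per-CPU preempt counter and for the tracepoint dispatch in include/linux/tracepoint.h.

/*
 * Userspace model (not kernel code) of the preempt-count accounting
 * problem described above.  Names and bodies are stand-ins only.
 */
#include <stdio.h>

static int preempt_count;	/* stands in for the per-CPU preempt counter */

static void preempt_disable_notrace(void) { preempt_count++; }
static void preempt_enable_notrace(void)  { preempt_count--; }

/* The irqsoff probe wants to see the count its *caller* had. */
static void tracer_hardirqs_off_probe(void)
{
	/* Undo the extra disable added by the tracepoint dispatch ... */
	preempt_enable_notrace();

	printf("probe sees preempt count == %d\n", preempt_count);

	/* ... and restore it before returning to the dispatch code. */
	preempt_disable_notrace();
}

/* Simplified stand-in for the tracepoint dispatch wrapping probe calls. */
static void trace_irq_disable(void)
{
	preempt_disable_notrace();	/* the "additional" disable */
	tracer_hardirqs_off_probe();
	preempt_enable_notrace();
}

int main(void)
{
	/* Caller context: preemption not disabled. */
	trace_irq_disable();		/* prints 0, the caller's count */
	return 0;
}

Compiled and run, the probe reports a count of 0, which is what its caller actually had; without the balancing enable/disable pair in the probe it would see 1, and a preempt_count()-based check such as the one in tracer_hardirqs_off() would misfire.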
 kernel/trace/trace_irqsoff.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 770cd30cda40..ffbf1505d5bc 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -603,14 +603,40 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
  */
 static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
 {
+	/*
+	 * Tracepoint probes are expected to be called with preempt disabled.
+	 * We don't care about being called with preempt disabled but we need
+	 * to know in the future if that changes so we can remove the next
+	 * preempt_enable.
+	 */
+	WARN_ON_ONCE(!preempt_count());
+
+	/* Tracepoint probes disable preemption at least once, account for that */
+	preempt_enable_notrace();
+
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
+
+	preempt_disable_notrace();
 }
 
 static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
 {
+	/*
+	 * Tracepoint probes are expected to be called with preempt disabled.
+	 * We don't care about being called with preempt disabled but we need
+	 * to know in the future if that changes so we can remove the next
+	 * preempt_enable.
+	 */
+	WARN_ON_ONCE(!preempt_count());
+
+	/* Tracepoint probes disable preemption at least once, account for that */
+	preempt_enable_notrace();
+
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
+
+	preempt_disable_notrace();
 }
 
 static int irqsoff_tracer_init(struct trace_array *tr)