author    Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-08 21:28:05 -0400
committer Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-10 15:12:00 -0400
commit    3f1756dc210e5abb37121da3e7c10d65920f6ec0 (patch)
tree      9adb16d03c5e6f1de6d1fc765da1d5aa0590e78e
parent    f27107fa20ad531ace5fd580473ff8dd0c6b9ca9 (diff)
tracing: More reverting of "tracing: Centralize preemptirq tracepoints and unify their usage"
Joel Fernandes created a nice patch that cleaned up the duplicate hooks used
by lockdep and the irqsoff latency tracer, making both use tracepoints. But
the latency tracer triggers warnings when tracepoints are used to call into
its routines: mainly, they can be called from NMI context, and if that
happens, SRCU may not work properly because, on some architectures, SRCU is
not safe to be called in both NMI and non-NMI context.

This is a partial revert of the clean-up patch c3bc8fd637a9 ("tracing:
Centralize preemptirq tracepoints and unify their usage") that adds back the
direct calls into the latency tracer. It also calls the trace events only
when not in NMI.

Link: http://lkml.kernel.org/r/20180809210654.622445925@goodmis.org
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
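The shape of the fix is the same at every hook: the tracepoint (which rides
on SRCU) is skipped in NMI context, while the latency tracer is reached by a
direct function call that is safe from NMI. A minimal sketch of that pattern,
assuming a hypothetical hardirqs_on_hook() wrapper; in_nmi(),
trace_irq_enable_rcuidle() and tracer_hardirqs_on() are the real symbols used
in the hunks below:

#include <linux/hardirq.h>      /* in_nmi() */

/* Hypothetical wrapper illustrating the guard pattern of this patch. */
static void hardirqs_on_hook(unsigned long ip, unsigned long parent_ip)
{
        /*
         * Tracepoints use SRCU, which on some architectures is not safe
         * to call from both NMI and non-NMI context, so the trace event
         * is only fired outside of NMI.
         */
        if (!in_nmi())
                trace_irq_enable_rcuidle(ip, parent_ip);

        /*
         * The latency tracer is called directly rather than through a
         * tracepoint, so it still sees events that occur in NMI context.
         */
        tracer_hardirqs_on(ip, parent_ip);
}

The same split is applied to the irq-disable and the preempt enable/disable
paths in trace_preemptirq.c below.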
-rw-r--r--  kernel/trace/trace.h             | 15
-rw-r--r--  kernel/trace/trace_irqsoff.c     | 48
-rw-r--r--  kernel/trace/trace_preemptirq.c  | 25
3 files changed, 38 insertions(+), 50 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d88cd9bb72f4..a62b678731e3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1827,6 +1827,21 @@ static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT_TRACER
+void tracer_preempt_on(unsigned long a0, unsigned long a1);
+void tracer_preempt_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+#ifdef CONFIG_IRQSOFF_TRACER
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
+#endif
+
 extern struct trace_iterator *tracepoint_print_iter;
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 4af990e9c594..94c1ba139b3b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -605,40 +605,18 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 /*
  * We are only interested in hardirq on/off events:
  */
-static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
 	unsigned int pc = preempt_count();
 
-	/*
-	 * Tracepoint probes are expected to be called with preempt disabled,
-	 * We don't care about being called with preempt disabled but we need
-	 * to know in the future if that changes so we can remove the next
-	 * preempt_enable.
-	 */
-	WARN_ON_ONCE(pc < PREEMPT_DISABLE_OFFSET);
-
-	/* Use PREEMPT_DISABLE_OFFSET to handle !CONFIG_PREEMPT cases */
-	pc -= PREEMPT_DISABLE_OFFSET;
-
 	if (!preempt_trace(pc) && irq_trace())
 		stop_critical_timing(a0, a1, pc);
 }
 
-static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
 	unsigned int pc = preempt_count();
 
-	/*
-	 * Tracepoint probes are expected to be called with preempt disabled,
-	 * We don't care about being called with preempt disabled but we need
-	 * to know in the future if that changes so we can remove the next
-	 * preempt_enable.
-	 */
-	WARN_ON_ONCE(pc < PREEMPT_DISABLE_OFFSET);
-
-	/* Use PREEMPT_DISABLE_OFFSET to handle !CONFIG_PREEMPT cases */
-	pc -= PREEMPT_DISABLE_OFFSET;
-
 	if (!preempt_trace(pc) && irq_trace())
 		start_critical_timing(a0, a1, pc);
 }
@@ -647,15 +625,11 @@ static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
-	register_trace_irq_disable(tracer_hardirqs_off, NULL);
-	register_trace_irq_enable(tracer_hardirqs_on, NULL);
 	return __irqsoff_tracer_init(tr);
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-	unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
-	unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
 	__irqsoff_tracer_reset(tr);
 }
 
@@ -681,7 +655,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
+void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
 	int pc = preempt_count();
 
@@ -689,7 +663,7 @@ static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
 		stop_critical_timing(a0, a1, pc);
 }
 
-static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
+void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
 	int pc = preempt_count();
 
@@ -701,15 +675,11 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
-	register_trace_preempt_disable(tracer_preempt_off, NULL);
-	register_trace_preempt_enable(tracer_preempt_on, NULL);
 	return __irqsoff_tracer_init(tr);
 }
 
 static void preemptoff_tracer_reset(struct trace_array *tr)
 {
-	unregister_trace_preempt_disable(tracer_preempt_off, NULL);
-	unregister_trace_preempt_enable(tracer_preempt_on, NULL);
 	__irqsoff_tracer_reset(tr);
 }
 
@@ -740,21 +710,11 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
-	register_trace_irq_disable(tracer_hardirqs_off, NULL);
-	register_trace_irq_enable(tracer_hardirqs_on, NULL);
-	register_trace_preempt_disable(tracer_preempt_off, NULL);
-	register_trace_preempt_enable(tracer_preempt_on, NULL);
-
 	return __irqsoff_tracer_init(tr);
 }
 
 static void preemptirqsoff_tracer_reset(struct trace_array *tr)
 {
-	unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
-	unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
-	unregister_trace_preempt_disable(tracer_preempt_off, NULL);
-	unregister_trace_preempt_enable(tracer_preempt_on, NULL);
-
 	__irqsoff_tracer_reset(tr);
 }
 
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index fa656b25f427..71f553cceb3c 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -9,6 +9,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
@@ -20,7 +21,9 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
@@ -32,7 +35,9 @@ void trace_hardirqs_off(void)
 {
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
-		trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	}
 
 	lockdep_hardirqs_off(CALLER_ADDR0);
@@ -42,7 +47,9 @@ EXPORT_SYMBOL(trace_hardirqs_off);
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
@@ -54,7 +61,9 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
-		trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
 	}
 
 	lockdep_hardirqs_off(CALLER_ADDR0);
@@ -66,11 +75,15 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	trace_preempt_enable_rcuidle(a0, a1);
+	if (!in_nmi())
+		trace_preempt_enable_rcuidle(a0, a1);
+	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	trace_preempt_disable_rcuidle(a0, a1);
+	if (!in_nmi())
+		trace_preempt_disable_rcuidle(a0, a1);
+	tracer_preempt_off(a0, a1);
 }
 #endif