author     Joel Fernandes (Google) <joel@joelfernandes.org>   2018-07-30 18:24:23 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>      2018-07-31 11:32:27 -0400
commit     c3bc8fd637a9623f5c507bd18f9677effbddf584 (patch)
tree       ad36860302f50e38252cd9ec555f9e998f39f718
parent     e6753f23d961d601dbae50a2fc2a3975c9715b14 (diff)
tracing: Centralize preemptirq tracepoints and unify their usage
This patch detaches the preemptirq tracepoints from the tracers and
keeps them separate.

Advantages:
* Lockdep and the irqsoff tracer can now run in parallel since they no
  longer have their own calls.

* This unifies the use case of adding hooks to an irqsoff and irqson
  event, and a preemptoff and preempton event. 3 users of the events
  exist:
  - Lockdep
  - irqsoff and preemptoff tracers
  - irqs and preempt trace events

The unification cleans up several ifdefs and makes the code in the
preempt and irqsoff tracers simpler. It gets rid of all the horrific
ifdeferry around PROVE_LOCKING and makes configuration of the different
users of the tracepoints easier to understand. It also gets rid of the
time_* function calls from the lockdep hooks that were used to call
into the preemptirq tracer and are not needed anymore. The negative
delta in lines of code in this patch is quite large too.

In the patch we introduce a new CONFIG option PREEMPTIRQ_TRACEPOINTS as
a single point for registering probes onto the tracepoints. With this,
the web of config options for preempt/irq toggle tracepoints and its
users becomes:

 PREEMPT_TRACER  PREEMPTIRQ_EVENTS  IRQSOFF_TRACER  PROVE_LOCKING
       |               |       \          |               |
        \  (selects)  /         \          \  (selects)  /
         TRACE_PREEMPT_TOGGLE    ----> TRACE_IRQFLAGS
                      \                   /
                       \  (depends on)   /
                        PREEMPTIRQ_TRACEPOINTS

Other than the performance tests mentioned in the previous patch, I
also ran the locking API test suite and verified that all test cases
pass. I also injected issues by not registering the lockdep probes onto
the tracepoints, and saw failures, confirming that the probes are
indeed working.

This series + lockdep probes not registered (just to inject errors):
[    0.000000]      hard-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]      soft-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]          hard-safe-A + irqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]          soft-safe-A + irqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]          hard-safe-A + irqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]          soft-safe-A + irqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]     hard-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |
[    0.000000]     soft-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |

With this series + lockdep probes registered, all locking tests pass:
[    0.000000]      hard-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]      soft-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]          hard-safe-A + irqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]          soft-safe-A + irqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]          hard-safe-A + irqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]          soft-safe-A + irqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]     hard-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |
[    0.000000]     soft-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |

Link: http://lkml.kernel.org/r/20180730222423.196630-4-joel@joelfernandes.org

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
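For illustration only, not part of this commit: with the tracepoints
centralized, any built-in user can attach a probe the same way lockdep
and the tracers do in the diff below. A minimal sketch, assuming a
kernel built with CONFIG_PREEMPTIRQ_TRACEPOINTS=y and
CONFIG_TRACE_IRQFLAGS=y; the probe and init names are hypothetical, and
since the tracepoints are not exported to modules, this would live in
built-in code:

#include <linux/init.h>
#include <trace/events/preemptirq.h>

/* Probe signature: the registration data pointer, then the TP_PROTO args. */
static void my_irq_disable_probe(void *data, unsigned long ip,
				 unsigned long parent_ip)
{
	/* Runs on every irq-disable event; must be cheap and may not sleep. */
}

static int __init my_probe_init(void)
{
	/* NULL: no private data handed back to the probe. */
	return register_trace_irq_disable(my_irq_disable_probe, NULL);
}
core_initcall(my_probe_init);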
-rw-r--r--  include/linux/ftrace.h              11
-rw-r--r--  include/linux/irqflags.h            11
-rw-r--r--  include/linux/lockdep.h              8
-rw-r--r--  include/linux/preempt.h              2
-rw-r--r--  include/trace/events/preemptirq.h   23
-rw-r--r--  init/main.c                          5
-rw-r--r--  kernel/locking/lockdep.c            35
-rw-r--r--  kernel/sched/core.c                  2
-rw-r--r--  kernel/trace/Kconfig                22
-rw-r--r--  kernel/trace/Makefile                2
-rw-r--r--  kernel/trace/trace_irqsoff.c       231
-rw-r--r--  kernel/trace/trace_preemptirq.c     72
12 files changed, 195 insertions(+), 229 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 63af5eb0ff46..a397907e8d72 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -701,16 +701,7 @@ static inline unsigned long get_lock_parent_ip(void)
 	return CALLER_ADDR2;
 }
 
-#ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
- static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#if defined(CONFIG_PREEMPT_TRACER) || \
-	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9700f00bbc04..50edb9cbbd26 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,9 +15,16 @@
 #include <linux/typecheck.h>
 #include <asm/irqflags.h>
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+/* Currently trace_softirqs_on/off is used only by lockdep */
+#ifdef CONFIG_PROVE_LOCKING
  extern void trace_softirqs_on(unsigned long ip);
  extern void trace_softirqs_off(unsigned long ip);
+#else
+# define trace_softirqs_on(ip)	do { } while (0)
+# define trace_softirqs_off(ip)	do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
  extern void trace_hardirqs_on(void);
  extern void trace_hardirqs_off(void);
 # define trace_hardirq_context(p)	((p)->hardirq_context)
@@ -43,8 +50,6 @@ do { \
 #else
 # define trace_hardirqs_on()		do { } while (0)
 # define trace_hardirqs_off()		do { } while (0)
-# define trace_softirqs_on(ip)		do { } while (0)
-# define trace_softirqs_off(ip)	do { } while (0)
 # define trace_hardirq_context(p)	0
 # define trace_softirq_context(p)	0
 # define trace_hardirqs_enabled(p)	0
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6fc77d4dbdcd..a8113357ceeb 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -266,7 +266,8 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_info(void);
+extern void lockdep_init(void);
+extern void lockdep_init_early(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
@@ -406,7 +407,8 @@ static inline void lockdep_on(void)
 # define lock_downgrade(l, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
-# define lockdep_info()				do { } while (0)
+# define lockdep_init()				do { } while (0)
+# define lockdep_init_early()			do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
 # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
@@ -532,7 +534,7 @@ do { \
 
 #endif /* CONFIG_LOCKDEP */
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
 static inline void print_irqtrace_events(struct task_struct *curr)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5bd3f151da78..c01813c3fbe9 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -150,7 +150,7 @@
  */
 #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
 #define preempt_count_dec_and_test() \
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 9c4eb33c5a1d..9a0d4ceeb166 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -1,4 +1,4 @@
-#ifdef CONFIG_PREEMPTIRQ_EVENTS
+#ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM preemptirq
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(preemptirq_template,
 		  (void *)((unsigned long)(_stext) + __entry->parent_offs))
 );
 
-#ifndef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_TRACE_IRQFLAGS
 DEFINE_EVENT(preemptirq_template, irq_disable,
 	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
 	     TP_ARGS(ip, parent_ip));
@@ -40,9 +40,14 @@ DEFINE_EVENT(preemptirq_template, irq_disable,
 DEFINE_EVENT(preemptirq_template, irq_enable,
 	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
 	     TP_ARGS(ip, parent_ip));
+#else
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
 #endif
 
-#ifdef CONFIG_DEBUG_PREEMPT
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
 DEFINE_EVENT(preemptirq_template, preempt_disable,
 	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
 	     TP_ARGS(ip, parent_ip));
@@ -50,22 +55,22 @@ DEFINE_EVENT(preemptirq_template, preempt_disable,
 DEFINE_EVENT(preemptirq_template, preempt_enable,
 	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
 	     TP_ARGS(ip, parent_ip));
+#else
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
 #endif
 
 #endif /* _TRACE_PREEMPTIRQ_H */
 
 #include <trace/define_trace.h>
 
-#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
+#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
-#endif
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
 #define trace_preempt_enable(...)
 #define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
diff --git a/init/main.c b/init/main.c
index 3b4ada11ed52..44fe43be84c1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -648,6 +648,9 @@ asmlinkage __visible void __init start_kernel(void)
 	profile_init();
 	call_function_init();
 	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
+
+	lockdep_init_early();
+
 	early_boot_irqs_disabled = false;
 	local_irq_enable();
 
@@ -663,7 +666,7 @@ asmlinkage __visible void __init start_kernel(void)
 		panic("Too many boot %s vars at `%s'", panic_later,
 		      panic_param);
 
-	lockdep_info();
+	lockdep_init();
 
 	/*
 	 * Need to run this when irqs are enabled, because it wants
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fbbb79d5cfa0..03bfaeb9f4e6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,6 +55,7 @@
 
 #include "lockdep_internals.h"
 
+#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
@@ -2839,10 +2840,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 	debug_atomic_inc(hardirqs_on_events);
 }
 
-__visible void trace_hardirqs_on_caller(unsigned long ip)
+static void lockdep_hardirqs_on(void *none, unsigned long ignore,
+				unsigned long ip)
 {
-	time_hardirqs_on(CALLER_ADDR0, ip);
-
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2881,23 +2881,15 @@ __visible void trace_hardirqs_on_caller(unsigned long ip)
 	__trace_hardirqs_on_caller(ip);
 	current->lockdep_recursion = 0;
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-void trace_hardirqs_on(void)
-{
-	trace_hardirqs_on_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-__visible void trace_hardirqs_off_caller(unsigned long ip)
+static void lockdep_hardirqs_off(void *none, unsigned long ignore,
+				 unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, ip);
-
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2919,13 +2911,6 @@ __visible void trace_hardirqs_off_caller(unsigned long ip)
 	} else
 		debug_atomic_inc(redundant_hardirqs_off);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-void trace_hardirqs_off(void)
-{
-	trace_hardirqs_off_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4330,7 +4315,15 @@ out_restore:
 	raw_local_irq_restore(flags);
 }
 
-void __init lockdep_info(void)
+void __init lockdep_init_early(void)
+{
+#ifdef CONFIG_PROVE_LOCKING
+	register_trace_prio_irq_disable(lockdep_hardirqs_off, NULL, INT_MAX);
+	register_trace_prio_irq_enable(lockdep_hardirqs_on, NULL, INT_MIN);
+#endif
+}
+
+void __init lockdep_init(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe365c9a08e9..5de1a4343424 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3189,7 +3189,7 @@ static inline void sched_tick_stop(int cpu) { }
 #endif
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-				defined(CONFIG_PREEMPT_TRACER))
+				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
 /*
  * If the value passed in is equal to the current preempt count
  * then we just disabled preemption. Start timing the latency.
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4d4eb15cc7fd..036cec1fcd24 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -82,6 +82,15 @@ config RING_BUFFER_ALLOW_SWAP
 	  Allow the use of ring_buffer_swap_cpu.
 	  Adds a very slight overhead to tracing when enabled.
 
+config PREEMPTIRQ_TRACEPOINTS
+	bool
+	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
+	select TRACING
+	default y
+	help
+	  Create preempt/irq toggle tracepoints if needed, so that other parts
+	  of the kernel can use them to generate or add hooks to them.
+
 # All tracer options should select GENERIC_TRACER. For those options that are
 # enabled by all tracers (context switch and event tracer) they select TRACING.
 # This allows those options to appear when no other tracer is selected. But the
@@ -155,18 +164,20 @@ config FUNCTION_GRAPH_TRACER
 	  the return value. This is done by setting the current return
 	  address on the current task structure into a stack of calls.
 
+config TRACE_PREEMPT_TOGGLE
+	bool
+	help
+	  Enables hooks which will be called when preemption is first disabled,
+	  and last enabled.
 
 config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
-	depends on DEBUG_PREEMPT || !PROVE_LOCKING
-	depends on TRACING
+	select TRACE_PREEMPT_TOGGLE if PREEMPT
+	select GENERIC_TRACER
 	default n
 	help
 	  Enable tracing of disable and enable events for preemption and irqs.
-	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
-	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
-	  be disabled.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
@@ -203,6 +214,7 @@ config PREEMPT_TRACER
 	select RING_BUFFER_ALLOW_SWAP
 	select TRACER_SNAPSHOT
 	select TRACER_SNAPSHOT_PER_CPU_SWAP
+	select TRACE_PREEMPT_TOGGLE
 	help
 	  This option measures the time spent in preemption-off critical
 	  sections, with microsecond accuracy.
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 81902a79e049..98d53b39a8ee 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
-obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
+obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index f8daa754cce2..770cd30cda40 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,7 +16,6 @@
 
 #include "trace.h"
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
 
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
@@ -450,66 +449,6 @@ void stop_critical_timings(void)
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
 
-#ifdef CONFIG_IRQSOFF_TRACER
-#ifdef CONFIG_PROVE_LOCKING
-void time_hardirqs_on(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-void time_hardirqs_off(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(a0, a1);
-}
-
-#else /* !CONFIG_PROVE_LOCKING */
-
-/*
- * We are only interested in hardirq on/off events:
- */
-static inline void tracer_hardirqs_on(void)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-
-static inline void tracer_hardirqs_off(void)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-#endif /* CONFIG_PROVE_LOCKING */
-#endif /* CONFIG_IRQSOFF_TRACER */
-
-#ifdef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		start_critical_timing(a0, a1);
-}
-#endif /* CONFIG_PREEMPT_TRACER */
-
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
 
@@ -659,15 +598,34 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * We are only interested in hardirq on/off events:
+ */
+static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
+{
+	if (!preempt_trace() && irq_trace())
+		stop_critical_timing(a0, a1);
+}
+
+static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
+{
+	if (!preempt_trace() && irq_trace())
+		start_critical_timing(a0, a1);
+}
+
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
+	register_trace_irq_disable(tracer_hardirqs_off, NULL);
+	register_trace_irq_enable(tracer_hardirqs_on, NULL);
 	return __irqsoff_tracer_init(tr);
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
+	unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
+	unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
 	__irqsoff_tracer_reset(tr);
 }
 
@@ -690,21 +648,34 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-# define register_irqsoff(trace) register_tracer(&trace)
-#else
-# define register_irqsoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
+static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
+{
+	if (preempt_trace() && !irq_trace())
+		stop_critical_timing(a0, a1);
+}
+
+static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
+{
+	if (preempt_trace() && !irq_trace())
+		start_critical_timing(a0, a1);
+}
+
 static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
+	register_trace_preempt_disable(tracer_preempt_off, NULL);
+	register_trace_preempt_enable(tracer_preempt_on, NULL);
 	return __irqsoff_tracer_init(tr);
 }
 
 static void preemptoff_tracer_reset(struct trace_array *tr)
 {
+	unregister_trace_preempt_disable(tracer_preempt_off, NULL);
+	unregister_trace_preempt_enable(tracer_preempt_on, NULL);
 	__irqsoff_tracer_reset(tr);
 }
 
710 681
@@ -727,23 +698,29 @@ static struct tracer preemptoff_tracer __read_mostly =
727 .allow_instances = true, 698 .allow_instances = true,
728 .use_max_tr = true, 699 .use_max_tr = true,
729}; 700};
730# define register_preemptoff(trace) register_tracer(&trace) 701#endif /* CONFIG_PREEMPT_TRACER */
731#else
732# define register_preemptoff(trace) do { } while (0)
733#endif
734 702
735#if defined(CONFIG_IRQSOFF_TRACER) && \ 703#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
736 defined(CONFIG_PREEMPT_TRACER)
737 704
738static int preemptirqsoff_tracer_init(struct trace_array *tr) 705static int preemptirqsoff_tracer_init(struct trace_array *tr)
739{ 706{
740 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; 707 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
741 708
709 register_trace_irq_disable(tracer_hardirqs_off, NULL);
710 register_trace_irq_enable(tracer_hardirqs_on, NULL);
711 register_trace_preempt_disable(tracer_preempt_off, NULL);
712 register_trace_preempt_enable(tracer_preempt_on, NULL);
713
742 return __irqsoff_tracer_init(tr); 714 return __irqsoff_tracer_init(tr);
743} 715}
744 716
745static void preemptirqsoff_tracer_reset(struct trace_array *tr) 717static void preemptirqsoff_tracer_reset(struct trace_array *tr)
746{ 718{
719 unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
720 unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
721 unregister_trace_preempt_disable(tracer_preempt_off, NULL);
722 unregister_trace_preempt_enable(tracer_preempt_on, NULL);
723
747 __irqsoff_tracer_reset(tr); 724 __irqsoff_tracer_reset(tr);
748} 725}
749 726
@@ -766,115 +743,21 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-
-# define register_preemptirqsoff(trace) register_tracer(&trace)
-#else
-# define register_preemptirqsoff(trace) do { } while (0)
 #endif
 
 __init static int init_irqsoff_tracer(void)
 {
-	register_irqsoff(irqsoff_tracer);
-	register_preemptoff(preemptoff_tracer);
-	register_preemptirqsoff(preemptirqsoff_tracer);
-
-	return 0;
-}
-core_initcall(init_irqsoff_tracer);
-#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
-
-#ifndef CONFIG_IRQSOFF_TRACER
-static inline void tracer_hardirqs_on(void) { }
-static inline void tracer_hardirqs_off(void) { }
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#ifdef CONFIG_IRQSOFF_TRACER
+	register_tracer(&irqsoff_tracer);
 #endif
-
-#ifndef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#ifdef CONFIG_PREEMPT_TRACER
+	register_tracer(&preemptoff_tracer);
 #endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
-/* Per-cpu variable to prevent redundant calls when IRQs already off */
-static DEFINE_PER_CPU(int, tracing_irq_cpu);
-
-void trace_hardirqs_on(void)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_on();
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
-
-void trace_hardirqs_off(void)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_off();
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_on_caller(caller_addr);
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_off_caller(caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
+	register_tracer(&preemptirqsoff_tracer);
 #endif
 
-#if defined(CONFIG_PREEMPT_TRACER) || \
-	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
-void trace_preempt_on(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_enable_rcuidle(a0, a1);
-	tracer_preempt_on(a0, a1);
-}
-
-void trace_preempt_off(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_disable_rcuidle(a0, a1);
-	tracer_preempt_off(a0, a1);
+	return 0;
 }
-#endif
+core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
new file mode 100644
index 000000000000..e76b78bf258e
--- /dev/null
+++ b/kernel/trace/trace_preemptirq.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * preemptoff and irqoff tracepoints
+ *
+ * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+void trace_hardirqs_on(void)
+{
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+	this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+	this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
+
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+	trace_preempt_enable_rcuidle(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+	trace_preempt_disable_rcuidle(a0, a1);
+}
+#endif
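Also for illustration, not part of this commit: with
CONFIG_PREEMPTIRQ_EVENTS=y the new events appear under tracefs like any
other trace event. A minimal userspace sketch, assuming tracefs is
mounted at /sys/kernel/tracing (the path and event choice are examples
only; run as root):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a short control string to a tracefs file. */
static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd, i;

	/* Enable the irq_disable event added by this patch. */
	if (write_str("/sys/kernel/tracing/events/preemptirq/irq_disable/enable", "1")) {
		perror("enable irq_disable event");
		return 1;
	}

	/* Stream a few buffers worth of events, then stop. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	for (i = 0; i < 4 && (n = read(fd, buf, sizeof(buf))) > 0; i++)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}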