author     Steven Rostedt <srostedt@redhat.com>      2008-05-12 15:20:42 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2008-05-23 14:32:46 -0400
commit     81d68a96a39844853b37f20cc8282d9b65b78ef3 (patch)
tree       bbc05f415930c15fb5a1004620bd77585fcec43a /kernel
parent     352ad25aa4a189c667cb2af333948d34692a2d27 (diff)
ftrace: trace irq disabled critical timings
This patch adds latency tracing for critical timings
(how long interrupts are disabled for).

 "irqsoff" is added to /debugfs/tracing/available_tracers

Note:
  tracing_max_latency
    also holds the max latency for irqsoff (in usecs).
    (default to large number so one must start latency tracing)

  tracing_thresh
    threshold (in usecs) to always print out if irqs off
    is detected to be longer than stated here.
    If irq_thresh is non-zero, then max_irq_latency
    is ignored.

Here's an example of a trace with ftrace_enabled = 0

=======
preemption latency trace v1.1.5 on 2.6.24-rc7
Signed-off-by: Ingo Molnar <mingo@elte.hu>
--------------------------------------------------------------------
 latency: 100 us, #3/3, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
    -----------------
 => started at: _spin_lock_irqsave+0x2a/0xb7
 => ended at:   _spin_unlock_irqrestore+0x32/0x5f

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
 swapper-0     1d.s3    0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
 swapper-0     1d.s3  100us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1d.s3  100us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)

vim:ft=help
=======

And this is a trace with ftrace_enabled == 1

=======
preemption latency trace v1.1.5 on 2.6.24-rc7
--------------------------------------------------------------------
 latency: 102 us, #12/12, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
    -----------------
 => started at: _spin_lock_irqsave+0x2a/0xb7
 => ended at:   _spin_unlock_irqrestore+0x32/0x5f

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
 swapper-0     1dNs3    0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
 swapper-0     1dNs3   46us : e1000_read_phy_reg+0x16/0x225 [e1000] (e1000_update_stats+0x5e2/0x64c [e1000])
 swapper-0     1dNs3   46us : e1000_swfw_sync_acquire+0x10/0x99 [e1000] (e1000_read_phy_reg+0x49/0x225 [e1000])
 swapper-0     1dNs3   46us : e1000_get_hw_eeprom_semaphore+0x12/0xa6 [e1000] (e1000_swfw_sync_acquire+0x36/0x99 [e1000])
 swapper-0     1dNs3   47us : __const_udelay+0x9/0x47 (e1000_read_phy_reg+0x116/0x225 [e1000])
 swapper-0     1dNs3   47us+: __delay+0x9/0x50 (__const_udelay+0x45/0x47)
 swapper-0     1dNs3   97us : preempt_schedule+0xc/0x84 (__delay+0x4e/0x50)
 swapper-0     1dNs3   98us : e1000_swfw_sync_release+0xc/0x55 [e1000] (e1000_read_phy_reg+0x211/0x225 [e1000])
 swapper-0     1dNs3   99us+: e1000_put_hw_eeprom_semaphore+0x9/0x35 [e1000] (e1000_swfw_sync_release+0x50/0x55 [e1000])
 swapper-0     1dNs3  101us : _spin_unlock_irqrestore+0xe/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1dNs3  102us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1dNs3  102us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)

vim:ft=help
=======

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
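For reference, a minimal usage sketch (not part of this patch): it assumes debugfs is mounted at /debugfs as in the examples above, and uses the tracing core's current_tracer file (provided by the existing ftrace infrastructure, not added here) to select the tracer.

    # select the irqsoff tracer and reset the recorded maximum
    echo irqsoff > /debugfs/tracing/current_tracer
    echo 0 > /debugfs/tracing/tracing_max_latency
    # ... run the workload of interest ...
    # worst-case irqs-off latency observed so far, in usecs
    cat /debugfs/tracing/tracing_max_latency

Because report_latency() only records a new trace when an irqs-off section exceeds the current tracing_max_latency (or tracing_thresh, if set), writing 0 to tracing_max_latency re-arms the maximum search.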
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/fork.c                   2
-rw-r--r--   kernel/lockdep.c               23
-rw-r--r--   kernel/printk.c                 2
-rw-r--r--   kernel/trace/Kconfig           18
-rw-r--r--   kernel/trace/Makefile           1
-rw-r--r--   kernel/trace/trace_irqsoff.c  402
6 files changed, 443 insertions, 5 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 19908b26cf80..d66d676dc362 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -909,7 +909,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	rt_mutex_init_task(p);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_LOCKDEP)
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a3f087..e21924365ea3 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
 
 #include <asm/sections.h>
 
@@ -982,7 +983,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1680,7 +1681,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2013,11 +2014,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void notrace trace_hardirqs_on_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 	unsigned long ip;
 
+	time_hardirqs_on(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2055,16 +2058,23 @@ void trace_hardirqs_on(void)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void notrace trace_hardirqs_on(void)
+{
+	trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void notrace trace_hardirqs_off_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 
+	time_hardirqs_off(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2082,7 +2092,12 @@ void trace_hardirqs_off(void)
 	} else
 		debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void notrace trace_hardirqs_off(void)
+{
+	trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fb01c32aa3b..ae7d5b9e535d 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1041,7 +1041,9 @@ void release_console_sem(void)
 		_log_end = log_end;
 		con_start = log_end;		/* Flush */
 		spin_unlock(&logbuf_lock);
+		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(_con_start, _log_end);
+		start_critical_timings();
 		local_irq_restore(flags);
 	}
 	console_locked = 0;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 892ecc94a82b..896df1cf6adc 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -26,6 +26,24 @@ config FTRACE
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
+config IRQSOFF_TRACER
+	bool "Interrupts-off Latency Tracer"
+	default n
+	depends on TRACE_IRQFLAGS_SUPPORT
+	depends on GENERIC_TIME
+	select TRACE_IRQFLAGS
+	select TRACING
+	select TRACER_MAX_TRACE
+	help
+	  This option measures the time spent in irqs-off critical
+	  sections, with microsecond accuracy.
+
+	  The default measurement method is a maximum search, which is
+	  disabled by default and can be runtime (re-)started
+	  via:
+
+	      echo 0 > /debugfs/tracing/tracing_max_latency
+
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	depends on DEBUG_KERNEL
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 5508cdb19aea..46be8647fb65 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_FTRACE) += libftrace.o
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
new file mode 100644
index 000000000000..a9131b0cf1a5
--- /dev/null
+++ b/kernel/trace/trace_irqsoff.c
@@ -0,0 +1,402 @@
+/*
+ * trace irqs off critical timings
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * From code in the latency_tracer, that is:
+ *
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+#include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+static struct trace_array	*irqsoff_trace __read_mostly;
+static int			tracer_enabled __read_mostly;
+
+/*
+ * Sequence count - we record it when starting a measurement and
+ * skip the latency if the sequence has changed - some other section
+ * did a maximum and could disturb our measurement with serial console
+ * printouts, etc. Truly coinciding maximum latencies should be rare
+ * and what happens together happens separately as well, so this doesn't
+ * decrease the validity of the maximum found:
+ */
+static __cacheline_aligned_in_smp	unsigned long max_sequence;
+
+#ifdef CONFIG_FTRACE
+/*
+ * irqsoff uses its own tracer function to keep the overhead down:
+ */
+static void notrace
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (likely(!tracer_enabled))
+		return;
+
+	local_save_flags(flags);
+
+	if (!irqs_disabled_flags(flags))
+		return;
+
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		ftrace(tr, data, ip, parent_ip, flags);
+
+	atomic_dec(&data->disabled);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = irqsoff_tracer_call,
+};
+#endif /* CONFIG_FTRACE */
+
+/*
+ * Should this new latency be reported/recorded?
+ */
+static int notrace report_latency(cycle_t delta)
+{
+	if (tracing_thresh) {
+		if (delta < tracing_thresh)
+			return 0;
+	} else {
+		if (delta <= tracing_max_latency)
+			return 0;
+	}
+	return 1;
+}
+
+static void notrace
+check_critical_timing(struct trace_array *tr,
+		      struct trace_array_cpu *data,
+		      unsigned long parent_ip,
+		      int cpu)
+{
+	unsigned long latency, t0, t1;
+	cycle_t T0, T1, T2, delta;
+	unsigned long flags;
+
+	/*
+	 * usecs conversion is slow so we try to delay the conversion
+	 * as long as possible:
+	 */
+	T0 = data->preempt_timestamp;
+	T1 = now(cpu);
+	delta = T1-T0;
+
+	local_save_flags(flags);
+
+	if (!report_latency(delta))
+		goto out;
+
+	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+	/*
+	 * Update the timestamp, because the trace entry above
+	 * might change it (it can only get larger so the latency
+	 * is fair to be reported):
+	 */
+	T2 = now(cpu);
+
+	delta = T2-T0;
+
+	latency = nsecs_to_usecs(delta);
+
+	if (data->critical_sequence != max_sequence)
+		goto out;
+
+	tracing_max_latency = delta;
+	t0 = nsecs_to_usecs(T0);
+	t1 = nsecs_to_usecs(T1);
+
+	data->critical_end = parent_ip;
+
+	update_max_tr_single(tr, current, cpu);
+
+	if (tracing_thresh)
+		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
+		       "violates %lu us threshold.\n"
+		       " => started at timestamp %lu: ",
+				current->comm, current->pid,
+				raw_smp_processor_id(),
+				latency, nsecs_to_usecs(tracing_thresh), t0);
+	else
+		printk(KERN_INFO "(%16s-%-5d|#%d):"
+		       " new %lu us maximum-latency "
+		       "critical section.\n => started at timestamp %lu: ",
+				current->comm, current->pid,
+				raw_smp_processor_id(),
+				latency, t0);
+
+	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
+	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
+	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
+	dump_stack();
+	t1 = nsecs_to_usecs(now(cpu));
+	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);
+
+	max_sequence++;
+
+out:
+	data->critical_sequence = max_sequence;
+	data->preempt_timestamp = now(cpu);
+	tracing_reset(data);
+	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+}
+
+static inline void notrace
+start_critical_timing(unsigned long ip, unsigned long parent_ip)
+{
+	int cpu;
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+
+	if (likely(!tracer_enabled))
+		return;
+
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(!data) || unlikely(!data->trace) ||
+	    data->critical_start || atomic_read(&data->disabled))
+		return;
+
+	atomic_inc(&data->disabled);
+
+	data->critical_sequence = max_sequence;
+	data->preempt_timestamp = now(cpu);
+	data->critical_start = parent_ip;
+	tracing_reset(data);
+
+	local_save_flags(flags);
+	ftrace(tr, data, ip, parent_ip, flags);
+
+	atomic_dec(&data->disabled);
+}
+
+static inline void notrace
+stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+{
+	int cpu;
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+
+	if (likely(!tracer_enabled))
+		return;
+
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(!data) || unlikely(!data->trace) ||
+	    !data->critical_start || atomic_read(&data->disabled))
+		return;
+
+	atomic_inc(&data->disabled);
+	local_save_flags(flags);
+	ftrace(tr, data, ip, parent_ip, flags);
+	check_critical_timing(tr, data, parent_ip, cpu);
+	data->critical_start = 0;
+	atomic_dec(&data->disabled);
+}
+
+void notrace start_critical_timings(void)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+}
+
+void notrace stop_critical_timings(void)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		stop_critical_timing(a0, a1);
+}
+
+void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		start_critical_timing(a0, a1);
+}
+
+#else /* !CONFIG_PROVE_LOCKING */
+
+/*
+ * Stubs:
+ */
+
+void early_boot_irqs_off(void)
+{
+}
+
+void early_boot_irqs_on(void)
+{
+}
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+
+/*
+ * We are only interested in hardirq on/off events:
+ */
+void notrace trace_hardirqs_on(void)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void notrace trace_hardirqs_off(void)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		stop_critical_timing(CALLER_ADDR0, caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	if (irqs_disabled_flags(flags))
+		start_critical_timing(CALLER_ADDR0, caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+static void start_irqsoff_tracer(struct trace_array *tr)
+{
+	tracer_enabled = 1;
+	register_ftrace_function(&trace_ops);
+}
+
+static void stop_irqsoff_tracer(struct trace_array *tr)
+{
+	unregister_ftrace_function(&trace_ops);
+	tracer_enabled = 0;
+}
+
+static void irqsoff_tracer_init(struct trace_array *tr)
+{
+	irqsoff_trace = tr;
+	/* make sure that the tracer is visible */
+	smp_wmb();
+
+	if (tr->ctrl)
+		start_irqsoff_tracer(tr);
+}
+
+static void irqsoff_tracer_reset(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		stop_irqsoff_tracer(tr);
+}
+
+static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		start_irqsoff_tracer(tr);
+	else
+		stop_irqsoff_tracer(tr);
+}
+
+static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
+{
+	/* stop the trace while dumping */
+	if (iter->tr->ctrl)
+		stop_irqsoff_tracer(iter->tr);
+}
+
+static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
+{
+	if (iter->tr->ctrl)
+		start_irqsoff_tracer(iter->tr);
+}
+
+static struct tracer irqsoff_tracer __read_mostly =
+{
+	.name		= "irqsoff",
+	.init		= irqsoff_tracer_init,
+	.reset		= irqsoff_tracer_reset,
+	.open		= irqsoff_tracer_open,
+	.close		= irqsoff_tracer_close,
+	.ctrl_update	= irqsoff_tracer_ctrl_update,
+	.print_max	= 1,
+};
+
+__init static int init_irqsoff_tracer(void)
+{
+	register_tracer(&irqsoff_tracer);
+
+	return 0;
+}
+device_initcall(init_irqsoff_tracer);