 arch/x86/kernel/process_32.c |   3
 include/linux/ftrace.h       |   8
 include/linux/irqflags.h     |   3
 include/linux/preempt.h      |   2
 kernel/sched.c               |  24
 kernel/trace/Kconfig         |  25
 kernel/trace/Makefile        |   1
 kernel/trace/trace_irqsoff.c | 184
 8 files changed, 197 insertions, 53 deletions
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476dfbb60d..a30aa1f2607a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -185,7 +185,10 @@ void cpu_idle(void)
 
                         local_irq_disable();
                         __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+                        /* Don't trace irqs off for idle */
+                        stop_critical_timings();
                         idle();
+                        start_critical_timings();
                 }
                 tick_nohz_restart_sched_tick();
                 preempt_enable_no_resched();
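
The bracketing added here keeps the idle loop out of the irqs-off measurement: cpu_idle() runs with interrupts disabled, so without it every idle period would be reported as an enormous latency. A minimal user-space sketch of the same bracketing idea (hypothetical names and a fake cost counter, not the kernel API):

#include <stdio.h>

/* A region is being timed whenever `timing` is nonzero;
 * "cost" accumulates into the current region. */
static int timing = 1;          /* irqs are "off", a timing is in flight */
static long cur, max_latency;

static void stop_critical_timings(void)   /* close region, keep the max */
{
        if (timing && cur > max_latency)
                max_latency = cur;
        timing = 0;
}

static void start_critical_timings(void)  /* open a fresh region */
{
        cur = 0;
        timing = 1;
}

static void spend(long cost) { if (timing) cur += cost; }

int main(void)
{
        spend(3);                  /* real irqs-off work: measured */
        stop_critical_timings();   /* don't trace irqs off for "idle" */
        spend(1000000);            /* idle: would dwarf every real latency */
        start_critical_timings();
        spend(5);
        stop_critical_timings();
        printf("max: %ld\n", max_latency);   /* prints 5, not 1000000 */
        return 0;
}
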
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 0a20445dcbcc..740c97dcf9cb 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -58,4 +58,12 @@ extern void mcount(void);
 # define time_hardirqs_off(a0, a1)      do { } while (0)
 #endif
 
+#ifdef CONFIG_PREEMPT_TRACER
+  extern void notrace trace_preempt_on(unsigned long a0, unsigned long a1);
+  extern void notrace trace_preempt_off(unsigned long a0, unsigned long a1);
+#else
+# define trace_preempt_on(a0, a1)       do { } while (0)
+# define trace_preempt_off(a0, a1)      do { } while (0)
+#endif
+
 #endif /* _LINUX_FTRACE_H */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5b711d4e9fd9..2b1c2e58566e 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -41,7 +41,8 @@
 # define INIT_TRACE_IRQFLAGS
 #endif
 
-#ifdef CONFIG_IRQSOFF_TRACER
+#if defined(CONFIG_IRQSOFF_TRACER) || \
+    defined(CONFIG_PREEMPT_TRACER)
 extern void stop_critical_timings(void);
 extern void start_critical_timings(void);
 #else
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 36b03d50bf40..72b1a10a59b6 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,7 +10,7 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
 
-#ifdef CONFIG_DEBUG_PREEMPT
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void add_preempt_count(int val);
 extern void sub_preempt_count(int val);
 #else
diff --git a/kernel/sched.c b/kernel/sched.c
index 73e600852365..328494e28df2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -70,6 +70,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4365,26 +4366,44 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                                defined(CONFIG_PREEMPT_TRACER))
+
+static inline unsigned long get_parent_ip(unsigned long addr)
+{
+        if (in_lock_functions(addr)) {
+                addr = CALLER_ADDR2;
+                if (in_lock_functions(addr))
+                        addr = CALLER_ADDR3;
+        }
+        return addr;
+}
 
 void __kprobes add_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
         /*
          * Underflow?
          */
         if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                 return;
+#endif
         preempt_count() += val;
+#ifdef CONFIG_DEBUG_PREEMPT
         /*
          * Spinlock count overflowing soon?
          */
         DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                 PREEMPT_MASK - 10);
+#endif
+        if (preempt_count() == val)
+                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 EXPORT_SYMBOL(add_preempt_count);
 
 void __kprobes sub_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
         /*
          * Underflow?
          */
@@ -4396,7 +4415,10 @@ void __kprobes sub_preempt_count(int val)
         if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
                         !(preempt_count() & PREEMPT_MASK)))
                 return;
+#endif
 
+        if (preempt_count() == val)
+                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
         preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
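
The `preempt_count() == val` tests make the hooks fire only on the outermost transition: trace_preempt_off() when the count rises from zero, trace_preempt_on() just before it returns to zero, so nested preempt_disable() pairs never restart a measurement; get_parent_ip() additionally skips over lock functions so the report blames their caller rather than spin_lock itself. A stand-alone sketch of the outermost-transition check (fake counter and printf hooks standing in for the tracer):

#include <stdio.h>

static int preempt_cnt;

/* Hypothetical hooks standing in for trace_preempt_off/on(). */
static void trace_off(void) { printf("preempt-off section begins\n"); }
static void trace_on(void)  { printf("preempt-off section ends\n"); }

static void add_preempt_count(int val)
{
        preempt_cnt += val;
        if (preempt_cnt == val)         /* 0 -> val: outermost disable */
                trace_off();
}

static void sub_preempt_count(int val)
{
        if (preempt_cnt == val)         /* val -> 0: outermost enable */
                trace_on();
        preempt_cnt -= val;
}

int main(void)
{
        add_preempt_count(1);   /* fires: outermost disable */
        add_preempt_count(1);   /* nested: silent */
        sub_preempt_count(1);   /* nested: silent */
        sub_preempt_count(1);   /* fires: count returns to zero */
        return 0;
}
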
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 896df1cf6adc..6430016b98e8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -44,6 +44,31 @@ config IRQSOFF_TRACER
 
               echo 0 > /debugfs/tracing/tracing_max_latency
 
+          (Note that kernel size and overhead increase with this option
+          enabled. This option and the preempt-off timing option can be
+          used together or separately.)
+
+config PREEMPT_TRACER
+        bool "Preemption-off Latency Tracer"
+        default n
+        depends on GENERIC_TIME
+        depends on PREEMPT
+        select TRACING
+        select TRACER_MAX_TRACE
+        help
+          This option measures the time spent in preemption-off critical
+          sections, with microsecond accuracy.
+
+          The default measurement method is a maximum search, which is
+          disabled by default and can be (re-)started at runtime via:
+
+              echo 0 > /debugfs/tracing/tracing_max_latency
+
+          (Note that kernel size and overhead increase with this option
+          enabled. This option and the irqs-off timing option can be
+          used together or separately.)
+
 config SCHED_TRACER
         bool "Scheduling Latency Tracer"
         depends on DEBUG_KERNEL
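
As both help texts note, writing 0 to tracing_max_latency re-arms the maximum search. A minimal sketch of that reset in C, assuming debugfs is mounted at /debugfs as the help text does (adjust the path to your mount point):

#include <stdio.h>

int main(void)
{
        /* Path as given in the Kconfig help above. */
        FILE *f = fopen("/debugfs/tracing/tracing_max_latency", "w");

        if (!f) {
                perror("tracing_max_latency");
                return 1;
        }
        fputs("0\n", f);        /* reset: (re-)start the maximum search */
        fclose(f);
        return 0;
}
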
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 46be8647fb65..3fec653d6533 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FTRACE) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a9131b0cf1a5..8b1231633dc5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -21,6 +21,36 @@
 static struct trace_array       *irqsoff_trace __read_mostly;
 static int                      tracer_enabled __read_mostly;
 
+static DEFINE_PER_CPU(int, tracing_cpu);
+
+enum {
+        TRACER_IRQS_OFF         = (1 << 1),
+        TRACER_PREEMPT_OFF      = (1 << 2),
+};
+
+static int trace_type __read_mostly;
+
+#ifdef CONFIG_PREEMPT_TRACER
+static inline int notrace
+preempt_trace(void)
+{
+        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
+}
+#else
+# define preempt_trace() (0)
+#endif
+
+#ifdef CONFIG_IRQSOFF_TRACER
+static inline int notrace
+irq_trace(void)
+{
+        return ((trace_type & TRACER_IRQS_OFF) &&
+                irqs_disabled());
+}
+#else
+# define irq_trace() (0)
+#endif
+
 /*
  * Sequence count - we record it when starting a measurement and
  * skip the latency if the sequence has changed - some other section
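
All three tracers share this one engine: trace_type records which events arm it, and preempt_trace()/irq_trace() combine that compile-time choice with the live CPU state. A stand-alone sketch of the two predicates, with plain variables standing in for preempt_count() and irqs_disabled():

#include <stdio.h>

enum {
        TRACER_IRQS_OFF    = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type;
/* Fake CPU state standing in for preempt_count()/irqs_disabled(). */
static int fake_preempt_count, fake_irqs_disabled;

static int preempt_trace(void)
{
        return (trace_type & TRACER_PREEMPT_OFF) && fake_preempt_count;
}

static int irq_trace(void)
{
        return (trace_type & TRACER_IRQS_OFF) && fake_irqs_disabled;
}

int main(void)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;  /* "preemptirqsoff" */
        fake_irqs_disabled = 1;
        printf("irq_trace=%d preempt_trace=%d\n", irq_trace(), preempt_trace());
        return 0;
}
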
@@ -44,14 +74,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
         long disabled;
         int cpu;
 
-        if (likely(!tracer_enabled))
+        if (likely(!__get_cpu_var(tracing_cpu)))
                 return;
 
         local_save_flags(flags);
 
-        if (!irqs_disabled_flags(flags))
-                return;
-
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
@@ -171,23 +198,29 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
         if (likely(!tracer_enabled))
                 return;
 
+        if (__get_cpu_var(tracing_cpu))
+                return;
+
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
 
         if (unlikely(!data) || unlikely(!data->trace) ||
-            data->critical_start || atomic_read(&data->disabled))
+            atomic_read(&data->disabled))
                 return;
 
         atomic_inc(&data->disabled);
 
         data->critical_sequence = max_sequence;
         data->preempt_timestamp = now(cpu);
-        data->critical_start = parent_ip;
+        data->critical_start = parent_ip ? : ip;
         tracing_reset(data);
 
         local_save_flags(flags);
+
         ftrace(tr, data, ip, parent_ip, flags);
 
+        __get_cpu_var(tracing_cpu) = 1;
+
         atomic_dec(&data->disabled);
 }
 
@@ -199,7 +232,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
         struct trace_array_cpu *data;
         unsigned long flags;
 
-        if (likely(!tracer_enabled))
+        /* Always clear the tracing cpu on stopping the trace */
+        if (unlikely(__get_cpu_var(tracing_cpu)))
+                __get_cpu_var(tracing_cpu) = 0;
+        else
+                return;
+
+        if (!tracer_enabled)
                 return;
 
         cpu = raw_smp_processor_id();
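
Together with the check added in start_critical_timing(), the per-CPU tracing_cpu flag makes the pair idempotent: only the first off-event on a CPU opens a measurement, and the first matching on-event closes it, clearing the flag even if the tracer was disabled in between. A user-space sketch with a single flag in place of the per-CPU variable:

#include <stdio.h>

static int tracing_cpu;         /* stand-in for the per-CPU flag */

static void start_critical_timing(const char *who)
{
        if (tracing_cpu)
                return;                 /* already measuring: nested off */
        tracing_cpu = 1;
        printf("timing started by %s\n", who);
}

static void stop_critical_timing(const char *who)
{
        /* Always clear the flag on stopping the trace */
        if (tracing_cpu)
                tracing_cpu = 0;
        else
                return;                 /* no measurement in flight */
        printf("timing stopped by %s\n", who);
}

int main(void)
{
        start_critical_timing("irqs-off");      /* opens the measurement */
        start_critical_timing("preempt-off");   /* nested: ignored */
        stop_critical_timing("preempt-off");    /* first on-event closes it */
        stop_critical_timing("irqs-off");       /* nothing in flight: no-op */
        return 0;
}
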
@@ -212,49 +251,35 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
         atomic_inc(&data->disabled);
         local_save_flags(flags);
         ftrace(tr, data, ip, parent_ip, flags);
-        check_critical_timing(tr, data, parent_ip, cpu);
+        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
         data->critical_start = 0;
         atomic_dec(&data->disabled);
 }
 
+/* start and stop critical timings, used to suspend tracing (in idle) */
 void notrace start_critical_timings(void)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (preempt_trace() || irq_trace())
                 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
 void notrace stop_critical_timings(void)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (preempt_trace() || irq_trace())
                 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
+#ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
 void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 stop_critical_timing(a0, a1);
 }
 
 void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 start_critical_timing(a0, a1);
 }
 
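
The `parent_ip ? : ip` expressions above use GCC's conditional-with-omitted-middle-operand extension: `x ? : y` means `x ? x : y` with x evaluated only once, so a zero parent_ip falls back to ip. A tiny demonstration (GNU extension, compile with gcc; not standard C):

#include <stdio.h>

int main(void)
{
        unsigned long parent_ip = 0, ip = 0xc0100000UL;

        /* GNU extension: x ? : y  ==  x ? x : y, evaluating x once. */
        unsigned long start = parent_ip ? : ip;

        printf("%#lx\n", start);   /* 0xc0100000: fell back to ip */
        return 0;
}
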
@@ -289,49 +314,46 @@ inline void print_irqtrace_events(struct task_struct *curr)
  */
 void notrace trace_hardirqs_on(void)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void notrace trace_hardirqs_off(void)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
 {
-        unsigned long flags;
-
-        local_save_flags(flags);
-
-        if (irqs_disabled_flags(flags))
+        if (!preempt_trace() && irq_trace())
                 start_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
+#endif /* CONFIG_IRQSOFF_TRACER */
+
+#ifdef CONFIG_PREEMPT_TRACER
+void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+        stop_critical_timing(a0, a1);
+}
+
+void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+        start_critical_timing(a0, a1);
+}
+#endif /* CONFIG_PREEMPT_TRACER */
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
@@ -345,7 +367,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
         tracer_enabled = 0;
 }
 
-static void irqsoff_tracer_init(struct trace_array *tr)
+static void __irqsoff_tracer_init(struct trace_array *tr)
 {
         irqsoff_trace = tr;
         /* make sure that the tracer is visible */
@@ -382,6 +404,13 @@ static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
                 start_irqsoff_tracer(iter->tr);
 }
 
+#ifdef CONFIG_IRQSOFF_TRACER
+static void irqsoff_tracer_init(struct trace_array *tr)
+{
+        trace_type = TRACER_IRQS_OFF;
+
+        __irqsoff_tracer_init(tr);
+}
 static struct tracer irqsoff_tracer __read_mostly =
 {
         .name           = "irqsoff",
@@ -392,10 +421,65 @@ static struct tracer irqsoff_tracer __read_mostly =
         .ctrl_update    = irqsoff_tracer_ctrl_update,
         .print_max      = 1,
 };
+# define register_irqsoff(trace) register_tracer(&trace)
+#else
+# define register_irqsoff(trace) do { } while (0)
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+static void preemptoff_tracer_init(struct trace_array *tr)
+{
+        trace_type = TRACER_PREEMPT_OFF;
+
+        __irqsoff_tracer_init(tr);
+}
+
+static struct tracer preemptoff_tracer __read_mostly =
+{
+        .name           = "preemptoff",
+        .init           = preemptoff_tracer_init,
+        .reset          = irqsoff_tracer_reset,
+        .open           = irqsoff_tracer_open,
+        .close          = irqsoff_tracer_close,
+        .ctrl_update    = irqsoff_tracer_ctrl_update,
+        .print_max      = 1,
+};
+# define register_preemptoff(trace) register_tracer(&trace)
+#else
+# define register_preemptoff(trace) do { } while (0)
+#endif
+
+#if defined(CONFIG_IRQSOFF_TRACER) && \
+        defined(CONFIG_PREEMPT_TRACER)
+
+static void preemptirqsoff_tracer_init(struct trace_array *tr)
+{
+        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
+
+        __irqsoff_tracer_init(tr);
+}
+
+static struct tracer preemptirqsoff_tracer __read_mostly =
+{
+        .name           = "preemptirqsoff",
+        .init           = preemptirqsoff_tracer_init,
+        .reset          = irqsoff_tracer_reset,
+        .open           = irqsoff_tracer_open,
+        .close          = irqsoff_tracer_close,
+        .ctrl_update    = irqsoff_tracer_ctrl_update,
+        .print_max      = 1,
+};
+
+# define register_preemptirqsoff(trace) register_tracer(&trace)
+#else
+# define register_preemptirqsoff(trace) do { } while (0)
+#endif
 
 __init static int init_irqsoff_tracer(void)
 {
-        register_tracer(&irqsoff_tracer);
+        register_irqsoff(irqsoff_tracer);
+        register_preemptoff(preemptoff_tracer);
+        register_preemptirqsoff(preemptirqsoff_tracer);
 
         return 0;
 }
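
The register_* macros let init_irqsoff_tracer() register whichever tracers were configured in without an #ifdef block per tracer: each expands to register_tracer(&trace) when its option is set and to a do { } while (0) no-op otherwise. A sketch of the pattern with a hypothetical FOO tracer (not the real tracer API):

#include <stdio.h>

struct tracer { const char *name; };

static void register_tracer(struct tracer *t)
{
        printf("registered %s\n", t->name);
}

#define CONFIG_FOO_TRACER 1     /* flip to 0 to compile the stub path */

#if CONFIG_FOO_TRACER
static struct tracer foo_tracer = { .name = "foo" };
# define register_foo(trace)    register_tracer(&trace)
#else
# define register_foo(trace)    do { } while (0)
#endif

int main(void)
{
        register_foo(foo_tracer);       /* a no-op when the config is off */
        return 0;
}
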