Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--	kernel/trace/trace_irqsoff.c | 44
1 file changed, 16 insertions(+), 28 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac488..667aa8cc0cf 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
  * From code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 Nadia Yvette Chambers
+ * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_RAW_SPINLOCK(max_trace_lock);
+static DEFINE_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -136,8 +136,7 @@ static int func_prolog_dec(struct trace_array *tr,
  * irqsoff uses its own tracer function to keep the overhead down:
  */
 static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
-		    struct ftrace_ops *op, struct pt_regs *pt_regs)
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
@@ -154,7 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -281,20 +280,9 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 }
 
 static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
+static void irqsoff_print_header(struct seq_file *s) { }
 static void irqsoff_trace_open(struct trace_iterator *iter) { }
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
-
-#ifdef CONFIG_FUNCTION_TRACER
-static void irqsoff_print_header(struct seq_file *s)
-{
-	trace_default_header(s);
-}
-#else
-static void irqsoff_print_header(struct seq_file *s)
-{
-	trace_latency_header(s);
-}
-#endif /* CONFIG_FUNCTION_TRACER */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
@@ -333,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	raw_spin_lock_irqsave(&max_trace_lock, flags);
+	spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -356,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;
 
 out_unlock:
-	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
+	spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
@@ -517,13 +505,13 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace() && !irq_trace())
+	if (preempt_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace() && !irq_trace())
+	if (preempt_trace())
 		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
@@ -604,7 +592,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= true,
+	.print_max	= 1,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -614,7 +602,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= true,
+	.use_max_tr	= 1,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -637,7 +625,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= true,
+	.print_max	= 1,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -647,7 +635,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= true,
+	.use_max_tr	= 1,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -672,7 +660,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.print_max	= true,
+	.print_max	= 1,
 	.print_header	= irqsoff_print_header,
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
@@ -682,7 +670,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
-	.use_max_tr	= true,
+	.use_max_tr	= 1,
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -698,4 +686,4 @@ __init static int init_irqsoff_tracer(void)
 
 	return 0;
 }
-core_initcall(init_irqsoff_tracer);
+device_initcall(init_irqsoff_tracer);
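
For readers unfamiliar with the four-argument ftrace callback prototype that appears on the "-" side of the irqsoff_tracer_call and trace_ops hunks above, here is a minimal, hypothetical sketch of how such a callback is registered through the generic ftrace API on a kernel of roughly that vintage. It is not code from this patch; my_callback, my_ops, my_sketch_init and my_sketch_exit are illustrative names only.

/*
 * Illustrative sketch only -- not part of trace_irqsoff.c.  Registers a
 * function-trace callback using the prototype that receives the
 * ftrace_ops and pt_regs pointers.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs on every traced function entry, so keep it cheap */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_callback,
	/* the handler provides its own recursion protection */
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_sketch_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_sketch_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_sketch_init);
module_exit(my_sketch_exit);
MODULE_LICENSE("GPL");

FTRACE_OPS_FL_RECURSION_SAFE, which the hunk above also removes from trace_ops, tells the ftrace core that the callback guards against recursive invocation itself, so no extra recursion check is wrapped around it.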