author     Steven Rostedt <srostedt@redhat.com>      2008-05-12 15:20:44 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2008-05-23 14:40:15 -0400
commit     89b2f97819dd074297bbe3e19eaa4afcc98845ad (patch)
tree       250196b493d42ac312b089f8f75866518cc772e6 /kernel/trace
parent     18cef379d30f5ded20cc31d7f2d342639d39919d (diff)
ftrace: fix updates to max trace
This patch fixes several bugs in the updating of the max trace that
were introduced by the new buffering. The buffer swapped into the live
trace is now reset, max_tr.entries is set only after all trace pages
have been allocated, a new max_trace_lock closes a race between CPUs
recording a new max latency in check_critical_timing(), and
tracer_enabled is flipped on the safe side of registering and
unregistering the ftrace callback.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace')
 -rw-r--r--  kernel/trace/trace.c         |  6 +++++-
 -rw-r--r--  kernel/trace/trace_irqsoff.c | 27 +++++++++++++++------------
 2 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9175ce91b8f6..95966561ba3d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -153,6 +153,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		memcpy(max_tr.data[i], data, sizeof(*data));
 		data->trace = save_trace;
 		data->trace_pages = save_pages;
+		tracing_reset(data);
 	}
 
 	__update_max_tr(tr, tsk, cpu);
@@ -183,6 +184,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	memcpy(max_tr.data[cpu], data, sizeof(*data));
 	data->trace = save_trace;
 	data->trace_pages = save_pages;
+	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
 	spin_unlock(&ftrace_max_lock);
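For context on the two hunks above: update_max_tr() publishes the live
buffer as the new max snapshot by swapping buffers, which leaves the
pages that previously held the old max backing the live trace. Without
the added tracing_reset(), the next trace would start on top of stale
max entries. Below is a minimal userspace sketch of that swap-and-reset
pattern; struct cpu_buf, save_max() and buf_reset() are illustrative
names, not kernel API.

#include <string.h>

#define BUF_ENTRIES 1024

struct cpu_buf {
	unsigned long entries[BUF_ENTRIES];
	unsigned long head;		/* next free slot */
};

static struct cpu_buf live, max_snapshot;

static void buf_reset(struct cpu_buf *b)
{
	b->head = 0;
	memset(b->entries, 0, sizeof(b->entries));
}

static void save_max(void)
{
	struct cpu_buf tmp = max_snapshot;

	max_snapshot = live;	/* publish the live trace as the new max */
	live = tmp;		/* old max pages now back the live trace */
	buf_reset(&live);	/* the fix: drop the stale max entries */
}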
@@ -877,6 +879,8 @@ print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
 			   entry->ctx.next_prio,
 			   comm);
 		break;
+	default:
+		seq_printf(m, "Unknown type %d\n", entry->type);
 	}
 }
 
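The print_lat_fmt() hunk adds a default branch so that an unrecognized
entry type is reported instead of silently skipped, which makes a
corrupted buffer or a missed case visible in the output. A tiny
stand-alone sketch of the same defensive pattern, with invented entry
types standing in for the kernel's:

#include <stdio.h>

enum { TRACE_FN = 1, TRACE_CTX = 2 };	/* illustrative subset */

static void print_entry(int type)
{
	switch (type) {
	case TRACE_FN:
		printf("function entry\n");
		break;
	case TRACE_CTX:
		printf("context-switch entry\n");
		break;
	default:
		/* mirrors the patch: flag unknown/corrupt entries */
		printf("Unknown type %d\n", type);
	}
}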
@@ -1625,7 +1629,6 @@ __init static int tracer_alloc_buffers(void)
 	 * round up a bit.
 	 */
 	global_trace.entries = ENTRIES_PER_PAGE;
-	max_tr.entries = global_trace.entries;
 	pages++;
 
 	while (global_trace.entries < trace_nr_entries) {
@@ -1633,6 +1636,7 @@ __init static int tracer_alloc_buffers(void)
 			break;
 		pages++;
 	}
+	max_tr.entries = global_trace.entries;
 
 	pr_info("tracer: %d pages allocated for %ld",
 		pages, trace_nr_entries);
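The two tracer_alloc_buffers() hunks move the max_tr.entries assignment
from before the page-allocation loop to after it. In the old placement
max_tr recorded only the initial ENTRIES_PER_PAGE count and went stale
as the loop grew global_trace. A simplified stand-alone model of the
corrected ordering; the struct and grow_one_page() are invented for
illustration and always succeed:

#define ENTRIES_PER_PAGE 128

struct trace_model {
	unsigned long entries;
};

static struct trace_model global_model, max_model;

static int grow_one_page(struct trace_model *t)
{
	t->entries += ENTRIES_PER_PAGE;	/* pretend the allocation worked */
	return 0;
}

static void alloc_buffers(unsigned long requested)
{
	global_model.entries = ENTRIES_PER_PAGE;

	while (global_model.entries < requested) {
		if (grow_one_page(&global_model) < 0)
			break;
	}

	/* as in the patch: copy the *final* size, after the loop */
	max_model.entries = global_model.entries;
}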
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index bd3f88198308..74165f611f36 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,6 +23,8 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
+static DEFINE_SPINLOCK(max_trace_lock);
+
 enum {
 	TRACER_IRQS_OFF = (1 << 1),
 	TRACER_PREEMPT_OFF = (1 << 2),
@@ -126,7 +128,7 @@ check_critical_timing(struct trace_array *tr,
 		      int cpu)
 {
 	unsigned long latency, t0, t1;
-	cycle_t T0, T1, T2, delta;
+	cycle_t T0, T1, delta;
 	unsigned long flags;
 
 	/*
@@ -142,20 +144,18 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
-	/*
-	 * Update the timestamp, because the trace entry above
-	 * might change it (it can only get larger so the latency
-	 * is fair to be reported):
-	 */
-	T2 = now(cpu);
+	spin_lock(&max_trace_lock);
 
-	delta = T2-T0;
+	/* check if we are still the max latency */
+	if (!report_latency(delta))
+		goto out_unlock;
+
+	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
 	if (data->critical_sequence != max_sequence)
-		goto out;
+		goto out_unlock;
 
 	tracing_max_latency = delta;
 	t0 = nsecs_to_usecs(T0);
@@ -189,6 +189,9 @@ check_critical_timing(struct trace_array *tr,
 
 	max_sequence++;
 
+out_unlock:
+	spin_unlock(&max_trace_lock);
+
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = now(cpu);
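The check_critical_timing() hunks close a race: two CPUs can each see
their latency beat the current max and clobber each other's update, so
the patch takes the new max_trace_lock and re-checks report_latency()
under it, letting only one winner record its trace. A minimal userspace
pthread sketch of that check / lock / re-check pattern (all names
illustrative):

#include <pthread.h>

static pthread_mutex_t max_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long max_latency;	/* stands in for tracing_max_latency */

static void record_max_trace(unsigned long long delta)
{
	max_latency = delta;	/* the kernel would also dump the trace here */
}

static void check_timing(unsigned long long delta)
{
	if (delta <= max_latency)	/* cheap unlocked first check */
		return;

	pthread_mutex_lock(&max_lock);

	/* re-check: another CPU may have raised the max meanwhile */
	if (delta > max_latency)
		record_max_trace(delta);

	pthread_mutex_unlock(&max_lock);
}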
@@ -366,14 +369,14 @@ void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
-	tracer_enabled = 1;
 	register_ftrace_function(&trace_ops);
+	tracer_enabled = 1;
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
-	unregister_ftrace_function(&trace_ops);
 	tracer_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
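Finally, the start/stop hunks reorder tracer_enabled relative to the
ftrace callback: the flag is raised only after the callback is
registered and cleared before it is unregistered, so the tracer is
never reported enabled while the function hook is absent. A small
userspace model of the same ordering rule; register_hook() and
unregister_hook() are stand-ins for register_ftrace_function() and its
counterpart:

static volatile int tracer_on;

static void register_hook(void) { /* make the callback reachable */ }
static void unregister_hook(void) { /* make the callback unreachable */ }

static void tracer_start(void)
{
	register_hook();	/* hook first: it sees tracer_on == 0 until... */
	tracer_on = 1;		/* ...everything is in place */
}

static void tracer_stop(void)
{
	tracer_on = 0;		/* stop tracing work first... */
	unregister_hook();	/* ...then tear the hook down */
}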