path: root/kernel/trace/trace_irqsoff.c
author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:49 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:55:55 -0400
commit	6fb44b717c10ecf37beaaebd312f3afa93fed714 (patch)
tree	a86ec44e761ac9ea2cae992fb6351cbfbea434ac /kernel/trace/trace_irqsoff.c
parent	2a2cc8f7c4d0dfd75720867f7dc58d24f075edfc (diff)
ftrace: add trace_function api for other tracers to use
A new check was added to the ftrace function that won't trace if the CPU trace buffer is disabled. Unfortunately, other tracers used ftrace() to write to the buffer after they had already disabled it, so the new disable check turns those calls into a nop. This patch changes the __ftrace variant, which is called without the check, into a new API for the other tracers to use, called "trace_function". The other tracers use this interface instead when the trace CPU buffer is already disabled.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
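For context, a minimal sketch of the calling convention the other tracers follow, modeled on irqsoff_tracer_call() in the diff below; my_tracer_call() and my_trace_array are hypothetical stand-ins for a real tracer's callback and state, and the snippet assumes the in-kernel tracer context of this era (local "trace.h" header).

/*
 * Illustrative sketch only (not part of this patch): a tracer bumps its
 * per-CPU disabled counter and then records an entry with the new
 * trace_function() API. my_tracer_call() and my_trace_array are
 * hypothetical names used for illustration.
 */
#include "trace.h"	/* local tracer header; assumed context */

static void my_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = my_trace_array;	/* hypothetical tracer state */
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	/* ftrace() would refuse to log while this count is non-zero ... */
	disabled = atomic_inc_return(&data->disabled);

	/* ... but trace_function() skips that check and writes the entry. */
	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}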
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--	kernel/trace/trace_irqsoff.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d2a6e6f1ad2d..3269f4ff5172 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		ftrace(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags);
 
 	atomic_dec(&data->disabled);
 }
@@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -188,7 +188,7 @@ out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void notrace
@@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	ftrace(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags);
 
 	__get_cpu_var(tracing_cpu) = 1;
 
@@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	atomic_inc(&data->disabled);
 	local_save_flags(flags);
-	ftrace(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);