author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:49 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:55:55 -0400
commit	6fb44b717c10ecf37beaaebd312f3afa93fed714
tree	a86ec44e761ac9ea2cae992fb6351cbfbea434ac
parent	2a2cc8f7c4d0dfd75720867f7dc58d24f075edfc
ftrace: add trace_function api for other tracers to use
A new check was added in the ftrace function that won't trace if the per-CPU trace buffer is disabled. Unfortunately, other tracers used ftrace() to write to the buffer after they had already disabled it, so the new disable check turns those calls into nops. This patch changes the unchecked __ftrace into a new API for the other tracers to use, called "trace_function". The other tracers call this interface instead when the per-CPU trace buffer is already disabled.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
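For illustration, here is a minimal sketch of the calling convention this patch establishes, modeled on the function_trace_call() and irqsoff_tracer_call() hunks below (example_tracer_call is a hypothetical name, not part of the patch). A tracer that has already claimed the per-CPU buffer via data->disabled must use trace_function() directly, because the checked ftrace() entry point would now drop the event:

static void notrace
example_tracer_call(struct trace_array *tr, struct trace_array_cpu *data,
		    unsigned long ip, unsigned long parent_ip)
{
	unsigned long flags;
	long disabled;

	local_irq_save(flags);
	/* Claim the per-CPU buffer; reentrant callers see a count > 1 */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		/*
		 * The buffer is marked disabled by us, so ftrace()
		 * would be a nop here; call the unchecked
		 * trace_function() API instead.
		 */
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}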
-rw-r--r--	kernel/trace/trace.c	8
-rw-r--r--	kernel/trace/trace.h	5
-rw-r--r--	kernel/trace/trace_irqsoff.c	10
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	5
4 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d041578affd0..9022c357032a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -641,8 +641,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 }
 
 notrace void
-__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-	 unsigned long ip, unsigned long parent_ip, unsigned long flags)
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
 	struct trace_entry *entry;
 	unsigned long irq_flags;
@@ -664,7 +664,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		__ftrace(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags);
 }
 
 notrace void
@@ -730,7 +730,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		__ftrace(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7bdfef35c05a..faf9f67246ac 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -169,6 +169,11 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg1,
 		   unsigned long arg2,
 		   unsigned long arg3);
+void trace_function(struct trace_array *tr,
+		    struct trace_array_cpu *data,
+		    unsigned long ip,
+		    unsigned long parent_ip,
+		    unsigned long flags);
 
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d2a6e6f1ad2d..3269f4ff5172 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		ftrace(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags);
 
 	atomic_dec(&data->disabled);
 }
@@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -188,7 +188,7 @@ out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void notrace
@@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	ftrace(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags);
 
 	__get_cpu_var(tracing_cpu) = 1;
 
@@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	atomic_inc(&data->disabled);
 	local_save_flags(flags);
-	ftrace(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index b7df825c3af9..3549e4154f1f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -85,7 +85,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -192,7 +192,8 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 	local_save_flags(flags);
 
 	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-	ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(tr, tr->data[wakeup_cpu],
+		       CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
 	spin_unlock(&wakeup_lock);