about summary refs log tree commit diff stats
path: root/kernel/trace/trace_functions.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2009-01-15 20:40:23 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-16 06:17:10 -0500
commitbb3c3c95f330f7bf16e33b002e48882616089db1 (patch)
treee2f4045f002fdb96adc3a82cbf24436063c82ce8 /kernel/trace/trace_functions.c
parent5361499101306cfb776c3cfa0f69d0479bc63868 (diff)
ftrace: move function tracer functions out of trace.c
Impact: clean up of trace.c. The function tracer functions were put in trace.c because they needed to share static variables that were in trace.c. Since then, those variables have become global for various reasons. This patch moves the function tracer functions into trace_functions.c, where they belong. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--kernel/trace/trace_functions.c84
1 file changed, 83 insertions, 1 deletion
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3a5fa08cedb0..2dce3c7370d1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array *func_trace;
20 20
21static void start_function_trace(struct trace_array *tr) 21static void start_function_trace(struct trace_array *tr)
22{ 22{
23 func_trace = tr;
23 tr->cpu = get_cpu(); 24 tr->cpu = get_cpu();
24 tracing_reset_online_cpus(tr); 25 tracing_reset_online_cpus(tr);
25 put_cpu(); 26 put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)
36 37
/*
 * Tracer init callback: begin function tracing on @tr.
 * Always succeeds (returns 0); the trace_array bookkeeping is
 * handled inside start_function_trace().
 */
static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}
@@ -52,6 +52,64 @@ static void function_trace_start(struct trace_array *tr)
52} 52}
53 53
/*
 * Function tracer callback used when only preemption (not interrupts)
 * is disabled around the recording.  Logs ip/parent_ip into the
 * per-cpu trace buffer unless tracing is off or this cpu is already
 * inside the tracer (per-cpu data->disabled acts as a recursion guard).
 */
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	/* Fast exit while the function tracer is switched off. */
	if (unlikely(!ftrace_function_enabled))
		return;

	/* Capture preempt count before we disable preemption ourselves. */
	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* disabled == 1 means we are the only path tracing on this cpu. */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
80
/*
 * Default function tracer callback.  Disables interrupts (not just
 * preemption) around the record, using per-cpu data->disabled as a
 * recursion guard; the entry is only written when this cpu is not
 * already inside the tracer.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	/* Fast exit while the function tracer is switched off. */
	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
111
112static void
55function_stack_trace_call(unsigned long ip, unsigned long parent_ip) 113function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
56{ 114{
57 struct trace_array *tr = func_trace; 115 struct trace_array *tr = func_trace;
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
90 local_irq_restore(flags); 148 local_irq_restore(flags);
91} 149}
92 150
151
/*
 * Ops registered with ftrace for the function tracer.  .func defaults
 * to function_trace_call; tracing_start_function_trace() may switch it
 * to the preempt-only variant before registering.
 */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
156
157void tracing_start_function_trace(void)
158{
159 ftrace_function_enabled = 0;
160
161 if (trace_flags & TRACE_ITER_PREEMPTONLY)
162 trace_ops.func = function_trace_call_preempt_only;
163 else
164 trace_ops.func = function_trace_call;
165
166 register_ftrace_function(&trace_ops);
167 ftrace_function_enabled = 1;
168}
169
/*
 * Disable the function tracer and detach its callback from ftrace.
 * The enabled flag is cleared first so callbacks that check it can
 * bail out before the ops are unregistered.
 */
void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
93static struct ftrace_ops trace_stack_ops __read_mostly = 175static struct ftrace_ops trace_stack_ops __read_mostly =
94{ 176{
95 .func = function_stack_trace_call, 177 .func = function_stack_trace_call,