author    Steven Rostedt <srostedt@redhat.com>  2009-01-15 20:40:23 -0500
committer Ingo Molnar <mingo@elte.hu>           2009-01-16 06:17:10 -0500
commit    bb3c3c95f330f7bf16e33b002e48882616089db1 (patch)
tree      e2f4045f002fdb96adc3a82cbf24436063c82ce8
parent    5361499101306cfb776c3cfa0f69d0479bc63868 (diff)
ftrace: move function tracer functions out of trace.c
Impact: clean up of trace.c

The function tracer functions were put in trace.c because they needed to
share static variables that were in trace.c. Since then, those variables
have become global for various reasons. This patch moves the function
tracer functions into trace_functions.c, where they belong.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
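For readers skimming the rationale: the enabling change is plain variable visibility. A helper can only move to another .c file once the state it touches is reachable from that file. Below is a minimal sketch of that pattern with hypothetical names (shared_state, core.c, feature.c -- none of these are from the kernel tree, they only illustrate the static-to-global step the commit message describes):

	/* shared.h -- hypothetical header; declares state once kept static in core.c */
	extern int shared_state;

	/* core.c -- still owns the single definition of the variable */
	int shared_state;

	/* feature.c -- the helper can now live here, since shared_state is visible */
	#include "shared.h"

	void feature_helper(void)
	{
		if (!shared_state)
			return;
		/* ... act on the shared state ... */
	}

The same thing evidently happened to ftrace_function_enabled and trace_flags: trace_functions.c references both in the hunks below, which is what lets the two callbacks and the trace_ops registration relocate wholesale.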
-rw-r--r--  kernel/trace/trace.c            |  84
-rw-r--r--  kernel/trace/trace_functions.c  |  84
2 files changed, 83 insertions(+), 85 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c54cb125228..2585ffb6c6b5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1046,65 +1046,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_TRACER
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu, resched;
-	int pc;
-
-	if (unlikely(!ftrace_function_enabled))
-		return;
-
-	pc = preempt_count();
-	resched = ftrace_preempt_disable();
-	local_save_flags(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
-
-	atomic_dec(&data->disabled);
-	ftrace_preempt_enable(resched);
-}
-
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	if (unlikely(!ftrace_function_enabled))
-		return;
-
-	/*
-	 * Need to use raw, since this must be called before the
-	 * recursive protection is performed.
-	 */
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
-	}
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
@@ -1162,31 +1103,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-};
-
-void tracing_start_function_trace(void)
-{
-	ftrace_function_enabled = 0;
-
-	if (trace_flags & TRACE_ITER_PREEMPTONLY)
-		trace_ops.func = function_trace_call_preempt_only;
-	else
-		trace_ops.func = function_trace_call;
-
-	register_ftrace_function(&trace_ops);
-	ftrace_function_enabled = 1;
-}
-
-void tracing_stop_function_trace(void)
-{
-	ftrace_function_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
-}
-#endif
-
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
 	TRACE_FILE_ANNOTATE	= 2,
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3a5fa08cedb0..2dce3c7370d1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array *func_trace;
 
 static void start_function_trace(struct trace_array *tr)
 {
+	func_trace = tr;
 	tr->cpu = get_cpu();
 	tracing_reset_online_cpus(tr);
 	put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
 	start_function_trace(tr);
 	return 0;
 }
@@ -52,6 +52,64 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, resched;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	local_save_flags(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+
+	atomic_dec(&data->disabled);
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = func_trace;
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = function_trace_call,
+};
+
+void tracing_start_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
+	register_ftrace_function(&trace_ops);
+	ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
+}
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,