author     Frederic Weisbecker <fweisbec@gmail.com>    2009-07-29 12:59:58 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>    2009-08-06 01:28:06 -0400
commit     1a0799a8fef5acc6503f9c5e79b2cd003317826c
tree       8aec6e623981cd8505de53752234d9f6b5d94843
parent     82e04af498a85ba425efe77580b7ba08234411df
tracing/function-graph-tracer: Move graph event insertion helpers in the graph tracer file
The function graph event helpers which insert the function entry and return events into the ring buffer currently reside in trace.c. But this file is quite overloaded and the right place for these helpers is in the function graph tracer file. Then move them to trace_functions_graph.c.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
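Because the moved helpers no longer sit next to the global_trace definition, they now record into a file-local graph_array pointer that must be set (by graph_trace_init() or the new set_graph_array()) before the callbacks fire. The following is a minimal, self-contained userspace sketch of that hand-off pattern, not kernel code: the struct trace_array stand-in, trace_graph_entry_sketch() and the printf body are hypothetical illustrations only.

	#include <stdio.h>

	/* Stand-in for the kernel's struct trace_array (hypothetical, illustration only). */
	struct trace_array {
		const char *name;
	};

	/* File-scope target, mirroring the new graph_array in trace_functions_graph.c. */
	static struct trace_array *graph_array;

	/* Mirrors set_graph_array(): callers pick the array before registering callbacks. */
	static void set_graph_array(struct trace_array *tr)
	{
		graph_array = tr;
	}

	/* Sketch of the entry callback: bail out when no array has been configured yet. */
	static int trace_graph_entry_sketch(unsigned long func)
	{
		struct trace_array *tr = graph_array;

		if (!tr)
			return 0;	/* nothing to record into */

		printf("entry %#lx -> %s\n", func, tr->name);
		return 1;
	}

	int main(void)
	{
		struct trace_array global = { .name = "global_trace" };

		trace_graph_entry_sketch(0x1000);	/* dropped: array not set yet */
		set_graph_array(&global);
		trace_graph_entry_sketch(0x1000);	/* recorded */
		return 0;
	}

In the patch below, graph_trace_init() performs this assignment for the tracer itself, while the selftest calls set_graph_array(tr) directly because it registers its own watchdog entry callback instead of going through graph_trace_init().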
-rw-r--r--  kernel/trace/trace.c                  | 110
-rw-r--r--  kernel/trace/trace.h                  |   1
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 122
-rw-r--r--  kernel/trace/trace_selftest.c         |   1
4 files changed, 121 insertions(+), 113 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1b73acb40e56..0cfd1a62def1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -942,54 +942,6 @@ trace_function(struct trace_array *tr,
 	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
-				struct ftrace_graph_ent *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_entry;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return 0;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-
-	return 1;
-}
-
-static void __trace_graph_return(struct trace_array *tr,
-				struct ftrace_graph_ret *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_exit;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1129,68 +1081,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int ret;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else {
-		ret = 0;
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 116524d62366..9301f1263c5c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -471,6 +471,7 @@ void trace_function(struct trace_array *tr,
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
+void set_graph_array(struct trace_array *tr);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e30472da15d5..f97244a41a4f 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
 
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,121 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
+static int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_entry;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return 0;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return 0;
+	entry = ring_buffer_event_data(event);
+	entry->graph_ent = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+
+	return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	if (unlikely(!tr))
+		return 0;
+
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
+	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_exit;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->ret = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
-	int ret = register_ftrace_graph(&trace_graph_return,
-					&trace_graph_entry);
+	int ret;
+
+	graph_array = tr;
+	ret = register_ftrace_graph(&trace_graph_return,
+					&trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
@@ -177,6 +288,11 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 00dd6485bdd7..d2cdbabb4ead 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -288,6 +288,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	 * to detect and recover from possible hangs
 	 */
 	tracing_reset_online_cpus(tr);
+	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				     &trace_graph_entry_watchdog);
 	if (ret) {