path: root/kernel/trace/trace_functions_graph.c
author	Frederic Weisbecker <fweisbec@gmail.com>	2009-07-29 12:59:58 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-08-06 01:28:06 -0400
commit	1a0799a8fef5acc6503f9c5e79b2cd003317826c (patch)
tree	8aec6e623981cd8505de53752234d9f6b5d94843 /kernel/trace/trace_functions_graph.c
parent	82e04af498a85ba425efe77580b7ba08234411df (diff)
tracing/function-graph-tracer: Move graph event insertion helpers in the graph tracer file
The function graph event helpers which insert the function entry and return events into the ring buffer currently reside in trace.c. But that file is quite overloaded and the right place for these helpers is in the function graph tracer file. Move them to trace_functions_graph.c.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
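Note: with the graph_array pointer introduced below, graph events are only recorded once a tracer has installed its trace_array, either through graph_trace_init() or the new set_graph_array() helper. As an illustration of how a caller of the moved helpers would drive them, here is a minimal sketch modelled on graph_trace_init() from this patch; the function name my_tracer_init() is hypothetical, everything else is taken from the diff below:

/* Sketch only: my_tracer_init() is a made-up name; set_graph_array() is
 * added by this patch, trace_graph_entry() and trace_graph_return() are
 * the helpers it moves into trace_functions_graph.c. */
static int my_tracer_init(struct trace_array *tr)
{
	int ret;

	/* Point the graph event helpers at this tracer's buffer;
	 * trace_graph_entry() returns early while graph_array is NULL. */
	set_graph_array(tr);

	/* Hook the function graph entry/return callbacks. */
	ret = register_ftrace_graph(&trace_graph_return, &trace_graph_entry);
	if (ret)
		return ret;

	tracing_start_cmdline_record();
	return 0;
}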
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	122
1 file changed, 119 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e30472da15d..f97244a41a4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
 
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,121 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
+static int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_entry;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return 0;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return 0;
+	entry = ring_buffer_event_data(event);
+	entry->graph_ent = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+
+	return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	if (unlikely(!tr))
+		return 0;
+
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
+	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_exit;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->ret = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
-	int ret = register_ftrace_graph(&trace_graph_return,
-					&trace_graph_entry);
+	int ret;
+
+	graph_array = tr;
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
@@ -177,6 +288,11 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();