author     Steven Rostedt <srostedt@redhat.com>    2010-02-12 10:15:00 -0500
committer  Steven Rostedt <rostedt@goodmis.org>    2010-02-12 10:15:00 -0500
commit     f641b1a3f9010ff2d4cf572d4e92e7e43cc34bbb (patch)
tree       3c3d7af701b42231745128781a13fc2cd1eb11c2
parent     183e6167573572dbaed0d78a53bc40ed0168060c (diff)
trace-cmd: Clean up function graph plugin
Because we now have reference counting of records, we do not
need to jump through the hoops of allocating previous records'
data to use it after reading new records.
Remove the hacks in the function graph plugin that did
all this.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 trace-ftrace.c | 22 +++---
 1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/trace-ftrace.c b/trace-ftrace.c
index 8b0646b..4e68601 100644
--- a/trace-ftrace.c
+++ b/trace-ftrace.c
@@ -224,10 +224,8 @@ fgraph_ent_handler(struct trace_seq *s, struct record *record,
 			  struct event_format *event)
 {
 	struct record *rec;
-	void *copy_data;
 	unsigned long long val, pid;
 	void *data = record->data;
-	int size = record->size;
 	int cpu = record->cpu;
 
 	if (get_field_val(s, data, event, "common_pid", &pid))
@@ -236,34 +234,20 @@ fgraph_ent_handler(struct trace_seq *s, struct record *record,
 	if (get_field_val(s, data, event, "func", &val))
 		return trace_seq_putc(s, '!');
 
-	/*
-	 * peek_data may unmap the data pointer. Copy it first.
-	 */
-	copy_data = malloc(size);
-	if (!copy_data)
-		return trace_seq_printf(s, " <FAILED TO ALLOCATE MEMORY!>");
-
-	memcpy(copy_data, data, size);
-	data = copy_data;
-
 	rec = tracecmd_peek_data(tracecmd_curr_thread_handle, cpu);
 	if (rec)
 		rec = get_return_for_leaf(s, cpu, pid, val, rec);
 
 	if (rec) {
 		/*
-		 * The record returned needs to be freed.
-		 * We also do a new peek on this CPU to update the
-		 * record cache.
+		 * If this is a leaf function, then get_return_for_leaf
+		 * returns the return of the function
 		 */
 		print_graph_entry_leaf(s, event, data, rec);
 		free_record(rec);
-		tracecmd_peek_data(tracecmd_curr_thread_handle, cpu);
 	} else
 		print_graph_nested(s, event, data);
 
-	free(data);
-
 	return 0;
 }
 
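For context on the commit message above: the removed hack existed because a later peek on the same CPU could unmap the data of the record the plugin was still using, so the plugin copied the data with malloc()/memcpy() first. With reference counting, a record the caller holds stays valid until free_record() drops the reference, which is why the copy can go away. The following is a minimal, self-contained sketch of that general idea only; the names toy_record, toy_get, toy_put and toy_peek are invented for illustration and are not trace-cmd APIs.

/*
 * Toy illustration of the reference-counting pattern described in the
 * commit message.  All names here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_record {
	int	 refcount;
	char	*data;
};

static struct toy_record *toy_get(struct toy_record *rec)
{
	rec->refcount++;
	return rec;
}

static void toy_put(struct toy_record *rec)
{
	if (--rec->refcount == 0) {
		/* Last reference dropped: only now is the data released. */
		free(rec->data);
		free(rec);
	}
}

/* Pretend "peek": hands back the cached record with an extra reference. */
static struct toy_record *toy_peek(struct toy_record *cached)
{
	return toy_get(cached);
}

int main(void)
{
	struct toy_record *cached = malloc(sizeof(*cached));

	cached->refcount = 1;			/* reference held by the cache */
	cached->data = strdup("previous record");

	/*
	 * Old style (what this commit removes): copy the data before any
	 * further peek, in case the buffer gets unmapped underneath us.
	 *
	 * New style: take a reference; the data stays valid until the
	 * caller drops it with toy_put(), no copy needed.
	 */
	struct toy_record *rec = toy_peek(cached);

	printf("still valid: %s\n", rec->data);

	toy_put(rec);				/* caller's reference */
	toy_put(cached);			/* cache's reference */

	return 0;
}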