author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-25 18:57:25 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-25 19:59:45 -0500
commit     287b6e68ca7209caec40b2f44f837c580a413bae (patch)
tree       b0867d75868f6049dc5747bd39fdae2d477dde66
parent     fb52607afcd0629776f1dc9e657647ceae81dd50 (diff)
tracing/function-return-tracer: set a more human readable output
Impact: feature

This patch gives the function graph tracer a C-like output. To that end, we now call two handlers for each function: one on entry and one on return. This way we can draw a well-ordered call stack.

The pid of the previous trace is loosely stored and compared against the pid of the current trace to detect a context switch. Without this little feature, the call tree would appear broken at some locations. We could use the sched_tracer to capture these sched events, but this way of processing is much simpler.

Two spaces were chosen for indentation so that deep call chains still fit on the screen. The execution time in nanoseconds is printed just after the closing brace; it seems easier this way to find the corresponding function. If the time were printed in a first column, it would not be so easy to match it to a function called at a deep depth.

I plan to output the return value, but on 32-bit CPUs the return value can be 32 or 64 bits, and it is difficult to guess which case applies. I don't know what the better solution on x86-32 would be: print only eax (low part) or edx (high part) as well. Actually it is the same problem when a function returns an 8-bit value: the high part of eax could contain junk values...

Here is an example of trace:

sys_read() {
  fget_light() {
  } 526
  vfs_read() {
    rw_verify_area() {
      security_file_permission() {
        cap_file_permission() {
        } 519
      } 1564
    } 2640
    do_sync_read() {
      pipe_read() {
        __might_sleep() {
        } 511
        pipe_wait() {
          prepare_to_wait() {
          } 760
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 504
                  } 1587
                  clear_buddies() {
                  } 512
                  add_cfs_task_weight() {
                  } 519
                  update_min_vruntime() {
                  } 511
                } 5602
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 496
                  } 1631
                  clear_buddies() {
                  } 496
                  update_min_vruntime() {
                  } 527
                } 4580
                hrtick_update() {
                  hrtick_start_fair() {
                  } 488
                } 1489
              } 13700
            } 14949
          } 16016
          msecs_to_jiffies() {
          } 496
          put_prev_task_fair() {
          } 504
          pick_next_task_fair() {
          } 489
          pick_next_task_rt() {
          } 496
          pick_next_task_fair() {
          } 489
          pick_next_task_idle() {
          } 489

------------8<---------- thread 4 ------------8<----------

          finish_task_switch() {
          } 1203
          do_softirq() {
            __do_softirq() {
              __local_bh_disable() {
              } 669
              rcu_process_callbacks() {
                __rcu_process_callbacks() {
                  cpu_quiet() {
                    rcu_start_batch() {
                    } 503
                  } 1647
                } 3128
                __rcu_process_callbacks() {
                } 542
              } 5362
              _local_bh_enable() {
              } 587
            } 8880
          } 9986
          kthread_should_stop() {
          } 669
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    calc_delta_mine() {
                    } 511
                    update_min_vruntime() {
                    } 511
                  } 2813

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
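As a rough illustration of the printing scheme described in the log above, here is a small userspace sketch. It is not part of the patch: the struct fields mirror ftrace_graph_ent/ftrace_graph_ret from the diff below, but the type and function names, the fake events and main() are made up.

/*
 * Illustrative userspace sketch (not kernel code): how an entry/return
 * handler pair renders the C-like call tree, with the same 2-space
 * indent per depth level and the duration after the closing brace.
 */
#include <stdio.h>

#define TRACE_GRAPH_INDENT	2	/* same 2-space indent as the patch */

struct graph_ent { const char *func; int depth; };
struct graph_ret { int depth; unsigned long long calltime, rettime; };

static void print_entry(const struct graph_ent *e)
{
	printf("%*s%s() {\n", e->depth * TRACE_GRAPH_INDENT, "", e->func);
}

static void print_return(const struct graph_ret *r)
{
	/* the duration goes after the closing brace, as in the new output */
	printf("%*s} %llu\n", r->depth * TRACE_GRAPH_INDENT, "",
	       r->rettime - r->calltime);
}

int main(void)
{
	const struct graph_ent e0 = { "sys_read", 0 }, e1 = { "fget_light", 1 };
	const struct graph_ret r1 = { 1, 100, 626 }, r0 = { 0, 0, 1200 };

	print_entry(&e0);	/* sys_read() {     */
	print_entry(&e1);	/*   fget_light() { */
	print_return(&r1);	/*   } 526          */
	print_return(&r0);	/* } 1200           */
	return 0;
}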
-rw-r--r--  arch/x86/kernel/ftrace.c                32
-rw-r--r--  include/linux/ftrace.h                  25
-rw-r--r--  kernel/trace/ftrace.c                   30
-rw-r--r--  kernel/trace/trace.c                    67
-rw-r--r--  kernel/trace/trace.h                    28
-rw-r--r--  kernel/trace/trace_functions_graph.c   104
-rw-r--r--  kernel/trace/trace_functions_return.c   98
7 files changed, 208 insertions, 176 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3595a4c14aba..26b2d92d48b3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -347,7 +347,7 @@ void ftrace_nmi_exit(void)
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +365,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
@@ -390,12 +391,13 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_graph_ret trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_function(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
@@ -407,6 +409,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -452,8 +455,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
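The push/pop contract above is easier to see in isolation. Here is a userspace toy model, illustrative only: a plain array stands in for current->ret_stack, there is a single task, and, as in the patch, the stack index doubles as the call depth handed to the entry and return events.

/* Userspace toy model of the per-task return stack (not kernel code). */
#include <stdio.h>
#include <errno.h>

#define RETFUNC_DEPTH	50	/* cf. FTRACE_RETFUNC_DEPTH */

struct ret_stack_ent { unsigned long ret, func; unsigned long long calltime; };

static struct ret_stack_ent ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;	/* -1 == empty */

static int push_ret(unsigned long ret, unsigned long long time,
		    unsigned long func, int *depth)
{
	if (curr_ret_stack == RETFUNC_DEPTH - 1)
		return -EBUSY;	/* full: the real code counts an overrun */

	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	ret_stack[curr_ret_stack].calltime = time;
	*depth = curr_ret_stack;	/* the entry event learns its depth */
	return 0;
}

static void pop_ret(unsigned long *ret, int *depth)
{
	*ret = ret_stack[curr_ret_stack].ret;
	*depth = curr_ret_stack;	/* the return event reuses that depth */
	curr_ret_stack--;
}

int main(void)
{
	unsigned long ret;
	int depth;

	if (push_ret(0xc0100000UL, 0, 0xc0200000UL, &depth) == 0)
		printf("entry at depth %d\n", depth);
	pop_ret(&ret, &depth);
	printf("return at depth %d to %#lx\n", depth, ret);
	return 0;
}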
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b4ac734ad8d6..fc2d54987198 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -313,26 +313,39 @@ ftrace_init_module(struct module *mod,
 
 
 /*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+	unsigned long func; /* Current function */
+	int depth;
+};
+
+/*
  * Structure that defines a return function trace.
  */
 struct ftrace_graph_ret {
-	unsigned long ret; /* Return address */
 	unsigned long func; /* Current function */
 	unsigned long long calltime;
 	unsigned long long rettime;
 	/* Number of functions that overran the depth limit for current task */
 	unsigned long overrun;
+	int depth;
 };
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of a callback handler of tracing return function */
-typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+				trace_func_graph_ent_t entryfunc);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
 
-extern int register_ftrace_graph(trace_function_graph_t func);
-/* The current handler in use */
-extern trace_function_graph_t ftrace_graph_function;
 extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
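With the header in this shape, a client supplies two hooks whose signatures match the new typedefs. A minimal sketch follows; my_entry/my_return are invented names, and only the struct fields declared above are used.

#include <linux/ftrace.h>

static void my_entry(struct ftrace_graph_ent *trace)
{
	/* runs at function entry; trace->func and trace->depth are filled */
}

static void my_return(struct ftrace_graph_ret *trace)
{
	/* runs at function return; the duration is rettime - calltime */
	unsigned long long duration = trace->rettime - trace->calltime;
	(void)duration;	/* a real tracer would record this somewhere */
}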
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9e19976af727..7e2d3b91692d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,12 +1498,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
-
-/* The callback that hooks the return of a function */
-trace_function_graph_t ftrace_graph_function =
-			(trace_function_graph_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1569,7 +1570,8 @@ static int start_graph_tracing(void)
 	return ret;
 }
 
-int register_ftrace_graph(trace_function_graph_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
@@ -1583,14 +1585,15 @@ int register_ftrace_graph(trace_function_graph_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-	atomic_inc(&ftrace_retfunc_active);
+	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_graph_function = func;
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
 	ftrace_startup();
 
 out:
@@ -1602,8 +1605,9 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
 	ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1614,7 +1618,7 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1638,5 +1642,3 @@ void ftrace_graph_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
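Registration then works in pairs. A lifecycle sketch, reusing the hypothetical my_entry/my_return from the sketch after the ftrace.h diff above; per the code in this file, a second registration fails with -EBUSY while another tracer holds the hooks.

static int my_tracer_start(void)
{
	/* installs both hooks, or fails (e.g. -EBUSY) if already taken */
	return register_ftrace_graph(&my_return, &my_entry);
}

static void my_tracer_stop(void)
{
	/* both hooks fall back to ftrace_stub */
	unregister_ftrace_graph();
}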
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f21ab2c68fd4..9d5f7c94f251 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -879,14 +879,38 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_graph_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +922,8 @@ static void __trace_function_graph(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1178,7 +1198,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,7 +1213,28 @@ void trace_function_graph(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_graph(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 			trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
 		return print_graph_function(iter);
-		break;
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 72b5ef868765..ffe1bb1eb620 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -25,7 +25,8 @@ enum trace_type {
 	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
-	TRACE_FN_RET,
+	TRACE_GRAPH_RET,
+	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_BTS,
 
@@ -56,14 +57,16 @@ struct ftrace_entry {
 	unsigned long parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+	struct trace_entry ent;
+	struct ftrace_graph_ent graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_graph_entry {
+struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
-	unsigned long ip;
-	unsigned long parent_ip;
-	unsigned long long calltime;
-	unsigned long long rettime;
-	unsigned long overrun;
+	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
 
@@ -264,7 +267,10 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-		IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
+			  TRACE_GRAPH_ENT);	\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
+			  TRACE_GRAPH_RET);	\
 		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
 		__ftrace_bad_type();					\
 	} while (0)
@@ -397,9 +403,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
-void
-trace_function_graph(struct ftrace_graph_ret *trace);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
 	       unsigned long to);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f5bad4624d2b..b6f0cc2a00cb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -13,6 +13,7 @@
 
 #include "trace.h"
 
+#define TRACE_GRAPH_INDENT	2
 
 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
 static struct tracer_opt trace_opts[] = {
@@ -26,6 +27,8 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
+/* pid on the last trace processed */
+static pid_t last_pid = -1;
 
 static int graph_trace_init(struct trace_array *tr)
 {
@@ -33,7 +36,8 @@ static int graph_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	return register_ftrace_graph(&trace_function_graph);
+	return register_ftrace_graph(&trace_graph_return,
+					&trace_graph_entry);
 }
 
 static void graph_trace_reset(struct trace_array *tr)
@@ -41,45 +45,97 @@ static void graph_trace_reset(struct trace_array *tr)
 	unregister_ftrace_graph();
 }
 
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid)
+{
+	if (last_pid != -1 && last_pid == pid)
+		return 1;
 
-enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+	last_pid = pid;
+	return trace_seq_printf(s, "\n------------8<---------- thread %d"
+				   " ------------8<----------\n\n",
+				   pid);
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
+		  struct trace_entry *ent)
 {
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_graph_entry *field;
+	int i;
 	int ret;
 
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
+	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = seq_print_ip_sym(s, call->func, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "() {\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+		   struct trace_entry *ent)
+{
+	int i;
+	int ret;
+
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, " (%llu ns)",
-					field->rettime - field->calltime);
+	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "} ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, "\n");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+					trace->overrun);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
 
-		return TRACE_TYPE_HANDLED;
+	switch (entry->type) {
+	case TRACE_GRAPH_ENT: {
+		struct ftrace_graph_ent_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_entry(&field->graph_ent, s, entry);
+	}
+	case TRACE_GRAPH_RET: {
+		struct ftrace_graph_ret_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_return(&field->ret, s, entry);
+	}
+	default:
+		return TRACE_TYPE_UNHANDLED;
 	}
-	return TRACE_TYPE_UNHANDLED;
 }
 
 static struct tracer graph_trace __read_mostly = {
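The pid heuristic added above is small enough to model in userspace. An illustrative toy follows: the marker string is the one verif_pid() emits, while the event loop and fake pids are made up.

#include <stdio.h>

static int last_pid = -1;

static void verif_pid(int pid)
{
	if (last_pid != -1 && last_pid == pid)
		return;	/* same thread as the previous event: no marker */

	last_pid = pid;
	printf("\n------------8<---------- thread %d"
	       " ------------8<----------\n\n", pid);
}

int main(void)
{
	const int pids[] = { 3, 3, 4, 4, 3 };	/* fake per-event pids */
	int i;

	for (i = 0; i < 5; i++) {
		verif_pid(pids[i]);
		printf("event from pid %d\n", pids[i]);
	}
	return 0;
}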
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
deleted file mode 100644
index e00d64509c9c..000000000000
--- a/kernel/trace/trace_functions_return.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Function return tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- * Mostly borrowed from function tracer which
- * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/fs.h>
-
-#include "trace.h"
-
-
-#define TRACE_RETURN_PRINT_OVERRUN	0x1
-static struct tracer_opt trace_opts[] = {
-	/* Display overruns or not */
-	{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val = 0, /* Don't display overruns by default */
-	.opts = trace_opts
-};
-
-
-static int return_trace_init(struct trace_array *tr)
-{
-	int cpu;
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-
-	return register_ftrace_return(&trace_function_return);
-}
-
-static void return_trace_reset(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-
-enum print_line_t
-print_return_function(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_ret_entry *field;
-	int ret;
-
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " (%llu ns)",
-					field->rettime - field->calltime);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-
-		ret = trace_seq_printf(s, "\n");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static struct tracer return_trace __read_mostly = {
-	.name	    = "return",
-	.init	    = return_trace_init,
-	.reset	    = return_trace_reset,
-	.print_line = print_return_function,
-	.flags	    = &tracer_flags,
-};
-
-static __init int init_return_trace(void)
-{
-	return register_tracer(&return_trace);
-}
-
-device_initcall(init_return_trace);