author     Steven Rostedt <srostedt@redhat.com>  2009-03-24 23:17:58 -0400
committer  Steven Rostedt <srostedt@redhat.com>  2009-03-24 23:41:11 -0400
commit     a2a16d6a3156ef7309ca7328a20c35df9418e670
tree       8f21d9c56eb34f9bd1057929661c96d04329dd69
parent     cafb168a1c92e4c9e1731fe3d666c39611762c49
function-graph: add option to calculate graph time or not
Graph time is the time that a function spends executing another
function. Thus if function A calls B, then with graph-time set the
time reported for A includes the time spent in B. This is the default
behavior. But if graph-time is off, the time spent executing B is
subtracted from A.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
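
To put concrete numbers on the accounting: suppose A runs for 10000 ns in
total and spends 3000 ns of that inside a call to B. With graph-time on, A
is charged the full 10000 ns; with graph-time off, B's 3000 ns are charged
to A's subtime and subtracted, so A is charged 7000 ns. The userspace
sketch below (hypothetical names and made-up numbers, not kernel code)
mirrors the subtime bookkeeping this patch adds to profile_graph_return():

/*
 * Minimal userspace sketch of the graph-time accounting.
 */
#include <stdio.h>

/* per-depth child time, playing the role of current->ret_stack[].subtime */
static unsigned long long subtime[16];

/* Called when the function at `depth` returns after running `duration` ns. */
static unsigned long long func_return(int depth, unsigned long long duration,
				      int graph_time)
{
	if (graph_time)
		return duration;	/* default: time in children is included */

	/* append this call's time to the parent's time to subtract */
	if (depth)
		subtime[depth - 1] += duration;

	return duration > subtime[depth] ? duration - subtime[depth] : 0;
}

int main(void)
{
	/* A (depth 0) calls B (depth 1): B runs 3000 ns, A runs 10000 ns total */
	printf("graph-time on : A = %llu ns\n", func_return(0, 10000, 1)); /* 10000 */

	printf("                B = %llu ns\n", func_return(1, 3000, 0));  /*  3000 */
	printf("graph-time off: A = %llu ns\n", func_return(0, 10000, 0)); /*  7000 */
	return 0;
}

Since the patch registers "graph-time" in trace_options, the flag should be
toggled like any other trace option through the options interface in the
tracing directory; the default value keeps the old behavior.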
-rw-r--r--  include/linux/ftrace.h               |  3
-rw-r--r--  kernel/trace/ftrace.c                | 21
-rw-r--r--  kernel/trace/trace.c                 |  4
-rw-r--r--  kernel/trace/trace.h                 |  1
-rw-r--r--  kernel/trace/trace_functions_graph.c |  8

5 files changed, 29 insertions, 8 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 015a3d22cf74..9e0a8d245e55 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -365,6 +365,7 @@ struct ftrace_ret_stack {
 	unsigned long ret;
 	unsigned long func;
 	unsigned long long calltime;
+	unsigned long long subtime;
 };
 
 /*
@@ -376,8 +377,6 @@ extern void return_to_handler(void);
 
 extern int
 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
-extern void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
 
 /*
  * Sometimes we don't want to trace a function with the function
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ed1fc5021d44..71e5faef12ab 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -604,6 +604,7 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
 static void profile_graph_return(struct ftrace_graph_ret *trace)
 {
 	struct ftrace_profile_stat *stat;
+	unsigned long long calltime;
 	struct ftrace_profile *rec;
 	unsigned long flags;
 
@@ -612,9 +613,27 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	if (!stat->hash)
 		goto out;
 
+	calltime = trace->rettime - trace->calltime;
+
+	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+		int index;
+
+		index = trace->depth;
+
+		/* Append this call time to the parent time to subtract */
+		if (index)
+			current->ret_stack[index - 1].subtime += calltime;
+
+		if (current->ret_stack[index].subtime < calltime)
+			calltime -= current->ret_stack[index].subtime;
+		else
+			calltime = 0;
+	}
+
 	rec = ftrace_find_profiled_func(stat, trace->func);
 	if (rec)
-		rec->time += trace->rettime - trace->calltime;
+		rec->time += calltime;
+
 out:
 	local_irq_restore(flags);
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 821bf49771d4..5d1a16cae376 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+	TRACE_ITER_GRAPH_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -317,6 +318,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"global-clock",
 	"sleep-time",
+	"graph-time",
 	NULL
 };
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c66ca3b66050..e3429a8ab059 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -685,6 +685,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT = 0x40000,
 	TRACE_ITER_GLOBAL_CLK = 0x80000,
 	TRACE_ITER_SLEEP_TIME = 0x100000,
+	TRACE_ITER_GRAPH_TIME = 0x200000,
 };
 
 /*
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 85bba0f018b0..10f6ad7d85f6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
+	current->ret_stack[index].subtime = 0;
 	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
+static void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
 }
 
 /*
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
 	ftrace_pop_return_trace(&trace, &ret);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
+	barrier();
+	current->curr_ret_stack--;
 
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();