diff options
author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2018-11-19 20:54:08 -0500 |
---|---|---|
committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2018-12-08 20:54:07 -0500 |
commit | b0e21a61d3196762b61f43ae994ffd255f646774 (patch) | |
tree | c8d44c604668ecde57890429bc2b6f4fd81cc04f /kernel/trace/ftrace.c | |
parent | 76b42b63ed0d004961097d3a3cd979129d4afd26 (diff) |
function_graph: Have profiler use new helper ftrace_graph_get_ret_stack()
The ret_stack processing is going to change, and that is going
to break anything that is accessing the ret_stack directly. One user is the
function graph profiler. By using the ftrace_graph_get_ret_stack() helper
function, the profiler can access the ret_stack entry without relying on the
implementation details of the stack itself.
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 21 |
1 file changed, 11 insertions, 10 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index d06fe588e650..8ef9fc226037 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -792,7 +792,7 @@ void ftrace_graph_graph_time_control(bool enable) | |||
792 | 792 | ||
793 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | 793 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
794 | { | 794 | { |
795 | int index = current->curr_ret_stack; | 795 | struct ftrace_ret_stack *ret_stack; |
796 | 796 | ||
797 | function_profile_call(trace->func, 0, NULL, NULL); | 797 | function_profile_call(trace->func, 0, NULL, NULL); |
798 | 798 | ||
@@ -800,14 +800,16 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace) | |||
800 | if (!current->ret_stack) | 800 | if (!current->ret_stack) |
801 | return 0; | 801 | return 0; |
802 | 802 | ||
803 | if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) | 803 | ret_stack = ftrace_graph_get_ret_stack(current, 0); |
804 | current->ret_stack[index].subtime = 0; | 804 | if (ret_stack) |
805 | ret_stack->subtime = 0; | ||
805 | 806 | ||
806 | return 1; | 807 | return 1; |
807 | } | 808 | } |
808 | 809 | ||
809 | static void profile_graph_return(struct ftrace_graph_ret *trace) | 810 | static void profile_graph_return(struct ftrace_graph_ret *trace) |
810 | { | 811 | { |
812 | struct ftrace_ret_stack *ret_stack; | ||
811 | struct ftrace_profile_stat *stat; | 813 | struct ftrace_profile_stat *stat; |
812 | unsigned long long calltime; | 814 | unsigned long long calltime; |
813 | struct ftrace_profile *rec; | 815 | struct ftrace_profile *rec; |
@@ -825,16 +827,15 @@ static void profile_graph_return(struct ftrace_graph_ret *trace) | |||
825 | calltime = trace->rettime - trace->calltime; | 827 | calltime = trace->rettime - trace->calltime; |
826 | 828 | ||
827 | if (!fgraph_graph_time) { | 829 | if (!fgraph_graph_time) { |
828 | int index; | ||
829 | |||
830 | index = current->curr_ret_stack; | ||
831 | 830 | ||
832 | /* Append this call time to the parent time to subtract */ | 831 | /* Append this call time to the parent time to subtract */ |
833 | if (index) | 832 | ret_stack = ftrace_graph_get_ret_stack(current, 1); |
834 | current->ret_stack[index - 1].subtime += calltime; | 833 | if (ret_stack) |
834 | ret_stack->subtime += calltime; | ||
835 | 835 | ||
836 | if (current->ret_stack[index].subtime < calltime) | 836 | ret_stack = ftrace_graph_get_ret_stack(current, 0); |
837 | calltime -= current->ret_stack[index].subtime; | 837 | if (ret_stack && ret_stack->subtime < calltime) |
838 | calltime -= ret_stack->subtime; | ||
838 | else | 839 | else |
839 | calltime = 0; | 840 | calltime = 0; |
840 | } | 841 | } |