diff options
author:    Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-11-18 18:44:04 -0500
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-12-08 20:54:06 -0500
commit:    761efe8a94cfcd0a3dd90f2008411550f3520b63 (patch)
tree:      eb286db507bb8c9407b197055a85adde795fb6c1 /kernel
parent:    421d1069cd85f6fee9f36984a071a73b6a431f65 (diff)
function_graph: Remove the use of FTRACE_NOTRACE_DEPTH
The curr_ret_stack is no longer set to a negative value when a function is
not to be traced by the function graph tracer. Remove the usage of
FTRACE_NOTRACE_DEPTH, as it is no longer needed.
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
 kernel/trace/fgraph.c                | 19 -------------------
 kernel/trace/trace_functions_graph.c | 11 -----------
 2 files changed, 0 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index e852b69c0e64..de887a983ac7 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c | |||
@@ -112,16 +112,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | |||
112 | 112 | ||
113 | index = current->curr_ret_stack; | 113 | index = current->curr_ret_stack; |
114 | 114 | ||
115 | /* | ||
116 | * A negative index here means that it's just returned from a | ||
117 | * notrace'd function. Recover index to get an original | ||
118 | * return address. See ftrace_push_return_trace(). | ||
119 | * | ||
120 | * TODO: Need to check whether the stack gets corrupted. | ||
121 | */ | ||
122 | if (index < 0) | ||
123 | index += FTRACE_NOTRACE_DEPTH; | ||
124 | |||
125 | if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { | 115 | if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { |
126 | ftrace_graph_stop(); | 116 | ftrace_graph_stop(); |
127 | WARN_ON(1); | 117 | WARN_ON(1); |
@@ -190,15 +180,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | |||
190 | */ | 180 | */ |
191 | barrier(); | 181 | barrier(); |
192 | current->curr_ret_stack--; | 182 | current->curr_ret_stack--; |
193 | /* | ||
194 | * The curr_ret_stack can be less than -1 only if it was | ||
195 | * filtered out and it's about to return from the function. | ||
196 | * Recover the index and continue to trace normal functions. | ||
197 | */ | ||
198 | if (current->curr_ret_stack < -1) { | ||
199 | current->curr_ret_stack += FTRACE_NOTRACE_DEPTH; | ||
200 | return ret; | ||
201 | } | ||
202 | 183 | ||
203 | if (unlikely(!ret)) { | 184 | if (unlikely(!ret)) { |
204 | ftrace_graph_stop(); | 185 | ftrace_graph_stop(); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index ecf543df943b..eaf9b1629956 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -115,9 +115,6 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, | |||
115 | if (ret != (unsigned long)return_to_handler) | 115 | if (ret != (unsigned long)return_to_handler) |
116 | return ret; | 116 | return ret; |
117 | 117 | ||
118 | if (index < -1) | ||
119 | index += FTRACE_NOTRACE_DEPTH; | ||
120 | |||
121 | if (index < 0) | 118 | if (index < 0) |
122 | return ret; | 119 | return ret; |
123 | 120 | ||
@@ -675,10 +672,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
675 | 672 | ||
676 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 673 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
677 | 674 | ||
678 | /* If a graph tracer ignored set_graph_notrace */ | ||
679 | if (call->depth < -1) | ||
680 | call->depth += FTRACE_NOTRACE_DEPTH; | ||
681 | |||
682 | /* | 675 | /* |
683 | * Comments display at + 1 to depth. Since | 676 | * Comments display at + 1 to depth. Since |
684 | * this is a leaf function, keep the comments | 677 | * this is a leaf function, keep the comments |
@@ -721,10 +714,6 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
721 | struct fgraph_cpu_data *cpu_data; | 714 | struct fgraph_cpu_data *cpu_data; |
722 | int cpu = iter->cpu; | 715 | int cpu = iter->cpu; |
723 | 716 | ||
724 | /* If a graph tracer ignored set_graph_notrace */ | ||
725 | if (call->depth < -1) | ||
726 | call->depth += FTRACE_NOTRACE_DEPTH; | ||
727 | |||
728 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 717 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
729 | cpu_data->depth = call->depth; | 718 | cpu_data->depth = call->depth; |
730 | 719 | ||