author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-01-22 22:32:51 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-01-23 15:56:55 -0500
commit	6be7fa3c74d1e0cd50f2157b5c1524f152bf641e (patch)
tree	4ce16d7acac4f57ab60005997ca097faaf216e87 /kernel/trace/ftrace.c
parent	e2ac83d74a4d753cea88407e65136c84a0cb60b2 (diff)
ftrace, orc, x86: Handle ftrace dynamically allocated trampolines
The function tracer can create a dynamically allocated trampoline that is called by the mcount or fentry hook, and that trampoline in turn calls the registered function callback. The problem is that the ORC unwinder will bail if it encounters one of these trampolines. This breaks the stack trace of function callbacks, which includes the stack tracer and setting the stack trace for individual functions.

Since these dynamic trampolines are basically copies of the static ftrace trampolines defined in ftrace_*.S, we do not need to create new ORC entries for the dynamic trampolines. Finding the return address on the stack works identically to the static functions that were copied to create the dynamic trampolines. When encountering an ftrace dynamic trampoline, we can just use the ORC entry of the static ftrace function that the trampoline was copied from.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
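To see how the unwinder side consumes the new helper, here is a sketch of an ORC lookup for trampoline addresses, in the spirit of the x86 unwinder change that accompanies this patch. It assumes the orc_find() helper and struct orc_entry from arch/x86/kernel/unwind_orc.c, the ftrace_call/ftrace_regs_call labels of the static trampolines, and the FTRACE_OPS_FL_SAVE_REGS flag; treat it as a sketch, not the committed implementation:

static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	/* Is ip inside a dynamically allocated ftrace trampoline? */
	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/*
	 * The dynamic trampoline is a copy of one of the two static
	 * trampolines; pick the one it was copied from.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	/* Reuse the ORC entry of the static copy for the unwind. */
	return orc_find(caller);
}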
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ccdf3664e4a9..554b517c61a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
 };
 
 /*
- * This is used by __kernel_text_address() to return true if the
- * address is on a dynamically allocated trampoline that would
- * not return true for either core_kernel_text() or
- * is_module_text_address().
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
  */
-bool is_ftrace_trampoline(unsigned long addr)
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
-	struct ftrace_ops *op;
-	bool ret = false;
+	struct ftrace_ops *op = NULL;
 
 	/*
 	 * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
 		if (op->trampoline && op->trampoline_size)
 			if (addr >= op->trampoline &&
 			    addr < op->trampoline + op->trampoline_size) {
-				ret = true;
-				goto out;
+				preempt_enable_notrace();
+				return op;
 			}
 	} while_for_each_ftrace_op(op);
-
- out:
 	preempt_enable_notrace();
 
-	return ret;
+	return NULL;
+}
+
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+	return ftrace_ops_trampoline(addr) != NULL;
+}
 
 struct ftrace_page {
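The boolean wrapper keeps the old semantics for text-address checks. As the restored comment says, __kernel_text_address() relies on it for addresses that fail both core_kernel_text() and is_module_text_address(); a minimal sketch of that call path (paraphrasing kernel/extable.c of this era, not quoting it):

int kernel_text_address(unsigned long addr)
{
	if (core_kernel_text(addr))
		return 1;
	if (is_module_text_address(addr))
		return 1;
	/*
	 * Dynamic ftrace trampolines are executable pages that belong
	 * to neither the core kernel image nor any module, so they
	 * need their own check to be recognized as kernel text.
	 */
	if (is_ftrace_trampoline(addr))
		return 1;
	return 0;
}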