diff options
author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2014-11-18 21:14:11 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2014-11-19 15:25:26 -0500 |
commit | aec0be2d6e9f02dbef41ee54854c2e003e55c23e (patch) | |
tree | d99c09ba4247724e467ab497f2184068a64ef63b /kernel/trace | |
parent | 9960efeb80f73bd073483dab0855ee0ddc27085c (diff) |
ftrace/x86/extable: Add is_ftrace_trampoline() function
Stack traces that happen from function tracing check if the address
on the stack is a __kernel_text_address(). That is, whether the address
is kernel code. This calls core_kernel_text() which returns true
if the address is part of the builtin kernel code. It also calls
is_module_text_address() which returns true if the address belongs
to module code.
But what is missing is ftrace dynamically allocated trampolines.
These trampolines are allocated for individual ftrace_ops that
call the ftrace_ops callback functions directly. But if they do a
stack trace, the code checking the stack won't detect them as they
are neither core kernel code nor module address space.
By adding another field to ftrace_ops that also stores the size of
the trampoline assigned to it, we can create a new function called
is_ftrace_trampoline() that returns true if the address is a
dynamically allocate ftrace trampoline. Note, it ignores trampolines
that are not dynamically allocated as they will return true with
the core_kernel_text() function.
Link: http://lkml.kernel.org/r/20141119034829.497125839@goodmis.org
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c | 38 |
1 files changed, 38 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6233f9102179..fa0f36bb32e9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1117,6 +1117,43 @@ static struct ftrace_ops global_ops = { | |||
1117 | FTRACE_OPS_FL_INITIALIZED, | 1117 | FTRACE_OPS_FL_INITIALIZED, |
1118 | }; | 1118 | }; |
1119 | 1119 | ||
1120 | /* | ||
1121 | * This is used by __kernel_text_address() to return true if the | ||
1122 | * the address is on a dynamically allocated trampoline that would | ||
1123 | * not return true for either core_kernel_text() or | ||
1124 | * is_module_text_address(). | ||
1125 | */ | ||
1126 | bool is_ftrace_trampoline(unsigned long addr) | ||
1127 | { | ||
1128 | struct ftrace_ops *op; | ||
1129 | bool ret = false; | ||
1130 | |||
1131 | /* | ||
1132 | * Some of the ops may be dynamically allocated, | ||
1133 | * they are freed after a synchronize_sched(). | ||
1134 | */ | ||
1135 | preempt_disable_notrace(); | ||
1136 | |||
1137 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
1138 | /* | ||
1139 | * This is to check for dynamically allocated trampolines. | ||
1140 | * Trampolines that are in kernel text will have | ||
1141 | * core_kernel_text() return true. | ||
1142 | */ | ||
1143 | if (op->trampoline && op->trampoline_size) | ||
1144 | if (addr >= op->trampoline && | ||
1145 | addr < op->trampoline + op->trampoline_size) { | ||
1146 | ret = true; | ||
1147 | goto out; | ||
1148 | } | ||
1149 | } while_for_each_ftrace_op(op); | ||
1150 | |||
1151 | out: | ||
1152 | preempt_enable_notrace(); | ||
1153 | |||
1154 | return ret; | ||
1155 | } | ||
1156 | |||
1120 | struct ftrace_page { | 1157 | struct ftrace_page { |
1121 | struct ftrace_page *next; | 1158 | struct ftrace_page *next; |
1122 | struct dyn_ftrace *records; | 1159 | struct dyn_ftrace *records; |
@@ -5373,6 +5410,7 @@ static struct ftrace_ops graph_ops = { | |||
5373 | FTRACE_OPS_FL_STUB, | 5410 | FTRACE_OPS_FL_STUB, |
5374 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | 5411 | #ifdef FTRACE_GRAPH_TRAMP_ADDR |
5375 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | 5412 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
5413 | /* trampoline_size is only needed for dynamically allocated tramps */ | ||
5376 | #endif | 5414 | #endif |
5377 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5415 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5378 | }; | 5416 | }; |