aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2014-11-18 21:14:11 -0500
committerSteven Rostedt <rostedt@goodmis.org>2014-11-19 15:25:26 -0500
commitaec0be2d6e9f02dbef41ee54854c2e003e55c23e (patch)
treed99c09ba4247724e467ab497f2184068a64ef63b
parent9960efeb80f73bd073483dab0855ee0ddc27085c (diff)
ftrace/x86/extable: Add is_ftrace_trampoline() function
Stack traces that happen from function tracing check if the address on the stack is a __kernel_text_address(). That is, whether the address is kernel code. This calls core_kernel_text() which returns true if the address is part of the builtin kernel code. It also calls is_module_text_address() which returns true if the address belongs to module code. But what is missing is ftrace dynamically allocated trampolines. These trampolines are allocated for individual ftrace_ops that call the ftrace_ops callback functions directly. But if they do a stack trace, the code checking the stack won't detect them as they are neither core kernel code nor module address space. By adding another field to ftrace_ops that also stores the size of the trampoline assigned to it, we can create a new function called is_ftrace_trampoline() that returns true if the address is a dynamically allocated ftrace trampoline. Note, it ignores trampolines that are not dynamically allocated as they will return true with the core_kernel_text() function. Link: http://lkml.kernel.org/r/20141119034829.497125839@goodmis.org Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Acked-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--arch/x86/kernel/ftrace.c9
-rw-r--r--include/linux/ftrace.h8
-rw-r--r--kernel/extable.c7
-rw-r--r--kernel/trace/ftrace.c38
4 files changed, 59 insertions, 3 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1aea94d336c7..60881d919432 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -712,7 +712,8 @@ union ftrace_op_code_union {
712 } __attribute__((packed)); 712 } __attribute__((packed));
713}; 713};
714 714
715static unsigned long create_trampoline(struct ftrace_ops *ops) 715static unsigned long
716create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
716{ 717{
717 unsigned const char *jmp; 718 unsigned const char *jmp;
718 unsigned long start_offset; 719 unsigned long start_offset;
@@ -749,6 +750,8 @@ static unsigned long create_trampoline(struct ftrace_ops *ops)
749 if (!trampoline) 750 if (!trampoline)
750 return 0; 751 return 0;
751 752
753 *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
754
752 /* Copy ftrace_caller onto the trampoline memory */ 755 /* Copy ftrace_caller onto the trampoline memory */
753 ret = probe_kernel_read(trampoline, (void *)start_offset, size); 756 ret = probe_kernel_read(trampoline, (void *)start_offset, size);
754 if (WARN_ON(ret < 0)) { 757 if (WARN_ON(ret < 0)) {
@@ -819,6 +822,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
819 unsigned char *new; 822 unsigned char *new;
820 unsigned long offset; 823 unsigned long offset;
821 unsigned long ip; 824 unsigned long ip;
825 unsigned int size;
822 int ret; 826 int ret;
823 827
824 if (ops->trampoline) { 828 if (ops->trampoline) {
@@ -829,9 +833,10 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
829 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 833 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
830 return; 834 return;
831 } else { 835 } else {
832 ops->trampoline = create_trampoline(ops); 836 ops->trampoline = create_trampoline(ops, &size);
833 if (!ops->trampoline) 837 if (!ops->trampoline)
834 return; 838 return;
839 ops->trampoline_size = size;
835 } 840 }
836 841
837 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); 842 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 619e37cc17fd..7b2616fa2472 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -150,6 +150,7 @@ struct ftrace_ops {
150 struct ftrace_ops_hash *func_hash; 150 struct ftrace_ops_hash *func_hash;
151 struct ftrace_ops_hash old_hash; 151 struct ftrace_ops_hash old_hash;
152 unsigned long trampoline; 152 unsigned long trampoline;
153 unsigned long trampoline_size;
153#endif 154#endif
154}; 155};
155 156
@@ -297,6 +298,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
297 298
298extern int ftrace_nr_registered_ops(void); 299extern int ftrace_nr_registered_ops(void);
299 300
301bool is_ftrace_trampoline(unsigned long addr);
302
300/* 303/*
301 * The dyn_ftrace record's flags field is split into two parts. 304 * The dyn_ftrace record's flags field is split into two parts.
302 * the first part which is '0-FTRACE_REF_MAX' is a counter of 305 * the first part which is '0-FTRACE_REF_MAX' is a counter of
@@ -596,6 +599,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
596 size_t cnt, loff_t *ppos) { return -ENODEV; } 599 size_t cnt, loff_t *ppos) { return -ENODEV; }
597static inline int 600static inline int
598ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } 601ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
602
603static inline bool is_ftrace_trampoline(unsigned long addr)
604{
605 return false;
606}
599#endif /* CONFIG_DYNAMIC_FTRACE */ 607#endif /* CONFIG_DYNAMIC_FTRACE */
600 608
601/* totally disable ftrace - can not re-enable after this */ 609/* totally disable ftrace - can not re-enable after this */
diff --git a/kernel/extable.c b/kernel/extable.c
index d8a6446adbcb..c98f926277a8 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -18,6 +18,7 @@
18#include <linux/ftrace.h> 18#include <linux/ftrace.h>
19#include <linux/memory.h> 19#include <linux/memory.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/ftrace.h>
21#include <linux/mutex.h> 22#include <linux/mutex.h>
22#include <linux/init.h> 23#include <linux/init.h>
23 24
@@ -102,6 +103,8 @@ int __kernel_text_address(unsigned long addr)
102 return 1; 103 return 1;
103 if (is_module_text_address(addr)) 104 if (is_module_text_address(addr))
104 return 1; 105 return 1;
106 if (is_ftrace_trampoline(addr))
107 return 1;
105 /* 108 /*
106 * There might be init symbols in saved stacktraces. 109 * There might be init symbols in saved stacktraces.
107 * Give those symbols a chance to be printed in 110 * Give those symbols a chance to be printed in
@@ -119,7 +122,9 @@ int kernel_text_address(unsigned long addr)
119{ 122{
120 if (core_kernel_text(addr)) 123 if (core_kernel_text(addr))
121 return 1; 124 return 1;
122 return is_module_text_address(addr); 125 if (is_module_text_address(addr))
126 return 1;
127 return is_ftrace_trampoline(addr);
123} 128}
124 129
125/* 130/*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6233f9102179..fa0f36bb32e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1117,6 +1117,43 @@ static struct ftrace_ops global_ops = {
1117 FTRACE_OPS_FL_INITIALIZED, 1117 FTRACE_OPS_FL_INITIALIZED,
1118}; 1118};
1119 1119
1120/*
1121 * This is used by __kernel_text_address() to return true if the
1122 * the address is on a dynamically allocated trampoline that would
1123 * not return true for either core_kernel_text() or
1124 * is_module_text_address().
1125 */
1126bool is_ftrace_trampoline(unsigned long addr)
1127{
1128 struct ftrace_ops *op;
1129 bool ret = false;
1130
1131 /*
1132 * Some of the ops may be dynamically allocated,
1133 * they are freed after a synchronize_sched().
1134 */
1135 preempt_disable_notrace();
1136
1137 do_for_each_ftrace_op(op, ftrace_ops_list) {
1138 /*
1139 * This is to check for dynamically allocated trampolines.
1140 * Trampolines that are in kernel text will have
1141 * core_kernel_text() return true.
1142 */
1143 if (op->trampoline && op->trampoline_size)
1144 if (addr >= op->trampoline &&
1145 addr < op->trampoline + op->trampoline_size) {
1146 ret = true;
1147 goto out;
1148 }
1149 } while_for_each_ftrace_op(op);
1150
1151 out:
1152 preempt_enable_notrace();
1153
1154 return ret;
1155}
1156
1120struct ftrace_page { 1157struct ftrace_page {
1121 struct ftrace_page *next; 1158 struct ftrace_page *next;
1122 struct dyn_ftrace *records; 1159 struct dyn_ftrace *records;
@@ -5373,6 +5410,7 @@ static struct ftrace_ops graph_ops = {
5373 FTRACE_OPS_FL_STUB, 5410 FTRACE_OPS_FL_STUB,
5374#ifdef FTRACE_GRAPH_TRAMP_ADDR 5411#ifdef FTRACE_GRAPH_TRAMP_ADDR
5375 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5412 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5413 /* trampoline_size is only needed for dynamically allocated tramps */
5376#endif 5414#endif
5377 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) 5415 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5378}; 5416};