author    Frederic Weisbecker <fweisbec@gmail.com>  2008-11-11 01:03:45 -0500
committer Ingo Molnar <mingo@elte.hu>               2008-11-11 04:29:11 -0500
commit    caf4b323b02a16c92fba449952ac6515ddc76d7a (patch)
tree      6fc234d00faca192248344996a168afb253e5f40 /arch/x86/kernel/ftrace.c
parent    d844222a54c33a960755b44b934cd1b01b05dceb (diff)
tracing, x86: add low level support for ftrace return tracing
Impact: add infrastructure for function-return tracing

Add low-level support for ftrace return tracing. This plug-in stores return addresses on the thread_info structure of the current task.

The index of the current return address is initialized when the task is the first one (init) and when a process forks (for the child). The index does not need to be reset on sys_execve because, after that syscall, the task still has to return through the kernel functions it called.

Note that the code of return_to_handler was suggested by Steven Rostedt, as were almost all of the improvement ideas in this v3.

As a safety measure, arch/x86/kernel/process_32.c is not traced, because __switch_to() changes the current task during its execution. That could corrupt the stored return address for this function, even though no crash was observed when testing with tracing of this function enabled.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
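For reference, here is a minimal sketch of the per-task storage this patch indexes. The actual fields are added to struct thread_info by a companion patch in this series, which is outside this diffstat-limited view, so the field names and the stack depth below are assumptions inferred from how this patch uses them:

	/* Sketch only: names and depth are assumptions based on the
	 * accesses in push_return_trace()/pop_return_trace() below,
	 * not the companion thread_info patch itself. */
	#define FTRACE_RET_STACK_SIZE 32		/* assumed depth */

	struct ftrace_ret_stack {
		unsigned long ret;			/* original return address */
		unsigned long func;			/* traced function entry */
		unsigned long long calltime;		/* cpu_clock() at entry */
	};

	/* Appended to struct thread_info; curr_ret_stack starts at -1
	 * (empty) for init and for each newly forked child. */
	int curr_ret_stack;
	struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];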
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--	arch/x86/kernel/ftrace.c	181
1 file changed, 174 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 69149337f2fe..d68033bba223 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,10 +18,173 @@
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+/*
+ * These functions are picked from those used in
+ * this file for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+
+/*
+ * Synchronize accesses to the return address stack with
+ * interrupts.
+ */
+static raw_spinlock_t ret_stack_lock;
+
+/* Add a function return address to the trace stack on thread_info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func)
+{
+	int index;
+	struct thread_info *ti;
+	unsigned long flags;
+	int err = 0;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ret_stack_lock);
+
+	ti = current_thread_info();
+	/* The return trace stack is full */
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	index = ++ti->curr_ret_stack;
+	ti->ret_stack[index].ret = ret;
+	ti->ret_stack[index].func = func;
+	ti->ret_stack[index].calltime = time;
+
+out:
+	__raw_spin_unlock(&ret_stack_lock);
+	raw_local_irq_restore(flags);
+	return err;
+}
+
+/* Retrieve a function return address from the trace stack on thread_info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+				unsigned long *func)
+{
+	struct thread_info *ti;
+	int index;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ret_stack_lock);
+
+	ti = current_thread_info();
+	index = ti->curr_ret_stack;
+	*ret = ti->ret_stack[index].ret;
+	*func = ti->ret_stack[index].func;
+	*time = ti->ret_stack[index].calltime;
+	ti->curr_ret_stack--;
+
+	__raw_spin_unlock(&ret_stack_lock);
+	raw_local_irq_restore(flags);
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread_info.
+ */
+asmlinkage
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against a fault, even though it shouldn't
+	 * happen. This tool is too intrusive to go
+	 * without such protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=rm" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+static int __init init_ftrace_function_return(void)
+{
+	ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	return 0;
+}
+device_initcall(init_ftrace_function_return);
+
+
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,17 +194,11 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
 unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
@@ -183,6 +340,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 }
 
 
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
 int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
@@ -292,3 +458,4 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
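
For completeness, a minimal sketch of how a tracer might consume these events. The registration API (register_ftrace_return() and the rettime field of struct ftrace_retfunc) comes from companion patches in this series and is partly assumed here; return_to_handler itself is an assembly trampoline added to entry_32.S, outside this diffstat-limited view, which calls ftrace_return_to_handler() and jumps back to the address it returns:

	#include <linux/ftrace.h>

	/* Hypothetical handler, invoked through ftrace_function_return()
	 * from ftrace_return_to_handler() above. */
	static void example_return_handler(struct ftrace_retfunc *trace)
	{
		/* trace->func has just returned to trace->ret; it ran for
		 * trace->rettime - trace->calltime ns (per cpu_clock()). */
	}

	static int __init example_tracer_init(void)
	{
		/* register_ftrace_return() is assumed from this series. */
		return register_ftrace_return(example_return_handler);
	}
	device_initcall(example_tracer_init);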