author    Frederic Weisbecker <fweisbec@gmail.com>    2008-11-11 01:03:45 -0500
committer Ingo Molnar <mingo@elte.hu>                 2008-11-11 04:29:11 -0500
commit    caf4b323b02a16c92fba449952ac6515ddc76d7a
tree      6fc234d00faca192248344996a168afb253e5f40
parent    d844222a54c33a960755b44b934cd1b01b05dceb
tracing, x86: add low level support for ftrace return tracing
Impact: add infrastructure for function-return tracing

Add low level support for ftrace return tracing.

This plug-in stores return addresses on the thread_info structure of
the current task.

The index of the current return address is initialized when the task
is the first one (init) and when a process forks (the child). It is
not needed when a task does a sys_execve, because after this syscall
the task still has to return to the kernel functions it called.

Note that the code of return_to_handler was suggested by Steven
Rostedt, as were almost all of the improvement ideas in this V3.

As a precaution, arch/x86/kernel/process_32.c is not traced, because
__switch_to() changes the current task during its execution. That
could make the stored return addresses of this function inconsistent,
even though I did not see any crash while testing with tracing of this
function enabled.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
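The interface this patch exports from include/linux/ftrace.h is meant to be
driven by a higher-level tracer plug-in, which is not part of this change.
As a rough usage sketch — only register_ftrace_return(),
unregister_ftrace_return() and struct ftrace_retfunc come from this patch;
the example_* names are hypothetical — a return handler could be wired up
like this:

/*
 * Hypothetical consumer of the new return-tracing hooks
 * (illustration only, not part of this patch).
 */
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Matches trace_function_return_t from include/linux/ftrace.h */
static void example_return_handler(struct ftrace_retfunc *trace)
{
	/* calltime/rettime are cpu_clock() timestamps in nanoseconds */
	printk(KERN_DEBUG "return to %08lx from %08lx, lasted %llu ns\n",
	       trace->ret, trace->func,
	       trace->rettime - trace->calltime);
}

static int __init example_return_tracer_init(void)
{
	/* Every traced function return now calls the handler above */
	register_ftrace_return(example_return_handler);
	return 0;
}

static void example_return_tracer_reset(void)
{
	/* Points ftrace_function_return back to the stub */
	unregister_ftrace_return();
}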
-rw-r--r--  arch/x86/Kconfig                     |   1
-rw-r--r--  arch/x86/include/asm/ftrace.h        |  26
-rw-r--r--  arch/x86/include/asm/thread_info.h   |  24
-rw-r--r--  arch/x86/kernel/Makefile             |   6
-rw-r--r--  arch/x86/kernel/entry_32.S           |  33
-rw-r--r--  arch/x86/kernel/ftrace.c             | 181
-rw-r--r--  include/linux/ftrace.h               |  20
-rw-r--r--  include/linux/ftrace_irq.h           |   2
-rw-r--r--  include/linux/sched.h                |  11
-rw-r--r--  kernel/Makefile                      |   4
10 files changed, 300 insertions(+), 8 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 27b8a3a39911..ca91e50bdb10 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -11,6 +11,7 @@ config 64BIT
 
 config X86_32
 	def_bool !64BIT
+	select HAVE_FUNCTION_RET_TRACER
 
 config X86_64
 	def_bool 64BIT
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index f8173ed1c970..9b6a1fa19e70 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -20,4 +20,30 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+#define FTRACE_RET_STACK_SIZE 20
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32.S
+ */
+extern void return_to_handler(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_RET_TRACER */
+
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..a71158369fd4 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,7 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
+#include <asm/ftrace.h>
 
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
@@ -38,8 +39,30 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	/* Index of current stored address in ret_stack */
+	int			curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
+#endif
 };
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.preempt_count	= 1,			\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+	.curr_ret_stack = -1,			\
+}
+#else
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
@@ -52,6 +75,7 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},					\
 }
+#endif
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..1d8ed95da846 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+ifdef CONFIG_FUNCTION_RET_TRACER
+# Don't trace __switch_to(): it changes the current task during execution
+CFLAGS_REMOVE_process_32.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_RET_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 9134de814c97..9a0ac85946db 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1188,6 +1188,10 @@ ENTRY(mcount)
 
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	cmpl $ftrace_stub, ftrace_function_return
+	jnz trace_return
+#endif
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1206,8 +1210,37 @@ trace:
 	popl %edx
 	popl %ecx
 	popl %eax
+	jmp ftrace_stub
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+trace_return:
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	pushl %eax
+	lea 0x4(%ebp), %eax
+	pushl %eax
+	call prepare_ftrace_return
+	addl $8, %esp
+	popl %edx
+	popl %ecx
+	popl %eax
 	jmp ftrace_stub
+
+.globl return_to_handler
+return_to_handler:
+	pushl $0
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	call ftrace_return_to_handler
+	movl %eax, 0xc(%esp)
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+#endif /* CONFIG_FUNCTION_RET_TRACER */
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 69149337f2fe..d68033bba223 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,10 +18,173 @@
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+
+/*
+ * Synchronize accesses to the return addresses stack with
+ * interrupts.
+ */
+static raw_spinlock_t ret_stack_lock;
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func)
+{
+	int index;
+	struct thread_info *ti;
+	unsigned long flags;
+	int err = 0;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ret_stack_lock);
+
+	ti = current_thread_info();
+	/* The return trace stack is full */
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	index = ++ti->curr_ret_stack;
+	ti->ret_stack[index].ret = ret;
+	ti->ret_stack[index].func = func;
+	ti->ret_stack[index].calltime = time;
+
+out:
+	__raw_spin_unlock(&ret_stack_lock);
+	raw_local_irq_restore(flags);
+	return err;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+				unsigned long *func)
+{
+	struct thread_info *ti;
+	int index;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ret_stack_lock);
+
+	ti = current_thread_info();
+	index = ti->curr_ret_stack;
+	*ret = ti->ret_stack[index].ret;
+	*func = ti->ret_stack[index].func;
+	*time = ti->ret_stack[index].calltime;
+	ti->curr_ret_stack--;
+
+	__raw_spin_unlock(&ret_stack_lock);
+	raw_local_irq_restore(flags);
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+asmlinkage
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to
+	 * do without such protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=rm" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+static int __init init_ftrace_function_return(void)
+{
+	ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	return 0;
+}
+device_initcall(init_ftrace_function_return);
+
+
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,17 +194,11 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
 unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
@@ -183,6 +340,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 }
 
 
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
 int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
@@ -292,3 +458,4 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
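The arch/x86/kernel/ftrace.c hunk above is the heart of the mechanism:
prepare_ftrace_return() hijacks the caller's return address and saves the
original one on the thread's ret_stack, and ftrace_return_to_handler() later
pops it back so execution resumes where it should. As a rough, self-contained
user-space model of that push/pop cycle (illustration only: locking,
timestamps and the NMI check are left out, and the helper names below are
made up for the example, they are not kernel symbols):

#include <stdio.h>

#define FTRACE_RET_STACK_SIZE 20

struct ret_entry {
	unsigned long ret;	/* original return address */
	unsigned long func;	/* traced function */
};

/* Stands in for the per-thread_info state added by this patch */
static struct ret_entry ret_stack[FTRACE_RET_STACK_SIZE];
static int curr_ret_stack = -1;	/* -1 means "empty", as in INIT_THREAD_INFO */

/* Models push_return_trace(): refuse to overflow, otherwise push */
static int push_return(unsigned long ret, unsigned long func)
{
	if (curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
		return -1;			/* kernel code returns -EBUSY */
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	return 0;
}

/* Models pop_return_trace(): hand back the saved return address */
static unsigned long pop_return(void)
{
	return ret_stack[curr_ret_stack--].ret;
}

int main(void)
{
	/* function entry: the real return address gets hijacked and saved */
	push_return(0xc0100123UL, 0xc0200456UL);
	/* function exit: the trampoline asks for the real address back */
	printf("resume at 0x%lx\n", pop_return());
	return 0;
}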
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1f5608c11023..dcbbf72a88b1 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -268,6 +268,26 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { }
 
 
 /*
+ * Structure that defines a traced function return.
+ */
+struct ftrace_retfunc {
+	unsigned long ret; /* Return address */
+	unsigned long func; /* Current function */
+	unsigned long long calltime;
+	unsigned long long rettime;
+};
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+/* Type of a callback handler for traced function returns */
+typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+
+extern void register_ftrace_return(trace_function_return_t func);
+/* The current handler in use */
+extern trace_function_return_t ftrace_function_return;
+extern void unregister_ftrace_return(void);
+#endif
+
+/*
  * Structure which defines the trace of an initcall.
  * You don't have to fill the func field since it is
  * only used internally by the tracer.
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index b1299d6729f2..0b4df55d7a74 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#ifdef CONFIG_DYNAMIC_FTRACE
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 295b7c756ca6..df77abe860c9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2005,6 +2005,17 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
 {
 	*task_thread_info(p) = *task_thread_info(org);
 	task_thread_info(p)->task = p;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	/*
+	 * When fork() creates a child process, this function is called.
+	 * But the child task must not inherit the return addresses traced
+	 * by the return function tracer, because it will directly execute
+	 * in userspace and will not return to kernel functions its parent
+	 * used.
+	 */
+	task_thread_info(p)->curr_ret_stack = -1;
+#endif
 }
 
 static inline unsigned long *end_of_stack(struct task_struct *p)
diff --git a/kernel/Makefile b/kernel/Makefile
index 9a3ec66a9d84..af3be57acbbb 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -23,6 +23,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
+ifdef CONFIG_FUNCTION_RET_TRACER
+CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
+CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
+endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o