aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/ftrace.h26
-rw-r--r--arch/x86/include/asm/thread_info.h24
-rw-r--r--arch/x86/kernel/Makefile6
-rw-r--r--arch/x86/kernel/entry_32.S33
-rw-r--r--arch/x86/kernel/ftrace.c182
6 files changed, 265 insertions, 7 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6ab097fd5241..7780cc8b51d1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
29 select HAVE_FTRACE_MCOUNT_RECORD 29 select HAVE_FTRACE_MCOUNT_RECORD
30 select HAVE_DYNAMIC_FTRACE 30 select HAVE_DYNAMIC_FTRACE
31 select HAVE_FUNCTION_TRACER 31 select HAVE_FUNCTION_TRACER
32 select HAVE_FUNCTION_RET_TRACER if X86_32
32 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 33 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
33 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 34 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
34 select HAVE_ARCH_KGDB if !X86_VOYAGER 35 select HAVE_ARCH_KGDB if !X86_VOYAGER
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index f8173ed1c970..9b6a1fa19e70 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -20,4 +20,30 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
20#endif /* __ASSEMBLY__ */ 20#endif /* __ASSEMBLY__ */
21#endif /* CONFIG_FUNCTION_TRACER */ 21#endif /* CONFIG_FUNCTION_TRACER */
22 22
23#ifdef CONFIG_FUNCTION_RET_TRACER
24#define FTRACE_RET_STACK_SIZE 20
25
26#ifndef __ASSEMBLY__
27
28/*
29 * Stack of return addresses for functions
30 * of a thread.
31 * Used in struct thread_info
32 */
33struct ftrace_ret_stack {
34 unsigned long ret;
35 unsigned long func;
36 unsigned long long calltime;
37};
38
39/*
40 * Primary handler of a function return.
 41 * It relies on ftrace_return_to_handler.
 42 * Defined in entry_32.S
43 */
44extern void return_to_handler(void);
45
46#endif /* __ASSEMBLY__ */
47#endif /* CONFIG_FUNCTION_RET_TRACER */
48
23#endif /* _ASM_X86_FTRACE_H */ 49#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..a71158369fd4 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,7 @@
20struct task_struct; 20struct task_struct;
21struct exec_domain; 21struct exec_domain;
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/ftrace.h>
23 24
24struct thread_info { 25struct thread_info {
25 struct task_struct *task; /* main task structure */ 26 struct task_struct *task; /* main task structure */
@@ -38,8 +39,30 @@ struct thread_info {
38 */ 39 */
39 __u8 supervisor_stack[0]; 40 __u8 supervisor_stack[0];
40#endif 41#endif
42
43#ifdef CONFIG_FUNCTION_RET_TRACER
 44 /* Index of current stored address in ret_stack */
45 int curr_ret_stack;
46 /* Stack of return addresses for return function tracing */
47 struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
48#endif
41}; 49};
42 50
51#ifdef CONFIG_FUNCTION_RET_TRACER
52#define INIT_THREAD_INFO(tsk) \
53{ \
54 .task = &tsk, \
55 .exec_domain = &default_exec_domain, \
56 .flags = 0, \
57 .cpu = 0, \
58 .preempt_count = 1, \
59 .addr_limit = KERNEL_DS, \
60 .restart_block = { \
61 .fn = do_no_restart_syscall, \
62 }, \
63 .curr_ret_stack = -1,\
64}
65#else
43#define INIT_THREAD_INFO(tsk) \ 66#define INIT_THREAD_INFO(tsk) \
44{ \ 67{ \
45 .task = &tsk, \ 68 .task = &tsk, \
@@ -52,6 +75,7 @@ struct thread_info {
52 .fn = do_no_restart_syscall, \ 75 .fn = do_no_restart_syscall, \
53 }, \ 76 }, \
54} 77}
78#endif
55 79
56#define init_thread_info (init_thread_union.thread_info) 80#define init_thread_info (init_thread_union.thread_info)
57#define init_stack (init_thread_union.stack) 81#define init_stack (init_thread_union.stack)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..1d8ed95da846 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
14CFLAGS_REMOVE_ftrace.o = -pg 14CFLAGS_REMOVE_ftrace.o = -pg
15endif 15endif
16 16
17ifdef CONFIG_FUNCTION_RET_TRACER
 18# Don't trace __switch_to() but leave it for the function tracer
19CFLAGS_REMOVE_process_32.o = -pg
20endif
21
17# 22#
18# vsyscalls (which work on the user stack) should have 23# vsyscalls (which work on the user stack) should have
19# no stack-protector checks: 24# no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
65obj-$(CONFIG_X86_IO_APIC) += io_apic.o 70obj-$(CONFIG_X86_IO_APIC) += io_apic.o
66obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 71obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
67obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 72obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
73obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
68obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 74obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
69obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 75obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
70obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 76obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 9134de814c97..9a0ac85946db 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1188,6 +1188,10 @@ ENTRY(mcount)
1188 1188
1189 cmpl $ftrace_stub, ftrace_trace_function 1189 cmpl $ftrace_stub, ftrace_trace_function
1190 jnz trace 1190 jnz trace
1191#ifdef CONFIG_FUNCTION_RET_TRACER
1192 cmpl $ftrace_stub, ftrace_function_return
1193 jnz trace_return
1194#endif
1191.globl ftrace_stub 1195.globl ftrace_stub
1192ftrace_stub: 1196ftrace_stub:
1193 ret 1197 ret
@@ -1206,8 +1210,37 @@ trace:
1206 popl %edx 1210 popl %edx
1207 popl %ecx 1211 popl %ecx
1208 popl %eax 1212 popl %eax
1213 jmp ftrace_stub
1209 1214
1215#ifdef CONFIG_FUNCTION_RET_TRACER
1216trace_return:
1217 pushl %eax
1218 pushl %ecx
1219 pushl %edx
1220 movl 0xc(%esp), %eax
1221 pushl %eax
1222 lea 0x4(%ebp), %eax
1223 pushl %eax
1224 call prepare_ftrace_return
1225 addl $8, %esp
1226 popl %edx
1227 popl %ecx
1228 popl %eax
1210 jmp ftrace_stub 1229 jmp ftrace_stub
1230
1231.globl return_to_handler
1232return_to_handler:
1233 pushl $0
1234 pushl %eax
1235 pushl %ecx
1236 pushl %edx
1237 call ftrace_return_to_handler
1238 movl %eax, 0xc(%esp)
1239 popl %edx
1240 popl %ecx
1241 popl %eax
1242 ret
1243#endif /* CONFIG_FUNCTION_RET_TRACER */
1211END(mcount) 1244END(mcount)
1212#endif /* CONFIG_DYNAMIC_FTRACE */ 1245#endif /* CONFIG_DYNAMIC_FTRACE */
1213#endif /* CONFIG_FUNCTION_TRACER */ 1246#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 69149337f2fe..16a571dea2ef 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,178 @@
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/percpu.h> 16#include <linux/percpu.h>
17#include <linux/sched.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/list.h> 19#include <linux/list.h>
19 20
20#include <asm/ftrace.h> 21#include <asm/ftrace.h>
22#include <linux/ftrace.h>
21#include <asm/nops.h> 23#include <asm/nops.h>
24#include <asm/nmi.h>
22 25
23 26
24static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; 27
28#ifdef CONFIG_FUNCTION_RET_TRACER
29
30/*
31 * These functions are picked from those used on
32 * this page for dynamic ftrace. They have been
33 * simplified to ignore all traces in NMI context.
34 */
35static atomic_t in_nmi;
36
37void ftrace_nmi_enter(void)
38{
39 atomic_inc(&in_nmi);
40}
41
42void ftrace_nmi_exit(void)
43{
44 atomic_dec(&in_nmi);
45}
46
47/*
 48 * Synchronize accesses to return addresses stack with
49 * interrupts.
50 */
51static raw_spinlock_t ret_stack_lock;
52
53/* Add a function return address to the trace stack on thread info.*/
54static int push_return_trace(unsigned long ret, unsigned long long time,
55 unsigned long func)
56{
57 int index;
58 struct thread_info *ti;
59 unsigned long flags;
60 int err = 0;
61
62 raw_local_irq_save(flags);
63 __raw_spin_lock(&ret_stack_lock);
64
65 ti = current_thread_info();
66 /* The return trace stack is full */
67 if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
68 err = -EBUSY;
69 goto out;
70 }
71
72 index = ++ti->curr_ret_stack;
73 ti->ret_stack[index].ret = ret;
74 ti->ret_stack[index].func = func;
75 ti->ret_stack[index].calltime = time;
76
77out:
78 __raw_spin_unlock(&ret_stack_lock);
79 raw_local_irq_restore(flags);
80 return err;
81}
82
83/* Retrieve a function return address to the trace stack on thread info.*/
84static void pop_return_trace(unsigned long *ret, unsigned long long *time,
85 unsigned long *func)
86{
87 struct thread_info *ti;
88 int index;
89 unsigned long flags;
90
91 raw_local_irq_save(flags);
92 __raw_spin_lock(&ret_stack_lock);
93
94 ti = current_thread_info();
95 index = ti->curr_ret_stack;
96 *ret = ti->ret_stack[index].ret;
97 *func = ti->ret_stack[index].func;
98 *time = ti->ret_stack[index].calltime;
99 ti->curr_ret_stack--;
100
101 __raw_spin_unlock(&ret_stack_lock);
102 raw_local_irq_restore(flags);
103}
104
105/*
106 * Send the trace to the ring-buffer.
107 * @return the original return address.
108 */
109unsigned long ftrace_return_to_handler(void)
110{
111 struct ftrace_retfunc trace;
112 pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
113 trace.rettime = cpu_clock(raw_smp_processor_id());
114 ftrace_function_return(&trace);
115
116 return trace.ret;
117}
118
119/*
120 * Hook the return address and push it in the stack of return addrs
121 * in current thread info.
122 */
123asmlinkage
124void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
125{
126 unsigned long old;
127 unsigned long long calltime;
128 int faulted;
129 unsigned long return_hooker = (unsigned long)
130 &return_to_handler;
131
 132 /* NMIs are currently unsupported */
133 if (atomic_read(&in_nmi))
134 return;
135
136 /*
137 * Protect against fault, even if it shouldn't
 138 * happen. This tool is too intrusive to
139 * ignore such a protection.
140 */
141 asm volatile(
142 "1: movl (%[parent_old]), %[old]\n"
143 "2: movl %[return_hooker], (%[parent_replaced])\n"
144 " movl $0, %[faulted]\n"
145
146 ".section .fixup, \"ax\"\n"
147 "3: movl $1, %[faulted]\n"
148 ".previous\n"
149
150 ".section __ex_table, \"a\"\n"
151 " .long 1b, 3b\n"
152 " .long 2b, 3b\n"
153 ".previous\n"
154
155 : [parent_replaced] "=r" (parent), [old] "=r" (old),
156 [faulted] "=r" (faulted)
157 : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
158 : "memory"
159 );
160
161 if (WARN_ON(faulted)) {
162 unregister_ftrace_return();
163 return;
164 }
165
166 if (WARN_ON(!__kernel_text_address(old))) {
167 unregister_ftrace_return();
168 *parent = old;
169 return;
170 }
171
172 calltime = cpu_clock(raw_smp_processor_id());
173
174 if (push_return_trace(old, calltime, self_addr) == -EBUSY)
175 *parent = old;
176}
177
178static int __init init_ftrace_function_return(void)
179{
180 ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
181 return 0;
182}
183device_initcall(init_ftrace_function_return);
184
185
186#endif
187
188#ifdef CONFIG_DYNAMIC_FTRACE
25 189
26union ftrace_code_union { 190union ftrace_code_union {
27 char code[MCOUNT_INSN_SIZE]; 191 char code[MCOUNT_INSN_SIZE];
@@ -31,17 +195,11 @@ union ftrace_code_union {
31 } __attribute__((packed)); 195 } __attribute__((packed));
32}; 196};
33 197
34
35static int ftrace_calc_offset(long ip, long addr) 198static int ftrace_calc_offset(long ip, long addr)
36{ 199{
37 return (int)(addr - ip); 200 return (int)(addr - ip);
38} 201}
39 202
40unsigned char *ftrace_nop_replace(void)
41{
42 return ftrace_nop;
43}
44
45unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 203unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
46{ 204{
47 static union ftrace_code_union calc; 205 static union ftrace_code_union calc;
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
183} 341}
184 342
185 343
344
345
346static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
347
348unsigned char *ftrace_nop_replace(void)
349{
350 return ftrace_nop;
351}
352
186int 353int
187ftrace_modify_code(unsigned long ip, unsigned char *old_code, 354ftrace_modify_code(unsigned long ip, unsigned char *old_code,
188 unsigned char *new_code) 355 unsigned char *new_code)
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data)
292 459
293 return 0; 460 return 0;
294} 461}
462#endif