diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-11-12 04:17:09 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-12 04:17:09 -0500 |
commit | 60a011c736e7dd09a0b01ca6a051a416f3f52ffb (patch) | |
tree | 20e91db58c3bbfa622d3ed8ec747e4974a807284 /arch/x86/kernel | |
parent | d06bbd669539215405874d8fe32ab65105e6c4bb (diff) | |
parent | 19b3e9671c5a219b8c34da2cc66e0ce7c3a501ae (diff) |
Merge branch 'tracing/function-return-tracer' into tracing/fastboot
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/Makefile | 6 | ||||
-rw-r--r-- | arch/x86/kernel/entry_32.S | 33 | ||||
-rw-r--r-- | arch/x86/kernel/ftrace.c | 182 |
3 files changed, 214 insertions, 7 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index e489ff9cb3e2..1d8ed95da846 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg | |||
14 | CFLAGS_REMOVE_ftrace.o = -pg | 14 | CFLAGS_REMOVE_ftrace.o = -pg |
15 | endif | 15 | endif |
16 | 16 | ||
17 | ifdef CONFIG_FUNCTION_RET_TRACER | ||
18 | # Don't trace __switch_to() but let it for function tracer | ||
19 | CFLAGS_REMOVE_process_32.o = -pg | ||
20 | endif | ||
21 | |||
17 | # | 22 | # |
18 | # vsyscalls (which work on the user stack) should have | 23 | # vsyscalls (which work on the user stack) should have |
19 | # no stack-protector checks: | 24 | # no stack-protector checks: |
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o | |||
65 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o | 70 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
66 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 71 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
67 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 72 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
73 | obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o | ||
68 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 74 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 75 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 76 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 9134de814c97..9a0ac85946db 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1188,6 +1188,10 @@ ENTRY(mcount) | |||
1188 | 1188 | ||
1189 | cmpl $ftrace_stub, ftrace_trace_function | 1189 | cmpl $ftrace_stub, ftrace_trace_function |
1190 | jnz trace | 1190 | jnz trace |
1191 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1192 | cmpl $ftrace_stub, ftrace_function_return | ||
1193 | jnz trace_return | ||
1194 | #endif | ||
1191 | .globl ftrace_stub | 1195 | .globl ftrace_stub |
1192 | ftrace_stub: | 1196 | ftrace_stub: |
1193 | ret | 1197 | ret |
@@ -1206,8 +1210,37 @@ trace: | |||
1206 | popl %edx | 1210 | popl %edx |
1207 | popl %ecx | 1211 | popl %ecx |
1208 | popl %eax | 1212 | popl %eax |
1213 | jmp ftrace_stub | ||
1209 | 1214 | ||
1215 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1216 | trace_return: | ||
1217 | pushl %eax | ||
1218 | pushl %ecx | ||
1219 | pushl %edx | ||
1220 | movl 0xc(%esp), %eax | ||
1221 | pushl %eax | ||
1222 | lea 0x4(%ebp), %eax | ||
1223 | pushl %eax | ||
1224 | call prepare_ftrace_return | ||
1225 | addl $8, %esp | ||
1226 | popl %edx | ||
1227 | popl %ecx | ||
1228 | popl %eax | ||
1210 | jmp ftrace_stub | 1229 | jmp ftrace_stub |
1230 | |||
1231 | .globl return_to_handler | ||
1232 | return_to_handler: | ||
1233 | pushl $0 | ||
1234 | pushl %eax | ||
1235 | pushl %ecx | ||
1236 | pushl %edx | ||
1237 | call ftrace_return_to_handler | ||
1238 | movl %eax, 0xc(%esp) | ||
1239 | popl %edx | ||
1240 | popl %ecx | ||
1241 | popl %eax | ||
1242 | ret | ||
1243 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
1211 | END(mcount) | 1244 | END(mcount) |
1212 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1245 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1213 | #endif /* CONFIG_FUNCTION_TRACER */ | 1246 | #endif /* CONFIG_FUNCTION_TRACER */ |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 69149337f2fe..16a571dea2ef 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -14,14 +14,178 @@ | |||
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/sched.h> | ||
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/list.h> | 19 | #include <linux/list.h> |
19 | 20 | ||
20 | #include <asm/ftrace.h> | 21 | #include <asm/ftrace.h> |
22 | #include <linux/ftrace.h> | ||
21 | #include <asm/nops.h> | 23 | #include <asm/nops.h> |
24 | #include <asm/nmi.h> | ||
22 | 25 | ||
23 | 26 | ||
24 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | 27 | |
28 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
29 | |||
30 | /* | ||
31 | * These functions are picked from those used on | ||
32 | * this page for dynamic ftrace. They have been | ||
33 | * simplified to ignore all traces in NMI context. | ||
34 | */ | ||
35 | static atomic_t in_nmi; | ||
36 | |||
37 | void ftrace_nmi_enter(void) | ||
38 | { | ||
39 | atomic_inc(&in_nmi); | ||
40 | } | ||
41 | |||
42 | void ftrace_nmi_exit(void) | ||
43 | { | ||
44 | atomic_dec(&in_nmi); | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Synchronize accesses to return addresses stack with | ||
49 | * interrupts. | ||
50 | */ | ||
51 | static raw_spinlock_t ret_stack_lock; | ||
52 | |||
53 | /* Add a function return address to the trace stack on thread info.*/ | ||
54 | static int push_return_trace(unsigned long ret, unsigned long long time, | ||
55 | unsigned long func) | ||
56 | { | ||
57 | int index; | ||
58 | struct thread_info *ti; | ||
59 | unsigned long flags; | ||
60 | int err = 0; | ||
61 | |||
62 | raw_local_irq_save(flags); | ||
63 | __raw_spin_lock(&ret_stack_lock); | ||
64 | |||
65 | ti = current_thread_info(); | ||
66 | /* The return trace stack is full */ | ||
67 | if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) { | ||
68 | err = -EBUSY; | ||
69 | goto out; | ||
70 | } | ||
71 | |||
72 | index = ++ti->curr_ret_stack; | ||
73 | ti->ret_stack[index].ret = ret; | ||
74 | ti->ret_stack[index].func = func; | ||
75 | ti->ret_stack[index].calltime = time; | ||
76 | |||
77 | out: | ||
78 | __raw_spin_unlock(&ret_stack_lock); | ||
79 | raw_local_irq_restore(flags); | ||
80 | return err; | ||
81 | } | ||
82 | |||
83 | /* Retrieve a function return address to the trace stack on thread info.*/ | ||
84 | static void pop_return_trace(unsigned long *ret, unsigned long long *time, | ||
85 | unsigned long *func) | ||
86 | { | ||
87 | struct thread_info *ti; | ||
88 | int index; | ||
89 | unsigned long flags; | ||
90 | |||
91 | raw_local_irq_save(flags); | ||
92 | __raw_spin_lock(&ret_stack_lock); | ||
93 | |||
94 | ti = current_thread_info(); | ||
95 | index = ti->curr_ret_stack; | ||
96 | *ret = ti->ret_stack[index].ret; | ||
97 | *func = ti->ret_stack[index].func; | ||
98 | *time = ti->ret_stack[index].calltime; | ||
99 | ti->curr_ret_stack--; | ||
100 | |||
101 | __raw_spin_unlock(&ret_stack_lock); | ||
102 | raw_local_irq_restore(flags); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Send the trace to the ring-buffer. | ||
107 | * @return the original return address. | ||
108 | */ | ||
109 | unsigned long ftrace_return_to_handler(void) | ||
110 | { | ||
111 | struct ftrace_retfunc trace; | ||
112 | pop_return_trace(&trace.ret, &trace.calltime, &trace.func); | ||
113 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
114 | ftrace_function_return(&trace); | ||
115 | |||
116 | return trace.ret; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Hook the return address and push it in the stack of return addrs | ||
121 | * in current thread info. | ||
122 | */ | ||
123 | asmlinkage | ||
124 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
125 | { | ||
126 | unsigned long old; | ||
127 | unsigned long long calltime; | ||
128 | int faulted; | ||
129 | unsigned long return_hooker = (unsigned long) | ||
130 | &return_to_handler; | ||
131 | |||
132 | /* NMIs are currently unsupported */ | ||
133 | if (atomic_read(&in_nmi)) | ||
134 | return; | ||
135 | |||
136 | /* | ||
137 | * Protect against fault, even if it shouldn't | ||
138 | * happen. This tool is too intrusive to | ||
139 | * ignore such a protection. | ||
140 | */ | ||
141 | asm volatile( | ||
142 | "1: movl (%[parent_old]), %[old]\n" | ||
143 | "2: movl %[return_hooker], (%[parent_replaced])\n" | ||
144 | " movl $0, %[faulted]\n" | ||
145 | |||
146 | ".section .fixup, \"ax\"\n" | ||
147 | "3: movl $1, %[faulted]\n" | ||
148 | ".previous\n" | ||
149 | |||
150 | ".section __ex_table, \"a\"\n" | ||
151 | " .long 1b, 3b\n" | ||
152 | " .long 2b, 3b\n" | ||
153 | ".previous\n" | ||
154 | |||
155 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | ||
156 | [faulted] "=r" (faulted) | ||
157 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | ||
158 | : "memory" | ||
159 | ); | ||
160 | |||
161 | if (WARN_ON(faulted)) { | ||
162 | unregister_ftrace_return(); | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | if (WARN_ON(!__kernel_text_address(old))) { | ||
167 | unregister_ftrace_return(); | ||
168 | *parent = old; | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | calltime = cpu_clock(raw_smp_processor_id()); | ||
173 | |||
174 | if (push_return_trace(old, calltime, self_addr) == -EBUSY) | ||
175 | *parent = old; | ||
176 | } | ||
177 | |||
178 | static int __init init_ftrace_function_return(void) | ||
179 | { | ||
180 | ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
181 | return 0; | ||
182 | } | ||
183 | device_initcall(init_ftrace_function_return); | ||
184 | |||
185 | |||
186 | #endif | ||
187 | |||
188 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
25 | 189 | ||
26 | union ftrace_code_union { | 190 | union ftrace_code_union { |
27 | char code[MCOUNT_INSN_SIZE]; | 191 | char code[MCOUNT_INSN_SIZE]; |
@@ -31,17 +195,11 @@ union ftrace_code_union { | |||
31 | } __attribute__((packed)); | 195 | } __attribute__((packed)); |
32 | }; | 196 | }; |
33 | 197 | ||
34 | |||
35 | static int ftrace_calc_offset(long ip, long addr) | 198 | static int ftrace_calc_offset(long ip, long addr) |
36 | { | 199 | { |
37 | return (int)(addr - ip); | 200 | return (int)(addr - ip); |
38 | } | 201 | } |
39 | 202 | ||
40 | unsigned char *ftrace_nop_replace(void) | ||
41 | { | ||
42 | return ftrace_nop; | ||
43 | } | ||
44 | |||
45 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | 203 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
46 | { | 204 | { |
47 | static union ftrace_code_union calc; | 205 | static union ftrace_code_union calc; |
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
183 | } | 341 | } |
184 | 342 | ||
185 | 343 | ||
344 | |||
345 | |||
346 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | ||
347 | |||
348 | unsigned char *ftrace_nop_replace(void) | ||
349 | { | ||
350 | return ftrace_nop; | ||
351 | } | ||
352 | |||
186 | int | 353 | int |
187 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 354 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
188 | unsigned char *new_code) | 355 | unsigned char *new_code) |
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data) | |||
292 | 459 | ||
293 | return 0; | 460 | return 0; |
294 | } | 461 | } |
462 | #endif | ||