author     Frederic Weisbecker <fweisbec@gmail.com>  2008-12-01 18:20:39 -0500
committer  Ingo Molnar <mingo@elte.hu>               2008-12-02 03:47:48 -0500
commit     48d68b20d00865035b8b65e69af343d0f53fac9d (patch)
tree       eca27c0cf9486ae83c7e3193709abae099d1f019 /arch/x86
parent     222658e08f72cd539d01f3aabdc258c596f487e2 (diff)
tracing/function-graph-tracer: support for x86-64
Impact: extend and enable the function graph tracer on 64-bit x86

This patch implements support for the function graph tracer on x86-64.
Both static and dynamic tracing are supported.

This requires some small CPP-conditional asm in arch/x86/kernel/ftrace.c.
I wanted to use probe_kernel_read/write to make the return-address
saving/patching code more generic, but calling them causes tracing
recursion. It would perhaps be useful to implement notrace versions of
these functions for other arch ports.

Note that arch/x86/kernel/process_64.c is not traced, as on x86-32. I
first thought __switch_to() was responsible for crashes during tracing
because I believed the current task was changed inside it, but that is
not actually the case (the task is, but not the "current" pointer). So I
still have to identify which functions in that file cause the harm, to
enable tracing of the others (there is no issue for now, as long as
process_64.c stays out of the -pg flags).

A small possible race condition is fixed in this patch too. When the
tracer allocates a return stack dynamically, the current depth was
initialized only after the stack was published. An interrupt could occur
in that window and, seeing that the return stack is allocated, try to
trace with a random, uninitialized depth. This is a preventive fix; the
problem was never actually observed.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Bird <tim.bird@am.sony.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
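The allocation-ordering fix amounts to initializing the depth before
publishing the stack pointer. A minimal sketch of that ordering, assuming
illustrative field and helper names rather than the exact upstream code:

	/* Sketch only: initialize the depth before the stack becomes visible. */
	static void ftrace_graph_init_task_sketch(struct task_struct *t,
						  struct ftrace_ret_stack *stack)
	{
		t->curr_ret_stack = -1;		/* depth first, while unpublished */
		/*
		 * Order the stores: an interrupt that sees t->ret_stack != NULL
		 * must also see the initialized depth, never a stale value.
		 */
		smp_wmb();
		t->ret_stack = stack;		/* now the tracer may use it */
	}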
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/Kconfig            2
-rw-r--r--   arch/x86/kernel/Makefile    1
-rw-r--r--   arch/x86/kernel/entry_64.S  74
-rw-r--r--   arch/x86/kernel/ftrace.c    11
4 files changed, 86 insertions(+), 2 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0842b1127684..45c86fb94132 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,7 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_GRAPH_TRACER if X86_32
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 64939a0c3986..d274425fb076 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -17,6 +17,7 @@ endif
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
+CFLAGS_REMOVE_process_64.o = -pg
 endif
 
 #
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 08aa6b10933c..2aa0526ac30e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -98,6 +98,12 @@ ftrace_call:
 	movq (%rsp), %rax
 	addq $0x38, %rsp
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp ftrace_stub
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
 	retq
@@ -110,6 +116,12 @@ ENTRY(mcount)
 
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
 	retq
@@ -145,6 +157,68 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	leaq 8(%rbp), %rdi
+	movq 0x38(%rsp), %rsi
+
+	call prepare_ftrace_return
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+	retq
+END(ftrace_graph_caller)
+
+
+.globl return_to_handler
+return_to_handler:
+	subq $80, %rsp
+
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+	movq %r10, 56(%rsp)
+	movq %r11, 64(%rsp)
+
+	call ftrace_return_to_handler
+
+	movq %rax, 72(%rsp)
+	movq 64(%rsp), %r11
+	movq 56(%rsp), %r10
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $72, %rsp
+	retq
+#endif
+
+
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif
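To follow the assembly above: ftrace_graph_caller saves the
caller-clobbered registers, hands prepare_ftrace_return() the location of
the saved return address (8(%rbp)) and the traced function's address, and
return_to_handler later asks ftrace_return_to_handler() where to really
return. A tiny userspace C model of this return-address hijack, purely
illustrative and assuming a fixed-size per-thread shadow stack:

	#include <stdio.h>

	#define MAX_DEPTH 64

	static unsigned long shadow[MAX_DEPTH];	/* models the per-task return stack */
	static int depth = -1;			/* models the current depth */

	/* Models prepare_ftrace_return(): stash the real parent, return the hook. */
	static unsigned long hook_return(unsigned long real_parent,
					 unsigned long hooker)
	{
		if (depth + 1 >= MAX_DEPTH)
			return real_parent;	/* overflow: leave the call untraced */
		shadow[++depth] = real_parent;
		return hooker;			/* this overwrites the stack slot */
	}

	/* Models ftrace_return_to_handler(): recover the real parent address. */
	static unsigned long unhook_return(void)
	{
		return shadow[depth--];
	}

	int main(void)
	{
		unsigned long parent = 0x1234;	/* pretend saved return address */
		unsigned long hooker = 0xbeef;	/* stands in for return_to_handler */

		printf("stack slot now: %#lx\n", hook_return(parent, hooker));
		printf("real return:    %#lx\n", unhook_return());
		return 0;
	}

The real code has to be assembly because it edits the actual return
address on the stack and must preserve every caller-clobbered register
around the C helpers.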
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7ef914e6a2f6..58832478b94e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -467,8 +467,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
+#ifdef CONFIG_X86_64
+		"1: movq (%[parent_old]), %[old]\n"
+		"2: movq %[return_hooker], (%[parent_replaced])\n"
+#else
 		"1: movl (%[parent_old]), %[old]\n"
 		"2: movl %[return_hooker], (%[parent_replaced])\n"
+#endif
 		"   movl $0, %[faulted]\n"
 
 		".section .fixup, \"ax\"\n"
@@ -476,8 +481,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
476 ".previous\n" 481 ".previous\n"
477 482
478 ".section __ex_table, \"a\"\n" 483 ".section __ex_table, \"a\"\n"
484#ifdef CONFIG_X86_64
485 " .quad 1b, 3b\n"
486 " .quad 2b, 3b\n"
487#else
479 " .long 1b, 3b\n" 488 " .long 1b, 3b\n"
480 " .long 2b, 3b\n" 489 " .long 2b, 3b\n"
490#endif
481 ".previous\n" 491 ".previous\n"
482 492
483 : [parent_replaced] "=r" (parent), [old] "=r" (old), 493 : [parent_replaced] "=r" (parent), [old] "=r" (old),
@@ -509,5 +519,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	ftrace_graph_entry(&trace);
 
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
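As the changelog notes, this asm duplicates what probe_kernel_read() and
probe_kernel_write() already provide, because the stock helpers are
traced and would recurse into the tracer. A hedged sketch of how the
swap could be written portably with hypothetical notrace variants (this
is not the upstream code, just the shape the changelog suggests):

	#include <linux/uaccess.h>

	/*
	 * Sketch only: the fault-protected read/patch the asm above performs,
	 * assuming notrace versions of probe_kernel_read/probe_kernel_write
	 * existed so the helper itself never re-enters the tracer.
	 */
	static int notrace swap_return_address(unsigned long *parent,
					       unsigned long *old,
					       unsigned long return_hooker)
	{
		if (probe_kernel_read(old, parent, sizeof(*old)))
			return -EFAULT;	/* faulted reading the old address */
		if (probe_kernel_write(parent, &return_hooker,
				       sizeof(return_hooker)))
			return -EFAULT;	/* faulted patching in the hooker */
		return 0;
	}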