author		Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2008-05-12 15:20:42 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:31:58 -0400
commit		16444a8a40d4c7b4f6de34af0cae1f76a4f6c901
tree		9c290bcdbdc1ecf8f578c30b3b36914e14fdaacc /arch/x86/kernel/entry_64.S
parent		6e766410c4babd37bc7cd5e25009c179781742c8
ftrace: add basic support for gcc profiler instrumentation
If CONFIG_FTRACE is selected and /proc/sys/kernel/ftrace_enabled is
set to a non-zero value, the ftrace routine will be called every time
we enter a kernel function that is not marked with the "notrace"
attribute.
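For reference, a minimal sketch of what "notrace" means at the C
level; the helper name below is made up for illustration:

	/*
	 * Kernel objects are built with gcc -pg, which makes the compiler
	 * emit a "call mcount" at the entry of every function.  The
	 * notrace marker expands to a gcc attribute that suppresses that
	 * call for one function:
	 *
	 *	#define notrace __attribute__((no_instrument_function))
	 */
	static void notrace not_traced_helper(void)
	{
		/* gcc emits no mcount call here, so ftrace never sees it */
	}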
The ftrace routine will then call the registered trace function, if
one has been registered.
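A sketch of how a trace function is registered with the API this
patch introduces (the callback name and body here are illustrative):

	#include <linux/ftrace.h>

	/* every callback receives the address of the traced function (ip)
	 * and the address of its caller (parent_ip) */
	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* illustrative only: inspect or record the call site */
	}

	static struct ftrace_ops my_ops = {
		.func = my_trace_func,
	};

	/* from some init path: */
	register_ftrace_function(&my_ops);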
[ This code has been highly hacked by Steven Rostedt and Ingo Molnar,
so don't blame Arnaldo for all of this ;-) ]
Update:
It is now possible to register more than one ftrace function.
If only one ftrace function is registered, ftrace calls it
directly. If more than one is registered, ftrace instead calls a
dispatcher that loops through all of the registered functions, as
sketched below.
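In sketch form (simplified from the C side of this patch; the
registration locking and memory barriers are omitted):

	/* When more than one ftrace_ops is registered,
	 * ftrace_trace_function is pointed at a loop like this
	 * instead of at a single callback. */
	static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
	{
		struct ftrace_ops *op = ftrace_list;

		while (op != &ftrace_list_end) {
			op->func(ip, parent_ip);
			op = op->next;
		}
	}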
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--	arch/x86/kernel/entry_64.S	37
1 file changed, 37 insertions, 0 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 556a8df522a7..f046e0c64883 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -54,6 +54,43 @@
 
 	.code64
 
+#ifdef CONFIG_FTRACE
+ENTRY(mcount)
+	cmpq $ftrace_stub, ftrace_trace_function
+	jnz trace
+.globl ftrace_stub
+ftrace_stub:
+	retq
+
+trace:
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+
+	call *ftrace_trace_function
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	jmp ftrace_stub
+END(mcount)
+#endif
+
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif
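To unpack the trampoline above: mcount compares ftrace_trace_function
against the empty ftrace_stub and returns at once if nothing is
registered. Otherwise it saves the call-clobbered registers that may
still hold the instrumented function's arguments, loads the traced
function's address (the return address found at 0x38(%rsp) after the
subq) into %rdi and the caller's return address (8(%rbp)) into %rsi,
and makes the indirect call. A standalone C model of that control
flow, with made-up names and addresses, purely for illustration:

	#include <stdio.h>

	typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

	/* plays the role of ftrace_stub: the do-nothing default */
	static void stub(unsigned long ip, unsigned long parent_ip) { }

	static ftrace_func_t trace_function = stub;

	/* models the trampoline's control flow */
	static void mcount_model(unsigned long ip, unsigned long parent_ip)
	{
		if (trace_function == stub)
			return;                  /* "retq" at ftrace_stub */
		trace_function(ip, parent_ip);   /* "call *ftrace_trace_function" */
	}

	static void my_tracer(unsigned long ip, unsigned long parent_ip)
	{
		printf("entered %#lx, called from %#lx\n", ip, parent_ip);
	}

	int main(void)
	{
		mcount_model(0x1000, 0x2000);   /* nothing registered: silent */
		trace_function = my_tracer;     /* "register" a tracer */
		mcount_model(0x1000, 0x2000);   /* tracer fires */
		return 0;
	}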