author     Steven Rostedt <srostedt@redhat.com>    2008-05-12 15:20:43 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-23 14:33:47 -0400
commit     d61f82d06672f57fca410da6f7fffd15867db622
tree       62ef5573934eaa638c0d39a45d789691aecbd7d3  /arch/x86/kernel/entry_64.S
parent     3c1720f00bb619302ba19d55986ab565e74d06db
ftrace: use dynamic patching for updating mcount calls
This patch replaces the indirect call to the mcount function
pointer with a direct call that will be patched by the
dynamic ftrace routines.
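To make "patched" concrete: the call that gcc -pg compiles in is a 5-byte x86 instruction, opcode 0xe8 followed by a signed 32-bit displacement relative to the next instruction, so re-pointing a call site only requires rewriting those five bytes. A minimal sketch of the encoding (the helper name is illustrative; the real logic lives in arch/x86/kernel/ftrace.c and differs in detail):

#include <stdint.h>
#include <string.h>

#define CALL_INSN_SIZE 5

/* Build the 5-byte "call rel32" that, placed at address ip, lands on
 * target.  The displacement is measured from the byte after the call. */
static void make_call_insn(unsigned char insn[CALL_INSN_SIZE],
			   unsigned long ip, unsigned long target)
{
	int32_t rel = (int32_t)(target - (ip + CALL_INSN_SIZE));

	insn[0] = 0xe8;			/* call rel32 opcode */
	memcpy(&insn[1], &rel, sizeof(rel));
}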
On boot up, the mcount function calls the ftrace_stub function.
When the dynamic ftrace code is initialized, the ftrace_stub
is replaced with a call to ftrace_record_ip, which records
the instruction pointers of the locations that call it.
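A rough sketch of that recording step; the function name comes from this patch series, but the flat table and its bookkeeping below are invented for illustration (the kernel hashes the entries and allocates them dynamically):

#define MAX_RECORDS 1024

static unsigned long recorded_ips[MAX_RECORDS];
static int nr_recorded;

/* Remember each call site's instruction pointer the first time that
 * site calls in; duplicates are dropped. */
void ftrace_record_ip(unsigned long ip)
{
	int i;

	for (i = 0; i < nr_recorded; i++)
		if (recorded_ips[i] == ip)
			return;

	if (nr_recorded < MAX_RECORDS)
		recorded_ips[nr_recorded++] = ip;
}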
Later, the ftraced daemon will call kstop_machine and patch all
the locations to nops.
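Patching a location to a nop means overwriting its 5-byte call with a 5-byte no-op sequence. Continuing the sketch above (the kernel actually selects nops suited to the running CPU; the bytes below are just one valid encoding, nopl 0x0(%rax,%rax,1)):

#include <string.h>

/* One valid 5-byte x86 nop: 0f 1f 44 00 00. */
static const unsigned char ftrace_nop[CALL_INSN_SIZE] = {
	0x0f, 0x1f, 0x44, 0x00, 0x00
};

/* Overwrite the instruction at ip.  Only safe here because, as the
 * commit message notes, all CPUs are held in kstop_machine while the
 * text is rewritten. */
static void patch_site(unsigned char *ip, const unsigned char *new_insn)
{
	memcpy(ip, new_insn, CALL_INSN_SIZE);
}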
When ftrace is enabled, the original calls to mcount will now
be set to call ftrace_caller, which does a direct call
to the registered ftrace function. This direct call is also patched
when the function that should be called is updated.
All patching is performed by a kstop_machine routine to prevent
the race conditions associated with modifying code on the fly.
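In kernels of this era the interface was stop_machine_run(), which runs a callback with every other CPU parked in a known-safe state, so no CPU can execute a half-rewritten call site. A sketch of the driving loop, reusing the illustrative helpers above:

#include <linux/stop_machine.h>

/* Runs with all other CPUs stopped: walk the recorded call sites and
 * rewrite each one (here: to a nop). */
static int __ftrace_update_code(void *ignore)
{
	int i;

	for (i = 0; i < nr_recorded; i++)
		patch_site((unsigned char *)recorded_ips[i], ftrace_nop);

	return 0;
}

static void ftrace_update_code(void)
{
	/* Later kernels renamed this interface stop_machine(). */
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}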
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--  arch/x86/kernel/entry_64.S  67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f046e0c64883..fe25e5febca3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,70 @@
 	.code64
 
 #ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+
+	.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	retq
+END(mcount)
+
+ENTRY(ftrace_caller)
+
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+
+	.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	.globl ftrace_stub
+ftrace_stub:
+	retq
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
@@ -89,7 +153,8 @@ trace:
 
 	jmp ftrace_stub
 END(mcount)
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
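For context on what sits behind the patched call: the stubs above hand the tracer two arguments in %rdi and %rsi, the address inside the instrumented function (the return address of the mcount call, read from 0x38(%rsp)) and the caller's return address (read from 8(%rbp)). A sketch of registering such a callback against the ftrace API of this era (module boilerplate is included only for completeness):

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

/* ip identifies the instrumented function, parent_ip the location that
 * called it -- the two values loaded into %rdi and %rsi above.  The
 * notrace attribute keeps the tracer itself from being traced. */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* count, filter, log, ... */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* Points ftrace_trace_function (and, with DYNAMIC_FTRACE, the
	 * patched ftrace_call site) at my_trace_func. */
	return register_ftrace_function(&my_ops);
}
module_init(my_tracer_init);
MODULE_LICENSE("GPL");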