author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>   2014-11-24 13:06:05 -0500
committer  Steven Rostedt <rostedt@goodmis.org>             2014-12-01 14:07:50 -0500
commit     527aa75b333f90f4f90ac1730762156680a42fe8 (patch)
tree       96c1d59adccb390b16f71aa75b8f94bcaa0223c0
parent     094dfc545139510f251b9595850aa63fe2a8c131 (diff)
ftrace/x86: Simplify save_mcount_regs on getting RIP
Currently save_mcount_regs is passed a "skip" parameter to tell it how much stack was already adjusted before it was called, as it tries to keep the saved pt_regs in the same location for all users. This is rather stupid, especially since the part of the caller's stack that overlaps the pt_regs has nothing to do with what is supposed to be in that location.

Instead of doing that, just pass in an "added" parameter that lets the macro know how much stack was added before it was called so that it can still get to the RIP. The difference is that the pt_regs will now be offset by that "added" count, and the caller needs to take care of that offset itself.

This will make it easier to simplify the code later.

Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1411262304010.3961@nanos

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
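To make the convention change concrete, here is a minimal sketch of the old and new forms of the macro (illustrative only: the _old/_new names are invented for contrast, and PTREGS_SIZE stands in for the SS+8 frame size used in mcount_64.S):

	.set	PTREGS_SIZE, 168	/* sizeof(struct pt_regs) on x86_64, i.e. SS+8 */

/* Old convention: shrink the allocation by \skip so the pt_regs frame
 * always lands at the same %rsp offset, whatever the caller pushed. */
.macro save_mcount_regs_old skip=0
	subq	$(PTREGS_SIZE-\skip), %rsp
	/* ... registers saved at fixed pt_regs offsets ... */
	movq	PTREGS_SIZE(%rsp), %rdi		/* RIP is always at the same spot */
.endm

/* New convention: always allocate a full pt_regs frame and use \added
 * only to reach the RIP sitting above whatever the caller pushed. */
.macro save_mcount_regs_new added=0
	subq	$PTREGS_SIZE, %rsp
	/* ... registers saved at fixed pt_regs offsets ... */
	movq	PTREGS_SIZE+\added(%rsp), %rdi	/* RIP is \added bytes further up */
.endm

The trade-off is that a caller that pushed extra words (such as ftrace_regs_caller pushing the flags) must now apply the same offset itself whenever it reaches above the pt_regs frame, which is what the SS+8, SS+8*2 and SS+8*3 adjustments in the diff below do.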
-rw-r--r--	arch/x86/kernel/mcount_64.S	37
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 4f1b27642495..596ac330c1db 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -37,12 +37,12 @@
  * be saved in the locations that pt_regs has them in.
  */

-/* skip is set if the stack was already partially adjusted */
-.macro save_mcount_regs skip=0
+/* @added: the amount of stack added before calling this */
+.macro save_mcount_regs added=0
 	/*
 	 * We add enough stack to save all regs.
 	 */
-	subq $(SS+8-\skip), %rsp
+	subq $(SS+8), %rsp
 	movq %rax, RAX(%rsp)
 	movq %rcx, RCX(%rsp)
 	movq %rdx, RDX(%rsp)
@@ -51,11 +51,11 @@
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
 	/* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdi
+	movq SS+8+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
 .endm

-.macro restore_mcount_regs skip=0
+.macro restore_mcount_regs
 	movq R9(%rsp), %r9
 	movq R8(%rsp), %r8
 	movq RDI(%rsp), %rdi
@@ -63,12 +63,12 @@
 	movq RDX(%rsp), %rdx
 	movq RCX(%rsp), %rcx
 	movq RAX(%rsp), %rax
-	addq $(SS+8-\skip), %rsp
+	addq $(SS+8), %rsp
 .endm

 /* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup trace_label skip=0
-	save_mcount_regs \skip
+.macro ftrace_caller_setup trace_label added=0
+	save_mcount_regs \added

 	/* Save this location */
 GLOBAL(\trace_label)
@@ -79,9 +79,9 @@ GLOBAL(\trace_label)
 	subq $MCOUNT_INSN_SIZE, %rdi
 	/* Load the parent_ip into the second parameter */
 #ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
+	movq SS+16+\added(%rsp), %rsi
 #else
-	movq 8(%rbp), %rsi
+	movq 8+\added(%rbp), %rsi
 #endif
 .endm

@@ -156,10 +156,10 @@ GLOBAL(ftrace_stub)
 END(ftrace_caller)

 ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
+	/* Save the current flags before any operations that can change them */
 	pushfq

-	/* skip=8 to skip flags saved in SS */
+	/* added 8 bytes to save flags */
 	ftrace_caller_setup ftrace_regs_caller_op_ptr 8

 	/* Save the rest of pt_regs */
@@ -172,15 +172,15 @@ ENTRY(ftrace_regs_caller)
 	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 	/* Copy saved flags */
-	movq SS(%rsp), %rcx
+	movq SS+8(%rsp), %rcx
 	movq %rcx, EFLAGS(%rsp)
 	/* Kernel segments */
 	movq $__KERNEL_DS, %rcx
 	movq %rcx, SS(%rsp)
 	movq $__KERNEL_CS, %rcx
 	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
+	/* Stack - skipping return address and flags */
+	leaq SS+8*3(%rsp), %rcx
 	movq %rcx, RSP(%rsp)

 	/* regs go into 4th parameter */
@@ -195,11 +195,11 @@ GLOBAL(ftrace_regs_call)

 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
+	movq %rax, SS+8(%rsp)

 	/* Handlers can change the RIP */
 	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
+	movq %rax, SS+8*2(%rsp)

 	/* restore the rest of pt_regs */
 	movq R15(%rsp), %r15
@@ -210,8 +210,7 @@ GLOBAL(ftrace_regs_call)
 	movq RBP(%rsp), %rbp
 	movq RBX(%rsp), %rbx

-	/* skip=8 to skip flags saved in SS */
-	restore_mcount_regs 8
+	restore_mcount_regs

 	/* Restore flags */
 	popfq
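For reference, this is the stack layout that the new constant offsets in ftrace_regs_caller rely on after "pushfq; ftrace_caller_setup ... 8" (an annotated sketch, not part of the patch; SS is the last pt_regs slot, so SS+8 is the size of the frame):

/*
 *   SS+8*3(%rsp)  where %rsp pointed before the fentry call pushed RIP (stored into RSP)
 *   SS+8*2(%rsp)  return address (RIP) pushed by the fentry call
 *   SS+8(%rsp)    flags saved by pushfq, the "added" 8 bytes
 *   SS(%rsp)..0   pt_regs frame built by save_mcount_regs
 */

This is why the saved flags are now copied from and back to SS+8 instead of SS, and why a RIP changed by the handlers is written back to SS+8*2, the real return address slot, before restore_mcount_regs tears the frame down.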