author    Steven Rostedt <rostedt@goodmis.org>  2014-05-08 15:21:52 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2014-05-14 11:37:31 -0400
commit    e18eead3c3e0087b38b3ccec684808b6ee9ba7c3 (patch)
tree      e08cd7e2beeb6a28dd0341435c9199315c2f3be3
parent    f1b2f2bd5821c6ab7feed2e133343dd54b212ed9 (diff)
ftrace/x86: Move the mcount/fentry code out of entry_64.S
As the mcount code gets more complex, it really does not belong in the
entry.S file. Moving it into its own file, mcount_64.S, keeps things a
bit cleaner.

Link: http://lkml.kernel.org/r/20140508152152.2130e8cf@gandalf.local.home

Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
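For orientation while reading the moved assembly: ftrace_caller_setup loads the traced function's ip into %rdi, its caller's ip into %rsi, the ftrace_ops pointer into %rdx, and ftrace_caller then puts regs (or NULL) in %rcx. Those are exactly the first four integer argument registers of the x86-64 SysV ABI, i.e. the arguments of the C-side callback being dispatched to. A minimal sketch, assuming the ftrace_func_t signature of kernels of this era; my_tracer_func and my_ops are illustrative names, not part of this patch:

#include <linux/kernel.h>
#include <linux/ftrace.h>

static void notrace my_tracer_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* ip/parent_ip arrive in %rdi/%rsi, op in %rdx, regs in %rcx;
	 * regs is NULL unless FTRACE_OPS_FL_SAVE_REGS was requested. */
	trace_printk("%pS called from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func = my_tracer_func,
};

/* e.g. from a module init function:
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 */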
-rw-r--r--  arch/x86/kernel/Makefile    |   1 +
-rw-r--r--  arch/x86/kernel/entry_64.S  | 204 ----------------------
-rw-r--r--  arch/x86/kernel/mcount_64.S | 217 ++++++++++++++++++++++
3 files changed, 218 insertions(+), 204 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f4d96000d33a..db7f41d74842 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-y				+= probe_roms.o
 obj-$(CONFIG_X86_32)		+= i386_ksyms_32.o
 obj-$(CONFIG_X86_64)		+= sys_x86_64.o x8664_ksyms_64.o
+obj-$(CONFIG_X86_64)		+= mcount_64.o
 obj-y				+= syscall_$(BITS).o vsyscall_gtod.o
 obj-$(CONFIG_X86_64)		+= vsyscall_64.o
 obj-$(CONFIG_X86_64)		+= vsyscall_emu_64.o
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1e96c3628bf2..3db806de57a0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,7 +53,6 @@
 #include <asm/page_types.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
-#include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/context_tracking.h>
@@ -69,209 +68,6 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CONFIG_FUNCTION_TRACER
-
-#ifdef CC_USING_FENTRY
-# define function_hook	__fentry__
-#else
-# define function_hook	mcount
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(function_hook)
-	retq
-END(function_hook)
-
-/* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup skip=0
-	MCOUNT_SAVE_FRAME \skip
-
-	/* Load the ftrace_ops into the 3rd parameter */
-	movq function_trace_op(%rip), %rdx
-
-	/* Load ip into the first parameter */
-	movq RIP(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-	/* Load the parent_ip into the second parameter */
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-.endm
-
-ENTRY(ftrace_caller)
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
-	ftrace_caller_setup
-	/* regs go into 4th parameter (but make it NULL) */
-	movq $0, %rcx
-
-GLOBAL(ftrace_call)
-	call ftrace_stub
-
-	MCOUNT_RESTORE_FRAME
-ftrace_return:
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
-	jmp ftrace_stub
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
-	pushfq
-
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne ftrace_restore_flags
-
-	/* skip=8 to skip flags saved in SS */
-	ftrace_caller_setup 8
-
-	/* Save the rest of pt_regs */
-	movq %r15, R15(%rsp)
-	movq %r14, R14(%rsp)
-	movq %r13, R13(%rsp)
-	movq %r12, R12(%rsp)
-	movq %r11, R11(%rsp)
-	movq %r10, R10(%rsp)
-	movq %rbp, RBP(%rsp)
-	movq %rbx, RBX(%rsp)
-	/* Copy saved flags */
-	movq SS(%rsp), %rcx
-	movq %rcx, EFLAGS(%rsp)
-	/* Kernel segments */
-	movq $__KERNEL_DS, %rcx
-	movq %rcx, SS(%rsp)
-	movq $__KERNEL_CS, %rcx
-	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
-	movq %rcx, RSP(%rsp)
-
-	/* regs go into 4th parameter */
-	leaq (%rsp), %rcx
-
-GLOBAL(ftrace_regs_call)
-	call ftrace_stub
-
-	/* Copy flags back to SS, to restore them */
-	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
-
-	/* Handlers can change the RIP */
-	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
-
-	/* restore the rest of pt_regs */
-	movq R15(%rsp), %r15
-	movq R14(%rsp), %r14
-	movq R13(%rsp), %r13
-	movq R12(%rsp), %r12
-	movq R10(%rsp), %r10
-	movq RBP(%rsp), %rbp
-	movq RBX(%rsp), %rbx
-
-	/* skip=8 to skip flags saved in SS */
-	MCOUNT_RESTORE_FRAME 8
-
-	/* Restore flags */
-	popfq
-
-	jmp ftrace_return
-ftrace_restore_flags:
-	popfq
-	jmp ftrace_stub
-
-END(ftrace_regs_caller)
-
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
-	cmpq $ftrace_stub, ftrace_trace_function
-	jnz trace
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpq $ftrace_stub, ftrace_graph_return
-	jnz ftrace_graph_caller
-
-	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz ftrace_graph_caller
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-
-trace:
-	MCOUNT_SAVE_FRAME
-
-	movq RIP(%rsp), %rdi
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-	call *ftrace_trace_function
-
-	MCOUNT_RESTORE_FRAME
-
-	jmp ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	MCOUNT_SAVE_FRAME
-
-#ifdef CC_USING_FENTRY
-	leaq SS+16(%rsp), %rdi
-	movq $0, %rdx	/* No framepointers needed */
-#else
-	leaq 8(%rbp), %rdi
-	movq (%rbp), %rdx
-#endif
-	movq RIP(%rsp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rsi
-
-	call prepare_ftrace_return
-
-	MCOUNT_RESTORE_FRAME
-
-	retq
-END(ftrace_graph_caller)
-
-GLOBAL(return_to_handler)
-	subq $24, %rsp
-
-	/* Save the return values */
-	movq %rax, (%rsp)
-	movq %rdx, 8(%rsp)
-	movq %rbp, %rdi
-
-	call ftrace_return_to_handler
-
-	movq %rax, %rdi
-	movq 8(%rsp), %rdx
-	movq (%rsp), %rax
-	addq $24, %rsp
-	jmp *%rdi
-#endif
-
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
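Before the same code reappears below in its new home, one note on ftrace_regs_caller: it builds a complete pt_regs precisely so that a callback registered with FTRACE_OPS_FL_SAVE_REGS can inspect and rewrite register state, which is what the "Handlers can change the RIP" block services. A hypothetical sketch of such a callback, not part of this patch; my_regs_func, my_regs_ops and my_replacement_function are illustrative names:

#include <linux/ftrace.h>

/* Illustrative redirect target; rewriting regs->ip like this is the
 * mechanism that kprobes-on-ftrace and live patching build on. */
extern void my_replacement_function(void);

static void notrace my_regs_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	/*
	 * ftrace_regs_caller re-reads RIP(%rsp) after the callback
	 * returns, so updating regs->ip changes where the traced
	 * function resumes.
	 */
	if (regs)
		regs->ip = (unsigned long)my_replacement_function;
}

static struct ftrace_ops my_regs_ops = {
	.func  = my_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};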
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
new file mode 100644
index 000000000000..c050a0153168
--- /dev/null
+++ b/arch/x86/kernel/mcount_64.S
@@ -0,0 +1,217 @@
+/*
+ *  linux/arch/x86_64/mcount_64.S
+ *
+ *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/ftrace.h>
+
+
+	.code64
+	.section .entry.text, "ax"
+
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+#else
+# define function_hook	mcount
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+	retq
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+	MCOUNT_SAVE_FRAME \skip
+
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
+
+	/* Load ip into the first parameter */
+	movq RIP(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+	/* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+.endm
+
+ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx
+
+GLOBAL(ftrace_call)
+	call ftrace_stub
+
+	MCOUNT_RESTORE_FRAME
+ftrace_return:
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call)
+	jmp ftrace_stub
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location)*/
+	pushfq
+
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne ftrace_restore_flags
+
+	/* skip=8 to skip flags saved in SS */
+	ftrace_caller_setup 8
+
+	/* Save the rest of pt_regs */
+	movq %r15, R15(%rsp)
+	movq %r14, R14(%rsp)
+	movq %r13, R13(%rsp)
+	movq %r12, R12(%rsp)
+	movq %r11, R11(%rsp)
+	movq %r10, R10(%rsp)
+	movq %rbp, RBP(%rsp)
+	movq %rbx, RBX(%rsp)
+	/* Copy saved flags */
+	movq SS(%rsp), %rcx
+	movq %rcx, EFLAGS(%rsp)
+	/* Kernel segments */
+	movq $__KERNEL_DS, %rcx
+	movq %rcx, SS(%rsp)
+	movq $__KERNEL_CS, %rcx
+	movq %rcx, CS(%rsp)
+	/* Stack - skipping return address */
+	leaq SS+16(%rsp), %rcx
+	movq %rcx, RSP(%rsp)
+
+	/* regs go into 4th parameter */
+	leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	/* Copy flags back to SS, to restore them */
+	movq EFLAGS(%rsp), %rax
+	movq %rax, SS(%rsp)
+
+	/* Handlers can change the RIP */
+	movq RIP(%rsp), %rax
+	movq %rax, SS+8(%rsp)
+
+	/* restore the rest of pt_regs */
+	movq R15(%rsp), %r15
+	movq R14(%rsp), %r14
+	movq R13(%rsp), %r13
+	movq R12(%rsp), %r12
+	movq R10(%rsp), %r10
+	movq RBP(%rsp), %rbp
+	movq RBX(%rsp), %rbx
+
+	/* skip=8 to skip flags saved in SS */
+	MCOUNT_RESTORE_FRAME 8
+
+	/* Restore flags */
+	popfq
+
+	jmp ftrace_return
+ftrace_restore_flags:
+	popfq
+	jmp ftrace_stub
+
+END(ftrace_regs_caller)
+
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	cmpq $ftrace_stub, ftrace_trace_function
+	jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+
+	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz ftrace_graph_caller
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+
+trace:
+	MCOUNT_SAVE_FRAME
+
+	movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+	call *ftrace_trace_function
+
+	MCOUNT_RESTORE_FRAME
+
+	jmp ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	MCOUNT_SAVE_FRAME
+
+#ifdef CC_USING_FENTRY
+	leaq SS+16(%rsp), %rdi
+	movq $0, %rdx	/* No framepointers needed */
+#else
+	leaq 8(%rbp), %rdi
+	movq (%rbp), %rdx
+#endif
+	movq RIP(%rsp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rsi
+
+	call prepare_ftrace_return
+
+	MCOUNT_RESTORE_FRAME
+
+	retq
+END(ftrace_graph_caller)
+
+GLOBAL(return_to_handler)
+	subq $24, %rsp
+
+	/* Save the return values */
+	movq %rax, (%rsp)
+	movq %rdx, 8(%rsp)
+	movq %rbp, %rdi
+
+	call ftrace_return_to_handler
+
+	movq %rax, %rdi
+	movq 8(%rsp), %rdx
+	movq (%rsp), %rax
+	addq $24, %rsp
+	jmp *%rdi
+#endif
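For completeness: the C helpers that ftrace_graph_caller and return_to_handler call into live in arch/x86/kernel/ftrace.c and kernel/trace/ and are untouched by this patch. A sketch of their signatures as found in kernels of this era, with a comment summarizing the flow the assembly above implements (for orientation only):

/*
 * On function entry, ftrace_graph_caller hands the address of the
 * return-address slot to prepare_ftrace_return(), which pushes the
 * original return address onto a per-task shadow stack and replaces
 * it with return_to_handler. On function exit, return_to_handler
 * calls ftrace_return_to_handler(), which records the exit event,
 * pops the shadow stack and hands back the original return address
 * in %rax; the assembly then restores the function's real %rax/%rdx
 * return values and jumps there.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer);

unsigned long ftrace_return_to_handler(unsigned long frame_pointer);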