path: root/arch/x86
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
commit	214b93132023cc9305d5801add812515bea4d7d0 (patch)
tree	bb8db8677dd80b6ef570b8aa59475b072b81db11 /arch/x86
parent	14208b0ec56919f5333dd654b1a7d10765d0ad05 (diff)
parent	a9fcaaac37b3baba1343f906f52aeb65c4d4e356 (diff)
Merge tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Lots of tweaks, small fixes, optimizations, and some helper functions
  to help out the rest of the kernel to ease their use of trace events.

  The big change for this release is the allowing of other tracers,
  such as the latency tracers, to be used in the trace instances and
  allow for function or function graph tracing to be in the top level
  simultaneously"

* tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
  tracing: Fix memory leak on instance deletion
  tracing: Fix leak of ring buffer data when new instances creation fails
  tracing/kprobes: Avoid self tests if tracing is disabled on boot up
  tracing: Return error if ftrace_trace_arrays list is empty
  tracing: Only calculate stats of tracepoint benchmarks for 2^32 times
  tracing: Convert stddev into u64 in tracepoint benchmark
  tracing: Introduce saved_cmdlines_size file
  tracing: Add __get_dynamic_array_len() macro for trace events
  tracing: Remove unused variable in trace_benchmark
  tracing: Eliminate double free on failure of allocation on boot up
  ftrace/x86: Call text_ip_addr() instead of the duplicated code
  tracing: Print max callstack on stacktrace bug
  tracing: Move locking of trace_cmdline_lock into start/stop seq calls
  tracing: Try again for saved cmdline if failed due to locking
  tracing: Have saved_cmdlines use the seq_read infrastructure
  tracing: Add tracepoint benchmark tracepoint
  tracing: Print nasty banner when trace_printk() is in use
  tracing: Add funcgraph_tail option to print function name after closing braces
  tracing: Eliminate duplicate TRACE_GRAPH_PRINT_xx defines
  tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
  ...
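[Annotation, not part of the commit: the instances feature named above is driven through tracefs. A rough user-space sketch of the headline change — running a latency tracer inside an instance — assuming tracefs is mounted at /sys/kernel/tracing; the instance name "demo" is made up:

/*
 * Illustrative sketch only: create a trace instance and select the
 * "wakeup" latency tracer inside it, which this pull makes possible.
 * Assumes tracefs at /sys/kernel/tracing; instance name is invented.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *inst = "/sys/kernel/tracing/instances/demo";
	char path[128];
	FILE *f;

	/* mkdir in the instances directory creates a new trace instance */
	if (mkdir(inst, 0750) != 0 && errno != EEXIST)
		return 1;

	/* each instance has its own current_tracer control file */
	snprintf(path, sizeof(path), "%s/current_tracer", inst);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("wakeup", f);	/* a latency tracer, per the pull message */
	return fclose(f) ? 1 : 0;
}

Before this series, latency tracers could only run in the top-level trace buffer; the sketch exercises the per-instance behavior this merge enables.]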
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/Makefile	1
-rw-r--r--	arch/x86/kernel/entry_64.S	204
-rw-r--r--	arch/x86/kernel/ftrace.c	56
-rw-r--r--	arch/x86/kernel/mcount_64.S	217
4 files changed, 226 insertions, 252 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 491ef3e59850..047f9ff2e36c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-y			+= probe_roms.o
 obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
+obj-$(CONFIG_X86_64)	+= mcount_64.o
 obj-y			+= syscall_$(BITS).o vsyscall_gtod.o
 obj-$(CONFIG_X86_64)	+= vsyscall_64.o
 obj-$(CONFIG_X86_64)	+= vsyscall_emu_64.o
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 96987987c5de..48a2644a082a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,7 +53,6 @@
 #include <asm/page_types.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
-#include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/context_tracking.h>
@@ -70,209 +69,6 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CONFIG_FUNCTION_TRACER
-
-#ifdef CC_USING_FENTRY
-# define function_hook	__fentry__
-#else
-# define function_hook	mcount
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(function_hook)
-	retq
-END(function_hook)
-
-/* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup skip=0
-	MCOUNT_SAVE_FRAME \skip
-
-	/* Load the ftrace_ops into the 3rd parameter */
-	movq function_trace_op(%rip), %rdx
-
-	/* Load ip into the first parameter */
-	movq RIP(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-	/* Load the parent_ip into the second parameter */
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-.endm
-
-ENTRY(ftrace_caller)
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
-	ftrace_caller_setup
-	/* regs go into 4th parameter (but make it NULL) */
-	movq $0, %rcx
-
-GLOBAL(ftrace_call)
-	call ftrace_stub
-
-	MCOUNT_RESTORE_FRAME
-ftrace_return:
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
-	jmp ftrace_stub
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
-	pushfq
-
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne ftrace_restore_flags
-
-	/* skip=8 to skip flags saved in SS */
-	ftrace_caller_setup 8
-
-	/* Save the rest of pt_regs */
-	movq %r15, R15(%rsp)
-	movq %r14, R14(%rsp)
-	movq %r13, R13(%rsp)
-	movq %r12, R12(%rsp)
-	movq %r11, R11(%rsp)
-	movq %r10, R10(%rsp)
-	movq %rbp, RBP(%rsp)
-	movq %rbx, RBX(%rsp)
-	/* Copy saved flags */
-	movq SS(%rsp), %rcx
-	movq %rcx, EFLAGS(%rsp)
-	/* Kernel segments */
-	movq $__KERNEL_DS, %rcx
-	movq %rcx, SS(%rsp)
-	movq $__KERNEL_CS, %rcx
-	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
-	movq %rcx, RSP(%rsp)
-
-	/* regs go into 4th parameter */
-	leaq (%rsp), %rcx
-
-GLOBAL(ftrace_regs_call)
-	call ftrace_stub
-
-	/* Copy flags back to SS, to restore them */
-	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
-
-	/* Handlers can change the RIP */
-	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
-
-	/* restore the rest of pt_regs */
-	movq R15(%rsp), %r15
-	movq R14(%rsp), %r14
-	movq R13(%rsp), %r13
-	movq R12(%rsp), %r12
-	movq R10(%rsp), %r10
-	movq RBP(%rsp), %rbp
-	movq RBX(%rsp), %rbx
-
-	/* skip=8 to skip flags saved in SS */
-	MCOUNT_RESTORE_FRAME 8
-
-	/* Restore flags */
-	popfq
-
-	jmp ftrace_return
-ftrace_restore_flags:
-	popfq
-	jmp ftrace_stub
-
-END(ftrace_regs_caller)
-
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
-	cmpq $ftrace_stub, ftrace_trace_function
-	jnz trace
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpq $ftrace_stub, ftrace_graph_return
-	jnz ftrace_graph_caller
-
-	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz ftrace_graph_caller
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-
-trace:
-	MCOUNT_SAVE_FRAME
-
-	movq RIP(%rsp), %rdi
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-	call *ftrace_trace_function
-
-	MCOUNT_RESTORE_FRAME
-
-	jmp ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	MCOUNT_SAVE_FRAME
-
-#ifdef CC_USING_FENTRY
-	leaq SS+16(%rsp), %rdi
-	movq $0, %rdx	/* No framepointers needed */
-#else
-	leaq 8(%rbp), %rdi
-	movq (%rbp), %rdx
-#endif
-	movq RIP(%rsp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rsi
-
-	call prepare_ftrace_return
-
-	MCOUNT_RESTORE_FRAME
-
-	retq
-END(ftrace_graph_caller)
-
-GLOBAL(return_to_handler)
-	subq $24, %rsp
-
-	/* Save the return values */
-	movq %rax, (%rsp)
-	movq %rdx, 8(%rsp)
-	movq %rbp, %rdi
-
-	call ftrace_return_to_handler
-
-	movq %rax, %rdi
-	movq 8(%rsp), %rdx
-	movq (%rsp), %rax
-	addq $24, %rsp
-	jmp *%rdi
-#endif
-
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 52819e816f87..cbc4a91b131e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -297,16 +297,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
 
 static int ftrace_write(unsigned long ip, const char *val, int size)
 {
-	/*
-	 * On x86_64, kernel text mappings are mapped read-only with
-	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
-	 * of the kernel text mapping to modify the kernel text.
-	 *
-	 * For 32bit kernels, these mappings are same and we can use
-	 * kernel identity mapping to modify code.
-	 */
-	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-		ip = (unsigned long)__va(__pa_symbol(ip));
+	ip = text_ip_addr(ip);
 
 	if (probe_kernel_write((void *)ip, val, size))
 		return -EPERM;
@@ -349,40 +340,14 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
 	return add_break(rec->ip, old);
 }
 
-/*
- * If the record has the FTRACE_FL_REGS set, that means that it
- * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
- * is not not set, then it wants to convert to the normal callback.
- */
-static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_REGS)
-		return (unsigned long)FTRACE_REGS_ADDR;
-	else
-		return (unsigned long)FTRACE_ADDR;
-}
-
-/*
- * The FTRACE_FL_REGS_EN is set when the record already points to
- * a function that saves all the regs. Basically the '_EN' version
- * represents the current state of the function.
- */
-static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_REGS_EN)
-		return (unsigned long)FTRACE_REGS_ADDR;
-	else
-		return (unsigned long)FTRACE_ADDR;
-}
-
 static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ftrace_addr;
 	int ret;
 
-	ret = ftrace_test_record(rec, enable);
+	ftrace_addr = ftrace_get_addr_curr(rec);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ret = ftrace_test_record(rec, enable);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
@@ -392,10 +357,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 		/* converting nop to call */
 		return add_brk_on_nop(rec);
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
-		ftrace_addr = get_ftrace_old_addr(rec);
-		/* fall through */
 	case FTRACE_UPDATE_MAKE_NOP:
 		/* converting a call to a nop */
 		return add_brk_on_call(rec, ftrace_addr);
@@ -440,14 +402,14 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 	 * If not, don't touch the breakpoint, we make just create
 	 * a disaster.
 	 */
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 	nop = ftrace_call_replace(ip, ftrace_addr);
 
 	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
 		goto update;
 
 	/* Check both ftrace_addr and ftrace_old_addr */
-	ftrace_addr = get_ftrace_old_addr(rec);
+	ftrace_addr = ftrace_get_addr_curr(rec);
 	nop = ftrace_call_replace(ip, ftrace_addr);
 
 	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
@@ -491,13 +453,12 @@ static int add_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_test_record(rec, enable);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
@@ -538,13 +499,12 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_update_record(rec, enable);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
@@ -621,8 +581,8 @@ void ftrace_replace_code(int enable)
 		return;
 
  remove_breakpoints:
+	pr_warn("Failed on %s (%d):\n", report, count);
 	ftrace_bug(ret, rec ? rec->ip : 0);
-	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
 	for_ftrace_rec_iter(iter) {
 		rec = ftrace_rec_iter_record(iter);
 		/*
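[Annotation, not part of the commit: the comment block deleted in the first ftrace.c hunk explains what the new text_ip_addr() call factors out. A hedged reconstruction from that removed code — the helper itself is defined elsewhere in ftrace.c and presumably keeps the same logic:

/*
 * Sketch reconstructed from the removed lines above: with
 * CONFIG_DEBUG_RODATA the x86_64 kernel text mapping is read-only,
 * so code patching must go through the writable kernel identity
 * mapping instead. On 32-bit the two mappings coincide.
 */
static unsigned long text_ip_addr(unsigned long ip)
{
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}

This matches the "ftrace/x86: Call text_ip_addr() instead of the duplicated code" commit named in the pull message.]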
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
new file mode 100644
index 000000000000..c050a0153168
--- /dev/null
+++ b/arch/x86/kernel/mcount_64.S
@@ -0,0 +1,217 @@
+/*
+ * linux/arch/x86_64/mcount_64.S
+ *
+ * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/ftrace.h>
+
+
+	.code64
+	.section .entry.text, "ax"
+
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+#else
+# define function_hook	mcount
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+	retq
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+	MCOUNT_SAVE_FRAME \skip
+
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
+
+	/* Load ip into the first parameter */
+	movq RIP(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+	/* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+.endm
+
+ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx
+
+GLOBAL(ftrace_call)
+	call ftrace_stub
+
+	MCOUNT_RESTORE_FRAME
+ftrace_return:
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call)
+	jmp ftrace_stub
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location)*/
+	pushfq
+
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne ftrace_restore_flags
+
+	/* skip=8 to skip flags saved in SS */
+	ftrace_caller_setup 8
+
+	/* Save the rest of pt_regs */
+	movq %r15, R15(%rsp)
+	movq %r14, R14(%rsp)
+	movq %r13, R13(%rsp)
+	movq %r12, R12(%rsp)
+	movq %r11, R11(%rsp)
+	movq %r10, R10(%rsp)
+	movq %rbp, RBP(%rsp)
+	movq %rbx, RBX(%rsp)
+	/* Copy saved flags */
+	movq SS(%rsp), %rcx
+	movq %rcx, EFLAGS(%rsp)
+	/* Kernel segments */
+	movq $__KERNEL_DS, %rcx
+	movq %rcx, SS(%rsp)
+	movq $__KERNEL_CS, %rcx
+	movq %rcx, CS(%rsp)
+	/* Stack - skipping return address */
+	leaq SS+16(%rsp), %rcx
+	movq %rcx, RSP(%rsp)
+
+	/* regs go into 4th parameter */
+	leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	/* Copy flags back to SS, to restore them */
+	movq EFLAGS(%rsp), %rax
+	movq %rax, SS(%rsp)
+
+	/* Handlers can change the RIP */
+	movq RIP(%rsp), %rax
+	movq %rax, SS+8(%rsp)
+
+	/* restore the rest of pt_regs */
+	movq R15(%rsp), %r15
+	movq R14(%rsp), %r14
+	movq R13(%rsp), %r13
+	movq R12(%rsp), %r12
+	movq R10(%rsp), %r10
+	movq RBP(%rsp), %rbp
+	movq RBX(%rsp), %rbx
+
+	/* skip=8 to skip flags saved in SS */
+	MCOUNT_RESTORE_FRAME 8
+
+	/* Restore flags */
+	popfq
+
+	jmp ftrace_return
+ftrace_restore_flags:
+	popfq
+	jmp ftrace_stub
+
+END(ftrace_regs_caller)
+
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	cmpq $ftrace_stub, ftrace_trace_function
+	jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+
+	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz ftrace_graph_caller
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+
+trace:
+	MCOUNT_SAVE_FRAME
+
+	movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+	call *ftrace_trace_function
+
+	MCOUNT_RESTORE_FRAME
+
+	jmp ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	MCOUNT_SAVE_FRAME
+
+#ifdef CC_USING_FENTRY
+	leaq SS+16(%rsp), %rdi
+	movq $0, %rdx	/* No framepointers needed */
+#else
+	leaq 8(%rbp), %rdi
+	movq (%rbp), %rdx
+#endif
+	movq RIP(%rsp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rsi
+
+	call prepare_ftrace_return
+
+	MCOUNT_RESTORE_FRAME
+
+	retq
+END(ftrace_graph_caller)
+
+GLOBAL(return_to_handler)
+	subq $24, %rsp
+
+	/* Save the return values */
+	movq %rax, (%rsp)
+	movq %rdx, 8(%rsp)
+	movq %rbp, %rdi
+
+	call ftrace_return_to_handler
+
+	movq %rax, %rdi
+	movq 8(%rsp), %rdx
+	movq (%rsp), %rax
+	addq $24, %rsp
+	jmp *%rdi
+#endif