about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorBorislav Petkov <bp@suse.de>2016-02-16 03:43:21 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-17 02:47:22 -0500
commitf1b92bb6b5a4e17b508f128b084fa00e0eda590c (patch)
treed50ff35391cb9d0d928e3465032c0a6304446246 /arch/x86/kernel
parent4f6c893822932622fad2b46cc30467be9f20ee5d (diff)
x86/ftrace, x86/asm: Kill ftrace_caller_end label
One of ftrace_caller_end and ftrace_return is redundant so unify them. Rename ftrace_return to ftrace_epilogue to mean that everything after that label represents, like an afterword, work which happens *after* the ftrace call, e.g., the function graph tracer for one. Steve wants this to rather mean "[a]n event which reflects meaningfully on a recently ended conflict or struggle." I can imagine that ftrace can be a struggle sometimes. Anyway, beef up the comment about the code contents and layout before ftrace_epilogue label. Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Steven Rostedt <rostedt@goodmis.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1455612202-14414-4-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- arch/x86/kernel/ftrace.c    | 11
-rw-r--r-- arch/x86/kernel/mcount_64.S | 14
2 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 29408d6d6626..04f9641e0cb6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -697,9 +697,8 @@ static inline void tramp_free(void *tramp) { }
697#endif 697#endif
698 698
699/* Defined as markers to the end of the ftrace default trampolines */ 699/* Defined as markers to the end of the ftrace default trampolines */
700extern void ftrace_caller_end(void);
701extern void ftrace_regs_caller_end(void); 700extern void ftrace_regs_caller_end(void);
702extern void ftrace_return(void); 701extern void ftrace_epilogue(void);
703extern void ftrace_caller_op_ptr(void); 702extern void ftrace_caller_op_ptr(void);
704extern void ftrace_regs_caller_op_ptr(void); 703extern void ftrace_regs_caller_op_ptr(void);
705 704
@@ -746,7 +745,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
746 op_offset = (unsigned long)ftrace_regs_caller_op_ptr; 745 op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
747 } else { 746 } else {
748 start_offset = (unsigned long)ftrace_caller; 747 start_offset = (unsigned long)ftrace_caller;
749 end_offset = (unsigned long)ftrace_caller_end; 748 end_offset = (unsigned long)ftrace_epilogue;
750 op_offset = (unsigned long)ftrace_caller_op_ptr; 749 op_offset = (unsigned long)ftrace_caller_op_ptr;
751 } 750 }
752 751
@@ -754,7 +753,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
754 753
755 /* 754 /*
756 * Allocate enough size to store the ftrace_caller code, 755 * Allocate enough size to store the ftrace_caller code,
757 * the jmp to ftrace_return, as well as the address of 756 * the jmp to ftrace_epilogue, as well as the address of
758 * the ftrace_ops this trampoline is used for. 757 * the ftrace_ops this trampoline is used for.
759 */ 758 */
760 trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *)); 759 trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
@@ -772,8 +771,8 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
772 771
773 ip = (unsigned long)trampoline + size; 772 ip = (unsigned long)trampoline + size;
774 773
775 /* The trampoline ends with a jmp to ftrace_return */ 774 /* The trampoline ends with a jmp to ftrace_epilogue */
776 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return); 775 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
777 memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); 776 memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
778 777
779 /* 778 /*
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 87e1762e2bca..ed48a9f465f8 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -168,12 +168,14 @@ GLOBAL(ftrace_call)
168 restore_mcount_regs 168 restore_mcount_regs
169 169
170 /* 170 /*
171 * The copied trampoline must call ftrace_return as it 171 * The copied trampoline must call ftrace_epilogue as it
172 * still may need to call the function graph tracer. 172 * still may need to call the function graph tracer.
173 *
174 * The code up to this label is copied into trampolines so
175 * think twice before adding any new code or changing the
176 * layout here.
173 */ 177 */
174GLOBAL(ftrace_caller_end) 178GLOBAL(ftrace_epilogue)
175
176GLOBAL(ftrace_return)
177 179
178#ifdef CONFIG_FUNCTION_GRAPH_TRACER 180#ifdef CONFIG_FUNCTION_GRAPH_TRACER
179GLOBAL(ftrace_graph_call) 181GLOBAL(ftrace_graph_call)
@@ -244,14 +246,14 @@ GLOBAL(ftrace_regs_call)
244 popfq 246 popfq
245 247
246 /* 248 /*
247 * As this jmp to ftrace_return can be a short jump 249 * As this jmp to ftrace_epilogue can be a short jump
248 * it must not be copied into the trampoline. 250 * it must not be copied into the trampoline.
249 * The trampoline will add the code to jump 251 * The trampoline will add the code to jump
250 * to the return. 252 * to the return.
251 */ 253 */
252GLOBAL(ftrace_regs_caller_end) 254GLOBAL(ftrace_regs_caller_end)
253 255
254 jmp ftrace_return 256 jmp ftrace_epilogue
255 257
256END(ftrace_regs_caller) 258END(ftrace_regs_caller)
257 259