aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/alternative.c
diff options
context:
space:
mode:
authorJason Baron <jbaron@redhat.com>2010-09-17 11:08:51 -0400
committerSteven Rostedt <rostedt@goodmis.org>2010-09-20 18:19:39 -0400
commitf49aa448561fe9215f43405cac6f31eb86317792 (patch)
treec048d03e49cd20a0183ac173bd30893f8adcf91e /arch/x86/kernel/alternative.c
parente9d2b064149ff7ef4acbc65a1b9374ac8b218d3e (diff)
jump label: Make dynamic no-op selection available outside of ftrace
Move Steve's code for finding the best 5-byte no-op from ftrace.c to alternative.c. The idea is that other consumers (in this case jump label) want to make use of that code. Signed-off-by: Jason Baron <jbaron@redhat.com> LKML-Reference: <96259ae74172dcac99c0020c249743c523a92e18.1284733808.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'arch/x86/kernel/alternative.c')
-rw-r--r--arch/x86/kernel/alternative.c64
1 file changed, 64 insertions, 0 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f65ab8b014c4..1849d8036ee8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
641 return addr; 641 return addr;
642} 642}
643 643
644#if defined(CONFIG_DYNAMIC_FTRACE)
645
/* Cached "best" 5-byte nop for this CPU, filled in at boot by
 * arch_init_ideal_nop5() below. */
646unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
647
/*
 * Probe, at boot, which 5-byte nop this CPU can actually execute
 * without faulting, and copy the winner into ideal_nop5[] for use
 * by later code patching (ftrace, jump labels).
 */
648void __init arch_init_ideal_nop5(void)
649{
650	extern const unsigned char ftrace_test_p6nop[];
651	extern const unsigned char ftrace_test_nop5[];
652	extern const unsigned char ftrace_test_jmp[];
653	int faulted = 0;
654
655	/*
656	 * There is no single good nop for all x86 archs.
657	 * We will default to using the P6_NOP5, but first we
658	 * will test to make sure that the nop will actually
659	 * work on this CPU. If it faults, we will then
660	 * go to a less efficient 5 byte nop. If that fails
661	 * we then just use a jmp as our nop. This isn't the most
662	 * efficient nop, but we cannot use a multi-part nop
663	 * since we would then risk being preempted in the middle
664	 * of that nop, and if we enabled tracing then, it might
665	 * cause a system crash.
666	 *
667	 * TODO: check the cpuid to determine the best nop.
668	 */
	/*
	 * The asm executes each nop candidate in turn; faults are routed
	 * through the exception table (_ASM_EXTABLE) into the .fixup
	 * section, which records which candidate failed:
	 *   faulted == 0: P6 nop executed fine (never hit .fixup)
	 *   faulted == 1: P6 nop faulted, fell back to 66-prefix nop
	 *   faulted == 2: both single-insn nops faulted, use a jmp
	 */
669	asm volatile (
670		"ftrace_test_jmp:"
671		"jmp ftrace_test_p6nop\n"
672		"nop\n"
673		"nop\n"
674		"nop\n"  /* 2 byte jmp + 3 bytes */
675		"ftrace_test_p6nop:"
676		P6_NOP5
677		"jmp 1f\n"
678		"ftrace_test_nop5:"
679		".byte 0x66,0x66,0x66,0x66,0x90\n"
680		"1:"
681		".section .fixup, \"ax\"\n"
682		"2:	movl $1, %0\n"
683		"	jmp ftrace_test_nop5\n"
684		"3:	movl $2, %0\n"
685		"	jmp 1b\n"
686		".previous\n"
687		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
688		_ASM_EXTABLE(ftrace_test_nop5, 3b)
689		: "=r"(faulted) : "0" (faulted));
690
	/* Copy the first candidate that executed cleanly into ideal_nop5. */
691	switch (faulted) {
692	case 0:
693		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
694		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
695		break;
696	case 1:
697		pr_info("converting mcount calls to 66 66 66 66 90\n");
698		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
699		break;
700	case 2:
701		pr_info("converting mcount calls to jmp . + 5\n");
702		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
703		break;
704	}
705
706}
707#endif