author     H. Peter Anvin <hpa@zytor.com>  2010-10-28 00:09:15 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2010-10-29 13:07:59 -0400
commit     2d1d7126bbde53989f1d7de174816c123bb7ecb0 (patch)
tree       a29e74a2a9e8ad6e8086acd2c242de14b6fa2755 /arch
parent     f0daed0242d514033b9ecca9187108d3c4e281a5 (diff)
x86, ftrace: Use safe noops, drop trap test
Always use a safe 5-byte noop sequence.  Drop the trap test, since it
is known to return false negatives on some virtualization platforms on
32 bits.  The resulting code is both simpler and safer.

Cc: Daniel Drake <dsd@laptop.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
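The sequences this patch hard-codes (visible in the diff below) each decode
as one 5-byte instruction: 66 66 66 66 90 is an operand-size-prefixed NOP,
and 3e 8d 74 26 00 is a ds-prefixed lea that reloads %esi with its own
value.  A single instruction can never be preempted halfway through, which
is why one 5-byte nop is needed rather than several short ones.  The demo
below is a user-space sketch for illustration only (not kernel code, and
no such file exists in the tree); it executes the sequence selected for
the current architecture and builds with gcc:

/* nop5_demo.c -- hypothetical user-space sketch, not kernel code:
 * executes the 5-byte nop this patch selects for the current arch.
 * Build: gcc -O2 nop5_demo.c (use -m32 for the 32-bit sequence). */
#include <stdio.h>

int main(void)
{
#ifdef __x86_64__
        /* 66 66 66 66 90: operand-size prefixes + nop, one instruction */
        asm volatile (".byte 0x66, 0x66, 0x66, 0x66, 0x90");
        puts("executed 66 66 66 66 90 as a single 5-byte nop");
#else
        /* 3e 8d 74 26 00: ds prefix + lea 0x0(%esi,1),%esi, a no-op */
        asm volatile (".byte 0x3e, 0x8d, 0x74, 0x26, 0x00");
        puts("executed 3e 8d 74 26 00 as a single 5-byte nop");
#endif
        return 0;
}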
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/alternative.c  69
1 file changed, 15 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a36bb90aef53..0b30214282a8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -644,65 +644,26 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
 
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+#ifdef CONFIG_X86_64
+unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+#else
+unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+#endif
 
 void __init arch_init_ideal_nop5(void)
 {
-        extern const unsigned char ftrace_test_p6nop[];
-        extern const unsigned char ftrace_test_nop5[];
-        extern const unsigned char ftrace_test_jmp[];
-        int faulted = 0;
-
         /*
-         * There is no good nop for all x86 archs.
-         * We will default to using the P6_NOP5, but first we
-         * will test to make sure that the nop will actually
-         * work on this CPU. If it faults, we will then
-         * go to a lesser efficient 5 byte nop. If that fails
-         * we then just use a jmp as our nop. This isn't the most
-         * efficient nop, but we can not use a multi part nop
-         * since we would then risk being preempted in the middle
-         * of that nop, and if we enabled tracing then, it might
-         * cause a system crash.
+         * There is no good nop for all x86 archs.  This selection
+         * algorithm should be unified with the one in find_nop_table(),
+         * but this should be good enough for now.
          *
-         * TODO: check the cpuid to determine the best nop.
+         * For cases other than the ones below, use the safe (as in
+         * always functional) defaults above.
          */
-        asm volatile (
-                "ftrace_test_jmp:"
-                "jmp ftrace_test_p6nop\n"
-                "nop\n"
-                "nop\n"
-                "nop\n"  /* 2 byte jmp + 3 bytes */
-                "ftrace_test_p6nop:"
-                P6_NOP5
-                "jmp 1f\n"
-                "ftrace_test_nop5:"
-                ".byte 0x66,0x66,0x66,0x66,0x90\n"
-                "1:"
-                ".section .fixup, \"ax\"\n"
-                "2:     movl $1, %0\n"
-                "       jmp ftrace_test_nop5\n"
-                "3:     movl $2, %0\n"
-                "       jmp 1b\n"
-                ".previous\n"
-                _ASM_EXTABLE(ftrace_test_p6nop, 2b)
-                _ASM_EXTABLE(ftrace_test_nop5, 3b)
-                : "=r"(faulted) : "0" (faulted));
-
-        switch (faulted) {
-        case 0:
-                pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-                memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
-                break;
-        case 1:
-                pr_info("converting mcount calls to 66 66 66 66 90\n");
-                memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
-                break;
-        case 2:
-                pr_info("converting mcount calls to jmp . + 5\n");
-                memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
-                break;
-        }
-
+#ifdef CONFIG_X86_64
+        /* Don't use these on 32 bits due to broken virtualizers */
+        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+                memcpy(ideal_nop5, p6_nops[5], 5);
+#endif
 }
 #endif
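For context on how ideal_nop5 gets consumed, a minimal sketch follows
(an assumption for illustration; disable_call_site() is hypothetical,
while text_poke_smp() is the kernel primitive named in the hunk header
above).  Dynamic ftrace emits a 5-byte call at each mcount site, and
disabling a site means overwriting that call with exactly one 5-byte
instruction:

/* Hypothetical helper, not part of this commit: disable one mcount
 * call site by replacing its 5-byte "call" (e8 + rel32) with the
 * matching 5-byte nop.  Because the nop is a single instruction, a
 * task preempted at the site can never resume in the middle of it. */
static void disable_call_site(void *call_addr)
{
        text_poke_smp(call_addr, ideal_nop5, 5);
}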