author		Jason Baron <jbaron@redhat.com>		2010-09-17 11:08:51 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-09-20 18:19:39 -0400
commit		f49aa448561fe9215f43405cac6f31eb86317792 (patch)
tree		c048d03e49cd20a0183ac173bd30893f8adcf91e /arch/x86/kernel
parent		e9d2b064149ff7ef4acbc65a1b9374ac8b218d3e (diff)
jump label: Make dynamic no-op selection available outside of ftrace
Move Steve's code for finding the best 5-byte no-op from ftrace.c to
alternative.c. The idea is that other consumers (in this case jump label)
want to make use of that code.
Signed-off-by: Jason Baron <jbaron@redhat.com>
LKML-Reference: <96259ae74172dcac99c0020c249743c523a92e18.1284733808.git.jbaron@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
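
For context on how the exported helper is meant to be consumed: once arch_init_ideal_nop5() has run, the selected no-op can simply be copied over a 5-byte call or jump site. A minimal sketch follows, assuming ideal_nop5, IDEAL_NOP_SIZE_5 and text_poke_smp() are declared in <asm/alternative.h> (as the setup.c hunk below suggests); nop_out_site() is a hypothetical helper for illustration and is not part of this commit.

#include <asm/alternative.h>

/*
 * Illustrative sketch only -- not from this patch.  Disable a 5-byte
 * call/jump site by overwriting it with the nop selected at boot by
 * arch_init_ideal_nop5().
 */
static void nop_out_site(void *addr)
{
        text_poke_smp(addr, ideal_nop5, IDEAL_NOP_SIZE_5);
}

A jump label consumer would invoke something along these lines when a static branch is disabled.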
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/alternative.c	64
-rw-r--r--	arch/x86/kernel/ftrace.c	63
-rw-r--r--	arch/x86/kernel/setup.c	6
3 files changed, 71 insertions, 62 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f65ab8b014c4..1849d8036ee8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 	return addr;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE)
+
+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+
+void __init arch_init_ideal_nop5(void)
+{
+	extern const unsigned char ftrace_test_p6nop[];
+	extern const unsigned char ftrace_test_nop5[];
+	extern const unsigned char ftrace_test_jmp[];
+	int faulted = 0;
+
+	/*
+	 * There is no good nop for all x86 archs.
+	 * We will default to using the P6_NOP5, but first we
+	 * will test to make sure that the nop will actually
+	 * work on this CPU. If it faults, we will then
+	 * go to a lesser efficient 5 byte nop. If that fails
+	 * we then just use a jmp as our nop. This isn't the most
+	 * efficient nop, but we can not use a multi part nop
+	 * since we would then risk being preempted in the middle
+	 * of that nop, and if we enabled tracing then, it might
+	 * cause a system crash.
+	 *
+	 * TODO: check the cpuid to determine the best nop.
+	 */
+	asm volatile (
+		"ftrace_test_jmp:"
+		"jmp ftrace_test_p6nop\n"
+		"nop\n"
+		"nop\n"
+		"nop\n"  /* 2 byte jmp + 3 bytes */
+		"ftrace_test_p6nop:"
+		P6_NOP5
+		"jmp 1f\n"
+		"ftrace_test_nop5:"
+		".byte 0x66,0x66,0x66,0x66,0x90\n"
+		"1:"
+		".section .fixup, \"ax\"\n"
+		"2:	movl $1, %0\n"
+		"	jmp ftrace_test_nop5\n"
+		"3:	movl $2, %0\n"
+		"	jmp 1b\n"
+		".previous\n"
+		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
+		_ASM_EXTABLE(ftrace_test_nop5, 3b)
+		: "=r"(faulted) : "0" (faulted));
+
+	switch (faulted) {
+	case 0:
+		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
+		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
+		break;
+	case 1:
+		pr_info("converting mcount calls to 66 66 66 66 90\n");
+		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
+		break;
+	case 2:
+		pr_info("converting mcount calls to jmp . + 5\n");
+		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
+		break;
+	}
+
+}
+#endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cd37469b54ee..3afb33f14d2d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	return mod_code_status;
 }
 
-
-
-
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
 static unsigned char *ftrace_nop_replace(void)
 {
-	return ftrace_nop;
+	return ideal_nop5;
 }
 
 static int
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	extern const unsigned char ftrace_test_p6nop[];
-	extern const unsigned char ftrace_test_nop5[];
-	extern const unsigned char ftrace_test_jmp[];
-	int faulted = 0;
-
-	/*
-	 * There is no good nop for all x86 archs.
-	 * We will default to using the P6_NOP5, but first we
-	 * will test to make sure that the nop will actually
-	 * work on this CPU. If it faults, we will then
-	 * go to a lesser efficient 5 byte nop. If that fails
-	 * we then just use a jmp as our nop. This isn't the most
-	 * efficient nop, but we can not use a multi part nop
-	 * since we would then risk being preempted in the middle
-	 * of that nop, and if we enabled tracing then, it might
-	 * cause a system crash.
-	 *
-	 * TODO: check the cpuid to determine the best nop.
-	 */
-	asm volatile (
-		"ftrace_test_jmp:"
-		"jmp ftrace_test_p6nop\n"
-		"nop\n"
-		"nop\n"
-		"nop\n"  /* 2 byte jmp + 3 bytes */
-		"ftrace_test_p6nop:"
-		P6_NOP5
-		"jmp 1f\n"
-		"ftrace_test_nop5:"
-		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"1:"
-		".section .fixup, \"ax\"\n"
-		"2:	movl $1, %0\n"
-		"	jmp ftrace_test_nop5\n"
-		"3:	movl $2, %0\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
-		: "=r"(faulted) : "0" (faulted));
-
-	switch (faulted) {
-	case 0:
-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-		break;
-	case 1:
-		pr_info("converting mcount calls to 66 66 66 66 90\n");
-		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-		break;
-	case 2:
-		pr_info("converting mcount calls to jmp . + 5\n");
-		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-		break;
-	}
-
 	/* The return code is retured via data */
 	*(unsigned long *)data = 0;
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c3a4fbb2b996..00e167870f71 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -112,6 +112,7 @@
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
+#include <asm/alternative.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p)
 {
 	int acpi = 0;
 	int k8 = 0;
+	unsigned long flags;
 
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p)
 	x86_init.oem.banner();
 
 	mcheck_init();
+
+	local_irq_save(flags);
+	arch_init_ideal_nop5();
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_X86_32
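
For reference, the three 5-byte no-op candidates probed by arch_init_ideal_nop5() correspond to the byte sequences below. The first two encodings are taken from the pr_info() strings in the patch; the third is derived from the "2 byte jmp + 3 bytes" layout in the asm, assuming the assembler emits the short (eb) form of the jump.

/* Reference sketch only -- not part of the diff. */
static const unsigned char p6_nop5[]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* nopl 0x0(%eax,%eax,1) */
static const unsigned char osp_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };	/* operand-size prefixed nop */
static const unsigned char jmp_nop5[] = { 0xeb, 0x03, 0x90, 0x90, 0x90 };	/* short jmp over three nops */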