author     Borislav Petkov <bp@suse.de>    2015-01-18 11:48:18 -0500
committer  Borislav Petkov <bp@suse.de>    2015-02-23 07:44:17 -0500
commit     a930dc4543a2b213deb9fde12682716edff8a4a6
tree       33211f604e11c55382f6ba6430d442cf931b157e
parent     c70e1b475f37f07ab7181ad28458666d59aae634
x86/asm: Cleanup prefetch primitives
This is based on a patch originally by hpa.
With the current improvements to the alternatives, we can simply use %P1
as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction, we get:
apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
c104648b: alt_insn: 90 90 90 90
c195566c: rpl_insn: 0f 0d 4b 5c
...
apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
c18e09b4: alt_insn: 90 90 90
c1955948: rpl_insn: 0f 0d 08
...
apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
c1190cf9: alt_insn: 90 90 90 90 90 90 90
c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1
all with the proper padding done depending on the size of the
replacement instruction the compiler generates.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: H. Peter Anvin <hpa@linux.intel.com>
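A minimal stand-alone sketch of what the message boils down to, assuming GCC-style inline asm (the helper name is made up; the kernel itself goes through the alternative_input()/alternative_io() macros visible in the diff below): the "m" input hands the compiler a real memory operand, so the toolchain, not the programmer, chooses the addressing mode and with it the length of the emitted instruction.

/*
 * Hypothetical helper, not the kernel's prefetchw(): depending on how
 * the caller addresses x, the operand can fold a bare base register, a
 * disp8 or a disp32, which is exactly where the 3-, 4- and 7-byte
 * encodings in the dump above come from.
 */
static inline void prefetchw_sketch(const void *x)
{
	asm volatile("prefetchw %P0" : : "m" (*(const char *)x));
}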
 arch/x86/include/asm/apic.h      |  2
 arch/x86/include/asm/processor.h | 16
 arch/x86/kernel/cpu/amd.c        |  5
 3 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index efc3b22d896e..8118e94d50ab 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 {
 	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
 
-	alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
+	alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
 		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec1c93588cef..7be2c9a6caba 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -761,10 +761,10 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-# define BASE_PREFETCH		ASM_NOP4
+# define BASE_PREFETCH		""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH		"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 %P1"
 #endif
 
 /*
@@ -775,10 +775,9 @@ extern char ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchnta (%1)",
+	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 			  X86_FEATURE_XMM,
-			  "r" (x));
+			  "m" (*(const char *)x));
 }
 
 /*
@@ -788,10 +787,9 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
+	alternative_input(BASE_PREFETCH, "prefetchw %P1",
+			  X86_FEATURE_3DNOWPREFETCH,
+			  "m" (*(const char *)x));
 }
 
 static inline void spin_lock_prefetch(const void *x)
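To illustrate what the switch from "r" (x) plus "(%1)" to "m" (*(const char *)x) plus %P1 buys, here is a hypothetical caller (struct and function names are made up; assumes <asm/processor.h>): with an "m" operand the compiler can fold its existing base-plus-displacement addressing straight into the prefetch, instead of first materializing the address in a register.

/* Made-up structure and caller, for illustration only. */
struct req_sketch {
	char	payload[64];
	int	refcount;
};

static inline void touch_req(struct req_sketch *r)
{
	/*
	 * Can assemble directly to e.g. "prefetchw 64(%reg)"; with the
	 * old "r" (x) constraint a separate address computation (lea)
	 * had to be emitted first.
	 */
	prefetchw(&r->refcount);
}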
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a220239cea65..dd9e50500297 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -711,6 +711,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+	/* 3DNow or LM implies PREFETCHW */
+	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
+		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 }
 
 #ifdef CONFIG_X86_32
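For reference, the implication the amd.c hunk encodes ("3DNow or LM implies PREFETCHW") can be mirrored from user space; a rough sketch using GCC's <cpuid.h>, with the standard CPUID 0x80000001 bit positions (the function name is made up):

#include <stdbool.h>
#include <cpuid.h>

/* Sketch of the same reasoning as the init_amd() hunk above. */
static bool cpu_can_prefetchw(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return false;

	if (ecx & (1u << 8))		/* 3DNOWPREFETCH */
		return true;

	return (edx & (1u << 31)) ||	/* 3DNow! */
	       (edx & (1u << 29));	/* long mode (LM) */
}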