about summary refs log tree commit diff stats
path: root/arch/x86/include/asm/processor.h
diff options
context:
space:
mode:
author	Ingo Molnar <mingo@kernel.org>	2015-03-04 00:33:49 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-03-04 00:36:15 -0500
commit	f8e92fb4b0ffc4d62279ab39f34e798e37e90b0b (patch)
tree	9caa8df664792e64ddcb4ea03fd418a8a529c82e /arch/x86/include/asm/processor.h
parent	d2c032e3dc58137a7261a7824d3acce435db1d66 (diff)
parent	dfecb95cdfeaf7872d83a96bec3a606e9cd95c8d (diff)
Merge tag 'alternatives_padding' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/asm
Pull alternative instructions framework improvements from Borislav Petkov: "A more involved rework of the alternatives framework to be able to pad instructions and thus make using the alternatives macros more straightforward and without having to figure out old and new instruction sizes but have the toolchain figure that out for us. Furthermore, it optimizes JMPs used so that fetch and decode can be relieved with smaller versions of the JMPs, where possible. Some stats: x86_64 defconfig: Alternatives sites total: 2478 Total padding added (in Bytes): 6051 The padding is currently done for: X86_FEATURE_ALWAYS X86_FEATURE_ERMS X86_FEATURE_LFENCE_RDTSC X86_FEATURE_MFENCE_RDTSC X86_FEATURE_SMAP This is with the latest version of the patchset. Of course, on each machine the alternatives sites actually being patched are a proper subset of the total number." Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/include/asm/processor.h')
-rw-r--r--	arch/x86/include/asm/processor.h	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec1c93588cef..7be2c9a6caba 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -761,10 +761,10 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_SPINLOCK_PREFETCH

 #ifdef CONFIG_X86_32
-# define BASE_PREFETCH		ASM_NOP4
+# define BASE_PREFETCH		""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH		"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 %P1"
 #endif

 /*
@@ -775,10 +775,9 @@ extern char ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchnta (%1)",
+	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 			  X86_FEATURE_XMM,
-			  "r" (x));
+			  "m" (*(const char *)x));
 }

 /*
@@ -788,10 +787,9 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
+	alternative_input(BASE_PREFETCH, "prefetchw %P1",
+			  X86_FEATURE_3DNOWPREFETCH,
+			  "m" (*(const char *)x));
 }

 static inline void spin_lock_prefetch(const void *x)