Diffstat (limited to 'include/asm-x86')

-rw-r--r--  include/asm-x86/cpufeature.h         | 13
-rw-r--r--  include/asm-x86/kvm_host.h           |  2
-rw-r--r--  include/asm-x86/mce.h                |  1
-rw-r--r--  include/asm-x86/msr.h                | 27
-rw-r--r--  include/asm-x86/required-features.h  |  8

5 files changed, 31 insertions(+), 20 deletions(-)
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 2f5a792b0acc..9489283a4bcf 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -72,14 +72,15 @@
 #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
 #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
 #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
 #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -91,6 +92,7 @@
 #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
 #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
@@ -189,6 +191,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
+#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg 1
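Note on the hunks above: the two new bits decode differently. X86_FEATURE_NOPL lives in word 3, which is Linux-internal (synthetic bits, not read from CPUID), while X86_FEATURE_XMM4_2 is word 4, bit 20, i.e. CPUID leaf 1, ECX bit 20. A minimal userspace sketch of the same SSE4.2 test that the new cpu_has_xmm4_2 performs in-kernel (the kernel reads the cached bit in boot_cpu_data rather than issuing CPUID each time):

/* Build with: gcc -o sse42check sse42check.c (x86 only) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* ECX bit 20 <=> X86_FEATURE_XMM4_2 == 4*32+20 */
	printf("SSE4.2: %s\n", (ecx & (1u << 20)) ? "yes" : "no");
	return 0;
}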
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 0f3c53114614..c2e34c275900 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -722,7 +722,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
-	".pushsection .text.fixup, \"ax\" \n" \
+	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
 	KVM_EX_PUSH " $666b \n\t" \
 	"jmp kvm_handle_fault_on_reboot \n\t" \
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index 94f1fd79e22a..531eaa587455 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -92,6 +92,7 @@ extern int mce_disabled;
 
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
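The new extern above publishes a hook so the generic MCE code can hand CPU-hotplug events to the bank-threshold driver through a pointer rather than a hard call. A minimal sketch of both sides, under the assumption that the provider is the AMD threshold code; the function names here are hypothetical, only the pointer's type comes from the header:

#include <linux/init.h>

void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

/* Provider side (e.g. the threshold driver) installs its handler ... */
static void my_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	/* create or tear down per-CPU threshold state here */
}

static int __init my_threshold_init(void)
{
	threshold_cpu_callback = my_threshold_cpu_callback;
	return 0;
}

/* ... and the generic MCE hotplug path calls through it, if set. */
static void mce_threshold_hotplug(unsigned long action, unsigned int cpu)
{
	if (threshold_cpu_callback)
		threshold_cpu_callback(action, cpu);
}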
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index ca110ee73f07..2362cfda1fbc 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -52,14 +52,14 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 {
 	DECLARE_ARGS(val, low, high);
 
-	asm volatile("2: rdmsr ; xor %0,%0\n"
+	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
 		     "1:\n\t"
 		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %3,%0 ; jmp 1b\n\t"
+		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
 		     ".previous\n\t"
 		     _ASM_EXTABLE(2b, 3b)
-		     : "=r" (*err), EAX_EDX_RET(val, low, high)
-		     : "c" (msr), "i" (-EFAULT));
+		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), [fault] "i" (-EFAULT));
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -73,15 +73,15 @@ static inline int native_write_msr_safe(unsigned int msr,
 			 unsigned low, unsigned high)
 {
 	int err;
-	asm volatile("2: wrmsr ; xor %0,%0\n"
+	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
 		     "1:\n\t"
 		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %4,%0 ; jmp 1b\n\t"
+		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
 		     ".previous\n\t"
 		     _ASM_EXTABLE(2b, 3b)
-		     : "=a" (err)
+		     : [err] "=a" (err)
 		     : "c" (msr), "0" (low), "d" (high),
-		       "i" (-EFAULT)
+		       [fault] "i" (-EFAULT)
 		     : "memory");
 	return err;
 }
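Both hunks above convert positional asm operands (%0, %3, %4) to named ones (%[err], %[fault]), so later inserting or reordering an operand can no longer silently shift the numbering and corrupt the template. A standalone, unprivileged demo of the syntax with the same error-path shape, minus the privileged rdmsr (x86 AT&T syntax; -14 stands in for -EFAULT):

#include <stdio.h>

int main(void)
{
	int err;

	asm volatile("xor %[err],%[err]\n\t"   /* success path: err = 0   */
		     "mov %[fault],%[err]\n\t" /* simulate the fixup path */
		     : [err] "=r" (err)
		     : [fault] "i" (-14));     /* -EFAULT */

	printf("err = %d\n", err);             /* prints -14 */
	return 0;
}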
@@ -192,19 +192,20 @@ do { \
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
 #ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
 	rdmsr(msr_no, *l, *h);
+	return 0;
 }
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	wrmsr(msr_no, l, h);
+	return 0;
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
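With the hunk above, rdmsr_on_cpu() and wrmsr_on_cpu() report whether the cross-CPU call itself succeeded instead of discarding that status; the UP stubs simply return 0, matching the SMP prototypes. A sketch of a caller using the new return value, assuming the <asm/msr.h> context above; MSR 0x1b (IA32_APICBASE) is only an illustrative pick:

/* Read an MSR on another CPU and fold it into a 64-bit value,
 * propagating a failure of the remote call (e.g. CPU offline). */
static int read_apicbase_on(unsigned int cpu, u64 *val)
{
	u32 lo, hi;
	int err = rdmsr_on_cpu(cpu, 0x1b, &lo, &hi);

	if (err)
		return err; /* the cross-CPU call itself failed */

	*val = ((u64)hi << 32) | lo;
	return 0;
}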
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index adec887dd7cd..5c2ff4bc2980 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -41,6 +41,12 @@
 # define NEED_3DNOW 0
 #endif
 
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL 0
+#endif
+
 #ifdef CONFIG_X86_64
 #define NEED_PSE 0
 #define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
@@ -67,7 +73,7 @@
 #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
 
 #define REQUIRED_MASK2 0
-#define REQUIRED_MASK3 0
+#define REQUIRED_MASK3 (NEED_NOPL)
 #define REQUIRED_MASK4 0
 #define REQUIRED_MASK5 0
 #define REQUIRED_MASK6 0
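The mask arithmetic behind the hunk above: capability bits are grouped into 32-bit words, so X86_FEATURE_NOPL = 3*32+20 means bit 20 of word 3, NEED_NOPL = 1 << (116 & 31) = 1 << 20 = 0x00100000, and REQUIRED_MASK3 now enforces that bit at early boot when the kernel is built with P6 NOPs or as 64-bit. A tiny standalone check of the arithmetic:

#include <stdio.h>

#define X86_FEATURE_NOPL (3*32+20)
#define NEED_NOPL (1 << (X86_FEATURE_NOPL & 31))

int main(void)
{
	/* prints: word 3, bit 20, mask 0x00100000 */
	printf("word %d, bit %d, mask 0x%08x\n",
	       X86_FEATURE_NOPL / 32, X86_FEATURE_NOPL & 31,
	       (unsigned int)NEED_NOPL);
	return 0;
}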