Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/apic.h                |   6
-rw-r--r--  arch/x86/include/asm/atomic.h              |   1
-rw-r--r--  arch/x86/include/asm/atomic64_32.h         |   1
-rw-r--r--  arch/x86/include/asm/boot.h                |   2
-rw-r--r--  arch/x86/include/asm/calgary.h             |   2
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h          |   2
-rw-r--r--  arch/x86/include/asm/cmpxchg_64.h          |   2
-rw-r--r--  arch/x86/include/asm/cpu.h                 |   3
-rw-r--r--  arch/x86/include/asm/cpufeature.h          | 115
-rw-r--r--  arch/x86/include/asm/fixmap.h              |   5
-rw-r--r--  arch/x86/include/asm/fpu/internal.h        | 174
-rw-r--r--  arch/x86/include/asm/fpu/xstate.h          |  11
-rw-r--r--  arch/x86/include/asm/intel_pt.h            |  10
-rw-r--r--  arch/x86/include/asm/iosf_mbi.h            |  51
-rw-r--r--  arch/x86/include/asm/ipi.h                 |   2
-rw-r--r--  arch/x86/include/asm/jump_label.h          |  63
-rw-r--r--  arch/x86/include/asm/kvm_host.h            |  75
-rw-r--r--  arch/x86/include/asm/lguest.h              |   4
-rw-r--r--  arch/x86/include/asm/microcode.h           |  39
-rw-r--r--  arch/x86/include/asm/mmu_context.h         |  34
-rw-r--r--  arch/x86/include/asm/msi.h                 |   6
-rw-r--r--  arch/x86/include/asm/msr-index.h           |   1
-rw-r--r--  arch/x86/include/asm/msr-trace.h           |  57
-rw-r--r--  arch/x86/include/asm/msr.h                 |  43
-rw-r--r--  arch/x86/include/asm/page_types.h          |   6
-rw-r--r--  arch/x86/include/asm/paravirt.h            |  38
-rw-r--r--  arch/x86/include/asm/paravirt_types.h      |  35
-rw-r--r--  arch/x86/include/asm/pgtable.h             |  55
-rw-r--r--  arch/x86/include/asm/pgtable_types.h       |   9
-rw-r--r--  arch/x86/include/asm/platform_sst_audio.h  |   1
-rw-r--r--  arch/x86/include/asm/pmem.h                |   7
-rw-r--r--  arch/x86/include/asm/pvclock.h             |  14
-rw-r--r--  arch/x86/include/asm/qspinlock_paravirt.h  |  59
-rw-r--r--  arch/x86/include/asm/reboot.h              |   1
-rw-r--r--  arch/x86/include/asm/smp.h                 |  12
-rw-r--r--  arch/x86/include/asm/suspend_32.h          |   1
-rw-r--r--  arch/x86/include/asm/suspend_64.h          |   1
-rw-r--r--  arch/x86/include/asm/uaccess.h             |   9
-rw-r--r--  arch/x86/include/asm/vdso.h                |   1
-rw-r--r--  arch/x86/include/asm/x86_init.h            |   2
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h       |   6
-rw-r--r--  arch/x86/include/asm/xor_32.h              |   2
-rw-r--r--  arch/x86/include/uapi/asm/hyperv.h         |  92
-rw-r--r--  arch/x86/include/uapi/asm/mce.h            |   2
44 files changed, 682 insertions, 380 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a30316bf801a..c80f6b6f3da2 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -23,6 +23,11 @@
 #define APIC_VERBOSE 1
 #define APIC_DEBUG   2
 
+/* Macros for apic_extnmi which controls external NMI masking */
+#define APIC_EXTNMI_BSP		0 /* Default */
+#define APIC_EXTNMI_ALL		1
+#define APIC_EXTNMI_NONE	2
+
 /*
  * Define the default level of output to be very little
  * This can be turned up by using apic=verbose for more
@@ -303,6 +308,7 @@ struct apic {
 					      unsigned int *apicid);
 
 	/* ipi */
+	void (*send_IPI)(int cpu, int vector);
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
 	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
 					 int vector);
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83e6d91..3e8674288198 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b77fb5..a984111135b1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/processor.h>
 //#include <asm/cmpxchg.h>
 
 /* An 64bit atomic type */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 4fa687a47a62..6b8d6e8cd449 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -27,7 +27,7 @@
 #define BOOT_HEAP_SIZE	0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#define BOOT_HEAP_SIZE	0x8000
+#define BOOT_HEAP_SIZE	0x10000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index 0d467b338835..a8303ebe089f 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -31,7 +31,7 @@
 #include <asm/types.h>
 
 struct iommu_table {
-	struct cal_chipset_ops *chip_ops;	/* chipset specific funcs */
+	const struct cal_chipset_ops *chip_ops;	/* chipset specific funcs */
 	unsigned long  it_base;		/* mapped address of tce table */
 	unsigned long  it_hint;		/* Hint for next alloc */
 	unsigned long *it_map;		/* A simple allocation bitmap for now */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481..e4959d023af8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 
 #endif
 
-#define system_has_cmpxchg_double()	cpu_has_cx8
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX8)
 
 #endif	/* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae5..caa23a34c963 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#define system_has_cmpxchg_double()	cpu_has_cx16
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX16)
 
 #endif	/* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bf2caa1dedc5..678637ad7476 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -36,4 +36,7 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 
 int mwait_usable(const struct cpuinfo_x86 *);
 
+unsigned int x86_family(unsigned int sig);
+unsigned int x86_model(unsigned int sig);
+unsigned int x86_stepping(unsigned int sig);
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f7ba9fbf12ee..7ad8c9464297 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
 #include <asm/disabled-features.h>
 #endif
 
-#define NCAPINTS	14	/* N 32-bit words worth of info */
+#define NCAPINTS	16	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -181,22 +181,17 @@
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_IDA		( 7*32+ 0) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT	( 7*32+ 1) /* Always Running APIC Timer */
+
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_PLN		( 7*32+ 5) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS		( 7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
+
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_HWP		( 7*32+10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOTIFY	( 7*32+11) /* Intel HWP_NOTIFY */
-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+12) /* Intel HWP_ACT_WINDOW */
-#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
-#define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
+
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
 
 /* Virtualization flags: Linux defined, word 8 */
@@ -205,16 +200,7 @@
 #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		( 8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	( 8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT		( 8*32+ 5) /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV	( 8*32+ 6) /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML	( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS	( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR	( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN	( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID	( 8*32+11) /* AMD flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER	( 8*32+13) /* AMD filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD	( 8*32+14) /* AMD pause filter threshold */
+
 #define X86_FEATURE_VMMCALL	( 8*32+15) /* Prefer vmmcall to vmcall */
 #define X86_FEATURE_XENPV	( 8*32+16) /* "" Xen paravirtual guest */
 
@@ -259,6 +245,30 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ	(14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(15*32+12) /* pause filter threshold */
+
 /*
  * BUG word(s)
  */
@@ -279,6 +289,26 @@
 #include <asm/asm.h>
 #include <linux/bitops.h>
 
+enum cpuid_leafs
+{
+	CPUID_1_EDX		= 0,
+	CPUID_8000_0001_EDX,
+	CPUID_8086_0001_EDX,
+	CPUID_LNX_1,
+	CPUID_1_ECX,
+	CPUID_C000_0001_EDX,
+	CPUID_8000_0001_ECX,
+	CPUID_LNX_2,
+	CPUID_LNX_3,
+	CPUID_7_0_EBX,
+	CPUID_D_1_EAX,
+	CPUID_F_0_EDX,
+	CPUID_F_1_EDX,
+	CPUID_8000_0008_EBX,
+	CPUID_6_EAX,
+	CPUID_8000_000A_EDX,
+};
+
 #ifdef CONFIG_X86_FEATURE_NAMES
 extern const char * const x86_cap_flags[NCAPINTS*32];
 extern const char * const x86_power_flags[32];
@@ -356,60 +386,31 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2		boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled	boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe		boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled	boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm		boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled	boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds		boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs		boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1		boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2	boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext		boot_cpu_has(X86_FEATURE_BPEXT)
-
-#if __GNUC__ >= 4
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
+
+#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 extern void warn_pre_alternatives(void);
 extern bool __static_cpu_has_safe(u16 bit);
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index f80d70009ff8..6d7d0e52ed5a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,7 +19,6 @@
 #include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
-#include <asm/pvclock.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
@@ -72,10 +71,6 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
 #endif
-#ifdef CONFIG_PARAVIRT_CLOCK
-	PVCLOCK_FIXMAP_BEGIN,
-	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
-#endif
 #endif
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..0fd440df63f1 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 extern void fpu__init_check_bugs(void);
 extern void fpu__resume_cpu(void);
+extern u64 fpu__get_supported_xfeatures_mask(void);
 
 /*
  * Debugging facility:
@@ -224,18 +225,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 #define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
 #define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
 
-/* xstate instruction fault handler: */
-#define xstate_fault(__err)		\
-					\
-	".section .fixup,\"ax\"\n"	\
-					\
-	"3: movl $-2,%[_err]\n"		\
-	"   jmp  2b\n"			\
-					\
-	".previous\n"			\
-					\
-	_ASM_EXTABLE(1b, 3b)		\
-	: [_err] "=r" (__err)
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction where we might get an exception at.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE_2(XSAVE,				\
+				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
+				   XSAVES,   X86_FEATURE_XSAVES)	\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE(XRSTOR,				\
+				 XRSTORS, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
 
 /*
  * This function is called only during boot time when x86 caps are not set
@@ -246,22 +296,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -276,22 +318,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -305,33 +339,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(!alternatives_patched);
 
-	/*
-	 * If xsaves is enabled, xsaves replaces xsaveopt because
-	 * it supports compact format and supervisor states in addition to
-	 * modified optimization in xsaveopt.
-	 *
-	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-	 * because xsaveopt supports modified optimization which is not
-	 * supported by xsave.
-	 *
-	 * If none of xsaves and xsaveopt is enabled, use xsave.
-	 */
-	alternative_input_2(
-		"1:"XSAVE,
-		XSAVEOPT,
-		X86_FEATURE_XSAVEOPT,
-		XSAVES,
-		X86_FEATURE_XSAVES,
-		[xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
-		"memory");
-	asm volatile("2:\n\t"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -344,23 +356,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	/*
-	 * Use xrstors to restore context if it is enabled. xrstors supports
-	 * compacted format of xsave area which is not supported by xrstor.
-	 */
-	alternative_input(
-		"1: " XRSTOR,
-		XRSTORS,
-		X86_FEATURE_XSAVES,
-		"D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
-		: "memory");
-
-	asm volatile("2:\n"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -388,12 +386,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
+
 	return err;
 }
 
@@ -405,14 +401,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
+
 	return err;
 }
 
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 3a6c89b70307..af30fdeb140d 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -20,15 +20,16 @@
 
 /* Supported features which support lazy state saving */
 #define XFEATURE_MASK_LAZY	(XFEATURE_MASK_FP | \
-				 XFEATURE_MASK_SSE | \
+				 XFEATURE_MASK_SSE)
+
+/* Supported features which require eager state saving */
+#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | \
+				 XFEATURE_MASK_BNDCSR | \
 				 XFEATURE_MASK_YMM | \
 				 XFEATURE_MASK_OPMASK | \
 				 XFEATURE_MASK_ZMM_Hi256 | \
 				 XFEATURE_MASK_Hi16_ZMM)
 
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
-
 /* All currently supported features */
 #define XCNTXT_MASK	(XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
 
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
new file mode 100644
index 000000000000..e1a411786bf5
--- /dev/null
+++ b/arch/x86/include/asm/intel_pt.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_INTEL_PT_H
+#define _ASM_X86_INTEL_PT_H
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+void cpu_emergency_stop_pt(void);
+#else
+static inline void cpu_emergency_stop_pt(void) {}
+#endif
+
+#endif /* _ASM_X86_INTEL_PT_H */
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index b72ad0faa6c5..b41ee164930a 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -1,5 +1,5 @@
 /*
- * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ * Intel OnChip System Fabric MailBox access support
  */
 
 #ifndef IOSF_MBI_SYMS_H
@@ -16,6 +16,18 @@
 #define MBI_MASK_LO		0x000000FF
 #define MBI_ENABLE		0xF0
 
+/* IOSF SB read/write opcodes */
+#define MBI_MMIO_READ		0x00
+#define MBI_MMIO_WRITE		0x01
+#define MBI_CFG_READ		0x04
+#define MBI_CFG_WRITE		0x05
+#define MBI_CR_READ		0x06
+#define MBI_CR_WRITE		0x07
+#define MBI_REG_READ		0x10
+#define MBI_REG_WRITE		0x11
+#define MBI_ESRAM_READ		0x12
+#define MBI_ESRAM_WRITE		0x13
+
 /* Baytrail available units */
 #define BT_MBI_UNIT_AUNIT	0x00
 #define BT_MBI_UNIT_SMC		0x01
@@ -28,50 +40,13 @@
 #define BT_MBI_UNIT_SATA	0xA3
 #define BT_MBI_UNIT_PCIE	0xA6
 
-/* Baytrail read/write opcodes */
-#define BT_MBI_AUNIT_READ	0x10
-#define BT_MBI_AUNIT_WRITE	0x11
-#define BT_MBI_SMC_READ		0x10
-#define BT_MBI_SMC_WRITE	0x11
-#define BT_MBI_CPU_READ		0x10
-#define BT_MBI_CPU_WRITE	0x11
-#define BT_MBI_BUNIT_READ	0x10
-#define BT_MBI_BUNIT_WRITE	0x11
-#define BT_MBI_PMC_READ		0x06
-#define BT_MBI_PMC_WRITE	0x07
-#define BT_MBI_GFX_READ		0x00
-#define BT_MBI_GFX_WRITE	0x01
-#define BT_MBI_SMIO_READ	0x06
-#define BT_MBI_SMIO_WRITE	0x07
-#define BT_MBI_USB_READ		0x06
-#define BT_MBI_USB_WRITE	0x07
-#define BT_MBI_SATA_READ	0x00
-#define BT_MBI_SATA_WRITE	0x01
-#define BT_MBI_PCIE_READ	0x00
-#define BT_MBI_PCIE_WRITE	0x01
-
 /* Quark available units */
 #define QRK_MBI_UNIT_HBA	0x00
 #define QRK_MBI_UNIT_HB		0x03
 #define QRK_MBI_UNIT_RMU	0x04
 #define QRK_MBI_UNIT_MM		0x05
-#define QRK_MBI_UNIT_MMESRAM	0x05
 #define QRK_MBI_UNIT_SOC	0x31
 
-/* Quark read/write opcodes */
-#define QRK_MBI_HBA_READ	0x10
-#define QRK_MBI_HBA_WRITE	0x11
-#define QRK_MBI_HB_READ		0x10
-#define QRK_MBI_HB_WRITE	0x11
-#define QRK_MBI_RMU_READ	0x10
-#define QRK_MBI_RMU_WRITE	0x11
-#define QRK_MBI_MM_READ		0x10
-#define QRK_MBI_MM_WRITE	0x11
-#define QRK_MBI_MMESRAM_READ	0x12
-#define QRK_MBI_MMESRAM_WRITE	0x13
-#define QRK_MBI_SOC_READ	0x06
-#define QRK_MBI_SOC_WRITE	0x07
-
 #if IS_ENABLED(CONFIG_IOSF_MBI)
 
 bool iosf_mbi_available(void);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 615fa9061b57..cfc9a0d2d07c 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -119,6 +119,8 @@ static inline void
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
+extern void default_send_IPI_single(int cpu, int vector);
+extern void default_send_IPI_single_phys(int cpu, int vector);
 extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 5daeca3d0f9e..adc54c12cbd1 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -1,12 +1,18 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
-#ifndef __ASSEMBLY__
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <asm/nops.h>
-#include <asm/asm.h>
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction. Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
 
 #define JUMP_LABEL_NOP_SIZE 5
 
@@ -16,6 +22,14 @@
 # define STATIC_KEY_INIT_NOP	GENERIC_NOP5_ATOMIC
 #endif
 
+#include <asm/asm.h>
+#include <asm/nops.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
@@ -59,5 +73,40 @@ struct jump_entry {
 	jump_label_t key;
 };
 
-#endif  /* __ASSEMBLY__ */
+#else	/* __ASSEMBLY__ */
+
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.else
+	.byte		STATIC_KEY_INIT_NOP
+	.endif
+	.pushsection __jump_table, "aw"
+	_ASM_ALIGN
+	_ASM_PTR .Lstatic_jump_\@, \target, \key
+	.popsection
+.endm
+
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	.byte		STATIC_KEY_INIT_NOP
+	.else
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.endif
+	.pushsection __jump_table, "aw"
+	_ASM_ALIGN
+	_ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+	.popsection
+.endm
+
+#endif	/* __ASSEMBLY__ */
+
 #endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 30cfd64295a0..44adbb819041 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include <linux/pvclock_gtod.h>
 #include <linux/clocksource.h>
 #include <linux/irqbypass.h>
+#include <linux/hyperv.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -45,6 +46,31 @@
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 
+/* x86-specific vcpu->requests bit members */
+#define KVM_REQ_MIGRATE_TIMER		 8
+#define KVM_REQ_REPORT_TPR_ACCESS	 9
+#define KVM_REQ_TRIPLE_FAULT		10
+#define KVM_REQ_MMU_SYNC		11
+#define KVM_REQ_CLOCK_UPDATE		12
+#define KVM_REQ_DEACTIVATE_FPU		13
+#define KVM_REQ_EVENT			14
+#define KVM_REQ_APF_HALT		15
+#define KVM_REQ_STEAL_UPDATE		16
+#define KVM_REQ_NMI			17
+#define KVM_REQ_PMU			18
+#define KVM_REQ_PMI			19
+#define KVM_REQ_SMI			20
+#define KVM_REQ_MASTERCLOCK_UPDATE	21
+#define KVM_REQ_MCLOCK_INPROGRESS	22
+#define KVM_REQ_SCAN_IOAPIC		23
+#define KVM_REQ_GLOBAL_CLOCK_UPDATE	24
+#define KVM_REQ_APIC_PAGE_RELOAD	25
+#define KVM_REQ_HV_CRASH		26
+#define KVM_REQ_IOAPIC_EOI_EXIT		27
+#define KVM_REQ_HV_RESET		28
+#define KVM_REQ_HV_EXIT			29
+#define KVM_REQ_HV_STIMER		30
+
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
@@ -213,6 +239,10 @@ union kvm_mmu_page_role {
 	};
 };
 
+struct kvm_rmap_head {
+	unsigned long val;
+};
+
 struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
@@ -230,7 +260,7 @@ struct kvm_mmu_page {
 	bool unsync;
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
-	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
+	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
 
 	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
 	unsigned long mmu_valid_gen;
@@ -374,10 +404,38 @@ struct kvm_mtrr {
 	struct list_head head;
 };
 
+/* Hyper-V SynIC timer */
+struct kvm_vcpu_hv_stimer {
+	struct hrtimer timer;
+	int index;
+	u64 config;
+	u64 count;
+	u64 exp_time;
+	struct hv_message msg;
+	bool msg_pending;
+};
+
+/* Hyper-V synthetic interrupt controller (SynIC)*/
+struct kvm_vcpu_hv_synic {
+	u64 version;
+	u64 control;
+	u64 msg_page;
+	u64 evt_page;
+	atomic64_t sint[HV_SYNIC_SINT_COUNT];
+	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
+	DECLARE_BITMAP(auto_eoi_bitmap, 256);
+	DECLARE_BITMAP(vec_bitmap, 256);
+	bool active;
+};
+
 /* Hyper-V per vcpu emulation context */
 struct kvm_vcpu_hv {
 	u64 hv_vapic;
 	s64 runtime_offset;
+	struct kvm_vcpu_hv_synic synic;
+	struct kvm_hyperv_exit exit;
+	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
+	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
 };
 
 struct kvm_vcpu_arch {
@@ -400,7 +458,8 @@ struct kvm_vcpu_arch {
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
-	u64 eoi_exit_bitmap[4];
+	bool apicv_active;
+	DECLARE_BITMAP(ioapic_handled_vectors, 256);
 	unsigned long apic_attention;
 	int32_t apic_arb_prio;
 	int mp_state;
@@ -589,7 +648,7 @@ struct kvm_lpage_info {
 };
 
 struct kvm_arch_memory_slot {
-	unsigned long *rmap[KVM_NR_PAGE_SIZES];
+	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
 
@@ -831,10 +890,11 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-	int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
+	bool (*get_enable_apicv)(void);
+	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
-	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu);
+	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
@@ -1086,6 +1146,8 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception);
 
+void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
@@ -1231,6 +1293,9 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request(struct kvm *kvm);
+
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 3bbc07a57a31..73d0c9b92087 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -12,7 +12,9 @@
 #define GUEST_PL 1
 
 /* Page for Switcher text itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES	(1 + 2 * nr_cpu_ids)
+#define SWITCHER_TEXT_PAGES	(1)
+#define SWITCHER_STACK_PAGES	(2 * nr_cpu_ids)
+#define TOTAL_SWITCHER_PAGES	(SWITCHER_TEXT_PAGES + SWITCHER_STACK_PAGES)
 
 /* Where we map the Switcher, in both Host and Guest. */
 extern unsigned long switcher_addr;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 34e62b1dcfce..1e1b07a5a738 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#include <asm/cpu.h>
 #include <linux/earlycpio.h>
 
 #define native_rdmsr(msr, val1, val2)			\
@@ -95,14 +96,14 @@ static inline void __exit exit_amd_microcode(void) {}
 
 /*
  * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_vendor() gets vendor id for BSP.
+ * x86_cpuid_vendor() gets vendor id for BSP.
  *
  * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_vendor() to get vendor id for AP.
+ * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
  *
- * x86_vendor() gets vendor information directly from CPUID.
+ * x86_cpuid_vendor() gets vendor information directly from CPUID.
  */
-static inline int x86_vendor(void)
+static inline int x86_cpuid_vendor(void)
 {
 	u32 eax = 0x00000000;
 	u32 ebx, ecx = 0, edx;
@@ -118,40 +119,14 @@ static inline int x86_vendor(void)
 	return X86_VENDOR_UNKNOWN;
 }
 
-static inline unsigned int __x86_family(unsigned int sig)
-{
-	unsigned int x86;
-
-	x86 = (sig >> 8) & 0xf;
-
-	if (x86 == 0xf)
-		x86 += (sig >> 20) & 0xff;
-
-	return x86;
-}
-
-static inline unsigned int x86_family(void)
+static inline unsigned int x86_cpuid_family(void)
 {
 	u32 eax = 0x00000001;
 	u32 ebx, ecx = 0, edx;
 
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 
-	return __x86_family(eax);
-}
-
-static inline unsigned int x86_model(unsigned int sig)
-{
-	unsigned int x86, model;
-
-	x86 = __x86_family(sig);
-
-	model = (sig >> 4) & 0xf;
-
-	if (x86 == 0x6 || x86 == 0xf)
-		model += ((sig >> 16) & 0xf) << 4;
-
-	return model;
+	return x86_family(eax);
 }
 
 #ifdef CONFIG_MICROCODE
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 379cd3658799..bfd9b2a35a0b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
-	/* Re-load page tables */
+	/*
+	 * Re-load page tables.
+	 *
+	 * This logic has an ordering constraint:
+	 *
+	 *  CPU 0: Write to a PTE for 'next'
+	 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+	 *  CPU 1: set bit 1 in next's mm_cpumask
+	 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+	 *
+	 * We need to prevent an outcome in which CPU 1 observes
+	 * the new PTE value and CPU 0 observes bit 1 clear in
+	 * mm_cpumask.  (If that occurs, then the IPI will never
+	 * be sent, and CPU 0's TLB will contain a stale entry.)
+	 *
+	 * The bad outcome can occur if either CPU's load is
+	 * reordered before that CPU's store, so both CPUs must
+	 * execute full barriers to prevent this from happening.
+	 *
+	 * Thus, switch_mm needs a full barrier between the
+	 * store to mm_cpumask and any operation that could load
+	 * from next->pgd.  TLB fills are special and can happen
+	 * due to instruction fetches or for no reason at all,
+	 * and neither LOCK nor MFENCE orders them.
+	 * Fortunately, load_cr3() is serializing and gives the
+	 * ordering guarantee we need.
+	 *
+	 */
 	load_cr3(next->pgd);
+
 	trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
 	/* Stop flush ipis for the previous mm */
@@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * schedule, protecting us from simultaneous changes.
 		 */
 		cpumask_set_cpu(cpu, mm_cpumask(next));
+
 		/*
 		 * We were in lazy tlb mode and leave_mm disabled
 		 * tlb flush IPI delivery. We must reload CR3
 		 * to make sure to use no freed page tables.
+		 *
+		 * As above, load_cr3() is serializing and orders TLB
+		 * fills with respect to the mm_cpumask write.
 		 */
 		load_cr3(next->pgd);
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
index 93724cc62177..eb4b09b41df5 100644
--- a/arch/x86/include/asm/msi.h
+++ b/arch/x86/include/asm/msi.h
@@ -1,7 +1,13 @@
 #ifndef _ASM_X86_MSI_H
 #define _ASM_X86_MSI_H
 #include <asm/hw_irq.h>
+#include <asm/irqdomain.h>
 
 typedef struct irq_alloc_info msi_alloc_info_t;
 
+int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
+		    msi_alloc_info_t *arg);
+
+void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc);
+
 #endif /* _ASM_X86_MSI_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..b05402ef3b84 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -321,6 +321,7 @@
 #define MSR_F15H_PERF_CTR		0xc0010201
 #define MSR_F15H_NB_PERF_CTL		0xc0010240
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
+#define MSR_F15H_IC_CFG			0xc0011021
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/include/asm/msr-trace.h b/arch/x86/include/asm/msr-trace.h
new file mode 100644
index 000000000000..7567225747d8
--- /dev/null
+++ b/arch/x86/include/asm/msr-trace.h
@@ -0,0 +1,57 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msr
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE msr-trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/
+
+#if !defined(_TRACE_MSR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSR_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracing for x86 model specific registers. Directly maps to the
+ * RDMSR/WRMSR instructions.
+ */
+
+DECLARE_EVENT_CLASS(msr_trace_class,
+	    TP_PROTO(unsigned msr, u64 val, int failed),
+	    TP_ARGS(msr, val, failed),
+	    TP_STRUCT__entry(
+		    __field(	unsigned,	msr )
+		    __field(	u64,		val )
+		    __field(	int,		failed )
+	    ),
+	    TP_fast_assign(
+		    __entry->msr = msr;
+		    __entry->val = val;
+		    __entry->failed = failed;
+	    ),
+	    TP_printk("%x, value %llx%s",
+		      __entry->msr,
+		      __entry->val,
+		      __entry->failed ? " #GP" : "")
+);
+
+DEFINE_EVENT(msr_trace_class, read_msr,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, write_msr,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, rdpmc,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+#endif /* _TRACE_MSR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 77d8b284e4a7..93fb7c1cffda 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -32,6 +32,16 @@ struct msr_regs_info {
 	int err;
 };
 
+struct saved_msr {
+	bool valid;
+	struct msr_info info;
+};
+
+struct saved_msrs {
+	unsigned int num;
+	struct saved_msr *array;
+};
+
 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
 	unsigned long low, high;
@@ -57,11 +67,34 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * Be very careful with includes. This header is prone to include loops.
+ */
+#include <asm/atomic.h>
+#include <linux/tracepoint-defs.h>
+
+extern struct tracepoint __tracepoint_read_msr;
+extern struct tracepoint __tracepoint_write_msr;
+extern struct tracepoint __tracepoint_rdpmc;
+#define msr_tracepoint_active(t) static_key_false(&(t).key)
+extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+#else
+#define msr_tracepoint_active(t) false
+static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+#endif
+
 static inline unsigned long long native_read_msr(unsigned int msr)
 {
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -78,6 +111,8 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), [fault] "i" (-EIO));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -85,6 +120,8 @@ static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
 	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
@@ -102,6 +139,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
102 : "c" (msr), "0" (low), "d" (high), 139 : "c" (msr), "0" (low), "d" (high),
103 [fault] "i" (-EIO) 140 [fault] "i" (-EIO)
104 : "memory"); 141 : "memory");
142 if (msr_tracepoint_active(__tracepoint_read_msr))
143 do_trace_write_msr(msr, ((u64)high << 32 | low), err);
105 return err; 144 return err;
106} 145}
107 146
@@ -160,6 +199,8 @@ static inline unsigned long long native_read_pmc(int counter)
160 DECLARE_ARGS(val, low, high); 199 DECLARE_ARGS(val, low, high);
161 200
162 asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); 201 asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
202 if (msr_tracepoint_active(__tracepoint_rdpmc))
203 do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
163 return EAX_EDX_VAL(val, low, high); 204 return EAX_EDX_VAL(val, low, high);
164} 205}
165 206
@@ -190,7 +231,7 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
190 231
191static inline void wrmsrl(unsigned msr, u64 val) 232static inline void wrmsrl(unsigned msr, u64 val)
192{ 233{
193 native_write_msr(msr, (u32)val, (u32)(val >> 32)); 234 native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
194} 235}
195 236
196/* wrmsr with exception handling */ 237/* wrmsr with exception handling */
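The new MSR trace hooks fire only when the corresponding tracepoint's static key is enabled, and they log the 64-bit value that rdmsr/wrmsr split across EDX:EAX. A minimal user-space sketch of that split and of the (u64)high << 32 | low reassembly the hooks record, assuming nothing beyond standard C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val  = 0x123456789abcdef0ULL;
	uint32_t low  = (uint32_t)(val & 0xffffffffULL);   /* EAX half, as in wrmsrl() */
	uint32_t high = (uint32_t)(val >> 32);              /* EDX half */

	/* The trace hooks log the value reassembled exactly this way. */
	uint64_t logged = ((uint64_t)high << 32) | low;

	printf("round-trip ok: %d\n", val == logged);
	return 0;
}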
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index cc071c6f7d4d..7bd0099384ca 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -5,9 +5,9 @@
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7/* PAGE_SHIFT determines the page size */ 7/* PAGE_SHIFT determines the page size */
8#define PAGE_SHIFT 12 8#define PAGE_SHIFT 12
9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1)) 10#define PAGE_MASK (~(PAGE_SIZE-1))
11 11
12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c759b3cca663..f6192502149e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -291,15 +291,6 @@ static inline void slow_down_io(void)
291#endif 291#endif
292} 292}
293 293
294#ifdef CONFIG_SMP
295static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
296 unsigned long start_esp)
297{
298 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
299 phys_apicid, start_eip, start_esp);
300}
301#endif
302
303static inline void paravirt_activate_mm(struct mm_struct *prev, 294static inline void paravirt_activate_mm(struct mm_struct *prev,
304 struct mm_struct *next) 295 struct mm_struct *next)
305{ 296{
@@ -381,23 +372,6 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
381{ 372{
382 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); 373 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
383} 374}
384static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
385 pmd_t *pmdp)
386{
387 PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
388}
389
390static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
391 pte_t *ptep)
392{
393 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
394}
395
396static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
397 pmd_t *pmdp)
398{
399 PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
400}
401 375
402static inline pte_t __pte(pteval_t val) 376static inline pte_t __pte(pteval_t val)
403{ 377{
@@ -928,23 +902,11 @@ extern void default_banner(void);
928 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 902 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
929 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 903 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
930 904
931#define USERGS_SYSRET32 \
932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
933 CLBR_NONE, \
934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
935
936#ifdef CONFIG_X86_32 905#ifdef CONFIG_X86_32
937#define GET_CR0_INTO_EAX \ 906#define GET_CR0_INTO_EAX \
938 push %ecx; push %edx; \ 907 push %ecx; push %edx; \
939 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ 908 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
940 pop %edx; pop %ecx 909 pop %edx; pop %ecx
941
942#define ENABLE_INTERRUPTS_SYSEXIT \
943 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
944 CLBR_NONE, \
945 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
946
947
948#else /* !CONFIG_X86_32 */ 910#else /* !CONFIG_X86_32 */
949 911
950/* 912/*
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3d44191185f8..77db5616a473 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -162,15 +162,6 @@ struct pv_cpu_ops {
162 162
163 u64 (*read_pmc)(int counter); 163 u64 (*read_pmc)(int counter);
164 164
165#ifdef CONFIG_X86_32
166 /*
167 * Atomically enable interrupts and return to userspace. This
168 * is only used in 32-bit kernels. 64-bit kernels use
169 * usergs_sysret32 instead.
170 */
171 void (*irq_enable_sysexit)(void);
172#endif
173
174 /* 165 /*
175 * Switch to usermode gs and return to 64-bit usermode using 166 * Switch to usermode gs and return to 64-bit usermode using
176 * sysret. Only used in 64-bit kernels to return to 64-bit 167 * sysret. Only used in 64-bit kernels to return to 64-bit
@@ -179,14 +170,6 @@ struct pv_cpu_ops {
179 */ 170 */
180 void (*usergs_sysret64)(void); 171 void (*usergs_sysret64)(void);
181 172
182 /*
183 * Switch to usermode gs and return to 32-bit usermode using
184 * sysret. Used to return to 32-on-64 compat processes.
185 * Other usermode register state, including %esp, must already
186 * be restored.
187 */
188 void (*usergs_sysret32)(void);
189
190 /* Normal iret. Jump to this with the standard iret stack 173 /* Normal iret. Jump to this with the standard iret stack
191 frame set up. */ 174 frame set up. */
192 void (*iret)(void); 175 void (*iret)(void);
@@ -220,14 +203,6 @@ struct pv_irq_ops {
220#endif 203#endif
221}; 204};
222 205
223struct pv_apic_ops {
224#ifdef CONFIG_X86_LOCAL_APIC
225 void (*startup_ipi_hook)(int phys_apicid,
226 unsigned long start_eip,
227 unsigned long start_esp);
228#endif
229};
230
231struct pv_mmu_ops { 206struct pv_mmu_ops {
232 unsigned long (*read_cr2)(void); 207 unsigned long (*read_cr2)(void);
233 void (*write_cr2)(unsigned long); 208 void (*write_cr2)(unsigned long);
@@ -279,12 +254,6 @@ struct pv_mmu_ops {
279 pmd_t *pmdp, pmd_t pmdval); 254 pmd_t *pmdp, pmd_t pmdval);
280 void (*pte_update)(struct mm_struct *mm, unsigned long addr, 255 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
281 pte_t *ptep); 256 pte_t *ptep);
282 void (*pte_update_defer)(struct mm_struct *mm,
283 unsigned long addr, pte_t *ptep);
284 void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
285 pmd_t *pmdp);
286 void (*pmd_update_defer)(struct mm_struct *mm,
287 unsigned long addr, pmd_t *pmdp);
288 257
289 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, 258 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
290 pte_t *ptep); 259 pte_t *ptep);
@@ -359,7 +328,6 @@ struct paravirt_patch_template {
359 struct pv_time_ops pv_time_ops; 328 struct pv_time_ops pv_time_ops;
360 struct pv_cpu_ops pv_cpu_ops; 329 struct pv_cpu_ops pv_cpu_ops;
361 struct pv_irq_ops pv_irq_ops; 330 struct pv_irq_ops pv_irq_ops;
362 struct pv_apic_ops pv_apic_ops;
363 struct pv_mmu_ops pv_mmu_ops; 331 struct pv_mmu_ops pv_mmu_ops;
364 struct pv_lock_ops pv_lock_ops; 332 struct pv_lock_ops pv_lock_ops;
365}; 333};
@@ -369,7 +337,6 @@ extern struct pv_init_ops pv_init_ops;
369extern struct pv_time_ops pv_time_ops; 337extern struct pv_time_ops pv_time_ops;
370extern struct pv_cpu_ops pv_cpu_ops; 338extern struct pv_cpu_ops pv_cpu_ops;
371extern struct pv_irq_ops pv_irq_ops; 339extern struct pv_irq_ops pv_irq_ops;
372extern struct pv_apic_ops pv_apic_ops;
373extern struct pv_mmu_ops pv_mmu_ops; 340extern struct pv_mmu_ops pv_mmu_ops;
374extern struct pv_lock_ops pv_lock_ops; 341extern struct pv_lock_ops pv_lock_ops;
375 342
@@ -407,10 +374,8 @@ extern struct pv_lock_ops pv_lock_ops;
407 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ 374 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
408 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) 375 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
409 376
410unsigned paravirt_patch_nop(void);
411unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); 377unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
412unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); 378unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
413unsigned paravirt_patch_ignore(unsigned len);
414unsigned paravirt_patch_call(void *insnbuf, 379unsigned paravirt_patch_call(void *insnbuf,
415 const void *target, u16 tgt_clobbers, 380 const void *target, u16 tgt_clobbers,
416 unsigned long addr, u16 site_clobbers, 381 unsigned long addr, u16 site_clobbers,
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 6ec0c8b2e9df..0687c4748b8f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
69#define pmd_clear(pmd) native_pmd_clear(pmd) 69#define pmd_clear(pmd) native_pmd_clear(pmd)
70 70
71#define pte_update(mm, addr, ptep) do { } while (0) 71#define pte_update(mm, addr, ptep) do { } while (0)
72#define pte_update_defer(mm, addr, ptep) do { } while (0)
73#define pmd_update(mm, addr, ptep) do { } while (0)
74#define pmd_update_defer(mm, addr, ptep) do { } while (0)
75 72
76#define pgd_val(x) native_pgd_val(x) 73#define pgd_val(x) native_pgd_val(x)
77#define __pgd(x) native_make_pgd(x) 74#define __pgd(x) native_make_pgd(x)
@@ -165,20 +162,22 @@ static inline int pmd_large(pmd_t pte)
165} 162}
166 163
167#ifdef CONFIG_TRANSPARENT_HUGEPAGE 164#ifdef CONFIG_TRANSPARENT_HUGEPAGE
168static inline int pmd_trans_splitting(pmd_t pmd)
169{
170 return pmd_val(pmd) & _PAGE_SPLITTING;
171}
172
173static inline int pmd_trans_huge(pmd_t pmd) 165static inline int pmd_trans_huge(pmd_t pmd)
174{ 166{
175 return pmd_val(pmd) & _PAGE_PSE; 167 return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
176} 168}
177 169
178static inline int has_transparent_hugepage(void) 170static inline int has_transparent_hugepage(void)
179{ 171{
180 return cpu_has_pse; 172 return cpu_has_pse;
181} 173}
174
175#ifdef __HAVE_ARCH_PTE_DEVMAP
176static inline int pmd_devmap(pmd_t pmd)
177{
178 return !!(pmd_val(pmd) & _PAGE_DEVMAP);
179}
180#endif
182#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 181#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
183 182
184static inline pte_t pte_set_flags(pte_t pte, pteval_t set) 183static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
@@ -255,6 +254,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
255 return pte_set_flags(pte, _PAGE_SPECIAL); 254 return pte_set_flags(pte, _PAGE_SPECIAL);
256} 255}
257 256
257static inline pte_t pte_mkdevmap(pte_t pte)
258{
259 return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
260}
261
258static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) 262static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
259{ 263{
260 pmdval_t v = native_pmd_val(pmd); 264 pmdval_t v = native_pmd_val(pmd);
@@ -274,6 +278,11 @@ static inline pmd_t pmd_mkold(pmd_t pmd)
274 return pmd_clear_flags(pmd, _PAGE_ACCESSED); 278 return pmd_clear_flags(pmd, _PAGE_ACCESSED);
275} 279}
276 280
281static inline pmd_t pmd_mkclean(pmd_t pmd)
282{
283 return pmd_clear_flags(pmd, _PAGE_DIRTY);
284}
285
277static inline pmd_t pmd_wrprotect(pmd_t pmd) 286static inline pmd_t pmd_wrprotect(pmd_t pmd)
278{ 287{
279 return pmd_clear_flags(pmd, _PAGE_RW); 288 return pmd_clear_flags(pmd, _PAGE_RW);
@@ -284,6 +293,11 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
284 return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); 293 return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
285} 294}
286 295
296static inline pmd_t pmd_mkdevmap(pmd_t pmd)
297{
298 return pmd_set_flags(pmd, _PAGE_DEVMAP);
299}
300
287static inline pmd_t pmd_mkhuge(pmd_t pmd) 301static inline pmd_t pmd_mkhuge(pmd_t pmd)
288{ 302{
289 return pmd_set_flags(pmd, _PAGE_PSE); 303 return pmd_set_flags(pmd, _PAGE_PSE);
@@ -465,6 +479,13 @@ static inline int pte_present(pte_t a)
465 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); 479 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
466} 480}
467 481
482#ifdef __HAVE_ARCH_PTE_DEVMAP
483static inline int pte_devmap(pte_t a)
484{
485 return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
486}
487#endif
488
468#define pte_accessible pte_accessible 489#define pte_accessible pte_accessible
469static inline bool pte_accessible(struct mm_struct *mm, pte_t a) 490static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
470{ 491{
@@ -731,14 +752,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
731 * updates should either be sets, clears, or set_pte_atomic for P->P 752 * updates should either be sets, clears, or set_pte_atomic for P->P
732 * transitions, which means this hook should only be called for user PTEs. 753 * transitions, which means this hook should only be called for user PTEs.
733 * This hook implies a P->P protection or access change has taken place, which 754 * This hook implies a P->P protection or access change has taken place, which
734 * requires a subsequent TLB flush. The notification can optionally be delayed 755 * requires a subsequent TLB flush.
735 * until the TLB flush event by using the pte_update_defer form of the
736 * interface, but care must be taken to assure that the flush happens while
737 * still holding the same page table lock so that the shadow and primary pages
738 * do not become out of sync on SMP.
739 */ 756 */
740#define pte_update(mm, addr, ptep) do { } while (0) 757#define pte_update(mm, addr, ptep) do { } while (0)
741#define pte_update_defer(mm, addr, ptep) do { } while (0)
742#endif 758#endif
743 759
744/* 760/*
@@ -816,10 +832,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
816 unsigned long address, pmd_t *pmdp); 832 unsigned long address, pmd_t *pmdp);
817 833
818 834
819#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
820extern void pmdp_splitting_flush(struct vm_area_struct *vma,
821 unsigned long addr, pmd_t *pmdp);
822
823#define __HAVE_ARCH_PMD_WRITE 835#define __HAVE_ARCH_PMD_WRITE
824static inline int pmd_write(pmd_t pmd) 836static inline int pmd_write(pmd_t pmd)
825{ 837{
@@ -830,9 +842,7 @@ static inline int pmd_write(pmd_t pmd)
830static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, 842static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
831 pmd_t *pmdp) 843 pmd_t *pmdp)
832{ 844{
833 pmd_t pmd = native_pmdp_get_and_clear(pmdp); 845 return native_pmdp_get_and_clear(pmdp);
834 pmd_update(mm, addr, pmdp);
835 return pmd;
836} 846}
837 847
838#define __HAVE_ARCH_PMDP_SET_WRPROTECT 848#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -840,7 +850,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
840 unsigned long addr, pmd_t *pmdp) 850 unsigned long addr, pmd_t *pmdp)
841{ 851{
842 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); 852 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
843 pmd_update(mm, addr, pmdp);
844} 853}
845 854
846/* 855/*
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a471cadb9630..04c27a013165 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -22,10 +22,11 @@
22#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ 22#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
23#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 23#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
24#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 24#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
25#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
26#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ 25#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
27#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ 26#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
28#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ 27#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
28#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
29#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
29 30
30/* If _PAGE_BIT_PRESENT is clear, we use these: */ 31/* If _PAGE_BIT_PRESENT is clear, we use these: */
31/* - if the user mapped it with PROT_NONE; pte_present gives true */ 32/* - if the user mapped it with PROT_NONE; pte_present gives true */
@@ -46,7 +47,6 @@
46#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) 47#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
47#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) 48#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
48#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) 49#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
49#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
50#define __HAVE_ARCH_PTE_SPECIAL 50#define __HAVE_ARCH_PTE_SPECIAL
51 51
52#ifdef CONFIG_KMEMCHECK 52#ifdef CONFIG_KMEMCHECK
@@ -85,8 +85,11 @@
85 85
86#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 86#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
87#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) 87#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
88#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
89#define __HAVE_ARCH_PTE_DEVMAP
88#else 90#else
89#define _PAGE_NX (_AT(pteval_t, 0)) 91#define _PAGE_NX (_AT(pteval_t, 0))
92#define _PAGE_DEVMAP (_AT(pteval_t, 0))
90#endif 93#endif
91 94
92#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) 95#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
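With _PAGE_BIT_DEVMAP claiming software bit 58, pte_devmap()/pmd_devmap() reduce to a single mask test, and pmd_trans_huge() now rejects device-mapped huge pages. A stand-alone sketch of those mask tests, using illustrative pteval constants rather than the kernel's types:

#include <stdint.h>
#include <stdio.h>

#define PAGE_PSE    (UINT64_C(1) << 7)    /* illustrative bit positions */
#define PAGE_DEVMAP (UINT64_C(1) << 58)

static int pte_devmap(uint64_t val)
{
	return (val & PAGE_DEVMAP) == PAGE_DEVMAP;
}

static int pmd_trans_huge(uint64_t val)
{
	/* PSE set and DEVMAP clear, mirroring the new pgtable.h test */
	return (val & (PAGE_PSE | PAGE_DEVMAP)) == PAGE_PSE;
}

int main(void)
{
	uint64_t thp = PAGE_PSE;
	uint64_t dax = PAGE_PSE | PAGE_DEVMAP;

	printf("thp: huge=%d devmap=%d\n", pmd_trans_huge(thp), pte_devmap(thp));
	printf("dax: huge=%d devmap=%d\n", pmd_trans_huge(dax), pte_devmap(dax));
	return 0;
}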
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
index 7249e6d0902d..5973a2f3db3d 100644
--- a/arch/x86/include/asm/platform_sst_audio.h
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -55,6 +55,7 @@ enum sst_audio_device_id_mrfld {
55 PIPE_MEDIA0_IN = 0x8F, 55 PIPE_MEDIA0_IN = 0x8F,
56 PIPE_MEDIA1_IN = 0x90, 56 PIPE_MEDIA1_IN = 0x90,
57 PIPE_MEDIA2_IN = 0x91, 57 PIPE_MEDIA2_IN = 0x91,
58 PIPE_MEDIA3_IN = 0x9C,
58 PIPE_RSVD = 0xFF, 59 PIPE_RSVD = 0xFF,
59}; 60};
60 61
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d8ce3ec816ab..1544fabcd7f9 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -132,12 +132,7 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
132{ 132{
133 void *vaddr = (void __force *)addr; 133 void *vaddr = (void __force *)addr;
134 134
135 /* TODO: implement the zeroing via non-temporal writes */ 135 memset(vaddr, 0, size);
136 if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
137 clear_page(vaddr);
138 else
139 memset(vaddr, 0, size);
140
141 __arch_wb_cache_pmem(vaddr, size); 136 __arch_wb_cache_pmem(vaddr, size);
142} 137}
143 138
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7a6bed5c08bc..fdcc04020636 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -4,6 +4,15 @@
4#include <linux/clocksource.h> 4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h> 5#include <asm/pvclock-abi.h>
6 6
7#ifdef CONFIG_KVM_GUEST
8extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
9#else
10static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
11{
12 return NULL;
13}
14#endif
15
7/* some helper functions for xen and kvm pv clock sources */ 16/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 17cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); 18u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
@@ -91,10 +100,5 @@ struct pvclock_vsyscall_time_info {
91} __attribute__((__aligned__(SMP_CACHE_BYTES))); 100} __attribute__((__aligned__(SMP_CACHE_BYTES)));
92 101
93#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) 102#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
94#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
95
96int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
97 int size);
98struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
99 103
100#endif /* _ASM_X86_PVCLOCK_H */ 104#endif /* _ASM_X86_PVCLOCK_H */
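pvclock_pvti_cpu0_va() now has a NULL-returning stub when CONFIG_KVM_GUEST is off, so callers can probe for the pvclock page unconditionally. A hedged sketch of such a caller; expose_pvclock_page() and the mapping step are illustrative, not code from this patch:

static int expose_pvclock_page(void)
{
	struct pvclock_vsyscall_time_info *pvti = pvclock_pvti_cpu0_va();

	if (!pvti)
		return -ENODEV;	/* stub returned NULL: not a KVM guest, nothing to map */

	/* ... hand the page containing pvti to the vDSO ... */
	return 0;
}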
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index b002e711ba88..9f92c180ed2f 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -1,6 +1,65 @@
1#ifndef __ASM_QSPINLOCK_PARAVIRT_H 1#ifndef __ASM_QSPINLOCK_PARAVIRT_H
2#define __ASM_QSPINLOCK_PARAVIRT_H 2#define __ASM_QSPINLOCK_PARAVIRT_H
3 3
4/*
5 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
6 * registers. For i386, however, only 1 32-bit register needs to be saved
7 * and restored. So an optimized version of __pv_queued_spin_unlock() is
8 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
9 */
10#ifdef CONFIG_64BIT
11
12PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
13#define __pv_queued_spin_unlock __pv_queued_spin_unlock
14#define PV_UNLOCK "__raw_callee_save___pv_queued_spin_unlock"
15#define PV_UNLOCK_SLOWPATH "__raw_callee_save___pv_queued_spin_unlock_slowpath"
16
17/*
18 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
19 * which combines the registers saving trunk and the body of the following
20 * C code:
21 *
22 * void __pv_queued_spin_unlock(struct qspinlock *lock)
23 * {
24 * struct __qspinlock *l = (void *)lock;
25 * u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
26 *
27 * if (likely(lockval == _Q_LOCKED_VAL))
28 * return;
 29 *	__pv_queued_spin_unlock_slowpath(lock, lockval);
30 * }
31 *
32 * For x86-64,
33 * rdi = lock (first argument)
34 * rsi = lockval (second argument)
35 * rdx = internal variable (set to 0)
36 */
37asm (".pushsection .text;"
38 ".globl " PV_UNLOCK ";"
39 ".align 4,0x90;"
40 PV_UNLOCK ": "
41 "push %rdx;"
42 "mov $0x1,%eax;"
43 "xor %edx,%edx;"
44 "lock cmpxchg %dl,(%rdi);"
45 "cmp $0x1,%al;"
46 "jne .slowpath;"
47 "pop %rdx;"
48 "ret;"
49 ".slowpath: "
50 "push %rsi;"
51 "movzbl %al,%esi;"
52 "call " PV_UNLOCK_SLOWPATH ";"
53 "pop %rsi;"
54 "pop %rdx;"
55 "ret;"
56 ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
57 ".popsection");
58
59#else /* CONFIG_64BIT */
60
61extern void __pv_queued_spin_unlock(struct qspinlock *lock);
4PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock); 62PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
5 63
64#endif /* CONFIG_64BIT */
6#endif 65#endif
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index a82c4f1b4d83..2cb1cc253d51 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,5 +25,6 @@ void __noreturn machine_real_restart(unsigned int type);
25 25
26typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); 26typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
27void nmi_shootdown_cpus(nmi_shootdown_cb callback); 27void nmi_shootdown_cpus(nmi_shootdown_cb callback);
28void run_crash_ipi_callback(struct pt_regs *regs);
28 29
29#endif /* _ASM_X86_REBOOT_H */ 30#endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5..dfcf0727623b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,15 +21,6 @@
21extern int smp_num_siblings; 21extern int smp_num_siblings;
22extern unsigned int num_processors; 22extern unsigned int num_processors;
23 23
24static inline bool cpu_has_ht_siblings(void)
25{
26 bool has_siblings = false;
27#ifdef CONFIG_SMP
28 has_siblings = cpu_has_ht && smp_num_siblings > 1;
29#endif
30 return has_siblings;
31}
32
33DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); 24DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
34DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); 25DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
35/* cpus sharing the last level cache: */ 26/* cpus sharing the last level cache: */
@@ -74,9 +65,6 @@ struct smp_ops {
74extern void set_cpu_sibling_map(int cpu); 65extern void set_cpu_sibling_map(int cpu);
75 66
76#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
77#ifndef CONFIG_PARAVIRT
78#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
79#endif
80extern struct smp_ops smp_ops; 68extern struct smp_ops smp_ops;
81 69
82static inline void smp_send_stop(void) 70static inline void smp_send_stop(void)
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index d1793f06854d..8e9dbe7b73a1 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,7 @@ struct saved_context {
15 unsigned long cr0, cr2, cr3, cr4; 15 unsigned long cr0, cr2, cr3, cr4;
16 u64 misc_enable; 16 u64 misc_enable;
17 bool misc_enable_saved; 17 bool misc_enable_saved;
18 struct saved_msrs saved_msrs;
18 struct desc_ptr gdt_desc; 19 struct desc_ptr gdt_desc;
19 struct desc_ptr idt; 20 struct desc_ptr idt;
20 u16 ldt; 21 u16 ldt;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 7ebf0ebe4e68..6136a18152af 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -24,6 +24,7 @@ struct saved_context {
24 unsigned long cr0, cr2, cr3, cr4, cr8; 24 unsigned long cr0, cr2, cr3, cr4, cr8;
25 u64 misc_enable; 25 u64 misc_enable;
26 bool misc_enable_saved; 26 bool misc_enable_saved;
27 struct saved_msrs saved_msrs;
27 unsigned long efer; 28 unsigned long efer;
28 u16 gdt_pad; /* Unused */ 29 u16 gdt_pad; /* Unused */
29 struct desc_ptr gdt_desc; 30 struct desc_ptr gdt_desc;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0ab94b7..660458af425d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -745,5 +745,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
745#undef __copy_from_user_overflow 745#undef __copy_from_user_overflow
746#undef __copy_to_user_overflow 746#undef __copy_to_user_overflow
747 747
748/*
749 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
750 * nested NMI paths are careful to preserve CR2.
751 *
752 * Caller must use pagefault_enable/disable, or run in interrupt context,
 753 * and also do an access_ok() check.
754 */
755#define __copy_from_user_nmi __copy_from_user_inatomic
756
748#endif /* _ASM_X86_UACCESS_H */ 757#endif /* _ASM_X86_UACCESS_H */
749 758
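Per the comment, __copy_from_user_nmi is just an alias for __copy_from_user_inatomic and leaves the atomicity and access checks to the caller. A hedged sketch of the expected calling pattern; the function name and buffer are illustrative, and access_ok() is shown in the VERIFY_READ form current at the time of this patch:

static unsigned long copy_from_nmi(void *dst, const void __user *src,
				   unsigned long n)
{
	unsigned long not_copied = n;

	if (!access_ok(VERIFY_READ, src, n))
		return n;			/* nothing copied */

	pagefault_disable();			/* keep faults atomic in NMI context */
	not_copied = __copy_from_user_nmi(dst, src, n);
	pagefault_enable();

	return not_copied;			/* bytes left uncopied */
}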
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 756de9190aec..deabaf9759b6 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -22,6 +22,7 @@ struct vdso_image {
22 22
23 long sym_vvar_page; 23 long sym_vvar_page;
24 long sym_hpet_page; 24 long sym_hpet_page;
25 long sym_pvclock_page;
25 long sym_VDSO32_NOTE_MASK; 26 long sym_VDSO32_NOTE_MASK;
26 long sym___kernel_sigreturn; 27 long sym___kernel_sigreturn;
27 long sym___kernel_rt_sigreturn; 28 long sym___kernel_rt_sigreturn;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index cd0fc0cc78bc..1ae89a2721d6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -82,13 +82,11 @@ struct x86_init_paging {
82 * struct x86_init_timers - platform specific timer setup 82 * struct x86_init_timers - platform specific timer setup
83 * @setup_perpcu_clockev: set up the per cpu clock event device for the 83 * @setup_perpcu_clockev: set up the per cpu clock event device for the
84 * boot cpu 84 * boot cpu
85 * @tsc_pre_init: platform function called before TSC init
86 * @timer_init: initialize the platform timer (default PIT/HPET) 85 * @timer_init: initialize the platform timer (default PIT/HPET)
87 * @wallclock_init: init the wallclock device 86 * @wallclock_init: init the wallclock device
88 */ 87 */
89struct x86_init_timers { 88struct x86_init_timers {
90 void (*setup_percpu_clockev)(void); 89 void (*setup_percpu_clockev)(void);
91 void (*tsc_pre_init)(void);
92 void (*timer_init)(void); 90 void (*timer_init)(void);
93 void (*wallclock_init)(void); 91 void (*wallclock_init)(void);
94}; 92};
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 4c20dd333412..3bcdcc84259d 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -310,10 +310,10 @@ HYPERVISOR_mca(struct xen_mc *mc_op)
310} 310}
311 311
312static inline int 312static inline int
313HYPERVISOR_dom0_op(struct xen_platform_op *platform_op) 313HYPERVISOR_platform_op(struct xen_platform_op *op)
314{ 314{
315 platform_op->interface_version = XENPF_INTERFACE_VERSION; 315 op->interface_version = XENPF_INTERFACE_VERSION;
316 return _hypercall1(int, dom0_op, platform_op); 316 return _hypercall1(int, platform_op, op);
317} 317}
318 318
319static inline int 319static inline int
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff33..c54beb44c4c1 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do { \
553 if (cpu_has_xmm) { \ 553 if (cpu_has_xmm) { \
554 xor_speed(&xor_block_pIII_sse); \ 554 xor_speed(&xor_block_pIII_sse); \
555 xor_speed(&xor_block_sse_pf64); \ 555 xor_speed(&xor_block_sse_pf64); \
556 } else if (cpu_has_mmx) { \ 556 } else if (boot_cpu_has(X86_FEATURE_MMX)) { \
557 xor_speed(&xor_block_pII_mmx); \ 557 xor_speed(&xor_block_pII_mmx); \
558 xor_speed(&xor_block_p5_mmx); \ 558 xor_speed(&xor_block_p5_mmx); \
559 } else { \ 559 } else { \
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 040d4083c24f..7956412d09bd 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -269,4 +269,96 @@ typedef struct _HV_REFERENCE_TSC_PAGE {
269#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) 269#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
270#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) 270#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
271 271
272#define HV_SYNIC_STIMER_COUNT (4)
273
274/* Define synthetic interrupt controller message constants. */
275#define HV_MESSAGE_SIZE (256)
276#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
277#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
278
279/* Define hypervisor message types. */
280enum hv_message_type {
281 HVMSG_NONE = 0x00000000,
282
283 /* Memory access messages. */
284 HVMSG_UNMAPPED_GPA = 0x80000000,
285 HVMSG_GPA_INTERCEPT = 0x80000001,
286
287 /* Timer notification messages. */
288 HVMSG_TIMER_EXPIRED = 0x80000010,
289
290 /* Error messages. */
291 HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
292 HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
293 HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
294
295 /* Trace buffer complete messages. */
296 HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
297
298 /* Platform-specific processor intercept messages. */
299 HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
300 HVMSG_X64_MSR_INTERCEPT = 0x80010001,
301 HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
302 HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
303 HVMSG_X64_APIC_EOI = 0x80010004,
304 HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
305};
306
307/* Define synthetic interrupt controller message flags. */
308union hv_message_flags {
309 __u8 asu8;
310 struct {
311 __u8 msg_pending:1;
312 __u8 reserved:7;
313 };
314};
315
316/* Define port identifier type. */
317union hv_port_id {
318 __u32 asu32;
319 struct {
320 __u32 id:24;
321 __u32 reserved:8;
322 } u;
323};
324
325/* Define synthetic interrupt controller message header. */
326struct hv_message_header {
327 __u32 message_type;
328 __u8 payload_size;
329 union hv_message_flags message_flags;
330 __u8 reserved[2];
331 union {
332 __u64 sender;
333 union hv_port_id port;
334 };
335};
336
337/* Define synthetic interrupt controller message format. */
338struct hv_message {
339 struct hv_message_header header;
340 union {
341 __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
342 } u;
343};
344
345/* Define the synthetic interrupt message page layout. */
346struct hv_message_page {
347 struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
348};
349
350/* Define timer message payload structure. */
351struct hv_timer_message_payload {
352 __u32 timer_index;
353 __u32 reserved;
354 __u64 expiration_time; /* When the timer expired */
355 __u64 delivery_time; /* When the message was delivered */
356};
357
358#define HV_STIMER_ENABLE (1ULL << 0)
359#define HV_STIMER_PERIODIC (1ULL << 1)
360#define HV_STIMER_LAZY (1ULL << 2)
361#define HV_STIMER_AUTOENABLE (1ULL << 3)
362#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
363
272#endif 364#endif
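The new SynIC definitions give enough structure to decode a synthetic timer expiration from a message slot. A hedged sketch using only the UAPI types added above; handle_timer() and the msg pointer are illustrative callbacks, not part of this patch:

static void decode_stimer_message(const struct hv_message *msg)
{
	const struct hv_timer_message_payload *payload;

	if (msg->header.message_type != HVMSG_TIMER_EXPIRED)
		return;

	payload = (const struct hv_timer_message_payload *)msg->u.payload;
	handle_timer(payload->timer_index,
		     payload->expiration_time,	/* when the timer fired */
		     payload->delivery_time);	/* when the message landed */
}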
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 03429da2fa80..2184943341bf 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -16,7 +16,7 @@ struct mce {
16 __u8 cpuvendor; /* cpu vendor as encoded in system.h */ 16 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
17 __u8 inject_flags; /* software inject flags */ 17 __u8 inject_flags; /* software inject flags */
18 __u8 severity; 18 __u8 severity;
19 __u8 usable_addr; 19 __u8 pad;
20 __u32 cpuid; /* CPUID 1 EAX */ 20 __u32 cpuid; /* CPUID 1 EAX */
21 __u8 cs; /* code segment */ 21 __u8 cs; /* code segment */
22 __u8 bank; /* machine check bank */ 22 __u8 bank; /* machine check bank */