author     Ingo Molnar <mingo@kernel.org>  2016-01-12 05:08:13 -0500
committer  Ingo Molnar <mingo@kernel.org>  2016-01-12 05:08:13 -0500
commit     c0c57019a65341f08858541d2740b74dee821cf1 (patch)
tree       584ac03f7dfa26c9c45c3d66ad306222e893fa33 /arch/x86/include
parent     8c31902cffc4d716450be549c66a67a8a3dd479c (diff)
parent     ae8a52185e5c070cf4510b323dbc1b9e46b897d6 (diff)
Merge commit 'linus' into x86/urgent, to pick up recent x86 changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/apic.h                |   6
-rw-r--r--  arch/x86/include/asm/atomic.h              |   1
-rw-r--r--  arch/x86/include/asm/atomic64_32.h         |   1
-rw-r--r--  arch/x86/include/asm/calgary.h             |   2
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h          |   2
-rw-r--r--  arch/x86/include/asm/cmpxchg_64.h          |   2
-rw-r--r--  arch/x86/include/asm/cpu.h                 |   3
-rw-r--r--  arch/x86/include/asm/cpufeature.h          | 115
-rw-r--r--  arch/x86/include/asm/fixmap.h              |   5
-rw-r--r--  arch/x86/include/asm/fpu/internal.h        | 173
-rw-r--r--  arch/x86/include/asm/intel_pt.h            |  10
-rw-r--r--  arch/x86/include/asm/ipi.h                 |   2
-rw-r--r--  arch/x86/include/asm/jump_label.h          |  63
-rw-r--r--  arch/x86/include/asm/microcode.h           |  39
-rw-r--r--  arch/x86/include/asm/msi.h                 |   6
-rw-r--r--  arch/x86/include/asm/msr-index.h           |   1
-rw-r--r--  arch/x86/include/asm/msr-trace.h           |  57
-rw-r--r--  arch/x86/include/asm/msr.h                 |  43
-rw-r--r--  arch/x86/include/asm/page_types.h          |   6
-rw-r--r--  arch/x86/include/asm/paravirt.h            |  38
-rw-r--r--  arch/x86/include/asm/paravirt_types.h      |  35
-rw-r--r--  arch/x86/include/asm/pgtable.h             |  15
-rw-r--r--  arch/x86/include/asm/pvclock.h             |  14
-rw-r--r--  arch/x86/include/asm/qspinlock_paravirt.h  |  59
-rw-r--r--  arch/x86/include/asm/reboot.h              |   1
-rw-r--r--  arch/x86/include/asm/smp.h                 |  12
-rw-r--r--  arch/x86/include/asm/suspend_32.h          |   1
-rw-r--r--  arch/x86/include/asm/suspend_64.h          |   1
-rw-r--r--  arch/x86/include/asm/uaccess.h             |   9
-rw-r--r--  arch/x86/include/asm/vdso.h                |   1
-rw-r--r--  arch/x86/include/asm/x86_init.h            |   2
-rw-r--r--  arch/x86/include/asm/xor_32.h              |   2
-rw-r--r--  arch/x86/include/uapi/asm/mce.h            |   2
33 files changed, 422 insertions, 307 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a30316bf801a..c80f6b6f3da2 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -23,6 +23,11 @@
23#define APIC_VERBOSE 1 23#define APIC_VERBOSE 1
24#define APIC_DEBUG 2 24#define APIC_DEBUG 2
25 25
26/* Macros for apic_extnmi which controls external NMI masking */
27#define APIC_EXTNMI_BSP 0 /* Default */
28#define APIC_EXTNMI_ALL 1
29#define APIC_EXTNMI_NONE 2
30
26/* 31/*
27 * Define the default level of output to be very little 32 * Define the default level of output to be very little
28 * This can be turned up by using apic=verbose for more 33 * This can be turned up by using apic=verbose for more
@@ -303,6 +308,7 @@ struct apic {
303 unsigned int *apicid); 308 unsigned int *apicid);
304 309
305 /* ipi */ 310 /* ipi */
311 void (*send_IPI)(int cpu, int vector);
306 void (*send_IPI_mask)(const struct cpumask *mask, int vector); 312 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
307 void (*send_IPI_mask_allbutself)(const struct cpumask *mask, 313 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
308 int vector); 314 int vector);
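The apic.h hunk above adds a single-destination send_IPI callback alongside the existing mask-based hooks (plus the APIC_EXTNMI_* constants for external-NMI masking). As a hedged illustration only, not code from this patch: a caller holding the usual global 'apic' driver pointer could use the new hook roughly as follows, falling back to the mask-based path when a driver does not fill it in.

        /* Illustrative sketch only: single-CPU IPI via the new ->send_IPI hook. */
        static void example_send_ipi_one(int cpu, int vector)
        {
                if (apic->send_IPI)                             /* new per-CPU callback */
                        apic->send_IPI(cpu, vector);
                else                                            /* older mask-based path */
                        apic->send_IPI_mask(cpumask_of(cpu), vector);
        }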
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83e6d91..3e8674288198 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/processor.h>
7#include <asm/alternative.h> 6#include <asm/alternative.h>
8#include <asm/cmpxchg.h> 7#include <asm/cmpxchg.h>
9#include <asm/rmwcc.h> 8#include <asm/rmwcc.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b77fb5..a984111135b1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/processor.h>
7//#include <asm/cmpxchg.h> 6//#include <asm/cmpxchg.h>
8 7
9/* An 64bit atomic type */ 8/* An 64bit atomic type */
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index 0d467b338835..a8303ebe089f 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -31,7 +31,7 @@
31#include <asm/types.h> 31#include <asm/types.h>
32 32
33struct iommu_table { 33struct iommu_table {
34 struct cal_chipset_ops *chip_ops; /* chipset specific funcs */ 34 const struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
35 unsigned long it_base; /* mapped address of tce table */ 35 unsigned long it_base; /* mapped address of tce table */
36 unsigned long it_hint; /* Hint for next alloc */ 36 unsigned long it_hint; /* Hint for next alloc */
37 unsigned long *it_map; /* A simple allocation bitmap for now */ 37 unsigned long *it_map; /* A simple allocation bitmap for now */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481..e4959d023af8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
109 109
110#endif 110#endif
111 111
112#define system_has_cmpxchg_double() cpu_has_cx8 112#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
113 113
114#endif /* _ASM_X86_CMPXCHG_32_H */ 114#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae5..caa23a34c963 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
18 cmpxchg_local((ptr), (o), (n)); \ 18 cmpxchg_local((ptr), (o), (n)); \
19}) 19})
20 20
21#define system_has_cmpxchg_double() cpu_has_cx16 21#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
22 22
23#endif /* _ASM_X86_CMPXCHG_64_H */ 23#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bf2caa1dedc5..678637ad7476 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -36,4 +36,7 @@ extern int _debug_hotplug_cpu(int cpu, int action);
36 36
37int mwait_usable(const struct cpuinfo_x86 *); 37int mwait_usable(const struct cpuinfo_x86 *);
38 38
39unsigned int x86_family(unsigned int sig);
40unsigned int x86_model(unsigned int sig);
41unsigned int x86_stepping(unsigned int sig);
39#endif /* _ASM_X86_CPU_H */ 42#endif /* _ASM_X86_CPU_H */
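The three helpers declared above centralize CPUID signature decoding that used to be open-coded in microcode.h (see that hunk further down, which removes __x86_family() and x86_model()). A sketch of the decoding they implement, assuming the conventional CPUID(1).EAX signature layout; function names here are illustrative.

        /* Sketch of family/model decoding from a CPUID(1).EAX signature,
         * mirroring the open-coded logic this series removes from microcode.h. */
        static unsigned int example_family(unsigned int sig)
        {
                unsigned int fam = (sig >> 8) & 0xf;

                if (fam == 0xf)                         /* extended family field */
                        fam += (sig >> 20) & 0xff;

                return fam;
        }

        static unsigned int example_model(unsigned int sig)
        {
                unsigned int fam = example_family(sig);
                unsigned int model = (sig >> 4) & 0xf;

                if (fam == 0x6 || fam == 0xf)           /* extended model field */
                        model += ((sig >> 16) & 0xf) << 4;

                return model;
        }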
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f7ba9fbf12ee..7ad8c9464297 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
12#include <asm/disabled-features.h> 12#include <asm/disabled-features.h>
13#endif 13#endif
14 14
15#define NCAPINTS 14 /* N 32-bit words worth of info */ 15#define NCAPINTS 16 /* N 32-bit words worth of info */
16#define NBUGINTS 1 /* N 32-bit bug flags */ 16#define NBUGINTS 1 /* N 32-bit bug flags */
17 17
18/* 18/*
@@ -181,22 +181,17 @@
181 181
182/* 182/*
183 * Auxiliary flags: Linux defined - For features scattered in various 183 * Auxiliary flags: Linux defined - For features scattered in various
184 * CPUID levels like 0x6, 0xA etc, word 7 184 * CPUID levels like 0x6, 0xA etc, word 7.
185 *
186 * Reuse free bits when adding new feature flags!
185 */ 187 */
186#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ 188
187#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
188#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 189#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
189#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 190#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
190#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ 191
191#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
192#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
193#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 192#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 193#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
195#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ 194
196#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
197#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
198#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
199#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
200#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 195#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
201 196
202/* Virtualization flags: Linux defined, word 8 */ 197/* Virtualization flags: Linux defined, word 8 */
@@ -205,16 +200,7 @@
205#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 200#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
206#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 201#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
207#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 202#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
208#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ 203
209#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
210#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
211#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
212#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
213#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
214#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
215#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
216#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
217#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
218#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ 204#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
219#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 205#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
220 206
@@ -259,6 +245,30 @@
259/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ 245/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
260#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 246#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
261 247
248/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
249#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
250#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
251#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
252#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
253#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
254#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
255#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
256#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
257#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
258#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
259
260/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
261#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
262#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
263#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
264#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
265#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
266#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
267#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
268#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
269#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
270#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
271
262/* 272/*
263 * BUG word(s) 273 * BUG word(s)
264 */ 274 */
@@ -279,6 +289,26 @@
279#include <asm/asm.h> 289#include <asm/asm.h>
280#include <linux/bitops.h> 290#include <linux/bitops.h>
281 291
292enum cpuid_leafs
293{
294 CPUID_1_EDX = 0,
295 CPUID_8000_0001_EDX,
296 CPUID_8086_0001_EDX,
297 CPUID_LNX_1,
298 CPUID_1_ECX,
299 CPUID_C000_0001_EDX,
300 CPUID_8000_0001_ECX,
301 CPUID_LNX_2,
302 CPUID_LNX_3,
303 CPUID_7_0_EBX,
304 CPUID_D_1_EAX,
305 CPUID_F_0_EDX,
306 CPUID_F_1_EDX,
307 CPUID_8000_0008_EBX,
308 CPUID_6_EAX,
309 CPUID_8000_000A_EDX,
310};
311
282#ifdef CONFIG_X86_FEATURE_NAMES 312#ifdef CONFIG_X86_FEATURE_NAMES
283extern const char * const x86_cap_flags[NCAPINTS*32]; 313extern const char * const x86_cap_flags[NCAPINTS*32];
284extern const char * const x86_power_flags[32]; 314extern const char * const x86_power_flags[32];
@@ -356,60 +386,31 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
356} while (0) 386} while (0)
357 387
358#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) 388#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
359#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
360#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) 389#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
361#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) 390#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
362#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) 391#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
363#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) 392#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
364#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
365#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
366#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
367#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) 393#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
368#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 394#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
369#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 395#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
370#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
371#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
372#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) 396#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
373#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) 397#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
374#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) 398#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
375#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
376#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
377#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
378#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
379#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
380#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
381#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
382#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
383#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
384#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
385#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
386#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
387#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
388#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
389#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) 399#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
390#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
391#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 400#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
392#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) 401#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
393#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) 402#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
394#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
395#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
396#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 403#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
397#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 404#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
398#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
399#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) 405#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
400#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) 406#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
401#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 407#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
402#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 408/*
403#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 409 * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
404#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) 410 * fast paths and boot_cpu_has() otherwise!
405#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) 411 */
406#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 412
407#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 413#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
408#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
409#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
410#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
411
412#if __GNUC__ >= 4
413extern void warn_pre_alternatives(void); 414extern void warn_pre_alternatives(void);
414extern bool __static_cpu_has_safe(u16 bit); 415extern bool __static_cpu_has_safe(u16 bit);
415 416
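NCAPINTS grows from 14 to 16 because the scattered thermal/power bits move into their own word 14 (CPUID leaf 0x6, EAX) and the AMD SVM bits into word 15 (leaf 0x8000000a, EDX); the new enum cpuid_leafs names each capability word in that order. As a hedged illustration of the encoding only (not code from the patch), each X86_FEATURE_* constant packs a word index and a bit number:

        /* Illustrative only: an X86_FEATURE_* value encodes (word * 32 + bit);
         * the word indexes x86_capability[] in enum cpuid_leafs order, so
         * X86_FEATURE_HWP = 14*32 + 7 lives in word 14 (CPUID_6_EAX), bit 7. */
        static inline unsigned int example_feature_word(unsigned int feature)
        {
                return feature / 32;            /* e.g. 14 for X86_FEATURE_HWP */
        }

        static inline unsigned int example_feature_bit(unsigned int feature)
        {
                return feature % 32;            /* e.g. 7 for X86_FEATURE_HWP */
        }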
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index f80d70009ff8..6d7d0e52ed5a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,7 +19,6 @@
19#include <asm/acpi.h> 19#include <asm/acpi.h>
20#include <asm/apicdef.h> 20#include <asm/apicdef.h>
21#include <asm/page.h> 21#include <asm/page.h>
22#include <asm/pvclock.h>
23#ifdef CONFIG_X86_32 22#ifdef CONFIG_X86_32
24#include <linux/threads.h> 23#include <linux/threads.h>
25#include <asm/kmap_types.h> 24#include <asm/kmap_types.h>
@@ -72,10 +71,6 @@ enum fixed_addresses {
72#ifdef CONFIG_X86_VSYSCALL_EMULATION 71#ifdef CONFIG_X86_VSYSCALL_EMULATION
73 VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT, 72 VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
74#endif 73#endif
75#ifdef CONFIG_PARAVIRT_CLOCK
76 PVCLOCK_FIXMAP_BEGIN,
77 PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
78#endif
79#endif 74#endif
80 FIX_DBGP_BASE, 75 FIX_DBGP_BASE,
81 FIX_EARLYCON_MEM_BASE, 76 FIX_EARLYCON_MEM_BASE,
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..eadcdd5bb946 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -224,18 +224,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
224#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" 224#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
225#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" 225#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
226 226
227/* xstate instruction fault handler: */ 227#define XSTATE_OP(op, st, lmask, hmask, err) \
228#define xstate_fault(__err) \ 228 asm volatile("1:" op "\n\t" \
229 \ 229 "xor %[err], %[err]\n" \
230 ".section .fixup,\"ax\"\n" \ 230 "2:\n\t" \
231 \ 231 ".pushsection .fixup,\"ax\"\n\t" \
232 "3: movl $-2,%[_err]\n" \ 232 "3: movl $-2,%[err]\n\t" \
233 " jmp 2b\n" \ 233 "jmp 2b\n\t" \
234 \ 234 ".popsection\n\t" \
235 ".previous\n" \ 235 _ASM_EXTABLE(1b, 3b) \
236 \ 236 : [err] "=r" (err) \
237 _ASM_EXTABLE(1b, 3b) \ 237 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
238 : [_err] "=r" (__err) 238 : "memory")
239
240/*
241 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
242 * format and supervisor states in addition to modified optimization in
243 * XSAVEOPT.
244 *
245 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
246 * supports modified optimization which is not supported by XSAVE.
247 *
248 * We use XSAVE as a fallback.
249 *
250 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
251 * original instruction which gets replaced. We need to use it here as the
252 * address of the instruction where we might get an exception at.
253 */
254#define XSTATE_XSAVE(st, lmask, hmask, err) \
255 asm volatile(ALTERNATIVE_2(XSAVE, \
256 XSAVEOPT, X86_FEATURE_XSAVEOPT, \
257 XSAVES, X86_FEATURE_XSAVES) \
258 "\n" \
259 "xor %[err], %[err]\n" \
260 "3:\n" \
261 ".pushsection .fixup,\"ax\"\n" \
262 "4: movl $-2, %[err]\n" \
263 "jmp 3b\n" \
264 ".popsection\n" \
265 _ASM_EXTABLE(661b, 4b) \
266 : [err] "=r" (err) \
267 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
268 : "memory")
269
270/*
271 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
272 * XSAVE area format.
273 */
274#define XSTATE_XRESTORE(st, lmask, hmask, err) \
275 asm volatile(ALTERNATIVE(XRSTOR, \
276 XRSTORS, X86_FEATURE_XSAVES) \
277 "\n" \
278 "xor %[err], %[err]\n" \
279 "3:\n" \
280 ".pushsection .fixup,\"ax\"\n" \
281 "4: movl $-2, %[err]\n" \
282 "jmp 3b\n" \
283 ".popsection\n" \
284 _ASM_EXTABLE(661b, 4b) \
285 : [err] "=r" (err) \
286 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
287 : "memory")
239 288
240/* 289/*
241 * This function is called only during boot time when x86 caps are not set 290 * This function is called only during boot time when x86 caps are not set
@@ -246,22 +295,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
246 u64 mask = -1; 295 u64 mask = -1;
247 u32 lmask = mask; 296 u32 lmask = mask;
248 u32 hmask = mask >> 32; 297 u32 hmask = mask >> 32;
249 int err = 0; 298 int err;
250 299
251 WARN_ON(system_state != SYSTEM_BOOTING); 300 WARN_ON(system_state != SYSTEM_BOOTING);
252 301
253 if (boot_cpu_has(X86_FEATURE_XSAVES)) 302 if (static_cpu_has_safe(X86_FEATURE_XSAVES))
254 asm volatile("1:"XSAVES"\n\t" 303 XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
255 "2:\n\t"
256 xstate_fault(err)
257 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
258 : "memory");
259 else 304 else
260 asm volatile("1:"XSAVE"\n\t" 305 XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
261 "2:\n\t"
262 xstate_fault(err)
263 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
264 : "memory");
265 306
266 /* We should never fault when copying to a kernel buffer: */ 307 /* We should never fault when copying to a kernel buffer: */
267 WARN_ON_FPU(err); 308 WARN_ON_FPU(err);
@@ -276,22 +317,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
276 u64 mask = -1; 317 u64 mask = -1;
277 u32 lmask = mask; 318 u32 lmask = mask;
278 u32 hmask = mask >> 32; 319 u32 hmask = mask >> 32;
279 int err = 0; 320 int err;
280 321
281 WARN_ON(system_state != SYSTEM_BOOTING); 322 WARN_ON(system_state != SYSTEM_BOOTING);
282 323
283 if (boot_cpu_has(X86_FEATURE_XSAVES)) 324 if (static_cpu_has_safe(X86_FEATURE_XSAVES))
284 asm volatile("1:"XRSTORS"\n\t" 325 XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
285 "2:\n\t"
286 xstate_fault(err)
287 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
288 : "memory");
289 else 326 else
290 asm volatile("1:"XRSTOR"\n\t" 327 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
291 "2:\n\t"
292 xstate_fault(err)
293 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
294 : "memory");
295 328
296 /* We should never fault when copying from a kernel buffer: */ 329 /* We should never fault when copying from a kernel buffer: */
297 WARN_ON_FPU(err); 330 WARN_ON_FPU(err);
@@ -305,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
305 u64 mask = -1; 338 u64 mask = -1;
306 u32 lmask = mask; 339 u32 lmask = mask;
307 u32 hmask = mask >> 32; 340 u32 hmask = mask >> 32;
308 int err = 0; 341 int err;
309 342
310 WARN_ON(!alternatives_patched); 343 WARN_ON(!alternatives_patched);
311 344
312 /* 345 XSTATE_XSAVE(xstate, lmask, hmask, err);
313 * If xsaves is enabled, xsaves replaces xsaveopt because
314 * it supports compact format and supervisor states in addition to
315 * modified optimization in xsaveopt.
316 *
317 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
318 * because xsaveopt supports modified optimization which is not
319 * supported by xsave.
320 *
321 * If none of xsaves and xsaveopt is enabled, use xsave.
322 */
323 alternative_input_2(
324 "1:"XSAVE,
325 XSAVEOPT,
326 X86_FEATURE_XSAVEOPT,
327 XSAVES,
328 X86_FEATURE_XSAVES,
329 [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
330 "memory");
331 asm volatile("2:\n\t"
332 xstate_fault(err)
333 : "0" (err)
334 : "memory");
335 346
336 /* We should never fault when copying to a kernel buffer: */ 347 /* We should never fault when copying to a kernel buffer: */
337 WARN_ON_FPU(err); 348 WARN_ON_FPU(err);
@@ -344,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
344{ 355{
345 u32 lmask = mask; 356 u32 lmask = mask;
346 u32 hmask = mask >> 32; 357 u32 hmask = mask >> 32;
347 int err = 0; 358 int err;
348 359
349 /* 360 XSTATE_XRESTORE(xstate, lmask, hmask, err);
350 * Use xrstors to restore context if it is enabled. xrstors supports
351 * compacted format of xsave area which is not supported by xrstor.
352 */
353 alternative_input(
354 "1: " XRSTOR,
355 XRSTORS,
356 X86_FEATURE_XSAVES,
357 "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
358 : "memory");
359
360 asm volatile("2:\n"
361 xstate_fault(err)
362 : "0" (err)
363 : "memory");
364 361
365 /* We should never fault when copying from a kernel buffer: */ 362 /* We should never fault when copying from a kernel buffer: */
366 WARN_ON_FPU(err); 363 WARN_ON_FPU(err);
@@ -388,12 +385,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
388 if (unlikely(err)) 385 if (unlikely(err))
389 return -EFAULT; 386 return -EFAULT;
390 387
391 __asm__ __volatile__(ASM_STAC "\n" 388 stac();
392 "1:"XSAVE"\n" 389 XSTATE_OP(XSAVE, buf, -1, -1, err);
393 "2: " ASM_CLAC "\n" 390 clac();
394 xstate_fault(err) 391
395 : "D" (buf), "a" (-1), "d" (-1), "0" (err)
396 : "memory");
397 return err; 392 return err;
398} 393}
399 394
@@ -405,14 +400,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
405 struct xregs_state *xstate = ((__force struct xregs_state *)buf); 400 struct xregs_state *xstate = ((__force struct xregs_state *)buf);
406 u32 lmask = mask; 401 u32 lmask = mask;
407 u32 hmask = mask >> 32; 402 u32 hmask = mask >> 32;
408 int err = 0; 403 int err;
409 404
410 __asm__ __volatile__(ASM_STAC "\n" 405 stac();
411 "1:"XRSTOR"\n" 406 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
412 "2: " ASM_CLAC "\n" 407 clac();
413 xstate_fault(err) 408
414 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
415 : "memory"); /* memory required? */
416 return err; 409 return err;
417} 410}
418 411
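The new XSTATE_OP()/XSTATE_XSAVE()/XSTATE_XRESTORE() macros all take the requested-component mask as two 32-bit halves, because the XSAVE-family instructions read the component bitmap from EDX:EAX. That is why every caller in this hunk splits the mask as "lmask = mask; hmask = mask >> 32". A minimal sketch of that split, illustrative only:

        /* Illustrative only: split a 64-bit xstate component mask into the
         * EAX (low) and EDX (high) halves expected by the XSAVE/XRSTOR variants. */
        #include <stdint.h>

        static inline void split_xstate_mask(uint64_t mask,
                                             uint32_t *lmask, uint32_t *hmask)
        {
                *lmask = (uint32_t)mask;                /* bits  0..31 -> EAX */
                *hmask = (uint32_t)(mask >> 32);        /* bits 32..63 -> EDX */
        }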
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
new file mode 100644
index 000000000000..e1a411786bf5
--- /dev/null
+++ b/arch/x86/include/asm/intel_pt.h
@@ -0,0 +1,10 @@
1#ifndef _ASM_X86_INTEL_PT_H
2#define _ASM_X86_INTEL_PT_H
3
4#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
5void cpu_emergency_stop_pt(void);
6#else
7static inline void cpu_emergency_stop_pt(void) {}
8#endif
9
10#endif /* _ASM_X86_INTEL_PT_H */
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 615fa9061b57..cfc9a0d2d07c 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -119,6 +119,8 @@ static inline void
119 native_apic_mem_write(APIC_ICR, cfg); 119 native_apic_mem_write(APIC_ICR, cfg);
120} 120}
121 121
122extern void default_send_IPI_single(int cpu, int vector);
123extern void default_send_IPI_single_phys(int cpu, int vector);
122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, 124extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
123 int vector); 125 int vector);
124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, 126extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 5daeca3d0f9e..adc54c12cbd1 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -1,12 +1,18 @@
1#ifndef _ASM_X86_JUMP_LABEL_H 1#ifndef _ASM_X86_JUMP_LABEL_H
2#define _ASM_X86_JUMP_LABEL_H 2#define _ASM_X86_JUMP_LABEL_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef HAVE_JUMP_LABEL
5 5/*
6#include <linux/stringify.h> 6 * For better or for worse, if jump labels (the gcc extension) are missing,
7#include <linux/types.h> 7 * then the entire static branch patching infrastructure is compiled out.
8#include <asm/nops.h> 8 * If that happens, the code in here will malfunction. Raise a compiler
9#include <asm/asm.h> 9 * error instead.
10 *
11 * In theory, jump labels and the static branch patching infrastructure
12 * could be decoupled to fix this.
13 */
14#error asm/jump_label.h included on a non-jump-label kernel
15#endif
10 16
11#define JUMP_LABEL_NOP_SIZE 5 17#define JUMP_LABEL_NOP_SIZE 5
12 18
@@ -16,6 +22,14 @@
16# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC 22# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
17#endif 23#endif
18 24
25#include <asm/asm.h>
26#include <asm/nops.h>
27
28#ifndef __ASSEMBLY__
29
30#include <linux/stringify.h>
31#include <linux/types.h>
32
19static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 33static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
20{ 34{
21 asm_volatile_goto("1:" 35 asm_volatile_goto("1:"
@@ -59,5 +73,40 @@ struct jump_entry {
59 jump_label_t key; 73 jump_label_t key;
60}; 74};
61 75
62#endif /* __ASSEMBLY__ */ 76#else /* __ASSEMBLY__ */
77
78.macro STATIC_JUMP_IF_TRUE target, key, def
79.Lstatic_jump_\@:
80 .if \def
81 /* Equivalent to "jmp.d32 \target" */
82 .byte 0xe9
83 .long \target - .Lstatic_jump_after_\@
84.Lstatic_jump_after_\@:
85 .else
86 .byte STATIC_KEY_INIT_NOP
87 .endif
88 .pushsection __jump_table, "aw"
89 _ASM_ALIGN
90 _ASM_PTR .Lstatic_jump_\@, \target, \key
91 .popsection
92.endm
93
94.macro STATIC_JUMP_IF_FALSE target, key, def
95.Lstatic_jump_\@:
96 .if \def
97 .byte STATIC_KEY_INIT_NOP
98 .else
99 /* Equivalent to "jmp.d32 \target" */
100 .byte 0xe9
101 .long \target - .Lstatic_jump_after_\@
102.Lstatic_jump_after_\@:
103 .endif
104 .pushsection __jump_table, "aw"
105 _ASM_ALIGN
106 _ASM_PTR .Lstatic_jump_\@, \target, \key + 1
107 .popsection
108.endm
109
110#endif /* __ASSEMBLY__ */
111
63#endif 112#endif
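The assembly-only branch added above gives .S files STATIC_JUMP_IF_TRUE/STATIC_JUMP_IF_FALSE macros that emit the same 5-byte NOP-or-jump site and __jump_table entry that C code gets from arch_static_branch(). For context, a hedged sketch of the ordinary C-side usage these sites pair with (standard static-key API, not introduced by this hunk; do_rare_work() is a hypothetical helper):

        /* Illustrative only: conventional C-side static-key usage. */
        DEFINE_STATIC_KEY_FALSE(example_key);

        static void example_fast_path(void)
        {
                /* Compiles to a NOP until static_branch_enable(&example_key)
                 * patches this site into a jump to the unlikely block. */
                if (static_branch_unlikely(&example_key))
                        do_rare_work();
        }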
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 34e62b1dcfce..1e1b07a5a738 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_X86_MICROCODE_H 1#ifndef _ASM_X86_MICROCODE_H
2#define _ASM_X86_MICROCODE_H 2#define _ASM_X86_MICROCODE_H
3 3
4#include <asm/cpu.h>
4#include <linux/earlycpio.h> 5#include <linux/earlycpio.h>
5 6
6#define native_rdmsr(msr, val1, val2) \ 7#define native_rdmsr(msr, val1, val2) \
@@ -95,14 +96,14 @@ static inline void __exit exit_amd_microcode(void) {}
95 96
96/* 97/*
97 * In early loading microcode phase on BSP, boot_cpu_data is not set up yet. 98 * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
98 * x86_vendor() gets vendor id for BSP. 99 * x86_cpuid_vendor() gets vendor id for BSP.
99 * 100 *
100 * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify 101 * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
101 * coding, we still use x86_vendor() to get vendor id for AP. 102 * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
102 * 103 *
103 * x86_vendor() gets vendor information directly from CPUID. 104 * x86_cpuid_vendor() gets vendor information directly from CPUID.
104 */ 105 */
105static inline int x86_vendor(void) 106static inline int x86_cpuid_vendor(void)
106{ 107{
107 u32 eax = 0x00000000; 108 u32 eax = 0x00000000;
108 u32 ebx, ecx = 0, edx; 109 u32 ebx, ecx = 0, edx;
@@ -118,40 +119,14 @@ static inline int x86_vendor(void)
118 return X86_VENDOR_UNKNOWN; 119 return X86_VENDOR_UNKNOWN;
119} 120}
120 121
121static inline unsigned int __x86_family(unsigned int sig) 122static inline unsigned int x86_cpuid_family(void)
122{
123 unsigned int x86;
124
125 x86 = (sig >> 8) & 0xf;
126
127 if (x86 == 0xf)
128 x86 += (sig >> 20) & 0xff;
129
130 return x86;
131}
132
133static inline unsigned int x86_family(void)
134{ 123{
135 u32 eax = 0x00000001; 124 u32 eax = 0x00000001;
136 u32 ebx, ecx = 0, edx; 125 u32 ebx, ecx = 0, edx;
137 126
138 native_cpuid(&eax, &ebx, &ecx, &edx); 127 native_cpuid(&eax, &ebx, &ecx, &edx);
139 128
140 return __x86_family(eax); 129 return x86_family(eax);
141}
142
143static inline unsigned int x86_model(unsigned int sig)
144{
145 unsigned int x86, model;
146
147 x86 = __x86_family(sig);
148
149 model = (sig >> 4) & 0xf;
150
151 if (x86 == 0x6 || x86 == 0xf)
152 model += ((sig >> 16) & 0xf) << 4;
153
154 return model;
155} 130}
156 131
157#ifdef CONFIG_MICROCODE 132#ifdef CONFIG_MICROCODE
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
index 93724cc62177..eb4b09b41df5 100644
--- a/arch/x86/include/asm/msi.h
+++ b/arch/x86/include/asm/msi.h
@@ -1,7 +1,13 @@
1#ifndef _ASM_X86_MSI_H 1#ifndef _ASM_X86_MSI_H
2#define _ASM_X86_MSI_H 2#define _ASM_X86_MSI_H
3#include <asm/hw_irq.h> 3#include <asm/hw_irq.h>
4#include <asm/irqdomain.h>
4 5
5typedef struct irq_alloc_info msi_alloc_info_t; 6typedef struct irq_alloc_info msi_alloc_info_t;
6 7
8int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
9 msi_alloc_info_t *arg);
10
11void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc);
12
7#endif /* _ASM_X86_MSI_H */ 13#endif /* _ASM_X86_MSI_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..b05402ef3b84 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -321,6 +321,7 @@
321#define MSR_F15H_PERF_CTR 0xc0010201 321#define MSR_F15H_PERF_CTR 0xc0010201
322#define MSR_F15H_NB_PERF_CTL 0xc0010240 322#define MSR_F15H_NB_PERF_CTL 0xc0010240
323#define MSR_F15H_NB_PERF_CTR 0xc0010241 323#define MSR_F15H_NB_PERF_CTR 0xc0010241
324#define MSR_F15H_IC_CFG 0xc0011021
324 325
325/* Fam 10h MSRs */ 326/* Fam 10h MSRs */
326#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 327#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
diff --git a/arch/x86/include/asm/msr-trace.h b/arch/x86/include/asm/msr-trace.h
new file mode 100644
index 000000000000..7567225747d8
--- /dev/null
+++ b/arch/x86/include/asm/msr-trace.h
@@ -0,0 +1,57 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM msr
3
4#undef TRACE_INCLUDE_FILE
5#define TRACE_INCLUDE_FILE msr-trace
6
7#undef TRACE_INCLUDE_PATH
8#define TRACE_INCLUDE_PATH asm/
9
10#if !defined(_TRACE_MSR_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_MSR_H
12
13#include <linux/tracepoint.h>
14
15/*
16 * Tracing for x86 model specific registers. Directly maps to the
17 * RDMSR/WRMSR instructions.
18 */
19
20DECLARE_EVENT_CLASS(msr_trace_class,
21 TP_PROTO(unsigned msr, u64 val, int failed),
22 TP_ARGS(msr, val, failed),
23 TP_STRUCT__entry(
24 __field( unsigned, msr )
25 __field( u64, val )
26 __field( int, failed )
27 ),
28 TP_fast_assign(
29 __entry->msr = msr;
30 __entry->val = val;
31 __entry->failed = failed;
32 ),
33 TP_printk("%x, value %llx%s",
34 __entry->msr,
35 __entry->val,
36 __entry->failed ? " #GP" : "")
37);
38
39DEFINE_EVENT(msr_trace_class, read_msr,
40 TP_PROTO(unsigned msr, u64 val, int failed),
41 TP_ARGS(msr, val, failed)
42);
43
44DEFINE_EVENT(msr_trace_class, write_msr,
45 TP_PROTO(unsigned msr, u64 val, int failed),
46 TP_ARGS(msr, val, failed)
47);
48
49DEFINE_EVENT(msr_trace_class, rdpmc,
50 TP_PROTO(unsigned msr, u64 val, int failed),
51 TP_ARGS(msr, val, failed)
52);
53
54#endif /* _TRACE_MSR_H */
55
56/* This part must be outside protection */
57#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 77d8b284e4a7..93fb7c1cffda 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -32,6 +32,16 @@ struct msr_regs_info {
32 int err; 32 int err;
33}; 33};
34 34
35struct saved_msr {
36 bool valid;
37 struct msr_info info;
38};
39
40struct saved_msrs {
41 unsigned int num;
42 struct saved_msr *array;
43};
44
35static inline unsigned long long native_read_tscp(unsigned int *aux) 45static inline unsigned long long native_read_tscp(unsigned int *aux)
36{ 46{
37 unsigned long low, high; 47 unsigned long low, high;
@@ -57,11 +67,34 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
57#define EAX_EDX_RET(val, low, high) "=A" (val) 67#define EAX_EDX_RET(val, low, high) "=A" (val)
58#endif 68#endif
59 69
70#ifdef CONFIG_TRACEPOINTS
71/*
72 * Be very careful with includes. This header is prone to include loops.
73 */
74#include <asm/atomic.h>
75#include <linux/tracepoint-defs.h>
76
77extern struct tracepoint __tracepoint_read_msr;
78extern struct tracepoint __tracepoint_write_msr;
79extern struct tracepoint __tracepoint_rdpmc;
80#define msr_tracepoint_active(t) static_key_false(&(t).key)
81extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
82extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
83extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
84#else
85#define msr_tracepoint_active(t) false
86static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
87static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
88static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
89#endif
90
60static inline unsigned long long native_read_msr(unsigned int msr) 91static inline unsigned long long native_read_msr(unsigned int msr)
61{ 92{
62 DECLARE_ARGS(val, low, high); 93 DECLARE_ARGS(val, low, high);
63 94
64 asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr)); 95 asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
96 if (msr_tracepoint_active(__tracepoint_read_msr))
97 do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
65 return EAX_EDX_VAL(val, low, high); 98 return EAX_EDX_VAL(val, low, high);
66} 99}
67 100
@@ -78,6 +111,8 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
78 _ASM_EXTABLE(2b, 3b) 111 _ASM_EXTABLE(2b, 3b)
79 : [err] "=r" (*err), EAX_EDX_RET(val, low, high) 112 : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
80 : "c" (msr), [fault] "i" (-EIO)); 113 : "c" (msr), [fault] "i" (-EIO));
114 if (msr_tracepoint_active(__tracepoint_read_msr))
115 do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
81 return EAX_EDX_VAL(val, low, high); 116 return EAX_EDX_VAL(val, low, high);
82} 117}
83 118
@@ -85,6 +120,8 @@ static inline void native_write_msr(unsigned int msr,
85 unsigned low, unsigned high) 120 unsigned low, unsigned high)
86{ 121{
87 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory"); 122 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
123 if (msr_tracepoint_active(__tracepoint_read_msr))
124 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
88} 125}
89 126
90/* Can be uninlined because referenced by paravirt */ 127/* Can be uninlined because referenced by paravirt */
@@ -102,6 +139,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
102 : "c" (msr), "0" (low), "d" (high), 139 : "c" (msr), "0" (low), "d" (high),
103 [fault] "i" (-EIO) 140 [fault] "i" (-EIO)
104 : "memory"); 141 : "memory");
142 if (msr_tracepoint_active(__tracepoint_read_msr))
143 do_trace_write_msr(msr, ((u64)high << 32 | low), err);
105 return err; 144 return err;
106} 145}
107 146
@@ -160,6 +199,8 @@ static inline unsigned long long native_read_pmc(int counter)
160 DECLARE_ARGS(val, low, high); 199 DECLARE_ARGS(val, low, high);
161 200
162 asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); 201 asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
202 if (msr_tracepoint_active(__tracepoint_rdpmc))
203 do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
163 return EAX_EDX_VAL(val, low, high); 204 return EAX_EDX_VAL(val, low, high);
164} 205}
165 206
@@ -190,7 +231,7 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
190 231
191static inline void wrmsrl(unsigned msr, u64 val) 232static inline void wrmsrl(unsigned msr, u64 val)
192{ 233{
193 native_write_msr(msr, (u32)val, (u32)(val >> 32)); 234 native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
194} 235}
195 236
196/* wrmsr with exception handling */ 237/* wrmsr with exception handling */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index cc071c6f7d4d..7bd0099384ca 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -5,9 +5,9 @@
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7/* PAGE_SHIFT determines the page size */ 7/* PAGE_SHIFT determines the page size */
8#define PAGE_SHIFT 12 8#define PAGE_SHIFT 12
9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1)) 10#define PAGE_MASK (~(PAGE_SIZE-1))
11 11
12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c759b3cca663..f6192502149e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -291,15 +291,6 @@ static inline void slow_down_io(void)
291#endif 291#endif
292} 292}
293 293
294#ifdef CONFIG_SMP
295static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
296 unsigned long start_esp)
297{
298 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
299 phys_apicid, start_eip, start_esp);
300}
301#endif
302
303static inline void paravirt_activate_mm(struct mm_struct *prev, 294static inline void paravirt_activate_mm(struct mm_struct *prev,
304 struct mm_struct *next) 295 struct mm_struct *next)
305{ 296{
@@ -381,23 +372,6 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
381{ 372{
382 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); 373 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
383} 374}
384static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
385 pmd_t *pmdp)
386{
387 PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
388}
389
390static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
391 pte_t *ptep)
392{
393 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
394}
395
396static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
397 pmd_t *pmdp)
398{
399 PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
400}
401 375
402static inline pte_t __pte(pteval_t val) 376static inline pte_t __pte(pteval_t val)
403{ 377{
@@ -928,23 +902,11 @@ extern void default_banner(void);
928 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 902 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
929 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 903 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
930 904
931#define USERGS_SYSRET32 \
932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
933 CLBR_NONE, \
934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
935
936#ifdef CONFIG_X86_32 905#ifdef CONFIG_X86_32
937#define GET_CR0_INTO_EAX \ 906#define GET_CR0_INTO_EAX \
938 push %ecx; push %edx; \ 907 push %ecx; push %edx; \
939 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ 908 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
940 pop %edx; pop %ecx 909 pop %edx; pop %ecx
941
942#define ENABLE_INTERRUPTS_SYSEXIT \
943 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
944 CLBR_NONE, \
945 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
946
947
948#else /* !CONFIG_X86_32 */ 910#else /* !CONFIG_X86_32 */
949 911
950/* 912/*
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3d44191185f8..77db5616a473 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -162,15 +162,6 @@ struct pv_cpu_ops {
162 162
163 u64 (*read_pmc)(int counter); 163 u64 (*read_pmc)(int counter);
164 164
165#ifdef CONFIG_X86_32
166 /*
167 * Atomically enable interrupts and return to userspace. This
168 * is only used in 32-bit kernels. 64-bit kernels use
169 * usergs_sysret32 instead.
170 */
171 void (*irq_enable_sysexit)(void);
172#endif
173
174 /* 165 /*
175 * Switch to usermode gs and return to 64-bit usermode using 166 * Switch to usermode gs and return to 64-bit usermode using
176 * sysret. Only used in 64-bit kernels to return to 64-bit 167 * sysret. Only used in 64-bit kernels to return to 64-bit
@@ -179,14 +170,6 @@ struct pv_cpu_ops {
179 */ 170 */
180 void (*usergs_sysret64)(void); 171 void (*usergs_sysret64)(void);
181 172
182 /*
183 * Switch to usermode gs and return to 32-bit usermode using
184 * sysret. Used to return to 32-on-64 compat processes.
185 * Other usermode register state, including %esp, must already
186 * be restored.
187 */
188 void (*usergs_sysret32)(void);
189
190 /* Normal iret. Jump to this with the standard iret stack 173 /* Normal iret. Jump to this with the standard iret stack
191 frame set up. */ 174 frame set up. */
192 void (*iret)(void); 175 void (*iret)(void);
@@ -220,14 +203,6 @@ struct pv_irq_ops {
220#endif 203#endif
221}; 204};
222 205
223struct pv_apic_ops {
224#ifdef CONFIG_X86_LOCAL_APIC
225 void (*startup_ipi_hook)(int phys_apicid,
226 unsigned long start_eip,
227 unsigned long start_esp);
228#endif
229};
230
231struct pv_mmu_ops { 206struct pv_mmu_ops {
232 unsigned long (*read_cr2)(void); 207 unsigned long (*read_cr2)(void);
233 void (*write_cr2)(unsigned long); 208 void (*write_cr2)(unsigned long);
@@ -279,12 +254,6 @@ struct pv_mmu_ops {
279 pmd_t *pmdp, pmd_t pmdval); 254 pmd_t *pmdp, pmd_t pmdval);
280 void (*pte_update)(struct mm_struct *mm, unsigned long addr, 255 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
281 pte_t *ptep); 256 pte_t *ptep);
282 void (*pte_update_defer)(struct mm_struct *mm,
283 unsigned long addr, pte_t *ptep);
284 void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
285 pmd_t *pmdp);
286 void (*pmd_update_defer)(struct mm_struct *mm,
287 unsigned long addr, pmd_t *pmdp);
288 257
289 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, 258 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
290 pte_t *ptep); 259 pte_t *ptep);
@@ -359,7 +328,6 @@ struct paravirt_patch_template {
359 struct pv_time_ops pv_time_ops; 328 struct pv_time_ops pv_time_ops;
360 struct pv_cpu_ops pv_cpu_ops; 329 struct pv_cpu_ops pv_cpu_ops;
361 struct pv_irq_ops pv_irq_ops; 330 struct pv_irq_ops pv_irq_ops;
362 struct pv_apic_ops pv_apic_ops;
363 struct pv_mmu_ops pv_mmu_ops; 331 struct pv_mmu_ops pv_mmu_ops;
364 struct pv_lock_ops pv_lock_ops; 332 struct pv_lock_ops pv_lock_ops;
365}; 333};
@@ -369,7 +337,6 @@ extern struct pv_init_ops pv_init_ops;
369extern struct pv_time_ops pv_time_ops; 337extern struct pv_time_ops pv_time_ops;
370extern struct pv_cpu_ops pv_cpu_ops; 338extern struct pv_cpu_ops pv_cpu_ops;
371extern struct pv_irq_ops pv_irq_ops; 339extern struct pv_irq_ops pv_irq_ops;
372extern struct pv_apic_ops pv_apic_ops;
373extern struct pv_mmu_ops pv_mmu_ops; 340extern struct pv_mmu_ops pv_mmu_ops;
374extern struct pv_lock_ops pv_lock_ops; 341extern struct pv_lock_ops pv_lock_ops;
375 342
@@ -407,10 +374,8 @@ extern struct pv_lock_ops pv_lock_ops;
407 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ 374 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
408 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) 375 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
409 376
410unsigned paravirt_patch_nop(void);
411unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); 377unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
412unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); 378unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
413unsigned paravirt_patch_ignore(unsigned len);
414unsigned paravirt_patch_call(void *insnbuf, 379unsigned paravirt_patch_call(void *insnbuf,
415 const void *target, u16 tgt_clobbers, 380 const void *target, u16 tgt_clobbers,
416 unsigned long addr, u16 site_clobbers, 381 unsigned long addr, u16 site_clobbers,
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 6ec0c8b2e9df..d3eee663c41f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
69#define pmd_clear(pmd) native_pmd_clear(pmd) 69#define pmd_clear(pmd) native_pmd_clear(pmd)
70 70
71#define pte_update(mm, addr, ptep) do { } while (0) 71#define pte_update(mm, addr, ptep) do { } while (0)
72#define pte_update_defer(mm, addr, ptep) do { } while (0)
73#define pmd_update(mm, addr, ptep) do { } while (0)
74#define pmd_update_defer(mm, addr, ptep) do { } while (0)
75 72
76#define pgd_val(x) native_pgd_val(x) 73#define pgd_val(x) native_pgd_val(x)
77#define __pgd(x) native_make_pgd(x) 74#define __pgd(x) native_make_pgd(x)
@@ -731,14 +728,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
731 * updates should either be sets, clears, or set_pte_atomic for P->P 728 * updates should either be sets, clears, or set_pte_atomic for P->P
732 * transitions, which means this hook should only be called for user PTEs. 729 * transitions, which means this hook should only be called for user PTEs.
733 * This hook implies a P->P protection or access change has taken place, which 730 * This hook implies a P->P protection or access change has taken place, which
734 * requires a subsequent TLB flush. The notification can optionally be delayed 731 * requires a subsequent TLB flush.
735 * until the TLB flush event by using the pte_update_defer form of the
736 * interface, but care must be taken to assure that the flush happens while
737 * still holding the same page table lock so that the shadow and primary pages
738 * do not become out of sync on SMP.
739 */ 732 */
740#define pte_update(mm, addr, ptep) do { } while (0) 733#define pte_update(mm, addr, ptep) do { } while (0)
741#define pte_update_defer(mm, addr, ptep) do { } while (0)
742#endif 734#endif
743 735
744/* 736/*
@@ -830,9 +822,7 @@ static inline int pmd_write(pmd_t pmd)
830static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, 822static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
831 pmd_t *pmdp) 823 pmd_t *pmdp)
832{ 824{
833 pmd_t pmd = native_pmdp_get_and_clear(pmdp); 825 return native_pmdp_get_and_clear(pmdp);
834 pmd_update(mm, addr, pmdp);
835 return pmd;
836} 826}
837 827
838#define __HAVE_ARCH_PMDP_SET_WRPROTECT 828#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -840,7 +830,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
840 unsigned long addr, pmd_t *pmdp) 830 unsigned long addr, pmd_t *pmdp)
841{ 831{
842 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); 832 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
843 pmd_update(mm, addr, pmdp);
844} 833}
845 834
846/* 835/*
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7a6bed5c08bc..fdcc04020636 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -4,6 +4,15 @@
4#include <linux/clocksource.h> 4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h> 5#include <asm/pvclock-abi.h>
6 6
7#ifdef CONFIG_KVM_GUEST
8extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
9#else
10static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
11{
12 return NULL;
13}
14#endif
15
7/* some helper functions for xen and kvm pv clock sources */ 16/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 17cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); 18u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
@@ -91,10 +100,5 @@ struct pvclock_vsyscall_time_info {
91} __attribute__((__aligned__(SMP_CACHE_BYTES))); 100} __attribute__((__aligned__(SMP_CACHE_BYTES)));
92 101
93#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) 102#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
94#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
95
96int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
97 int size);
98struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
99 103
100#endif /* _ASM_X86_PVCLOCK_H */ 104#endif /* _ASM_X86_PVCLOCK_H */
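With the fixmap-based mapping removed, pvclock_pvti_cpu0_va() becomes the single accessor for the CPU 0 pvclock page and returns NULL unless CONFIG_KVM_GUEST provides a mapping, so callers have to handle the NULL case. A hedged sketch of the expected calling pattern (names illustrative):

        /* Illustrative only: callers must tolerate a NULL pvti pointer. */
        static int example_setup_pvclock_page(void)
        {
                struct pvclock_vsyscall_time_info *pvti = pvclock_pvti_cpu0_va();

                if (!pvti)
                        return -ENODEV;         /* not a KVM guest, or not exposed */

                /* ... hand pvti to the vDSO's sym_pvclock_page mapping code ... */
                return 0;
        }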
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index b002e711ba88..9f92c180ed2f 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -1,6 +1,65 @@
1#ifndef __ASM_QSPINLOCK_PARAVIRT_H 1#ifndef __ASM_QSPINLOCK_PARAVIRT_H
2#define __ASM_QSPINLOCK_PARAVIRT_H 2#define __ASM_QSPINLOCK_PARAVIRT_H
3 3
4/*
5 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
6 * registers. For i386, however, only 1 32-bit register needs to be saved
7 * and restored. So an optimized version of __pv_queued_spin_unlock() is
8 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
9 */
10#ifdef CONFIG_64BIT
11
12PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
13#define __pv_queued_spin_unlock __pv_queued_spin_unlock
14#define PV_UNLOCK "__raw_callee_save___pv_queued_spin_unlock"
15#define PV_UNLOCK_SLOWPATH "__raw_callee_save___pv_queued_spin_unlock_slowpath"
16
17/*
18 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
19 * which combines the registers saving trunk and the body of the following
20 * C code:
21 *
22 * void __pv_queued_spin_unlock(struct qspinlock *lock)
23 * {
24 * struct __qspinlock *l = (void *)lock;
25 * u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
26 *
27 * if (likely(lockval == _Q_LOCKED_VAL))
28 * return;
29 * pv_queued_spin_unlock_slowpath(lock, lockval);
30 * }
31 *
32 * For x86-64,
33 * rdi = lock (first argument)
34 * rsi = lockval (second argument)
35 * rdx = internal variable (set to 0)
36 */
37asm (".pushsection .text;"
38 ".globl " PV_UNLOCK ";"
39 ".align 4,0x90;"
40 PV_UNLOCK ": "
41 "push %rdx;"
42 "mov $0x1,%eax;"
43 "xor %edx,%edx;"
44 "lock cmpxchg %dl,(%rdi);"
45 "cmp $0x1,%al;"
46 "jne .slowpath;"
47 "pop %rdx;"
48 "ret;"
49 ".slowpath: "
50 "push %rsi;"
51 "movzbl %al,%esi;"
52 "call " PV_UNLOCK_SLOWPATH ";"
53 "pop %rsi;"
54 "pop %rdx;"
55 "ret;"
56 ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
57 ".popsection");
58
59#else /* CONFIG_64BIT */
60
61extern void __pv_queued_spin_unlock(struct qspinlock *lock);
4PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock); 62PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
5 63
64#endif /* CONFIG_64BIT */
6#endif 65#endif
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index a82c4f1b4d83..2cb1cc253d51 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,5 +25,6 @@ void __noreturn machine_real_restart(unsigned int type);
25 25
26typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); 26typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
27void nmi_shootdown_cpus(nmi_shootdown_cb callback); 27void nmi_shootdown_cpus(nmi_shootdown_cb callback);
28void run_crash_ipi_callback(struct pt_regs *regs);
28 29
29#endif /* _ASM_X86_REBOOT_H */ 30#endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5..dfcf0727623b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,15 +21,6 @@
21extern int smp_num_siblings; 21extern int smp_num_siblings;
22extern unsigned int num_processors; 22extern unsigned int num_processors;
23 23
24static inline bool cpu_has_ht_siblings(void)
25{
26 bool has_siblings = false;
27#ifdef CONFIG_SMP
28 has_siblings = cpu_has_ht && smp_num_siblings > 1;
29#endif
30 return has_siblings;
31}
32
33DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); 24DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
34DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); 25DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
35/* cpus sharing the last level cache: */ 26/* cpus sharing the last level cache: */
@@ -74,9 +65,6 @@ struct smp_ops {
74extern void set_cpu_sibling_map(int cpu); 65extern void set_cpu_sibling_map(int cpu);
75 66
76#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
77#ifndef CONFIG_PARAVIRT
78#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
79#endif
80extern struct smp_ops smp_ops; 68extern struct smp_ops smp_ops;
81 69
82static inline void smp_send_stop(void) 70static inline void smp_send_stop(void)
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index d1793f06854d..8e9dbe7b73a1 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,7 @@ struct saved_context {
15 unsigned long cr0, cr2, cr3, cr4; 15 unsigned long cr0, cr2, cr3, cr4;
16 u64 misc_enable; 16 u64 misc_enable;
17 bool misc_enable_saved; 17 bool misc_enable_saved;
18 struct saved_msrs saved_msrs;
18 struct desc_ptr gdt_desc; 19 struct desc_ptr gdt_desc;
19 struct desc_ptr idt; 20 struct desc_ptr idt;
20 u16 ldt; 21 u16 ldt;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 7ebf0ebe4e68..6136a18152af 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -24,6 +24,7 @@ struct saved_context {
24 unsigned long cr0, cr2, cr3, cr4, cr8; 24 unsigned long cr0, cr2, cr3, cr4, cr8;
25 u64 misc_enable; 25 u64 misc_enable;
26 bool misc_enable_saved; 26 bool misc_enable_saved;
27 struct saved_msrs saved_msrs;
27 unsigned long efer; 28 unsigned long efer;
28 u16 gdt_pad; /* Unused */ 29 u16 gdt_pad; /* Unused */
29 struct desc_ptr gdt_desc; 30 struct desc_ptr gdt_desc;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0ab94b7..660458af425d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -745,5 +745,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
745#undef __copy_from_user_overflow 745#undef __copy_from_user_overflow
746#undef __copy_to_user_overflow 746#undef __copy_to_user_overflow
747 747
748/*
749 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
750 * nested NMI paths are careful to preserve CR2.
751 *
752 * Caller must use pagefault_enable/disable, or run in interrupt context,
753 * and also do a uaccess_ok() check
754 */
755#define __copy_from_user_nmi __copy_from_user_inatomic
756
748#endif /* _ASM_X86_UACCESS_H */ 757#endif /* _ASM_X86_UACCESS_H */
749 758
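The comment above spells out the contract for the new __copy_from_user_nmi alias: the caller must run with page faults disabled (or in interrupt context) and validate the user range first. A hedged sketch of that calling pattern, illustrative only, using the access_ok()/pagefault_disable() API of this kernel era:

        /* Illustrative only: caller-side contract for __copy_from_user_nmi(). */
        static unsigned long example_nmi_read(void *dst, const void __user *src,
                                              unsigned long n)
        {
                unsigned long not_copied;

                if (!access_ok(VERIFY_READ, src, n))
                        return n;               /* nothing copied */

                pagefault_disable();
                not_copied = __copy_from_user_nmi(dst, src, n);
                pagefault_enable();

                return not_copied;              /* 0 on full success */
        }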
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 756de9190aec..deabaf9759b6 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -22,6 +22,7 @@ struct vdso_image {
22 22
23 long sym_vvar_page; 23 long sym_vvar_page;
24 long sym_hpet_page; 24 long sym_hpet_page;
25 long sym_pvclock_page;
25 long sym_VDSO32_NOTE_MASK; 26 long sym_VDSO32_NOTE_MASK;
26 long sym___kernel_sigreturn; 27 long sym___kernel_sigreturn;
27 long sym___kernel_rt_sigreturn; 28 long sym___kernel_rt_sigreturn;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index cd0fc0cc78bc..1ae89a2721d6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -82,13 +82,11 @@ struct x86_init_paging {
82 * struct x86_init_timers - platform specific timer setup 82 * struct x86_init_timers - platform specific timer setup
83 * @setup_perpcu_clockev: set up the per cpu clock event device for the 83 * @setup_perpcu_clockev: set up the per cpu clock event device for the
84 * boot cpu 84 * boot cpu
85 * @tsc_pre_init: platform function called before TSC init
86 * @timer_init: initialize the platform timer (default PIT/HPET) 85 * @timer_init: initialize the platform timer (default PIT/HPET)
87 * @wallclock_init: init the wallclock device 86 * @wallclock_init: init the wallclock device
88 */ 87 */
89struct x86_init_timers { 88struct x86_init_timers {
90 void (*setup_percpu_clockev)(void); 89 void (*setup_percpu_clockev)(void);
91 void (*tsc_pre_init)(void);
92 void (*timer_init)(void); 90 void (*timer_init)(void);
93 void (*wallclock_init)(void); 91 void (*wallclock_init)(void);
94}; 92};
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff33..c54beb44c4c1 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do { \
553 if (cpu_has_xmm) { \ 553 if (cpu_has_xmm) { \
554 xor_speed(&xor_block_pIII_sse); \ 554 xor_speed(&xor_block_pIII_sse); \
555 xor_speed(&xor_block_sse_pf64); \ 555 xor_speed(&xor_block_sse_pf64); \
556 } else if (cpu_has_mmx) { \ 556 } else if (boot_cpu_has(X86_FEATURE_MMX)) { \
557 xor_speed(&xor_block_pII_mmx); \ 557 xor_speed(&xor_block_pII_mmx); \
558 xor_speed(&xor_block_p5_mmx); \ 558 xor_speed(&xor_block_p5_mmx); \
559 } else { \ 559 } else { \
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 03429da2fa80..2184943341bf 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -16,7 +16,7 @@ struct mce {
16 __u8 cpuvendor; /* cpu vendor as encoded in system.h */ 16 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
17 __u8 inject_flags; /* software inject flags */ 17 __u8 inject_flags; /* software inject flags */
18 __u8 severity; 18 __u8 severity;
19 __u8 usable_addr; 19 __u8 pad;
20 __u32 cpuid; /* CPUID 1 EAX */ 20 __u32 cpuid; /* CPUID 1 EAX */
21 __u8 cs; /* code segment */ 21 __u8 cs; /* code segment */
22 __u8 bank; /* machine check bank */ 22 __u8 bank; /* machine check bank */