Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/acpi.h              |   2
-rw-r--r--  arch/x86/include/asm/apb_timer.h         |   1
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h        | 198
-rw-r--r--  arch/x86/include/asm/cmpxchg_64.h        |  83
-rw-r--r--  arch/x86/include/asm/cpufeature.h        |  29
-rw-r--r--  arch/x86/include/asm/hw_breakpoint.h     |   2
-rw-r--r--  arch/x86/include/asm/hypervisor.h        |   1
-rw-r--r--  arch/x86/include/asm/i387.h              |  11
-rw-r--r--  arch/x86/include/asm/intel_scu_ipc.h     |  20
-rw-r--r--  arch/x86/include/asm/irq_vectors.h       |   3
-rw-r--r--  arch/x86/include/asm/kgdb.h              |  20
-rw-r--r--  arch/x86/include/asm/kvm.h               |  22
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h       |  30
-rw-r--r--  arch/x86/include/asm/kvm_host.h          |  70
-rw-r--r--  arch/x86/include/asm/local64.h           |   1
-rw-r--r--  arch/x86/include/asm/mrst.h              |  26
-rw-r--r--  arch/x86/include/asm/msr-index.h         |  23
-rw-r--r--  arch/x86/include/asm/msr.h               |   4
-rw-r--r--  arch/x86/include/asm/nmi.h               |   2
-rw-r--r--  arch/x86/include/asm/pci_x86.h           |   1
-rw-r--r--  arch/x86/include/asm/perf_event.h        |  18
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h     |  99
-rw-r--r--  arch/x86/include/asm/processor.h         |  21
-rw-r--r--  arch/x86/include/asm/required-features.h |   2
-rw-r--r--  arch/x86/include/asm/rwsem.h             |  21
-rw-r--r--  arch/x86/include/asm/setup.h             |   2
-rw-r--r--  arch/x86/include/asm/stacktrace.h        |  49
-rw-r--r--  arch/x86/include/asm/system.h            |   7
-rw-r--r--  arch/x86/include/asm/vmx.h               |   5
-rw-r--r--  arch/x86/include/asm/x86_init.h          |   2
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h     |   6
-rw-r--r--  arch/x86/include/asm/xsave.h             |  16
32 files changed, 496 insertions(+), 301 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index aa2c39d968fc..92091de11113 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -134,7 +134,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
-	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+	else if (c1e_detected)
 		return 1;
 	else
 		return max_cstate;
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index c74a2eebe570..a69b1ac9eaf8 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -55,7 +55,6 @@ extern unsigned long apbt_quick_calibrate(void);
 extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);
 extern unsigned int boot_cpu_id;
-extern int disable_apbt_percpu;
 
 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
 extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 8859e12dd3cf..284a6e8f7ce1 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -11,38 +11,42 @@
 extern void __xchg_wrong_size(void);
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
-
-struct __xchg_dummy {
-	unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
 #define __xchg(x, ptr, size)					\
 ({								\
 	__typeof(*(ptr)) __x = (x);				\
 	switch (size) {						\
 	case 1:							\
-		asm volatile("xchgb %b0,%1"			\
-			     : "=q" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile("xchgb %0,%1"			\
+			     : "=q" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile("xchgw %w0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile("xchgw %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
 		asm volatile("xchgl %0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__xchg_wrong_size();				\
 	}							\
@@ -53,60 +57,33 @@ struct __xchg_dummy {
 	__xchg((v), (ptr), sizeof(*ptr))
 
 /*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
+ * CMPXCHG8B only writes to the target if we had the previous
+ * value in registers, otherwise it acts as a read and gives us the
+ * "new previous" value.  That is why there is a loop.  Preloading
+ * EDX:EAX is a performance optimization: in the common case it means
+ * we need only one locked operation.
  *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
+ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
+ * least an FPU save and/or %cr0.ts manipulation.
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow the
+ * instruction to be executed atomically.  We need to have the reader
+ * side to see the coherent 64bit value.
  */
-static inline void __set_64bit(unsigned long long *ptr,
-			       unsigned int low, unsigned int high)
+static inline void set_64bit(volatile u64 *ptr, u64 value)
 {
+	u32 low  = value;
+	u32 high = value >> 32;
+	u64 prev = *ptr;
+
 	asm volatile("\n1:\t"
-		     "movl (%0), %%eax\n\t"
-		     "movl 4(%0), %%edx\n\t"
-		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+		     LOCK_PREFIX "cmpxchg8b %0\n\t"
 		     "jnz 1b"
-		     : /* no outputs */
-		     : "D"(ptr),
-		       "b"(low),
-		       "c"(high)
-		     : "ax", "dx", "memory");
+		     : "=m" (*ptr), "+A" (prev)
+		     : "b" (low), "c" (high)
+		     : "memory");
 }
 
-static inline void __set_64bit_constant(unsigned long long *ptr,
-					unsigned long long value)
-{
-	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
-}
-
-#define ll_low(x) *(((unsigned int *)&(x)) + 0)
-#define ll_high(x) *(((unsigned int *)&(x)) + 1)
-
-static inline void __set_64bit_var(unsigned long long *ptr,
-				   unsigned long long value)
-{
-	__set_64bit(ptr, ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr, value)			\
-	(__builtin_constant_p((value))		\
-	 ? __set_64bit_constant((ptr), (value))	\
-	 : __set_64bit_var((ptr), (value)))
-
-#define _set_64bit(ptr, value)						\
-	(__builtin_constant_p(value)					\
-	 ? __set_64bit(ptr, (unsigned int)(value),			\
-		       (unsigned int)((value) >> 32))			\
-	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
-
 extern void __cmpxchg_wrong_size(void);
 
 /*
@@ -121,23 +98,32 @@ extern void __cmpxchg_wrong_size(void);
 	__typeof__(*(ptr)) __new = (new);			\
 	switch (size) {						\
 	case 1:							\
-		asm volatile(lock "cmpxchgb %b1,%2"		\
-			     : "=a"(__ret)			\
-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile(lock "cmpxchgb %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "q" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile(lock "cmpxchgw %w1,%2"		\
-			     : "=a"(__ret)			\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile(lock "cmpxchgw %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
-		asm volatile(lock "cmpxchgl %1,%2"		\
-			     : "=a"(__ret)			\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
+		asm volatile(lock "cmpxchgl %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__cmpxchg_wrong_size();				\
 	}							\
@@ -175,32 +161,28 @@ extern void __cmpxchg_wrong_size(void);
 					 (unsigned long long)(n)))
 #endif
 
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
+static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-	unsigned long long prev;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
-		     : "=A"(prev)
-		     : "b"((unsigned long)new),
-		       "c"((unsigned long)(new >> 32)),
-		       "m"(*__xg(ptr)),
-		       "0"(old)
+	u64 prev;
+	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
+		     : "=A" (prev),
+		       "+m" (*ptr)
+		     : "b" ((u32)new),
+		       "c" ((u32)(new >> 32)),
+		       "0" (old)
 		     : "memory");
 	return prev;
 }
 
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-						   unsigned long long old,
-						   unsigned long long new)
+static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-	unsigned long long prev;
-	asm volatile("cmpxchg8b %3"
-		     : "=A"(prev)
-		     : "b"((unsigned long)new),
-		       "c"((unsigned long)(new >> 32)),
-		       "m"(*__xg(ptr)),
-		       "0"(old)
+	u64 prev;
+	asm volatile("cmpxchg8b %1"
+		     : "=A" (prev),
+		       "+m" (*ptr)
+		     : "b" ((u32)new),
+		       "c" ((u32)(new >> 32)),
+		       "0" (old)
 		     : "memory");
 	return prev;
 }
@@ -264,8 +246,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
  * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
  */
 
-extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
-
 #define cmpxchg64(ptr, o, n)					\
 ({								\
 	__typeof__(*(ptr)) __ret;				\
@@ -283,20 +263,20 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 	__ret; })
 
 
-
-#define cmpxchg64_local(ptr, o, n)				\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	if (likely(boot_cpu_data.x86 > 4))			\
-		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
-				(unsigned long long)(o),	\
-				(unsigned long long)(n));	\
-	else							\
-		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
-				(unsigned long long)(o),	\
-				(unsigned long long)(n));	\
-	__ret;							\
-})
+#define cmpxchg64_local(ptr, o, n)				\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	__typeof__(*(ptr)) __old = (o);				\
+	__typeof__(*(ptr)) __new = (n);				\
+	alternative_io("call cmpxchg8b_emu",			\
+		       "cmpxchg8b (%%esi)" ,			\
+		       X86_FEATURE_CX8,				\
+		       "=A" (__ret),				\
+		       "S" ((ptr)), "0" (__old),		\
+		       "b" ((unsigned int)__new),		\
+		       "c" ((unsigned int)(__new>>32))		\
+		       : "memory");				\
+	__ret; })
 
 #endif
 
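
For reference, the set_64bit() loop introduced above behaves like the following user-space C model, written with GCC's __atomic builtins rather than inline cmpxchg8b (an illustrative sketch, not part of the patch):

#include <stdint.h>

/* Model of set_64bit(): preload 'prev' so that, in the uncontended
 * case, the very first compare-exchange succeeds and only one locked
 * operation is issued -- the optimization the new comment describes. */
static void set_64bit_model(volatile uint64_t *ptr, uint64_t value)
{
	uint64_t prev = *ptr;	/* preload, like EDX:EAX above */

	/* On failure, __atomic_compare_exchange_n refreshes 'prev'
	 * with the current contents, just as cmpxchg8b reloads
	 * EDX:EAX, so the loop converges. */
	while (!__atomic_compare_exchange_n(ptr, &prev, value, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST))
		;
}
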
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 485ae415faec..423ae58aa020 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -3,51 +3,60 @@
 
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
 	*ptr = val;
 }
 
-#define _set_64bit set_64bit
-
 extern void __xchg_wrong_size(void);
 extern void __cmpxchg_wrong_size(void);
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
 #define __xchg(x, ptr, size)					\
 ({								\
 	__typeof(*(ptr)) __x = (x);				\
 	switch (size) {						\
 	case 1:							\
-		asm volatile("xchgb %b0,%1"			\
-			     : "=q" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile("xchgb %0,%1"			\
+			     : "=q" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile("xchgw %w0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile("xchgw %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
-		asm volatile("xchgl %k0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
+		asm volatile("xchgl %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 8:							\
+	{							\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
 		asm volatile("xchgq %0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__xchg_wrong_size();				\
 	}							\
@@ -71,29 +80,41 @@ extern void __cmpxchg_wrong_size(void);
 	__typeof__(*(ptr)) __new = (new);			\
 	switch (size) {						\
 	case 1:							\
-		asm volatile(lock "cmpxchgb %b1,%2"		\
-			     : "=a"(__ret)			\
-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile(lock "cmpxchgb %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "q" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile(lock "cmpxchgw %w1,%2"		\
-			     : "=a"(__ret)			\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile(lock "cmpxchgw %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
-		asm volatile(lock "cmpxchgl %k1,%2"		\
-			     : "=a"(__ret)			\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
+		asm volatile(lock "cmpxchgl %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 8:							\
-		asm volatile(lock "cmpxchgq %1,%2"		\
-			     : "=a"(__ret)			\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{							\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
+		asm volatile(lock "cmpxchgq %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__cmpxchg_wrong_size();				\
 	}							\
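
The recurring constraint change in both files -- dropping the __xg() dummy-struct cast in favor of a properly typed pointer and a "+m" operand -- can be seen in isolation. A minimal user-space sketch of the new style (x86-64 GCC/Clang assumed, not code from the patch):

#include <stdint.h>

/* "+m" (*ptr) tells the compiler the asm both reads and writes the
 * pointed-to object, so no __xg() cast is needed to keep the access
 * from being cached in a register or reordered across the xchg. */
static inline uint64_t xchg64(volatile uint64_t *ptr, uint64_t x)
{
	asm volatile("xchgq %0,%1"
		     : "=r" (x), "+m" (*ptr)
		     : "0" (x)
		     : "memory");
	return x;
}
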
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e8b88967de35..781a50b29a49 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -6,7 +6,7 @@
 
 #include <asm/required-features.h>
 
-#define NCAPINTS	9	/* N 32-bit words worth of info */
+#define NCAPINTS	10	/* N 32-bit words worth of info */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -89,7 +89,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
+					  /* 21 available, was AMD_C1E */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
@@ -124,6 +124,8 @@
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C	(4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRND	(4*32+30) /* The RDRAND instruction */
 #define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -157,22 +159,29 @@
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc
+ * CPUID levels like 0x6, 0xA etc, word 7
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 #define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 #define X86_FEATURE_CPB		(7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB		(7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
+#define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
 
-/* Virtualization flags: Linux defined */
+/* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI	(8*32+ 1) /* Intel Virtual NMI */
 #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		(8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	(8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT		(8*32+5)  /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV	(8*32+6)  /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML	(8*32+7)  /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS	(8*32+8)  /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_NPT		(8*32+ 5) /* AMD Nested Page Table support */
+#define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
+#define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
+#define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -194,7 +203,9 @@ extern const char * const x86_power_flags[32];
 	 (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
 	 (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
 	 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
-	 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
+	 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
+	 (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
+	 (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )	\
 	 ? 1 :								\
 	 test_cpu_cap(c, bit))
 
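
The new REQUIRED_MASK8/9 arms extend the existing word/bit split: a feature number encodes its capability word in the high bits and its bit position in the low five. A standalone illustration of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned bit  = 9 * 32 + 0;        /* X86_FEATURE_FSGSBASE (word 9) */
	unsigned word = bit >> 5;          /* capability word index: 9 */
	unsigned mask = 1u << (bit & 31);  /* bit within that word: 0x1 */

	printf("word %u, mask %#x\n", word, mask);
	return 0;
}
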
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 942255310e6a..528a11e8d3e3 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,10 +20,10 @@ struct arch_hw_breakpoint {
 #include <linux/list.h>
 
 /* Available HW breakpoint length encodings */
+#define X86_BREAKPOINT_LEN_X		0x00
 #define X86_BREAKPOINT_LEN_1		0x40
 #define X86_BREAKPOINT_LEN_2		0x44
 #define X86_BREAKPOINT_LEN_4		0x4c
-#define X86_BREAKPOINT_LEN_EXECUTE	0x40
 
 #ifdef CONFIG_X86_64
 #define X86_BREAKPOINT_LEN_8		0x48
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 70abda7058c8..ff2546ce7178 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -45,5 +45,6 @@ extern const struct hypervisor_x86 *x86_hyper;
 /* Recognized hypervisors */
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+extern const struct hypervisor_x86 x86_hyper_xen_hvm;
 
 #endif
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c991b3a7b904..f1accc625beb 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -127,6 +127,15 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
 
+	/*
+	 * Clear the bytes not touched by the fxsave and reserved
+	 * for the SW usage.
+	 */
+	err = __clear_user(&fx->sw_reserved,
+			   sizeof(struct _fpx_sw_bytes));
+	if (unlikely(err))
+		return -EFAULT;
+
 	asm volatile("1: rex64/fxsave (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -482,6 +491,8 @@ static inline void fpu_copy(struct fpu *dst, struct fpu *src)
 	memcpy(dst->state, src->state, xstate_size);
 }
 
+extern void fpu_finit(struct fpu *fpu);
+
 #endif /* __ASSEMBLY__ */
 
 #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 4470c9ad4a3e..29f66793cc55 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -1,6 +1,12 @@
 #ifndef _ASM_X86_INTEL_SCU_IPC_H_
 #define _ASM_X86_INTEL_SCU_IPC_H_
 
+#define IPCMSG_VRTC	0xFA	/* Set vRTC device */
+
+/* Command id associated with message IPCMSG_VRTC */
+#define IPC_CMD_VRTC_SETTIME	1 /* Set time */
+#define IPC_CMD_VRTC_SETALARM	2 /* Set alarm */
+
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
 
@@ -28,20 +34,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
 /* Update single register based on the mask */
 int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
 
-/*
- * Indirect register read
- * Can be used when SCCB(System Controller Configuration Block) register
- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
- */
-int intel_scu_ipc_register_read(u32 addr, u32 *data);
-
-/*
- * Indirect register write
- * Can be used when SCCB(System Controller Configuration Block) register
- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
- */
-int intel_scu_ipc_register_write(u32 addr, u32 data);
-
 /* Issue commands to the SCU with or without data */
 int intel_scu_ipc_simple_command(int cmd, int sub);
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
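
A hypothetical caller of the new vRTC ids, built on the intel_scu_ipc_command() prototype kept just below them (a sketch only; it assumes the prototype's usual "u32 *out, int outlen" tail, which the hunk truncates):

/* Send a time payload to the SCU; no output data is expected back. */
static int example_vrtc_set_time(u32 *time_words, int inlen)
{
	return intel_scu_ipc_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME,
				     time_words, inlen, NULL, 0);
}
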
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 8767d99c4f64..e2ca30092557 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -125,6 +125,9 @@
  */
 #define MCE_SELF_VECTOR			0xeb
 
+/* Xen vector callback to receive events in a HVM domain */
+#define XEN_HVM_EVTCHN_CALLBACK		0xe9
+
 #define NR_VECTORS			 256
 
 #define FPU_IRQ				  13
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 006da3687cdc..396f5b5fc4d7 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -39,9 +39,11 @@ enum regnames {
 	GDB_FS,			/* 14 */
 	GDB_GS,			/* 15 */
 };
+#define GDB_ORIG_AX		41
+#define DBG_MAX_REG_NUM		16
 #define NUMREGBYTES		((GDB_GS+1)*4)
 #else /* ! CONFIG_X86_32 */
-enum regnames64 {
+enum regnames {
 	GDB_AX,			/* 0 */
 	GDB_BX,			/* 1 */
 	GDB_CX,			/* 2 */
@@ -59,15 +61,15 @@ enum regnames64 {
 	GDB_R14,		/* 14 */
 	GDB_R15,		/* 15 */
 	GDB_PC,			/* 16 */
+	GDB_PS,			/* 17 */
+	GDB_CS,			/* 18 */
+	GDB_SS,			/* 19 */
 };
-
-enum regnames32 {
-	GDB_PS = 34,
-	GDB_CS,
-	GDB_SS,
-};
-#define NUMREGBYTES		((GDB_SS+1)*4)
-#endif /* CONFIG_X86_32 */
+#define GDB_ORIG_AX		57
+#define DBG_MAX_REG_NUM		20
+/* 17 64 bit regs and 3 32 bit regs */
+#define NUMREGBYTES		((17 * 8) + (3 * 4))
+#endif /* ! CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
 {
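
The new 64-bit NUMREGBYTES is easy to sanity-check: 17 registers (GDB_AX through GDB_PC) travel as 64 bits and the trailing three (PS, CS, SS) as 32 bits. Verified standalone (C11 static_assert assumed):

#include <assert.h>

/* Mirror of the header's arithmetic above. */
#define NUMREGBYTES_64 ((17 * 8) + (3 * 4))

static_assert(NUMREGBYTES_64 == 148, "17*8 + 3*4 = 148 bytes");
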
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ff90055c7f0b..4d8dcbdfc120 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -22,6 +22,8 @@
 #define __KVM_HAVE_XEN_HVM
 #define __KVM_HAVE_VCPU_EVENTS
 #define __KVM_HAVE_DEBUGREGS
+#define __KVM_HAVE_XSAVE
+#define __KVM_HAVE_XCRS
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -299,4 +301,24 @@ struct kvm_debugregs {
 	__u64 reserved[9];
 };
 
+/* for KVM_CAP_XSAVE */
+struct kvm_xsave {
+	__u32 region[1024];
+};
+
+#define KVM_MAX_XCRS	16
+
+struct kvm_xcr {
+	__u32 xcr;
+	__u32 reserved;
+	__u64 value;
+};
+
+struct kvm_xcrs {
+	__u32 nr_xcrs;
+	__u32 flags;
+	struct kvm_xcr xcrs[KVM_MAX_XCRS];
+	__u64 padding[16];
+};
+
 #endif /* _ASM_X86_KVM_H */
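
Userspace reaches the new state through matching vcpu ioctls; a sketch assuming the KVM_GET_XSAVE ioctl introduced alongside these structures:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* region[1024] of __u32 gives the full 4 KiB XSAVE area. */
static int example_get_xsave(int vcpu_fd, struct kvm_xsave *xs)
{
	return ioctl(vcpu_fd, KVM_GET_XSAVE, xs);
}
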
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0b2729bf2070..51cfd730ac5d 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -51,8 +51,10 @@ struct x86_emulate_ctxt;
 #define X86EMUL_UNHANDLEABLE    1
 /* Terminate emulation but return success to the caller. */
 #define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
-#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
-#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
+#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
+
 struct x86_emulate_ops {
 	/*
 	 * read_std: Read bytes of standard (non-emulated/special) memory.
@@ -92,6 +94,7 @@ struct x86_emulate_ops {
 	int (*read_emulated)(unsigned long addr,
 			     void *val,
 			     unsigned int bytes,
+			     unsigned int *error,
 			     struct kvm_vcpu *vcpu);
 
 	/*
@@ -104,6 +107,7 @@ struct x86_emulate_ops {
 	int (*write_emulated)(unsigned long addr,
 			      const void *val,
 			      unsigned int bytes,
+			      unsigned int *error,
 			      struct kvm_vcpu *vcpu);
 
 	/*
@@ -118,6 +122,7 @@ struct x86_emulate_ops {
 			       const void *old,
 			       const void *new,
 			       unsigned int bytes,
+			       unsigned int *error,
 			       struct kvm_vcpu *vcpu);
 
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
@@ -132,18 +137,26 @@ struct x86_emulate_ops {
 			   int seg, struct kvm_vcpu *vcpu);
 	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
 	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
+	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
 	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
 	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
-	void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
+	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
 	int (*cpl)(struct kvm_vcpu *vcpu);
-	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
+	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
+	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 };
 
 /* Type, address-of, and value of an instruction's operand. */
 struct operand {
 	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
 	unsigned int bytes;
-	unsigned long val, orig_val, *ptr;
+	unsigned long orig_val, *ptr;
+	union {
+		unsigned long val;
+		char valptr[sizeof(unsigned long) + 2];
+	};
 };
 
 struct fetch_cache {
@@ -186,6 +199,7 @@ struct decode_cache {
 	unsigned long modrm_val;
 	struct fetch_cache fetch;
 	struct read_cache io_read;
+	struct read_cache mem_read;
 };
 
 struct x86_emulate_ctxt {
@@ -202,6 +216,12 @@ struct x86_emulate_ctxt {
 	int interruptibility;
 
 	bool restart; /* restart string instruction after writeback */
+
+	int exception; /* exception that happens during emulation or -1 */
+	u32 error_code; /* error code for exception */
+	bool error_code_valid;
+	unsigned long cr2; /* faulted address in case of #PF */
+
 	/* decode cache */
 	struct decode_cache decode;
 };
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 76f5483cffec..502e53f999cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
+#include <linux/cpumask.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -39,11 +40,14 @@
 			  0xFFFFFF0000000000ULL)
 
 #define INVALID_PAGE (~(hpa_t)0)
+#define VALID_PAGE(x) ((x) != INVALID_PAGE)
+
 #define UNMAPPED_GVA (~(gpa_t)0)
 
 /* KVM Hugepage definitions for x86 */
 #define KVM_NR_PAGE_SIZES	3
-#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
@@ -69,8 +73,6 @@
 
 #define IOPL_SHIFT 12
 
-#define KVM_ALIAS_SLOTS 4
-
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_MMU_HASH_SHIFT 10
@@ -241,7 +243,7 @@ struct kvm_mmu {
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
+			 struct kvm_mmu_page *sp, bool clear_unsync);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
@@ -301,8 +303,8 @@ struct kvm_vcpu_arch {
 		unsigned long mmu_seq;
 	} update_pte;
 
-	struct i387_fxsave_struct host_fx_image;
-	struct i387_fxsave_struct guest_fx_image;
+	struct fpu guest_fpu;
+	u64 xcr0;
 
 	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
@@ -360,26 +362,11 @@ struct kvm_vcpu_arch {
 
 	/* fields used by HYPER-V emulation */
 	u64 hv_vapic;
-};
-
-struct kvm_mem_alias {
-	gfn_t base_gfn;
-	unsigned long npages;
-	gfn_t target_gfn;
-#define KVM_ALIAS_INVALID    1UL
-	unsigned long flags;
-};
 
-#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-
-struct kvm_mem_aliases {
-	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
-	int naliases;
+	cpumask_var_t wbinvd_dirty_mask;
 };
 
 struct kvm_arch {
-	struct kvm_mem_aliases *aliases;
-
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
@@ -533,6 +520,8 @@ struct kvm_x86_ops {
 
 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
+	bool (*has_wbinvd_exit)(void);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
@@ -576,7 +565,6 @@ enum emulation_result {
 #define EMULTYPE_SKIP		    (1 << 2)
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			unsigned long cr2, u16 error_code, int emulation_type);
-void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
@@ -591,10 +579,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
-		    unsigned long *dest);
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
-		    unsigned long value);
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
@@ -602,15 +587,16 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 		    bool has_error_code, u32 error_code);
 
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
-void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
@@ -630,12 +616,7 @@ int kvm_pic_set_irq(void *opaque, int irq, int level);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-void fx_init(struct kvm_vcpu *vcpu);
-
-int emulator_write_emulated(unsigned long addr,
-			    const void *val,
-			    unsigned int bytes,
-			    struct kvm_vcpu *vcpu);
+int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -664,8 +645,6 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
-
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -719,21 +698,6 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void kvm_fx_save(struct i387_fxsave_struct *image)
-{
-	asm("fxsave (%0)":: "r" (image));
-}
-
-static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
-{
-	asm("fxrstor (%0)":: "r" (image));
-}
-
-static inline void kvm_fx_finit(void)
-{
-	asm("finit");
-}
-
 static inline u32 get_rdx_init_val(void)
 {
 	return 0x600; /* P6 family */
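
The reworked hugepage macros split out the GFN shift while keeping the same sizes; with the usual PAGE_SHIFT of 12 the three levels come out to 4 KiB, 2 MiB and 1 GiB. Checked standalone (C11 static_assert; PAGE_SHIFT value is an assumption):

#include <assert.h>

#define PAGE_SHIFT		12	/* assumption: standard x86 pages */
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))

static_assert(KVM_HPAGE_SIZE(1) == (4UL << 10), "level 1: 4 KiB");
static_assert(KVM_HPAGE_SIZE(2) == (2UL << 20), "level 2: 2 MiB");
static_assert(KVM_HPAGE_SIZE(3) == (1UL << 30), "level 3: 1 GiB");
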
diff --git a/arch/x86/include/asm/local64.h b/arch/x86/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/arch/x86/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 451d30e7f62d..16350740edf6 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -13,6 +13,32 @@
 extern int pci_mrst_init(void);
 int __init sfi_parse_mrtc(struct sfi_table_header *table);
 
+/*
+ * Medfield is the follow-up of Moorestown, it combines two chip solution into
+ * one. Other than that it also added always-on and constant tsc and lapic
+ * timers. Medfield is the platform name, and the chip name is called Penwell
+ * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
+ * identified via MSRs.
+ */
+enum mrst_cpu_type {
+	MRST_CPU_CHIP_LINCROFT = 1,
+	MRST_CPU_CHIP_PENWELL,
+};
+
+extern enum mrst_cpu_type __mrst_cpu_chip;
+static enum mrst_cpu_type mrst_identify_cpu(void)
+{
+	return __mrst_cpu_chip;
+}
+
+enum mrst_timer_options {
+	MRST_TIMER_DEFAULT,
+	MRST_TIMER_APBT_ONLY,
+	MRST_TIMER_LAPIC_APBT,
+};
+
+extern enum mrst_timer_options mrst_timer_options;
+
 #define SFI_MTMR_MAX_NUM 8
 #define SFI_MRTC_MAX	8
 
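
A hypothetical consumer of the new chip and timer interfaces (sketch only; example_setup_timers() is not in the tree):

static void example_setup_timers(void)
{
	/* Penwell keeps always-on lapic timers, per the comment above,
	 * so it can pair them with the APB timer. */
	if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
		mrst_timer_options = MRST_TIMER_LAPIC_APBT;
	else
		mrst_timer_options = MRST_TIMER_APBT_ONLY;
}
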
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 8c7ae4318629..65bbec2093aa 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -20,6 +20,7 @@
 #define _EFER_LMA		10 /* Long mode active (read-only) */
 #define _EFER_NX		11 /* No execute enable */
 #define _EFER_SVME		12 /* Enable virtualization */
+#define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
 #define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
 
 #define EFER_SCE		(1<<_EFER_SCE)
@@ -27,6 +28,7 @@
 #define EFER_LMA		(1<<_EFER_LMA)
 #define EFER_NX			(1<<_EFER_NX)
 #define EFER_SVME		(1<<_EFER_SVME)
+#define EFER_LMSLE		(1<<_EFER_LMSLE)
 #define EFER_FFXSR		(1<<_EFER_FFXSR)
 
 /* Intel MSRs. Some also available on other CPUs */
@@ -159,8 +161,6 @@
 #define MSR_K7_FID_VID_STATUS	0xc0010042
 
 /* K6 MSRs */
-#define MSR_K6_EFER		0xc0000080
-#define MSR_K6_STAR		0xc0000081
 #define MSR_K6_WHCR		0xc0000082
 #define MSR_K6_UWCCR		0xc0000085
 #define MSR_K6_EPMR		0xc0000086
@@ -224,12 +224,14 @@
 #define MSR_IA32_THERM_CONTROL		0x0000019a
 #define MSR_IA32_THERM_INTERRUPT	0x0000019b
 
-#define THERM_INT_LOW_ENABLE		(1 << 0)
-#define THERM_INT_HIGH_ENABLE		(1 << 1)
+#define THERM_INT_HIGH_ENABLE		(1 << 0)
+#define THERM_INT_LOW_ENABLE		(1 << 1)
+#define THERM_INT_PLN_ENABLE		(1 << 24)
 
 #define MSR_IA32_THERM_STATUS		0x0000019c
 
 #define THERM_STATUS_PROCHOT		(1 << 0)
+#define THERM_STATUS_POWER_LIMIT	(1 << 10)
 
 #define MSR_THERM2_CTL			0x0000019d
 
@@ -239,6 +241,19 @@
 
 #define MSR_IA32_TEMPERATURE_TARGET	0x000001a2
 
+#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+
+#define MSR_IA32_PACKAGE_THERM_STATUS		0x000001b1
+
+#define PACKAGE_THERM_STATUS_PROCHOT		(1 << 0)
+#define PACKAGE_THERM_STATUS_POWER_LIMIT	(1 << 10)
+
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT	0x000001b2
+
+#define PACKAGE_THERM_INT_HIGH_ENABLE		(1 << 0)
+#define PACKAGE_THERM_INT_LOW_ENABLE		(1 << 1)
+#define PACKAGE_THERM_INT_PLN_ENABLE		(1 << 24)
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
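
How the new package-thermal definitions would typically be consumed, as a kernel-style sketch (hypothetical handler; assumes only rdmsr() from <asm/msr.h> and pr_info()):

static void example_package_thermal_check(void)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PACKAGE_THERM_STATUS, lo, hi);
	if (lo & PACKAGE_THERM_STATUS_PROCHOT)
		pr_info("package PROCHOT asserted\n");
	if (lo & PACKAGE_THERM_STATUS_POWER_LIMIT)
		pr_info("package power limit asserted\n");
}
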
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c5bc4c2d33f5..084ef95274cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -148,8 +148,8 @@ static inline unsigned long long native_read_pmc(int counter)
 #define rdmsr(msr, val1, val2)					\
 do {								\
 	u64 __val = native_read_msr((msr));			\
-	(val1) = (u32)__val;					\
-	(val2) = (u32)(__val >> 32);				\
+	(void)((val1) = (u32)__val);				\
+	(void)((val2) = (u32)(__val >> 32));			\
 } while (0)
 
 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 93da9c3f3341..932f0f86b4b7 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -17,7 +17,9 @@ int do_nmi_callback(struct pt_regs *regs, int cpu);
 
 extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
 extern int check_nmi_watchdog(void);
+#if !defined(CONFIG_LOCKUP_DETECTOR)
 extern int nmi_watchdog_enabled;
+#endif
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index cd2a31dc5fb8..49c7219826f9 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -30,6 +30,7 @@
 #define PCI_HAS_IO_ECS		0x40000
 #define PCI_NOASSIGN_ROMS	0x80000
 #define PCI_ROOT_NO_CRS		0x100000
+#define PCI_NOASSIGN_BARS	0x200000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 254883d0c7e0..6e742cc4251b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -68,8 +68,9 @@ union cpuid10_eax {
 
 union cpuid10_edx {
 	struct {
-		unsigned int num_counters_fixed:4;
-		unsigned int reserved:28;
+		unsigned int num_counters_fixed:5;
+		unsigned int bit_width_fixed:8;
+		unsigned int reserved:19;
 	} split;
 	unsigned int full;
 };
@@ -140,6 +141,19 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)	perf_misc_flags(regs)
 
+#include <asm/stacktrace.h>
+
+/*
+ * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
+ * and the comment with PERF_EFLAGS_EXACT.
+ */
+#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
+	(regs)->ip = (__ip);					\
+	(regs)->bp = caller_frame_pointer();			\
+	(regs)->cs = __KERNEL_CS;				\
+	regs->flags = 0;					\
+}
+
 #else
 static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
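
The widened cpuid10_edx layout above corresponds to CPUID leaf 0xA, EDX. A user-space decode of the same two fields (GCC's <cpuid.h> helper assumed):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("fixed counters: %u, width: %u bits\n",
	       edx & 0x1f,		/* num_counters_fixed:5 */
	       (edx >> 5) & 0xff);	/* bit_width_fixed:8 */
	return 0;
}
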
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 64a8ebff06fc..def500776b16 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -19,7 +19,6 @@
 #define ARCH_P4_RESERVED_ESCR	(2) /* IQ_ESCR(0,1) not always present */
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
-#define ARCH_P4_MAX_COUNTER	(ARCH_P4_MAX_CCCR / 2)
 
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
@@ -71,10 +70,6 @@
 #define P4_CCCR_THRESHOLD(v)		((v) << P4_CCCR_THRESHOLD_SHIFT)
 #define P4_CCCR_ESEL(v)			((v) << P4_CCCR_ESCR_SELECT_SHIFT)
 
-/* Custom bits in reerved CCCR area */
-#define P4_CCCR_CACHE_OPS_MASK		0x0000003fU
-
-
 /* Non HT mask */
 #define P4_CCCR_MASK				\
 	(P4_CCCR_OVF			|	\
@@ -106,8 +101,7 @@
  * ESCR and CCCR but rather an only packed value should
  * be unpacked and written to a proper addresses
  *
- * the base idea is to pack as much info as
- * possible
+ * the base idea is to pack as much info as possible
  */
 #define p4_config_pack_escr(v)		(((u64)(v)) << 32)
 #define p4_config_pack_cccr(v)		(((u64)(v)) & 0xffffffffULL)
@@ -130,8 +124,6 @@
 		t;				\
 	})
 
-#define p4_config_unpack_cache_event(v)	(((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
-
 #define P4_CONFIG_HT_SHIFT		63
 #define P4_CONFIG_HT			(1ULL << P4_CONFIG_HT_SHIFT)
 
@@ -214,6 +206,12 @@ static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
 	return escr;
 }
 
+/*
+ * This are the events which should be used in "Event Select"
+ * field of ESCR register, they are like unique keys which allow
+ * the kernel to determinate which CCCR and COUNTER should be
+ * used to track an event
+ */
 enum P4_EVENTS {
 	P4_EVENT_TC_DELIVER_MODE,
 	P4_EVENT_BPU_FETCH_REQUEST,
@@ -561,7 +559,7 @@ enum P4_EVENT_OPCODES {
  * a caller should use P4_ESCR_EMASK_NAME helper to
  * pick the EventMask needed, for example
  *
- *	P4_ESCR_EMASK_NAME(P4_EVENT_TC_DELIVER_MODE, DD)
+ *	P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
  */
 enum P4_ESCR_EMASKS {
 	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
@@ -753,43 +751,50 @@ enum P4_ESCR_EMASKS {
753 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1), 751 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
754}; 752};
755 753
756/* P4 PEBS: stale for a while */ 754/*
757#define P4_PEBS_METRIC_MASK 0x00001fffU 755 * P4 PEBS specifics (Replay Event only)
758#define P4_PEBS_UOB_TAG 0x01000000U 756 *
759#define P4_PEBS_ENABLE 0x02000000U 757 * Format (bits):
760 758 * 0-6: metric from P4_PEBS_METRIC enum
761/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */ 759 * 7 : reserved
762#define P4_PEBS__1stl_cache_load_miss_retired 0x3000001 760 * 8 : reserved
763#define P4_PEBS__2ndl_cache_load_miss_retired 0x3000002 761 * 9-11 : reserved
764#define P4_PEBS__dtlb_load_miss_retired 0x3000004 762 *
765#define P4_PEBS__dtlb_store_miss_retired 0x3000004 763 * Note we have UOP and PEBS bits reserved for now
766#define P4_PEBS__dtlb_all_miss_retired 0x3000004 764 * just in case if we will need them once
767#define P4_PEBS__tagged_mispred_branch 0x3018000 765 */
768#define P4_PEBS__mob_load_replay_retired 0x3000200 766#define P4_PEBS_CONFIG_ENABLE (1 << 7)
769#define P4_PEBS__split_load_retired 0x3000400 767#define P4_PEBS_CONFIG_UOP_TAG (1 << 8)
770#define P4_PEBS__split_store_retired 0x3000400 768#define P4_PEBS_CONFIG_METRIC_MASK 0x3f
771 769#define P4_PEBS_CONFIG_MASK 0xff
772#define P4_VERT__1stl_cache_load_miss_retired 0x0000001 770
773#define P4_VERT__2ndl_cache_load_miss_retired 0x0000001 771/*
774#define P4_VERT__dtlb_load_miss_retired 0x0000001 772 * mem: Only counters MSR_IQ_COUNTER4 (16) and
775#define P4_VERT__dtlb_store_miss_retired 0x0000002 773 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
776#define P4_VERT__dtlb_all_miss_retired 0x0000003 774 */
777#define P4_VERT__tagged_mispred_branch 0x0000010 775#define P4_PEBS_ENABLE 0x02000000U
778#define P4_VERT__mob_load_replay_retired 0x0000001 776#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U
779#define P4_VERT__split_load_retired 0x0000001 777
780#define P4_VERT__split_store_retired 0x0000002 778#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
781 779#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
782enum P4_CACHE_EVENTS { 780
783 P4_CACHE__NONE, 781#define p4_config_pebs_has(v, mask) (p4_config_unpack_pebs(v) & (mask))
784 782
785 P4_CACHE__1stl_cache_load_miss_retired, 783enum P4_PEBS_METRIC {
786 P4_CACHE__2ndl_cache_load_miss_retired, 784 P4_PEBS_METRIC__none,
787 P4_CACHE__dtlb_load_miss_retired, 785
788 P4_CACHE__dtlb_store_miss_retired, 786 P4_PEBS_METRIC__1stl_cache_load_miss_retired,
789 P4_CACHE__itlb_reference_hit, 787 P4_PEBS_METRIC__2ndl_cache_load_miss_retired,
790 P4_CACHE__itlb_reference_miss, 788 P4_PEBS_METRIC__dtlb_load_miss_retired,
791 789 P4_PEBS_METRIC__dtlb_store_miss_retired,
792 P4_CACHE__MAX 790 P4_PEBS_METRIC__dtlb_all_miss_retired,
791 P4_PEBS_METRIC__tagged_mispred_branch,
792 P4_PEBS_METRIC__mob_load_replay_retired,
793 P4_PEBS_METRIC__split_load_retired,
794 P4_PEBS_METRIC__split_store_retired,
795
796 P4_PEBS_METRIC__max
793}; 797};
794 798
795#endif /* PERF_EVENT_P4_H */ 799#endif /* PERF_EVENT_P4_H */
800
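The helpers above split a single u64 between the two hardware registers: the ESCR value in the high 32 bits, the CCCR plus the PEBS metric index in the low 32. A minimal sketch of packing and unpacking under that layout; escr_val and cccr_val are hypothetical register values, not entries from a real event table:

	static u64 p4_build_config(u32 escr_val, u32 cccr_val)
	{
		/* the metric index lives in the low 6 bits of the CCCR half */
		u64 config = p4_config_pack_escr(escr_val) |
			     p4_config_pack_cccr(cccr_val) |
			     P4_PEBS_METRIC__dtlb_load_miss_retired;

		if (p4_config_pebs_has(config, P4_PEBS_CONFIG_METRIC_MASK))
			pr_debug("PEBS metric %llu\n",
				 (unsigned long long)p4_config_unpack_metric(config));

		return config;
	}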
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7e5c6a60b8ee..325b7bdbebaa 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -762,6 +762,7 @@ extern void init_c1e_mask(void);
762extern unsigned long boot_option_idle_override; 762extern unsigned long boot_option_idle_override;
763extern unsigned long idle_halt; 763extern unsigned long idle_halt;
764extern unsigned long idle_nomwait; 764extern unsigned long idle_nomwait;
765extern bool c1e_detected;
765 766
766/* 767/*
767 * on systems with caches, caches must be flushed as the absolute 768
@@ -1025,4 +1026,24 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
1025 return ratio; 1026 return ratio;
1026} 1027}
1027 1028
1029/*
1030 * AMD errata checking
1031 */
1032#ifdef CONFIG_CPU_SUP_AMD
1033extern const int amd_erratum_383[];
1034extern const int amd_erratum_400[];
1035extern bool cpu_has_amd_erratum(const int *);
1036
1037#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
1038#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
1039#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1040 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1041#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
1042#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
1043#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
1044
1045#else
1046#define cpu_has_amd_erratum(x) (false)
1047#endif /* CONFIG_CPU_SUP_AMD */
1048
1028#endif /* _ASM_X86_PROCESSOR_H */ 1049#endif /* _ASM_X86_PROCESSOR_H */
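AMD_MODEL_RANGE() packs the family into bits 24-31 and a 12-bit (model << 4 | stepping) key into the start and end fields that the _START/_END accessors pull back out. A hedged sketch of testing one range word, assuming family, model and stepping come from CPUID leaf 1; this is not the kernel's actual cpu_has_amd_erratum() body:

	static bool model_in_range(int range, u8 family, u8 model, u8 stepping)
	{
		u16 ms = (model << 4) | stepping;	/* 12-bit model+stepping key */

		return AMD_MODEL_RANGE_FAMILY(range) == family &&
		       ms >= AMD_MODEL_RANGE_START(range) &&
		       ms <= AMD_MODEL_RANGE_END(range);
	}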
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 64cf2d24fad1..6c7fc25f2c34 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -84,5 +84,7 @@
84#define REQUIRED_MASK5 0 84#define REQUIRED_MASK5 0
85#define REQUIRED_MASK6 0 85#define REQUIRED_MASK6 0
86#define REQUIRED_MASK7 0 86#define REQUIRED_MASK7 0
87#define REQUIRED_MASK8 0
88#define REQUIRED_MASK9 0
87 89
88#endif /* _ASM_X86_REQUIRED_FEATURES_H */ 90#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 606ede126972..d1e41b0f9b60 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -118,7 +118,7 @@ static inline void __down_read(struct rw_semaphore *sem)
118{ 118{
119 asm volatile("# beginning down_read\n\t" 119 asm volatile("# beginning down_read\n\t"
120 LOCK_PREFIX _ASM_INC "(%1)\n\t" 120 LOCK_PREFIX _ASM_INC "(%1)\n\t"
121 /* adds 0x00000001, returns the old value */ 121 /* adds 0x00000001 */
122 " jns 1f\n" 122 " jns 1f\n"
123 " call call_rwsem_down_read_failed\n" 123 " call call_rwsem_down_read_failed\n"
124 "1:\n\t" 124 "1:\n\t"
@@ -156,11 +156,9 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
156static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) 156static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
157{ 157{
158 rwsem_count_t tmp; 158 rwsem_count_t tmp;
159
160 tmp = RWSEM_ACTIVE_WRITE_BIAS;
161 asm volatile("# beginning down_write\n\t" 159 asm volatile("# beginning down_write\n\t"
162 LOCK_PREFIX " xadd %1,(%2)\n\t" 160 LOCK_PREFIX " xadd %1,(%2)\n\t"
163 /* subtract 0x0000ffff, returns the old value */ 161 /* adds 0xffff0001, returns the old value */
164 " test %1,%1\n\t" 162 " test %1,%1\n\t"
165 /* was the count 0 before? */ 163 /* was the count 0 before? */
166 " jz 1f\n" 164 " jz 1f\n"
@@ -168,7 +166,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
168 "1:\n" 166 "1:\n"
169 "# ending down_write" 167 "# ending down_write"
170 : "+m" (sem->count), "=d" (tmp) 168 : "+m" (sem->count), "=d" (tmp)
171 : "a" (sem), "1" (tmp) 169 : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
172 : "memory", "cc"); 170 : "memory", "cc");
173} 171}
174 172
@@ -195,16 +193,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
195 */ 193 */
196static inline void __up_read(struct rw_semaphore *sem) 194static inline void __up_read(struct rw_semaphore *sem)
197{ 195{
198 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; 196 rwsem_count_t tmp;
199 asm volatile("# beginning __up_read\n\t" 197 asm volatile("# beginning __up_read\n\t"
200 LOCK_PREFIX " xadd %1,(%2)\n\t" 198 LOCK_PREFIX " xadd %1,(%2)\n\t"
201 /* subtracts 1, returns the old value */ 199 /* subtracts 1, returns the old value */
202 " jns 1f\n\t" 200 " jns 1f\n\t"
203 " call call_rwsem_wake\n" 201 " call call_rwsem_wake\n" /* expects old value in %edx */
204 "1:\n" 202 "1:\n"
205 "# ending __up_read\n" 203 "# ending __up_read\n"
206 : "+m" (sem->count), "=d" (tmp) 204 : "+m" (sem->count), "=d" (tmp)
207 : "a" (sem), "1" (tmp) 205 : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
208 : "memory", "cc"); 206 : "memory", "cc");
209} 207}
210 208
@@ -216,10 +214,9 @@ static inline void __up_write(struct rw_semaphore *sem)
216 rwsem_count_t tmp; 214 rwsem_count_t tmp;
217 asm volatile("# beginning __up_write\n\t" 215 asm volatile("# beginning __up_write\n\t"
218 LOCK_PREFIX " xadd %1,(%2)\n\t" 216 LOCK_PREFIX " xadd %1,(%2)\n\t"
219 /* tries to transition 217 /* subtracts 0xffff0001, returns the old value */
220 0xffff0001 -> 0x00000000 */ 218 " jns 1f\n\t"
221 " jz 1f\n" 219 " call call_rwsem_wake\n" /* expects old value in %edx */
222 " call call_rwsem_wake\n"
223 "1:\n\t" 220 "1:\n\t"
224 "# ending __up_write\n" 221 "# ending __up_write\n"
225 : "+m" (sem->count), "=d" (tmp) 222 : "+m" (sem->count), "=d" (tmp)
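All three rewritten fast paths share one shape: a locked xadd applies the bias and the flags from that addition select the slow path. A conceptual C model of the new __up_write(), sketch only, with a GCC atomic builtin standing in for the inline asm and rwsem_wake_model() standing in for call_rwsem_wake:

	static inline void up_write_model(long *count)
	{
		/* mirror of "xadd; jns": drop the write bias atomically and
		 * wake waiters when the resulting count is negative */
		long newcount = __sync_add_and_fetch(count, -RWSEM_ACTIVE_WRITE_BIAS);

		if (newcount < 0)
			rwsem_wake_model();	/* hypothetical slow-path hook */
	}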
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 86b1506f4179..ef292c792d74 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -82,7 +82,7 @@ void *extend_brk(size_t size, size_t align);
82 * executable.) 82 * executable.)
83 */ 83 */
84#define RESERVE_BRK(name,sz) \ 84#define RESERVE_BRK(name,sz) \
85 static void __section(.discard) __used \ 85 static void __section(.discard.text) __used \
86 __brk_reservation_fn_##name##__(void) { \ 86 __brk_reservation_fn_##name##__(void) { \
87 asm volatile ( \ 87 asm volatile ( \
88 ".pushsection .brk_reservation,\"aw\",@nobits;" \ 88 ".pushsection .brk_reservation,\"aw\",@nobits;" \
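The section rename matters because the dummy function is code: moving it to .discard.text presumably keeps executable bytes out of the plain .discard section, which is also used for non-code contents. For reference, a typical call site looks like the line below; the reserved space is later handed out by extend_brk() during early boot:

	RESERVE_BRK(dmi_alloc, 65536);	/* 64K of early brk space for DMI */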
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 4dab78edbad9..2b16a2ad23dc 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -1,6 +1,13 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 */
5
1#ifndef _ASM_X86_STACKTRACE_H 6#ifndef _ASM_X86_STACKTRACE_H
2#define _ASM_X86_STACKTRACE_H 7#define _ASM_X86_STACKTRACE_H
3 8
9#include <linux/uaccess.h>
10
4extern int kstack_depth_to_print; 11extern int kstack_depth_to_print;
5 12
6struct thread_info; 13struct thread_info;
@@ -42,4 +49,46 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
42 unsigned long *stack, unsigned long bp, 49 unsigned long *stack, unsigned long bp,
43 const struct stacktrace_ops *ops, void *data); 50 const struct stacktrace_ops *ops, void *data);
44 51
52#ifdef CONFIG_X86_32
53#define STACKSLOTS_PER_LINE 8
54#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
55#else
56#define STACKSLOTS_PER_LINE 4
57#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
58#endif
59
60extern void
61show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
62 unsigned long *stack, unsigned long bp, char *log_lvl);
63
64extern void
65show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
66 unsigned long *sp, unsigned long bp, char *log_lvl);
67
68extern unsigned int code_bytes;
69
70/* The form of the top of the frame on the stack */
71struct stack_frame {
72 struct stack_frame *next_frame;
73 unsigned long return_address;
74};
75
76struct stack_frame_ia32 {
77 u32 next_frame;
78 u32 return_address;
79};
80
81static inline unsigned long caller_frame_pointer(void)
82{
83 struct stack_frame *frame;
84
85 get_bp(frame);
86
87#ifdef CONFIG_FRAME_POINTER
88 frame = frame->next_frame;
89#endif
90
91 return (unsigned long)frame;
92}
93
45#endif /* _ASM_X86_STACKTRACE_H */ 94#endif /* _ASM_X86_STACKTRACE_H */
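With the structures above, a frame-pointer walk is just a linked-list traversal starting from caller_frame_pointer(). A minimal sketch, assuming CONFIG_FRAME_POINTER and a well-formed chain; real walkers such as dump_trace() must validate every pointer before following it:

	static void print_frames(void)
	{
		struct stack_frame *frame =
			(struct stack_frame *)caller_frame_pointer();

		while (frame && frame->return_address) {
			printk(KERN_DEBUG "ret: %pS\n",
			       (void *)frame->return_address);
			frame = frame->next_frame;
		}
	}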
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index e7f4d33c55ed..33ecc3ea8782 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -457,4 +457,11 @@ static __always_inline void rdtsc_barrier(void)
457 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 457 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
458} 458}
459 459
460/*
461 * We handle most unaligned accesses in hardware. On the other hand
462 * unaligned DMA can be quite expensive on some Nehalem processors.
463 *
464 * Based on this we disable the IP header alignment in network drivers.
465 */
466#define NET_IP_ALIGN 0
460#endif /* _ASM_X86_SYSTEM_H */ 467#endif /* _ASM_X86_SYSTEM_H */
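The value feeds the usual receive-buffer idiom in network drivers; with NET_IP_ALIGN defined to 0, the padding compiles away on x86. Illustrative only:

	static struct sk_buff *rx_alloc_skb(unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

		if (skb)
			skb_reserve(skb, NET_IP_ALIGN);	/* no-op when 0 */
		return skb;
	}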
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9e6779f7cf2d..9f0cbd987d50 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -257,6 +257,7 @@ enum vmcs_field {
257#define EXIT_REASON_IO_INSTRUCTION 30 257#define EXIT_REASON_IO_INSTRUCTION 30
258#define EXIT_REASON_MSR_READ 31 258#define EXIT_REASON_MSR_READ 31
259#define EXIT_REASON_MSR_WRITE 32 259#define EXIT_REASON_MSR_WRITE 32
260#define EXIT_REASON_INVALID_STATE 33
260#define EXIT_REASON_MWAIT_INSTRUCTION 36 261#define EXIT_REASON_MWAIT_INSTRUCTION 36
261#define EXIT_REASON_MONITOR_INSTRUCTION 39 262#define EXIT_REASON_MONITOR_INSTRUCTION 39
262#define EXIT_REASON_PAUSE_INSTRUCTION 40 263#define EXIT_REASON_PAUSE_INSTRUCTION 40
@@ -266,6 +267,7 @@ enum vmcs_field {
266#define EXIT_REASON_EPT_VIOLATION 48 267#define EXIT_REASON_EPT_VIOLATION 48
267#define EXIT_REASON_EPT_MISCONFIG 49 268#define EXIT_REASON_EPT_MISCONFIG 49
268#define EXIT_REASON_WBINVD 54 269#define EXIT_REASON_WBINVD 54
270#define EXIT_REASON_XSETBV 55
269 271
270/* 272/*
271 * Interruption-information format 273 * Interruption-information format
@@ -375,6 +377,9 @@ enum vmcs_field {
375#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) 377#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
376#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) 378#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
377 379
380#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
381#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
382
378#define VMX_EPT_DEFAULT_GAW 3 383#define VMX_EPT_DEFAULT_GAW 3
379#define VMX_EPT_MAX_GAW 0x4 384#define VMX_EPT_MAX_GAW 0x4
380#define VMX_EPT_MT_EPTE_SHIFT 3 385#define VMX_EPT_MT_EPTE_SHIFT 3
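Hypervisors dispatch on these numbers after a VM exit. A hypothetical dispatch fragment showing where the two new reasons slot in; the handler names are placeholders, not KVM's actual table entries:

	static int handle_exit_sketch(struct kvm_vcpu *vcpu, u32 exit_reason)
	{
		switch (exit_reason) {
		case EXIT_REASON_XSETBV:
			return handle_xsetbv_exit(vcpu);	/* placeholder */
		case EXIT_REASON_INVALID_STATE:
			return handle_invalid_state_exit(vcpu);	/* placeholder */
		default:
			return unhandled_exit(vcpu);		/* placeholder */
		}
	}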
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 519b54327d75..baa579c8e038 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -142,6 +142,7 @@ struct x86_cpuinit_ops {
142 * @set_wallclock: set time back to HW clock 142 * @set_wallclock: set time back to HW clock
143 * @is_untracked_pat_range exclude from PAT logic 143 * @is_untracked_pat_range exclude from PAT logic
144 * @nmi_init enable NMI on cpus 144 * @nmi_init enable NMI on cpus
145 * @i8042_detect pre-detect if i8042 controller exists
145 */ 146 */
146struct x86_platform_ops { 147struct x86_platform_ops {
147 unsigned long (*calibrate_tsc)(void); 148 unsigned long (*calibrate_tsc)(void);
@@ -150,6 +151,7 @@ struct x86_platform_ops {
150 void (*iommu_shutdown)(void); 151 void (*iommu_shutdown)(void);
151 bool (*is_untracked_pat_range)(u64 start, u64 end); 152 bool (*is_untracked_pat_range)(u64 start, u64 end);
152 void (*nmi_init)(void); 153 void (*nmi_init)(void);
154 int (*i8042_detect)(void);
153}; 155};
154 156
155extern struct x86_init_ops x86_init; 157extern struct x86_init_ops x86_init;
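A board file for a platform with no 8042 controller can now answer the probe up front instead of letting the i8042 driver poke missing hardware. Sketch only; the function and setup names are illustrative:

	static int no_i8042_detect(void)
	{
		return 0;	/* no controller on this platform */
	}

	static void __init my_platform_setup(void)
	{
		x86_platform.i8042_detect = no_i8042_detect;
	}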
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 9c371e4a9fa6..7fda040a76cd 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -417,6 +417,12 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
417 return _hypercall2(int, nmi_op, op, arg); 417 return _hypercall2(int, nmi_op, op, arg);
418} 418}
419 419
420static inline unsigned long __must_check
421HYPERVISOR_hvm_op(int op, void *arg)
422{
423 return _hypercall2(unsigned long, hvm_op, op, arg);
424}
425
420static inline void 426static inline void
421MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) 427MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
422{ 428{
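The new wrapper exposes HVMOP_* operations to PV-on-HVM guests. A hedged sketch reading one HVM parameter; HVMOP_get_param, struct xen_hvm_param and DOMID_SELF come from the Xen interface headers and are assumed available here:

	static unsigned long hvm_get_parameter(int idx)
	{
		struct xen_hvm_param xhv = {
			.domid = DOMID_SELF,
			.index = idx,
		};

		if (HYPERVISOR_hvm_op(HVMOP_get_param, &xhv))
			return 0;	/* treat failure as "unset" in this sketch */
		return xhv.value;
	}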
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 2c4390cae228..06acdbd7570a 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -13,6 +13,12 @@
13 13
14#define FXSAVE_SIZE 512 14#define FXSAVE_SIZE 512
15 15
16#define XSAVE_HDR_SIZE 64
17#define XSAVE_HDR_OFFSET FXSAVE_SIZE
18
19#define XSAVE_YMM_SIZE 256
20#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
21
16/* 22/*
17 * These are the features that the OS can handle currently. 23 * These are the features that the OS can handle currently.
18 */ 24 */
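The constants fix the layout of the xsave image: the 512-byte FXSAVE area first, the 64-byte header at offset 512, then the YMM state at offset 576. A quick sketch locating the YMM block in a raw buffer:

	static void *ymm_state(void *xsave_buf)
	{
		/* 512 (FXSAVE) + 64 (header) == 576 */
		return (char *)xsave_buf + XSAVE_YMM_OFFSET;
	}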
@@ -59,6 +65,16 @@ static inline int fpu_xrstor_checking(struct fpu *fpu)
59static inline int xsave_user(struct xsave_struct __user *buf) 65static inline int xsave_user(struct xsave_struct __user *buf)
60{ 66{
61 int err; 67 int err;
68
69 /*
70 * Clear the xsave header first, so that reserved fields are
71 * initialized to zero.
72 */
73 err = __clear_user(&buf->xsave_hdr,
74 sizeof(struct xsave_hdr_struct));
75 if (unlikely(err))
76 return -EFAULT;
77
62 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" 78 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
63 "2:\n" 79 "2:\n"
64 ".section .fixup,\"ax\"\n" 80 ".section .fixup,\"ax\"\n"