path: root/arch/x86
author     Linus Torvalds <torvalds@linux-foundation.org>    2010-08-06 13:07:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-08-06 13:07:34 -0400
commit     d9a73c00161f3eaa4c8c035c62f45afd1549e38a (patch)
tree       c5cad9e1e286438b63e512c1912e8b7f39071886 /arch/x86
parent     b304441c6f3a5cb5ea80b9a719d2851544f348d6 (diff)
parent     bf676945cb5bfe455321f57968967c18976f4995 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  um, x86: Cast to (u64 *) inside set_64bit()
  x86-32, asm: Directly access per-cpu GDT
  x86-64, asm: Directly access per-cpu IST
  x86, asm: Merge cmpxchg_486_u64() and cmpxchg8b_emu()
  x86, asm: Move cmpxchg emulation code to arch/x86/lib
  x86, asm: Clean up and simplify <asm/cmpxchg.h>
  x86, asm: Clean up and simplify set_64bit()
  x86: Add memory modify constraints to xchg() and cmpxchg()
  x86-64: Simplify loading initial_gs
  x86: Use symbolic MSR names
  x86: Remove redundant K6 MSRs
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h                                    | 198
-rw-r--r--  arch/x86/include/asm/cmpxchg_64.h                                    |  83
-rw-r--r--  arch/x86/include/asm/msr-index.h                                     |   2
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S                               |   2
-rw-r--r--  arch/x86/kernel/cpu/Makefile                                         |   2
-rw-r--r--  arch/x86/kernel/entry_32.S                                           |  11
-rw-r--r--  arch/x86/kernel/entry_64.S                                           |   6
-rw-r--r--  arch/x86/kernel/head_64.S                                            |   5
-rw-r--r--  arch/x86/kernel/verify_cpu_64.S                                      |   3
-rw-r--r--  arch/x86/kvm/svm.c                                                   |   6
-rw-r--r--  arch/x86/kvm/vmx.c                                                   |   8
-rw-r--r--  arch/x86/kvm/x86.c                                                   |   2
-rw-r--r--  arch/x86/lib/Makefile                                                |   1
-rw-r--r--  arch/x86/lib/cmpxchg.c (renamed from arch/x86/kernel/cpu/cmpxchg.c)  |  18
14 files changed, 164 insertions, 183 deletions
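The recurring change across both cmpxchg headers below is dropping the __xg() fake-struct cast in favour of a "+m" (read/write memory) operand, so gcc sees that the asm both reads and writes *ptr. A minimal userspace sketch of the resulting operand pattern, assuming GCC/Clang extended asm on x86 -- illustration only, not the kernel code:

#include <stdint.h>

static inline uint32_t xchg32_sketch(volatile uint32_t *ptr, uint32_t val)
{
	/* xchg with a memory operand is implicitly locked on x86,
	 * so no "lock" prefix is needed. */
	asm volatile("xchgl %0, %1"
		     : "=r" (val), "+m" (*ptr)	/* "+m": asm reads and writes *ptr */
		     : "0" (val)
		     : "memory");
	return val;
}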
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 8859e12dd3cf..284a6e8f7ce1 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -11,38 +11,42 @@
 extern void __xchg_wrong_size(void);
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
-
-struct __xchg_dummy {
-	unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
 #define __xchg(x, ptr, size)					\
 ({								\
 	__typeof(*(ptr)) __x = (x);				\
 	switch (size) {						\
 	case 1:							\
-		asm volatile("xchgb %b0,%1"			\
-			     : "=q" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile("xchgb %0,%1"			\
+			     : "=q" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile("xchgw %w0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile("xchgw %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
 		asm volatile("xchgl %0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__xchg_wrong_size();				\
 	}							\
@@ -53,60 +57,33 @@ struct __xchg_dummy {
 	__xchg((v), (ptr), sizeof(*ptr))
 
 /*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
+ * CMPXCHG8B only writes to the target if we had the previous
+ * value in registers, otherwise it acts as a read and gives us the
+ * "new previous" value.  That is why there is a loop.  Preloading
+ * EDX:EAX is a performance optimization: in the common case it means
+ * we need only one locked operation.
  *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
+ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
+ * least an FPU save and/or %cr0.ts manipulation.
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow the
+ * instruction to be executed atomically.  We need to have the reader
+ * side to see the coherent 64bit value.
  */
-static inline void __set_64bit(unsigned long long *ptr,
-			       unsigned int low, unsigned int high)
+static inline void set_64bit(volatile u64 *ptr, u64 value)
 {
+	u32 low  = value;
+	u32 high = value >> 32;
+	u64 prev = *ptr;
+
 	asm volatile("\n1:\t"
-		     "movl (%0), %%eax\n\t"
-		     "movl 4(%0), %%edx\n\t"
-		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+		     LOCK_PREFIX "cmpxchg8b %0\n\t"
 		     "jnz 1b"
-		     : /* no outputs */
-		     : "D"(ptr),
-		       "b"(low),
-		       "c"(high)
-		     : "ax", "dx", "memory");
-}
-
-static inline void __set_64bit_constant(unsigned long long *ptr,
-					unsigned long long value)
-{
-	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
-}
-
-#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
-#define ll_high(x)	*(((unsigned int *)&(x)) + 1)
-
-static inline void __set_64bit_var(unsigned long long *ptr,
-				   unsigned long long value)
-{
-	__set_64bit(ptr, ll_low(value), ll_high(value));
+		     : "=m" (*ptr), "+A" (prev)
+		     : "b" (low), "c" (high)
+		     : "memory");
 }
 
-#define set_64bit(ptr, value)			\
-	(__builtin_constant_p((value))		\
-	 ? __set_64bit_constant((ptr), (value))	\
-	 : __set_64bit_var((ptr), (value)))
-
-#define _set_64bit(ptr, value)						\
-	(__builtin_constant_p(value)					\
-	 ? __set_64bit(ptr, (unsigned int)(value),			\
-		       (unsigned int)((value) >> 32))			\
-	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
-
 extern void __cmpxchg_wrong_size(void);
 
 /*
@@ -121,23 +98,32 @@ extern void __cmpxchg_wrong_size(void);
 	__typeof__(*(ptr)) __new = (new);				\
 	switch (size) {							\
 	case 1:								\
-		asm volatile(lock "cmpxchgb %b1,%2"			\
-			     : "=a"(__ret)				\
-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile(lock "cmpxchgb %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "q" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	case 2:								\
-		asm volatile(lock "cmpxchgw %w1,%2"			\
-			     : "=a"(__ret)				\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile(lock "cmpxchgw %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	case 4:								\
-		asm volatile(lock "cmpxchgl %1,%2"			\
-			     : "=a"(__ret)				\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile(lock "cmpxchgl %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	default:							\
 		__cmpxchg_wrong_size();					\
 	}								\
@@ -175,32 +161,28 @@ extern void __cmpxchg_wrong_size(void);
 					       (unsigned long long)(n)))
 #endif
 
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
+static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-	unsigned long long prev;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
-		     : "=A"(prev)
-		     : "b"((unsigned long)new),
-		       "c"((unsigned long)(new >> 32)),
-		       "m"(*__xg(ptr)),
-		       "0"(old)
+	u64 prev;
+	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
+		     : "=A" (prev),
+		       "+m" (*ptr)
+		     : "b" ((u32)new),
+		       "c" ((u32)(new >> 32)),
+		       "0" (old)
 		     : "memory");
 	return prev;
 }
 
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-						   unsigned long long old,
-						   unsigned long long new)
+static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-	unsigned long long prev;
-	asm volatile("cmpxchg8b %3"
-		     : "=A"(prev)
-		     : "b"((unsigned long)new),
-		       "c"((unsigned long)(new >> 32)),
-		       "m"(*__xg(ptr)),
-		       "0"(old)
+	u64 prev;
+	asm volatile("cmpxchg8b %1"
+		     : "=A" (prev),
+		       "+m" (*ptr)
+		     : "b" ((u32)new),
+		       "c" ((u32)(new >> 32)),
+		       "0" (old)
 		     : "memory");
 	return prev;
 }
@@ -264,8 +246,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
  * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
  */
 
-extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
-
 #define cmpxchg64(ptr, o, n)					\
 ({								\
 	__typeof__(*(ptr)) __ret;				\
@@ -283,20 +263,20 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 	__ret; })
 
 
-
-#define cmpxchg64_local(ptr, o, n)				\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	if (likely(boot_cpu_data.x86 > 4))			\
-		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
-				(unsigned long long)(o),	\
-				(unsigned long long)(n));	\
-	else							\
-		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
-				(unsigned long long)(o),	\
-				(unsigned long long)(n));	\
-	__ret;							\
-})
+#define cmpxchg64_local(ptr, o, n)				\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	__typeof__(*(ptr)) __old = (o);				\
+	__typeof__(*(ptr)) __new = (n);				\
+	alternative_io("call cmpxchg8b_emu",			\
+		       "cmpxchg8b (%%esi)" ,			\
+		       X86_FEATURE_CX8,				\
+		       "=A" (__ret),				\
+		       "S" ((ptr)), "0" (__old),		\
+		       "b" ((unsigned int)__new),		\
+		       "c" ((unsigned int)(__new>>32))		\
+		       : "memory");				\
+	__ret; })
 
 #endif
 
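The new comment in set_64bit() above describes why the cmpxchg8b loop converges: on failure the instruction leaves the observed ("new previous") value in EDX:EAX, so the next attempt needs no reload. A rough C11-atomics model of that loop, for illustration only (the kernel code is the inline asm above):

#include <stdint.h>
#include <stdatomic.h>

static void set_64bit_model(_Atomic uint64_t *ptr, uint64_t value)
{
	uint64_t prev = atomic_load_explicit(ptr, memory_order_relaxed);

	/* On failure, compare_exchange writes the observed value back into
	 * 'prev', much like cmpxchg8b leaving the "new previous" value in
	 * EDX:EAX, so the retry needs no extra load. */
	while (!atomic_compare_exchange_weak(ptr, &prev, value))
		;
}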
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 485ae415faec..423ae58aa020 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -3,51 +3,60 @@
 
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
 	*ptr = val;
 }
 
-#define _set_64bit set_64bit
-
 extern void __xchg_wrong_size(void);
 extern void __cmpxchg_wrong_size(void);
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
 #define __xchg(x, ptr, size)					\
 ({								\
 	__typeof(*(ptr)) __x = (x);				\
 	switch (size) {						\
 	case 1:							\
-		asm volatile("xchgb %b0,%1"			\
-			     : "=q" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
+		asm volatile("xchgb %0,%1"			\
+			     : "=q" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 2:							\
-		asm volatile("xchgw %w0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
+		asm volatile("xchgw %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 4:							\
-		asm volatile("xchgl %k0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+	{							\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
+		asm volatile("xchgl %0,%1"			\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	case 8:							\
+	{							\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
 		asm volatile("xchgq %0,%1"			\
-			     : "=r" (__x)			\
-			     : "m" (*__xg(ptr)), "0" (__x)	\
+			     : "=r" (__x), "+m" (*__ptr)	\
+			     : "0" (__x)			\
 			     : "memory");			\
 		break;						\
+	}							\
 	default:						\
 		__xchg_wrong_size();				\
 	}							\
@@ -71,29 +80,41 @@ extern void __cmpxchg_wrong_size(void);
 	__typeof__(*(ptr)) __new = (new);				\
 	switch (size) {							\
 	case 1:								\
-		asm volatile(lock "cmpxchgb %b1,%2"			\
-			     : "=a"(__ret)				\
-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile(lock "cmpxchgb %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "q" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	case 2:								\
-		asm volatile(lock "cmpxchgw %w1,%2"			\
-			     : "=a"(__ret)				\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile(lock "cmpxchgw %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	case 4:								\
-		asm volatile(lock "cmpxchgl %k1,%2"			\
-			     : "=a"(__ret)				\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile(lock "cmpxchgl %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	case 8:								\
-		asm volatile(lock "cmpxchgq %1,%2"			\
-			     : "=a"(__ret)				\
-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
+		asm volatile(lock "cmpxchgq %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
 			     : "memory");				\
 		break;							\
+	}								\
 	default:							\
 		__cmpxchg_wrong_size();					\
 	}								\
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2eeb2e692008..65bbec2093aa 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -161,8 +161,6 @@
 #define MSR_K7_FID_VID_STATUS		0xc0010042
 
 /* K6 MSRs */
-#define MSR_K6_EFER			0xc0000080
-#define MSR_K6_STAR			0xc0000081
 #define MSR_K6_WHCR			0xc0000082
 #define MSR_K6_UWCCR			0xc0000085
 #define MSR_K6_EPMR			0xc0000086
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 580b4e296010..28595d6df47c 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -104,7 +104,7 @@ _start:
 	movl	%eax, %ecx
 	orl	%edx, %ecx
 	jz	1f
-	movl	$0xc0000080, %ecx
+	movl	$MSR_EFER, %ecx
 	wrmsr
 1:
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 5e3a3512ba05..3f0ebe429a01 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -16,7 +16,7 @@ obj-y := intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
 obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o
 
-obj-$(CONFIG_X86_32)	+= bugs.o cmpxchg.o
+obj-$(CONFIG_X86_32)	+= bugs.o
 obj-$(CONFIG_X86_64)	+= bugs_64.o
 
 obj-$(CONFIG_CPU_SUP_INTEL)	+= intel.o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 6b196834a0dd..258e93fa2630 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -611,14 +611,14 @@ ldt_ss:
  * compensating for the offset by changing to the ESPFIX segment with
  * a base address that matches for the difference.
  */
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
 	mov %esp, %edx			/* load kernel esp */
 	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
 	mov %dx, %ax			/* eax: new kernel esp */
 	sub %eax, %edx			/* offset (low word is 0) */
-	PER_CPU(gdt_page, %ebx)
 	shr $16, %edx
-	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
-	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
+	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
 	pushl $__ESPFIX_SS
 	CFI_ADJUST_CFA_OFFSET 4
 	push %eax			/* new kernel esp */
@@ -791,9 +791,8 @@ ptregs_clone:
  * normal stack and adjusts ESP with the matching offset.
  */
 	/* fixup the stack */
-	PER_CPU(gdt_page, %ebx)
-	mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
-	mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax			/* the adjusted stack pointer */
 	pushl $__KERNEL_DS
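The GDT_ESPFIX_SS change above works because of the fixed layout of an 8-byte segment descriptor: base bits 16..23 live in byte 4 and bits 24..31 in byte 7. A small sketch of the byte arithmetic the fixup relies on (illustration only, hypothetical helper, not kernel code):

#include <stdint.h>

static void set_desc_base_16_31(uint8_t desc[8], uint32_t base)
{
	desc[4] = (uint8_t)(base >> 16);	/* base bits 16..23 -> descriptor byte 4 */
	desc[7] = (uint8_t)(base >> 24);	/* base bits 24..31 -> descriptor byte 7 */
}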
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 649ed17f7009..c5ea5cdbe7b3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1065,6 +1065,7 @@ ENTRY(\sym)
 END(\sym)
 .endm
 
+#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
 	INTR_FRAME
@@ -1076,10 +1077,9 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	PER_CPU(init_tss, %r12)
-	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
+	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
+	addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3d1e6f16b7a6..239046bd447f 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -234,9 +234,8 @@ ENTRY(secondary_startup_64)
 	 * init data section till per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
-	movq	initial_gs(%rip),%rax
-	movq	%rax,%rdx
-	shrq	$32,%rdx
+	movl	initial_gs(%rip),%eax
+	movl	initial_gs+4(%rip),%edx
 	wrmsr
 
 	/* esi is pointer to real mode structure with interesting info.
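The head_64.S simplification relies on wrmsr taking the low 32 bits in EAX and the high 32 bits in EDX, so two movl loads from initial_gs and initial_gs+4 replace the movq/shrq pair. A sketch of the equivalent split in C (illustration only):

#include <stdint.h>

static void split_for_wrmsr(uint64_t value, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)value;		/* low half, what EAX must hold  */
	*edx = (uint32_t)(value >> 32);	/* high half, what EDX must hold */
}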
diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
index 45b6f8a975a1..56a8c2a867d9 100644
--- a/arch/x86/kernel/verify_cpu_64.S
+++ b/arch/x86/kernel/verify_cpu_64.S
@@ -31,6 +31,7 @@
  */
 
 #include <asm/cpufeature.h>
+#include <asm/msr-index.h>
 
 verify_cpu:
 	pushfl				# Save caller passed flags
@@ -88,7 +89,7 @@ verify_cpu_sse_test:
 	je	verify_cpu_sse_ok
 	test	%di,%di
 	jz	verify_cpu_no_longmode	# only try to force SSE on AMD
-	movl	$0xc0010015,%ecx	# HWCR
+	movl	$MSR_K7_HWCR,%ecx
 	rdmsr
 	btr	$15,%eax		# enable SSE
 	wrmsr
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5c81daf3ef57..bc5b9b8d4a33 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -131,7 +131,7 @@ static struct svm_direct_access_msrs {
 	u32 index;   /* Index of the MSR */
 	bool always; /* True if intercept is always on */
 } direct_access_msrs[] = {
-	{ .index = MSR_K6_STAR,				.always = true },
+	{ .index = MSR_STAR,				.always = true },
 	{ .index = MSR_IA32_SYSENTER_CS,		.always = true },
 #ifdef CONFIG_X86_64
 	{ .index = MSR_GS_BASE,				.always = true },
@@ -2432,7 +2432,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 		*data = tsc_offset + native_read_tsc();
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		*data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
@@ -2556,7 +2556,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 27a0222c2946..49b25eee25ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -240,14 +240,14 @@ static u64 host_efer;
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
-	MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
+	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
@@ -1117,10 +1117,10 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		if (index >= 0 && vmx->rdtscp_enabled)
 			move_msr_up(vmx, index, save_nmsrs++);
 		/*
-		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * MSR_STAR is only needed on long mode guests, and only
 		 * if efer.sce is enabled.
 		 */
-		index = __find_msr_index(vmx, MSR_K6_STAR);
+		index = __find_msr_index(vmx, MSR_STAR);
 		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 97aab036dabf..25f19078b321 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -733,7 +733,7 @@ static u32 msrs_to_save[] = {
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_APIC_ASSIST_PAGE,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-	MSR_K6_STAR,
+	MSR_STAR,
 #ifdef CONFIG_X86_64
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f871e04b6965..e10cf070ede0 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -30,6 +30,7 @@ ifeq ($(CONFIG_X86_32),y)
         lib-y += checksum_32.o
         lib-y += strstr_32.o
         lib-y += semaphore_32.o string_32.o
+        lib-y += cmpxchg.o
 ifneq ($(CONFIG_X86_CMPXCHG64),y)
         lib-y += cmpxchg8b_emu.o atomic64_386_32.o
 endif
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/lib/cmpxchg.c
index 2056ccf572cc..5d619f6df3ee 100644
--- a/arch/x86/kernel/cpu/cmpxchg.c
+++ b/arch/x86/lib/cmpxchg.c
@@ -52,21 +52,3 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
 }
 EXPORT_SYMBOL(cmpxchg_386_u32);
 #endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
-