author    | Avi Kivity <avi@qumranet.com>                         | 2007-07-19 07:30:14 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org>  | 2007-07-19 17:37:05 -0400
commit    | 2d9ce177e68645945e3366cfe2d66ee3c28cd4f2 (patch)
tree      | a98a3a8b0f1e92f0b8f9ecb44b67bb46c3b4451a /include/asm-i386/cmpxchg.h
parent    | 3e1f900bff40460d7bbab0ccd1a9efc3c70aee49 (diff)
i386: Allow KVM on i386 nonpae
Currently, CONFIG_X86_CMPXCHG64 both enables boot-time checking of
the cmpxchg64b feature and enables compilation of the set_64bit() family.
Since the option is dependent on PAE, and since KVM depends on set_64bit(),
this effectively disables KVM on i386 nopae.
Simplify by removing the config option altogether: the boot check is made
dependent on CONFIG_X86_PAE directly, and the set_64bit() family is exposed
without constraints. It is up to users to check for the feature flag (KVM
does not, as virtualization extensions imply its existence).
Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
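
The requirement described above can be illustrated with a caller-side sketch. This is not code from the commit: the wrapper name, the error codes, and the counter example are invented for illustration; only boot_cpu_has(), X86_FEATURE_CX8, and cmpxchg64() are existing kernel interfaces of that era.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/cpufeature.h>	/* boot_cpu_has(), X86_FEATURE_CX8 */
#include <asm/cmpxchg.h>	/* cmpxchg64() */

/*
 * Illustrative only: with CONFIG_X86_CMPXCHG64 gone, a user of the
 * 64-bit primitives is expected to test boot_cpu_data for the
 * CMPXCHG8B feature itself before calling them.
 */
static int example_update_u64(u64 *p, u64 old, u64 new)
{
	if (!boot_cpu_has(X86_FEATURE_CX8))
		return -ENODEV;		/* CPU lacks CMPXCHG8B */

	return cmpxchg64(p, old, new) == old ? 0 : -EAGAIN;
}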
Diffstat (limited to 'include/asm-i386/cmpxchg.h')
-rw-r--r-- | include/asm-i386/cmpxchg.h | 14
1 files changed, 5 insertions, 9 deletions
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
index 7adcef0cd53b..64dcdf46117b 100644
--- a/include/asm-i386/cmpxchg.h
+++ b/include/asm-i386/cmpxchg.h
@@ -3,14 +3,16 @@
 
 #include <linux/bitops.h> /* for LOCK_PREFIX */
 
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ *       you need to test for the feature in boot_cpu_data.
+ */
+
 #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-
-#ifdef CONFIG_X86_CMPXCHG64
-
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
 	__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 	__set_64bit(ptr, ll_low(value), ll_high(value)) )
 
-#endif
-
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
 				      unsigned long long new)
 {
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
 	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
 					(unsigned long long)(n)))
 #endif
-
-#endif