author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 22:59:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 22:59:32 -0500
commit		743aa456c1834f76982af44e8b71d1a0b2a82e21
tree		f240782115da675496c8d9d5328722933d0ef601
parent		a05a4e24dcd73c2de4ef3f8d520b8bbb44570c60
parent		11af32b69ef7ee64c7d8848cad71a6f3749d9e37
Merge branch 'x86-nuke386-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull "Nuke 386-DX/SX support" from Ingo Molnar: "This tree removes ancient-386-CPUs support and thus zaps quite a bit of complexity: 24 files changed, 56 insertions(+), 425 deletions(-) ... which complexity has plagued us with extra work whenever we wanted to change SMP primitives, for years. Unfortunately there's a nostalgic cost: your old original 386 DX33 system from early 1991 won't be able to boot modern Linux kernels anymore. Sniff." I'm not sentimental. Good riddance. * 'x86-nuke386-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86, 386 removal: Document Nx586 as a 386 and thus unsupported x86, cleanups: Simplify sync_core() in the case of no CPUID x86, 386 removal: Remove CONFIG_X86_POPAD_OK x86, 386 removal: Remove CONFIG_X86_WP_WORKS_OK x86, 386 removal: Remove CONFIG_INVLPG x86, 386 removal: Remove CONFIG_BSWAP x86, 386 removal: Remove CONFIG_XADD x86, 386 removal: Remove CONFIG_CMPXCHG x86, 386 removal: Remove CONFIG_M386 from Kconfig
-rw-r--r--	arch/x86/Kconfig			11
-rw-r--r--	arch/x86/Kconfig.cpu			73
-rw-r--r--	arch/x86/Makefile_32.cpu		1
-rw-r--r--	arch/x86/include/asm/atomic.h		16
-rw-r--r--	arch/x86/include/asm/cmpxchg_32.h	55
-rw-r--r--	arch/x86/include/asm/cpufeature.h	6
-rw-r--r--	arch/x86/include/asm/futex.h		12
-rw-r--r--	arch/x86/include/asm/local.h		18
-rw-r--r--	arch/x86/include/asm/module.h		2
-rw-r--r--	arch/x86/include/asm/percpu.h		3
-rw-r--r--	arch/x86/include/asm/processor.h	33
-rw-r--r--	arch/x86/include/asm/swab.h		29
-rw-r--r--	arch/x86/include/asm/tlbflush.h		3
-rw-r--r--	arch/x86/include/asm/uaccess.h		42
-rw-r--r--	arch/x86/kernel/cpu/amd.c		3
-rw-r--r--	arch/x86/kernel/cpu/bugs.c		41
-rw-r--r--	arch/x86/kernel/cpu/intel.c		4
-rw-r--r--	arch/x86/lib/Makefile			1
-rw-r--r--	arch/x86/lib/cmpxchg.c			54
-rw-r--r--	arch/x86/lib/usercopy_32.c		57
-rw-r--r--	arch/x86/mm/init_32.c			5
-rw-r--r--	arch/x86/mm/tlb.c			8
-rw-r--r--	arch/x86/um/Kconfig			2
-rw-r--r--	arch/x86/xen/Kconfig			2
24 files changed, 56 insertions(+), 425 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2d643255c40d..037c4e30c271 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -69,8 +69,8 @@ config X86
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_DEBUG_KMEMLEAK
 	select ANON_INODES
-	select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
-	select HAVE_CMPXCHG_LOCAL if !M386
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_USER_RETURN_NOTIFIER
@@ -171,13 +171,8 @@ config ARCH_MAY_HAVE_PC_FDC
 	def_bool y
 	depends on ISA_DMA_API
 
-config RWSEM_GENERIC_SPINLOCK
-	def_bool y
-	depends on !X86_XADD
-
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
-	depends on X86_XADD
 
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
@@ -1100,7 +1095,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
 	bool "64GB"
-	depends on !M386 && !M486
+	depends on !M486
 	select X86_PAE
 	---help---
 	  Select this if you have a 32-bit processor and more than 4
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index f3b86d0df44e..c026cca5602c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -4,23 +4,24 @@ choice
 	default M686 if X86_32
 	default GENERIC_CPU if X86_64
 
-config M386
-	bool "386"
-	depends on X86_32 && !UML
-	---help---
-	  This is the processor type of your CPU. This information is used for
-	  optimizing purposes. In order to compile a kernel that can run on
-	  all x86 CPU types (albeit not optimally fast), you can specify
-	  "386" here.
+config M486
+	bool "486"
+	depends on X86_32
+	---help---
+	  This is the processor type of your CPU. This information is
+	  used for optimizing purposes. In order to compile a kernel
+	  that can run on all supported x86 CPU types (albeit not
+	  optimally fast), you can specify "486" here.
+
+	  Note that the 386 is no longer supported, this includes
+	  AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI 486DLC/DLC2,
+	  UMC 486SX-S and the NexGen Nx586.
 
 	  The kernel will not necessarily run on earlier architectures than
 	  the one you have chosen, e.g. a Pentium optimized kernel will run on
 	  a PPro, but not necessarily on a i486.
 
 	  Here are the settings recommended for greatest speed:
-	  - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
-	    486DLC/DLC2, and UMC 486SX-S. Only "386" kernels will run on a 386
-	    class machine.
 	  - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
 	    SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
 	  - "586" for generic Pentium CPUs lacking the TSC
@@ -43,16 +44,7 @@ config M386
43 - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above). 44 - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
44 - "VIA C7" for VIA C7. 45 - "VIA C7" for VIA C7.
45 46
46 If you don't know what to do, choose "386". 47 If you don't know what to do, choose "486".
47
48config M486
49 bool "486"
50 depends on X86_32
51 ---help---
52 Select this for a 486 series processor, either Intel or one of the
53 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
54 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
55 U5S.
56 48
57config M586 49config M586
58 bool "586/K5/5x86/6x86/6x86MX" 50 bool "586/K5/5x86/6x86/6x86MX"
@@ -305,24 +297,16 @@ config X86_INTERNODE_CACHE_SHIFT
 	default "12" if X86_VSMP
 	default X86_L1_CACHE_SHIFT
 
-config X86_CMPXCHG
-	def_bool y
-	depends on X86_64 || (X86_32 && !M386)
-
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
 	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-	default "4" if MELAN || M486 || M386 || MGEODEGX1
+	default "4" if MELAN || M486 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
-config X86_XADD
-	def_bool y
-	depends on !M386
-
 config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
-	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
+	depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
 	---help---
 	  Old PentiumPro multiprocessor systems had errata that could cause
 	  memory operations to violate the x86 ordering standard in rare cases.
@@ -335,27 +319,11 @@ config X86_PPRO_FENCE
 
 config X86_F00F_BUG
 	def_bool y
-	depends on M586MMX || M586TSC || M586 || M486 || M386
+	depends on M586MMX || M586TSC || M586 || M486
 
 config X86_INVD_BUG
 	def_bool y
-	depends on M486 || M386
-
-config X86_WP_WORKS_OK
-	def_bool y
-	depends on !M386
-
-config X86_INVLPG
-	def_bool y
-	depends on X86_32 && !M386
-
-config X86_BSWAP
-	def_bool y
-	depends on X86_32 && !M386
-
-config X86_POPAD_OK
-	def_bool y
-	depends on X86_32 && !M386
+	depends on M486
 
 config X86_ALIGNMENT_16
 	def_bool y
@@ -412,12 +380,11 @@ config X86_MINIMUM_CPU_FAMILY
 	default "64" if X86_64
 	default "6" if X86_32 && X86_P6_NOP
 	default "5" if X86_32 && X86_CMPXCHG64
-	default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
-	default "3"
+	default "4"
 
 config X86_DEBUGCTLMSR
 	def_bool y
-	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
+	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486) && !UML
 
 menuconfig PROCESSOR_SELECT
 	bool "Supported processor vendors" if EXPERT
@@ -441,7 +408,7 @@ config CPU_SUP_INTEL
 config CPU_SUP_CYRIX_32
 	default y
 	bool "Support Cyrix processors" if PROCESSOR_SELECT
-	depends on M386 || M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
+	depends on M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
 	---help---
 	  This enables detection, tunings and quirks for Cyrix processors
 
@@ -495,7 +462,7 @@ config CPU_SUP_TRANSMETA_32
 config CPU_SUP_UMC_32
 	default y
 	bool "Support UMC processors" if PROCESSOR_SELECT
-	depends on M386 || M486 || (EXPERT && !64BIT)
+	depends on M486 || (EXPERT && !64BIT)
 	---help---
 	  This enables detection, tunings and quirks for UMC processors
 
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 86cee7b749e1..6647ed49c66c 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -10,7 +10,6 @@ tune = $(call cc-option,-mcpu=$(1),$(2))
 endif
 
 align := $(cc-option-align)
-cflags-$(CONFIG_M386)		+= -march=i386
 cflags-$(CONFIG_M486)		+= -march=i486
 cflags-$(CONFIG_M586)		+= -march=i586
 cflags-$(CONFIG_M586TSC)	+= -march=i586
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b6c3b821acf6..722aa3b04624 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -172,23 +172,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-#ifdef CONFIG_M386
-	int __i;
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
 	return i + xadd(&v->counter, i);
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	raw_local_irq_save(flags);
-	__i = atomic_read(v);
-	atomic_set(v, i + __i);
-	raw_local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 /**
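The single surviving line works because xadd leaves the counter's previous value in its source operand, so add-and-return is just i plus that old value. A minimal userspace analogue of the kernel's xadd() for a 4-byte operand (a sketch assuming GCC inline asm on x86; xadd32 is an illustrative name, not a kernel symbol):

    #include <stdio.h>

    /* Roughly what the kernel's xadd() expands to for a 4-byte operand. */
    static inline int xadd32(int *ptr, int inc)
    {
        asm volatile("lock; xaddl %0, %1"
                     : "+r" (inc), "+m" (*ptr)
                     : : "memory");
        return inc;         /* register now holds the old *ptr */
    }

    int main(void)
    {
        int counter = 7, i = 3;
        int ret = i + xadd32(&counter, i);

        printf("%d %d\n", ret, counter);    /* prints "10 10" */
        return 0;
    }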
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 53f4b219336b..f8bf2eecab86 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,9 +34,7 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }
 
-#ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)					\
@@ -73,59 +71,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 	return prev;
 }
 
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-					unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#endif
-
 #ifndef CONFIG_X86_CMPXCHG64
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
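With the 386 emulation gone, cmpxchg() on 32-bit always compiles down to the bare lock cmpxchg instruction. Its canonical use is the optimistic retry loop; a hedged userspace rendition using the GCC builtin rather than the kernel macro (illustrative only; atomic_double is a made-up name):

    #include <stdio.h>

    /* Atomically double *p, retrying if another thread races us. */
    static int atomic_double(int *p)
    {
        int old, new;

        do {
            old = *p;
            new = old * 2;
            /* returns the value found in *p; success iff it equals old */
        } while (__sync_val_compare_and_swap(p, old, new) != old);

        return new;
    }

    int main(void)
    {
        int v = 21;

        printf("%d\n", atomic_double(&v));  /* prints 42 */
        return 0;
    }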
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index c22a492daf57..da40b1e2228e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -313,12 +313,6 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext	boot_cpu_has(X86_FEATURE_TOPOEXT)
 
-#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
-# define cpu_has_invlpg		1
-#else
-# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
-#endif
-
 #ifdef CONFIG_X86_64
 
 #undef  cpu_has_vme
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index f373046e63ec..be27ba1e947a 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -55,12 +55,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines can only support FUTEX_OP_SET */
-	if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	pagefault_disable();
 
 	switch (op) {
@@ -118,12 +112,6 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret = 0;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines have no cmpxchg instruction */
-	if (boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index c8bed0da434a..2d89e3980cbd 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -124,27 +124,11 @@ static inline int local_add_negative(long i, local_t *l)
  */
 static inline long local_add_return(long i, local_t *l)
 {
-	long __i;
-#ifdef CONFIG_M386
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
-	__i = i;
+	long __i = i;
 	asm volatile(_ASM_XADD "%0, %1;"
 		     : "+r" (i), "+m" (l->a.counter)
 		     : : "memory");
 	return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	local_irq_save(flags);
-	__i = local_read(l);
-	local_set(l, i + __i);
-	local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 static inline long local_sub_return(long i, local_t *l)
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 9eae7752ae9b..e3b7819caeef 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,8 +5,6 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
-#elif defined CONFIG_M386
-#define MODULE_PROC_FAMILY "386 "
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 1104afaba52b..0da5200ee79d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -406,7 +406,6 @@ do { \
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
@@ -421,8 +420,6 @@ do { \
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#endif /* !CONFIG_M386 */
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)	\
 ({									\
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index db0d8c32090c..e101b38912de 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -672,18 +672,29 @@ static inline void sync_core(void)
 {
 	int tmp;
 
-#if defined(CONFIG_M386) || defined(CONFIG_M486)
-	if (boot_cpu_data.x86 < 5)
-		/* There is no speculative execution.
-		 * jmp is a barrier to prefetching. */
-		asm volatile("jmp 1f\n1:\n" ::: "memory");
-	else
+#ifdef CONFIG_M486
+	/*
+	 * Do a CPUID if available, otherwise do a jump. The jump
+	 * can conveniently enough be the jump around CPUID.
+	 */
+	asm volatile("cmpl %2,%1\n\t"
+		     "jl 1f\n\t"
+		     "cpuid\n"
+		     "1:"
+		     : "=a" (tmp)
+		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
+#else
+	/*
+	 * CPUID is a barrier to speculative execution.
+	 * Prefetched instructions are automatically
+	 * invalidated when modified.
+	 */
+	asm volatile("cpuid"
+		     : "=a" (tmp)
+		     : "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
 #endif
-	/* cpuid is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified. */
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
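The new CONFIG_M486 branch copes with early 486s that lack CPUID: it compares boot_cpu_data.cpuid_level (negative when CPUID is absent) against zero and jumps past the instruction, the jump itself serving as the prefetch barrier those parts need. On everything newer, CPUID alone does the job because it is architecturally serializing. A standalone sketch of that idiom (assumes x86 and GCC; serialize_cpu is an illustrative name):

    /* CPUID drains the pipeline and discards speculated/prefetched
     * instructions; eax/ebx/ecx/edx are listed so the compiler knows
     * they are clobbered. */
    static inline void serialize_cpu(void)
    {
        unsigned int eax = 1, ebx, ecx, edx;

        asm volatile("cpuid"
                     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "0" (eax)
                     : "memory");
    }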
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h
index 557cd9f00661..7f235c7105c1 100644
--- a/arch/x86/include/asm/swab.h
+++ b/arch/x86/include/asm/swab.h
@@ -6,22 +6,7 @@
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef __i386__
-# ifdef CONFIG_X86_BSWAP
-	asm("bswap %0" : "=r" (val) : "0" (val));
-# else
-	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
-	    "rorl $16,%0\n\t"	/* swap words		*/
-	    "xchgb %b0,%h0"	/* swap higher bytes	*/
-	    : "=q" (val)
-	    : "0" (val));
-# endif
-
-#else /* __i386__ */
-	asm("bswapl %0"
-	    : "=r" (val)
-	    : "0" (val));
-#endif
+	asm("bswapl %0" : "=r" (val) : "0" (val));
 	return val;
 }
 #define __arch_swab32 __arch_swab32
@@ -37,22 +22,12 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 		__u64 u;
 	} v;
 	v.u = val;
-# ifdef CONFIG_X86_BSWAP
 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-# else
-	v.s.a = __arch_swab32(v.s.a);
-	v.s.b = __arch_swab32(v.s.b);
-	asm("xchgl %0,%1"
-	    : "=r" (v.s.a), "=r" (v.s.b)
-	    : "0" (v.s.a), "1" (v.s.b));
-# endif
 	return v.u;
 #else /* __i386__ */
-	asm("bswapq %0"
-	    : "=r" (val)
-	    : "0" (val));
+	asm("bswapq %0" : "=r" (val) : "0" (val));
 	return val;
 #endif
 }
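For reference, the deleted 32-bit fallback swapped bytes with xchgb/rorl because bswap only appeared with the 486; the kept path is the single instruction. Both compute the same permutation as this portable shift-and-mask version (plain C sketch; swab32_portable is an illustrative name):

    #include <stdio.h>
    #include <stdint.h>

    /* Portable equivalent of the bswapl instruction. */
    static uint32_t swab32_portable(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    int main(void)
    {
        printf("%#x\n", swab32_portable(0x12345678u));  /* 0x78563412 */
        return 0;
    }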
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 74a44333545a..0fee48e279cc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -56,10 +56,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	if (cpu_has_invlpg)
-		__flush_tlb_single(addr);
-	else
-		__flush_tlb();
+	__flush_tlb_single(addr);
 }
 
 #define TLB_FLUSH_ALL	-1UL
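__flush_tlb_single() is backed by invlpg, which drops only the TLB entry covering one address; the deleted else-branch existed because the 386 had no invlpg and could only dump the whole TLB by reloading CR3. A sketch of both operations in kernel-style inline asm (illustrative names; invlpg and CR3 writes are privileged, so this is ring-0-only code):

    /* i486+: invalidate the single TLB entry that maps 'addr'. */
    static inline void flush_one(unsigned long addr)
    {
        asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
    }

    /* 386 fallback: rewriting CR3 throws away every non-global entry. */
    static inline void flush_all(void)
    {
        unsigned long cr3;

        asm volatile("mov %%cr3, %0" : "=r" (cr3));
        asm volatile("mov %0, %%cr3" : : "r" (cr3) : "memory");
    }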
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 7ccf8d131535..1709801d18ec 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -237,8 +237,6 @@ extern void __put_user_2(void);
 extern void __put_user_4(void);
 extern void __put_user_8(void);
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 /**
  * put_user: - Write a simple value into user space.
  * @x:   Value to copy to user space.
@@ -326,29 +324,6 @@ do { \
 	}								\
 } while (0)
 
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	retval = 0;							\
-									\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
-		retval = errret;					\
-} while (0)
-
-#define put_user(x, ptr)					\
-({								\
-	int __ret_pu;						\
-	__typeof__(*(ptr))__pus_tmp = x;			\
-	__ret_pu = 0;						\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
-				sizeof(*(ptr))) != 0))		\
-		__ret_pu = -EFAULT;				\
-	__ret_pu;						\
-})
-#endif
-
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
@@ -543,29 +518,12 @@ struct __large_struct { unsigned long buf[100]; };
 	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
 } while (0)
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 #define put_user_try		uaccess_try
 #define put_user_catch(err)	uaccess_catch(err)
 
 #define put_user_ex(x, ptr)	\
 	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#else /* !CONFIG_X86_WP_WORKS_OK */
-
-#define put_user_try		do {		\
-	int __uaccess_err = 0;
-
-#define put_user_catch(err)			\
-	(err) |= __uaccess_err;			\
-} while (0)
-
-#define put_user_ex(x, ptr) do {		\
-	__uaccess_err |= __put_user(x, ptr);	\
-} while (0)
-
-#endif /* CONFIG_X86_WP_WORKS_OK */
-
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 extern __must_check long
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ce71a25f4523..15239fffd6fe 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -748,9 +748,6 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
 
 static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
-	if (!cpu_has_invlpg)
-		return;
-
 	tlb_flushall_shift = 5;
 
 	if (c->x86 <= 0x11)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d0e910da16c5..92dfec986a48 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -107,53 +107,17 @@ static void __init check_hlt(void)
 }
 
 /*
- * Most 386 processors have a bug where a POPAD can lock the
- * machine even from user space.
- */
-
-static void __init check_popad(void)
-{
-#ifndef CONFIG_X86_POPAD_OK
-	int res, inp = (int) &res;
-
-	pr_info("Checking for popad bug... ");
-	__asm__ __volatile__(
-		"movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
-		: "=&a" (res)
-		: "d" (inp)
-		: "ecx", "edi");
-	/*
-	 * If this fails, it means that any user program may lock the
-	 * CPU hard. Too bad.
-	 */
-	if (res != 12345678)
-		pr_cont("Buggy\n");
-	else
-		pr_cont("OK\n");
-#endif
-}
-
-/*
  * Check whether we are able to run this kernel safely on SMP.
  *
- * - In order to run on a i386, we need to be compiled for i386
- *   (for due to lack of "invlpg" and working WP on a i386)
+ * - i386 is no longer supported.
  * - In order to run on anything without a TSC, we need to be
  *   compiled for a i486.
  */
 
 static void __init check_config(void)
 {
-/*
- * We'd better not be a i386 if we're configured to use some
- * i486+ only features! (WP works in supervisor mode and the
- * new "invlpg" and "bswap" instructions)
- */
-#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
-	defined(CONFIG_X86_BSWAP)
-	if (boot_cpu_data.x86 == 3)
+	if (boot_cpu_data.x86 < 4)
 		panic("Kernel requires i486+ for 'invlpg' and other features");
-#endif
 }
 
 
@@ -166,7 +130,6 @@ void __init check_bugs(void)
 #endif
 	check_config();
 	check_hlt();
-	check_popad();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 198e019a531a..fcaabd0432c5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -612,10 +612,6 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
 
 static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 {
-	if (!cpu_has_invlpg) {
-		tlb_flushall_shift = -1;
-		return;
-	}
 	switch ((c->x86 << 8) + c->x86_model) {
 	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
 	case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index b00f6785da74..96b2c6697c9d 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -32,7 +32,6 @@ ifeq ($(CONFIG_X86_32),y)
         lib-y += checksum_32.o
         lib-y += strstr_32.o
         lib-y += string_32.o
-        lib-y += cmpxchg.o
 ifneq ($(CONFIG_X86_CMPXCHG64),y)
         lib-y += cmpxchg8b_emu.o atomic64_386_32.o
 endif
diff --git a/arch/x86/lib/cmpxchg.c b/arch/x86/lib/cmpxchg.c
deleted file mode 100644
index 5d619f6df3ee..000000000000
--- a/arch/x86/lib/cmpxchg.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * cmpxchg*() fallbacks for CPU not supporting these instructions
- */
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
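The "Unsuitable for SMP" comments above are the crux of why this emulation had to die with the 386: local_irq_save() masks interrupts only on the executing CPU, so on a multiprocessor another CPU can store to *ptr between the load and the conditional write. An annotated timeline of that lost-update race (editorial illustration, C comment form):

    /*
     *  CPU 0 (emulated cmpxchg)             CPU 1
     *  ------------------------             -----
     *  local_irq_save(flags);
     *  prev = *ptr;            // sees A
     *                                       *ptr = B;  // not blocked by
     *                                                  // CPU 0's irq mask
     *  if (prev == old)        // compares stale A
     *          *ptr = new;     // overwrites B: CPU 1's update is lost
     *  local_irq_restore(flags);
     */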
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 98f6d6b68f5a..f0312d746402 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,63 +570,6 @@ do { \
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
 				unsigned long n)
 {
-#ifndef CONFIG_X86_WP_WORKS_OK
-	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
-			((unsigned long)to) < TASK_SIZE) {
-		/*
-		 * When we are in an atomic section (see
-		 * mm/filemap.c:file_read_actor), return the full
-		 * length to take the slow path.
-		 */
-		if (in_atomic())
-			return n;
-
-		/*
-		 * CPU does not honor the WP bit when writing
-		 * from supervisory mode, and due to preemption or SMP,
-		 * the page tables can change at any time.
-		 * Do it manually.	Manfred <manfred@colorfullife.com>
-		 */
-		while (n) {
-			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
-			unsigned long len = PAGE_SIZE - offset;
-			int retval;
-			struct page *pg;
-			void *maddr;
-
-			if (len > n)
-				len = n;
-
-survive:
-			down_read(&current->mm->mmap_sem);
-			retval = get_user_pages(current, current->mm,
-					(unsigned long)to, 1, 1, 0, &pg, NULL);
-
-			if (retval == -ENOMEM && is_global_init(current)) {
-				up_read(&current->mm->mmap_sem);
-				congestion_wait(BLK_RW_ASYNC, HZ/50);
-				goto survive;
-			}
-
-			if (retval != 1) {
-				up_read(&current->mm->mmap_sem);
-				break;
-			}
-
-			maddr = kmap_atomic(pg);
-			memcpy(maddr + offset, from, len);
-			kunmap_atomic(maddr);
-			set_page_dirty_lock(pg);
-			put_page(pg);
-			up_read(&current->mm->mmap_sem);
-
-			from += len;
-			to += len;
-			n -= len;
-		}
-		return n;
-	}
-#endif
 	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 11a58001b4ce..745d66b843c8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -715,10 +715,7 @@ static void __init test_wp_bit(void)
 
 	if (!boot_cpu_data.wp_works_ok) {
 		printk(KERN_CONT "No.\n");
-#ifdef CONFIG_X86_WP_WORKS_OK
-		panic(
-  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-#endif
+		panic("Linux doesn't support CPUs with broken WP.");
 	} else {
 		printk(KERN_CONT "Ok.\n");
 	}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 60f926cd8b0e..13a6b29e2e5d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 		return;
 
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
+		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
 		else if (!f->flush_end)
 			__flush_tlb_single(f->flush_start);
@@ -337,10 +337,8 @@ static const struct file_operations fops_tlbflush = {
337 337
338static int __cpuinit create_tlb_flushall_shift(void) 338static int __cpuinit create_tlb_flushall_shift(void)
339{ 339{
340 if (cpu_has_invlpg) { 340 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
341 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, 341 arch_debugfs_dir, NULL, &fops_tlbflush);
342 arch_debugfs_dir, NULL, &fops_tlbflush);
343 }
344 return 0; 342 return 0;
345} 343}
346late_initcall(create_tlb_flushall_shift); 344late_initcall(create_tlb_flushall_shift);
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 07611759ce35..b0c30dae9f55 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -31,7 +31,7 @@ config X86_64
 	select MODULES_USE_ELF_RELA
 
 config RWSEM_XCHGADD_ALGORITHM
-	def_bool X86_XADD && 64BIT
+	def_bool 64BIT
 
 config RWSEM_GENERIC_SPINLOCK
 	def_bool !RWSEM_XCHGADD_ALGORITHM
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index fdce49c7aff6..9a6775c9ddca 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -7,7 +7,7 @@ config XEN
 	select PARAVIRT
 	select PARAVIRT_CLOCK
 	depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
-	depends on X86_CMPXCHG && X86_TSC
+	depends on X86_TSC
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the