Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                       |    1
-rw-r--r--  arch/x86/Kconfig.cpu                   |   70
-rw-r--r--  arch/x86/boot/cpucheck.c               |    8
-rw-r--r--  arch/x86/kernel/alternative.c          |   36
-rw-r--r--  arch/x86/kernel/apic_32.c              |   14
-rw-r--r--  arch/x86/kernel/cpu/Makefile           |   21
-rw-r--r--  arch/x86/kernel/cpu/bugs.c             |    6
-rw-r--r--  arch/x86/kernel/cpu/cmpxchg.c          |   72
-rw-r--r--  arch/x86/kernel/cpu/common.c           |   32
-rw-r--r--  arch/x86/kernel/cpu/common_64.c        |   36
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c            |   20
-rw-r--r--  arch/x86/kernel/cpu/feature_names.c    |    3
-rw-r--r--  arch/x86/kernel/cpu/intel.c            |   71
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |  169
-rw-r--r--  arch/x86/kernel/io_apic_32.c           |    6
-rw-r--r--  arch/x86/kernel/io_apic_64.c           |   25
-rw-r--r--  arch/x86/kernel/mpparse.c              |   11
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c       |    2
-rw-r--r--  arch/x86/kernel/setup.c                |   22
-rw-r--r--  arch/x86/kernel/smpboot.c              |   22
-rw-r--r--  arch/x86/kernel/vmi_32.c               |    3
-rw-r--r--  arch/x86/lib/usercopy_32.c             |    7
-rw-r--r--  arch/x86/mm/Makefile                   |    3
-rw-r--r--  arch/x86/mm/pgtable.c                  |    3
24 files changed, 473 insertions(+), 190 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 608a12ff483a..53ab36878736 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,7 +22,6 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_IOREMAP_PROT
-	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_KRETPROBES
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2c518fbc52ec..6156ac25ff8c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -415,3 +415,73 @@ config X86_MINIMUM_CPU_FAMILY
 config X86_DEBUGCTLMSR
 	def_bool y
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
+
+menuconfig PROCESSOR_SELECT
+	default y
+	bool "Supported processor vendors" if EMBEDDED
+	help
+	  This lets you choose what x86 vendor support code your kernel
+	  will include.
+
+config CPU_SUP_INTEL_32
+	default y
+	bool "Support Intel processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Intel processors
+
+config CPU_SUP_INTEL_64
+	default y
+	bool "Support Intel processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for Intel processors
+
+config CPU_SUP_CYRIX_32
+	default y
+	bool "Support Cyrix processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Cyrix processors
+
+config CPU_SUP_AMD_32
+	default y
+	bool "Support AMD processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for AMD processors
+
+config CPU_SUP_AMD_64
+	default y
+	bool "Support AMD processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for AMD processors
+
+config CPU_SUP_CENTAUR_32
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_CENTAUR_64
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_TRANSMETA_32
+	default y
+	bool "Support Transmeta processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Transmeta processors
+
+config CPU_SUP_UMC_32
+	default y
+	bool "Support UMC processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for UMC processors
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 7804389ee005..19b14f7ef9f1 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -46,12 +46,12 @@ static const u32 req_flags[NCAPINTS] =
 {
 	REQUIRED_MASK0,
 	REQUIRED_MASK1,
-	REQUIRED_MASK2,
-	REQUIRED_MASK3,
+	0, /* REQUIRED_MASK2 not implemented in this file */
+	0, /* REQUIRED_MASK3 not implemented in this file */
 	REQUIRED_MASK4,
-	REQUIRED_MASK5,
+	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
-	REQUIRED_MASK7,
+	0, /* REQUIRED_MASK7 not implemented in this file */
 };
 
 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2763cb37b553..65a0c1b48696 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 extern char __vsyscall_0;
 const unsigned char *const *find_nop_table(void)
 {
-	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-		boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return k8_nops;
 }
 
 #else /* CONFIG_X86_64 */
 
-static const struct nop {
-	int cpuid;
-	const unsigned char *const *noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ X86_FEATURE_P4, p6_nops },
-	{ X86_FEATURE_P3, p6_nops },
-	{ -1, NULL }
-};
-
 const unsigned char *const *find_nop_table(void)
 {
-	const unsigned char *const *noptable = intel_nops;
-	int i;
-
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	return noptable;
+	if (boot_cpu_has(X86_FEATURE_K8))
+		return k8_nops;
+	else if (boot_cpu_has(X86_FEATURE_K7))
+		return k7_nops;
+	else if (boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return intel_nops;
 }
 
 #endif /* CONFIG_X86_64 */
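
A note on how the selected table gets used: the alternatives patcher pads code with the longest NOPs the table offers. Below is a minimal sketch of that consumer, modeled on the kernel's add_nops(); treat it as illustrative rather than the exact implementation.

/*
 * Sketch: fill a patch area using the table picked by find_nop_table().
 * noptable[n] holds an n-byte NOP sequence, up to ASM_NOP_MAX bytes.
 */
static void add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}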
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index f93c18f5b79d..8228222ec917 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1753,15 +1753,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
 }
 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 
-static int __init apic_set_verbosity(char *str)
+static int __init apic_set_verbosity(char *arg)
 {
-	if (strcmp("debug", str) == 0)
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp(arg, "debug") == 0)
 		apic_verbosity = APIC_DEBUG;
-	else if (strcmp("verbose", str) == 0)
+	else if (strcmp(arg, "verbose") == 0)
 		apic_verbosity = APIC_VERBOSE;
-	return 1;
+
+	return 0;
 }
-__setup("apic=", apic_set_verbosity);
+early_param("apic", apic_set_verbosity);
 
 static int __init lapic_insert_resource(void)
 {
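
The __setup() to early_param() conversion changes both when the handler runs and its return convention: early parameters are parsed before most of the kernel is up, the argument may be NULL (hence the new !arg check), and 0 means success, whereas __setup() handlers return 1 to mark the option consumed. A hypothetical handler showing the pattern (the "example" name and flag are invented for illustration):

/* Hypothetical early_param() handler, illustrating the conventions. */
static int example_flag __initdata;

static int __init example_setup(char *arg)
{
	if (!arg)		/* early_param handlers may receive NULL */
		return -EINVAL;

	if (strcmp(arg, "on") == 0)
		example_flag = 1;

	return 0;		/* 0 = parsed successfully */
}
early_param("example", example_setup);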
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..a0fc6c144384 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -5,17 +5,18 @@
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
 obj-y			+= proc.o feature_names.o
 
-obj-$(CONFIG_X86_32)	+= common.o bugs.o
+obj-$(CONFIG_X86_32)	+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)	+= common_64.o bugs_64.o
-obj-$(CONFIG_X86_32)	+= amd.o
-obj-$(CONFIG_X86_64)	+= amd_64.o
-obj-$(CONFIG_X86_32)	+= cyrix.o
-obj-$(CONFIG_X86_32)	+= centaur.o
-obj-$(CONFIG_X86_64)	+= centaur_64.o
-obj-$(CONFIG_X86_32)	+= transmeta.o
-obj-$(CONFIG_X86_32)	+= intel.o
-obj-$(CONFIG_X86_64)	+= intel_64.o
-obj-$(CONFIG_X86_32)	+= umc.o
+
+obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
+obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
+obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
+obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
+obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
 obj-$(CONFIG_MTRR)	+= mtrr/
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c9b58a806e85..c8e315f1aa83 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
+	s32 fdiv_bug;
+
 	if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
 		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -74,8 +76,10 @@ static void __init check_fpu(void)
74 "fistpl %0\n\t" 76 "fistpl %0\n\t"
75 "fwait\n\t" 77 "fwait\n\t"
76 "fninit" 78 "fninit"
77 : "=m" (*&boot_cpu_data.fdiv_bug) 79 : "=m" (*&fdiv_bug)
78 : "m" (*&x), "m" (*&y)); 80 : "m" (*&x), "m" (*&y));
81
82 boot_cpu_data.fdiv_bug = fdiv_bug;
79 if (boot_cpu_data.fdiv_bug) 83 if (boot_cpu_data.fdiv_bug)
80 printk("Hmm, FPU with FDIV bug.\n"); 84 printk("Hmm, FPU with FDIV bug.\n");
81} 85}
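
check_fpu() exercises the Pentium FDIV erratum: it divides two constants whose quotient the flawed divider tables get wrong, multiplies back, and subtracts. With y = 3145727.0 as declared above and the classic companion constant x = 4195835.0 (the x declaration sits outside this hunk, so its value here is an assumption), a correct FPU yields exactly zero while an affected Pentium leaves a residue of about 256. The same arithmetic as a standalone sketch:

#include <stdio.h>

int main(void)
{
	/* Classic FDIV-bug probe; volatile keeps the compiler from
	 * folding the division at build time. */
	volatile double x = 4195835.0, y = 3145727.0;
	double residue = x - (x / y) * y;	/* exactly 0.0 on a correct FPU */

	printf("residue: %f (%s)\n", residue,
	       residue == 0.0 ? "FPU ok" : "FDIV bug");
	return 0;
}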
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c
new file mode 100644
index 000000000000..2056ccf572cc
--- /dev/null
+++ b/arch/x86/kernel/cpu/cmpxchg.c
@@ -0,0 +1,72 @@
+/*
+ * cmpxchg*() fallbacks for CPUs that do not support these instructions
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+
+#ifndef CONFIG_X86_CMPXCHG
+unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
+{
+	u8 prev;
+	unsigned long flags;
+
+	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
+	local_irq_save(flags);
+	prev = *(u8 *)ptr;
+	if (prev == old)
+		*(u8 *)ptr = new;
+	local_irq_restore(flags);
+	return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u8);
+
+unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
+{
+	u16 prev;
+	unsigned long flags;
+
+	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
+	local_irq_save(flags);
+	prev = *(u16 *)ptr;
+	if (prev == old)
+		*(u16 *)ptr = new;
+	local_irq_restore(flags);
+	return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u16);
+
+unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
+{
+	u32 prev;
+	unsigned long flags;
+
+	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
+	local_irq_save(flags);
+	prev = *(u32 *)ptr;
+	if (prev == old)
+		*(u32 *)ptr = new;
+	local_irq_restore(flags);
+	return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u32);
+#endif
+
+#ifndef CONFIG_X86_CMPXCHG64
+unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
+{
+	u64 prev;
+	unsigned long flags;
+
+	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
+	local_irq_save(flags);
+	prev = *(u64 *)ptr;
+	if (prev == old)
+		*(u64 *)ptr = new;
+	local_irq_restore(flags);
+	return prev;
+}
+EXPORT_SYMBOL(cmpxchg_486_u64);
+#endif
+
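
These fallbacks emulate compare-and-exchange by masking interrupts, which is atomic only with respect to the local CPU; that is why every comment marks them unsuitable for SMP. A hypothetical caller illustrating the contract (the old value is returned, and the store happens only if it matched):

/* Hypothetical caller; names invented for illustration. */
static u32 state;

static int try_advance(u32 expected)
{
	u32 old = cmpxchg_386_u32(&state, expected, expected + 1);

	return old == expected;	/* nonzero iff the store happened */
}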
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..0785b3c8d043 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -341,6 +342,35 @@ static void __init early_cpu_detect(void)
 	early_get_cap(c);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		}
 
 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
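
The probe works by executing the 0f 1f c0 encoding directly: if the CPU raises #UD, the exception-table fixup at label 3 zeroes the register, so the signature survives only when NOPL actually executed. The same first-principles test can be reproduced in user space with a SIGILL handler; this standalone sketch is an analogue, not kernel code:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_env;

static void on_sigill(int sig)
{
	siglongjmp(nopl_env, 1);	/* #UD arrives as SIGILL */
}

int main(void)
{
	signal(SIGILL, on_sigill);
	if (sigsetjmp(nopl_env, 1) == 0) {
		/* Same encoding the kernel probes: nopl %eax */
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		puts("nopl executes");
	} else {
		puts("nopl raises #UD");
	}
	return 0;
}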
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 6f9b8924bdc0..af569a964e74 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}
 
+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 3fd7a67bb06a..db5868cd2443 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -116,7 +116,7 @@ static void __cpuinit set_cx86_reorder(void)
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
 	/* Load/Store Serialize to mem access disable (=reorder it) */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
 	/* set load/store serialize from 1GB to 4GB */
 	ccr3 |= 0xe0;
 	setCx86(CX86_CCR3, ccr3);
@@ -127,11 +127,11 @@ static void __cpuinit set_cx86_memwb(void)
 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
 	/* CCR2 bit 2: unlock NW bit */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
 	/* set 'Not Write-through' */
 	write_cr0(read_cr0() | X86_CR0_NW);
 	/* CCR2 bit 2: lock NW bit and set WT1 */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
 }
 
 static void __cpuinit set_cx86_inc(void)
@@ -144,10 +144,10 @@ static void __cpuinit set_cx86_inc(void)
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 	/* PCR1 -- Performance Control */
 	/* Incrementor on, whatever that is */
-	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
+	setCx86_old(CX86_PCR1, getCx86_old(CX86_PCR1) | 0x02);
 	/* PCR0 -- Performance Control */
 	/* Incrementor Margin 10 */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
+	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) | 0x04);
 	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
 }
 
@@ -162,14 +162,14 @@ static void __cpuinit geode_configure(void)
 	local_irq_save(flags);
 
 	/* Suspend on halt power saving and enable #SUSP pin */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
 
 
 	/* FPU fast, DTE cache, Mem bypass */
-	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
 
 	set_cx86_memwb();
@@ -286,7 +286,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		/* GXm supports extended cpuid levels 'ala' AMD */
 		if (c->cpuid_level == 2) {
 			/* Enable cxMMX extensions (GX1 Datasheet 54) */
-			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
 
 			/*
 			 * GXm : 0x30 ... 0x5f GXm datasheet 51
@@ -309,7 +309,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		if (dir1 > 7) {
 			dir0_msn++;  /* M II */
 			/* Enable MMX extensions (App note 108) */
-			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
 		} else {
 			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
 		}
@@ -424,7 +424,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 		local_irq_save(flags);
 		ccr3 = getCx86(CX86_CCR3);
 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
-		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
+		setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);  /* enable cpuid  */
 		setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
 		local_irq_restore(flags);
 	}
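
Background on the accessors being renamed here: Cyrix configuration registers (CCRs and PCRs) are reached through an index/data port pair, 0x22 to select the register and 0x23 to move the byte. A sketch of that access cycle, under the assumption that the macros follow the classic scheme; the kernel's getCx86()/setCx86() and the *_old variants may differ in detail:

/* Assumed classic Cyrix access cycle; illustrative only. */
static unsigned char cx86_read(unsigned char reg)
{
	outb(reg, 0x22);	/* select configuration register */
	return inb(0x23);	/* fetch its value */
}

static void cx86_write(unsigned char reg, unsigned char data)
{
	outb(reg, 0x22);	/* select configuration register */
	outb(data, 0x23);	/* store the new value */
}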
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index 0bf4d37a0483..b96b69545fbf 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 	/* Intel-defined (#2) */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f8..77618c717d76 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -23,13 +23,6 @@
 #include <mach_apic.h>
 #endif
 
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
@@ -314,69 +307,5 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 
 cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
-
 /* arch_initcall(intel_cpu_init); */
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b0a10b002f1..3f46afbb1cf1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1,8 +1,8 @@
 /*
  * Routines to identify caches on Intel CPU.
  *
  * Changes:
  * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
  * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/pci.h>
 
 #include <asm/processor.h>
 #include <asm/smp.h>
@@ -130,9 +131,18 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
+	unsigned long can_disable;
 	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
+#ifdef CONFIG_PCI
+static struct pci_device_id k8_nb_id[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
+	{}
+};
+#endif
+
 unsigned short			num_cache_leaves;
 
 /* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = {
 static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
 static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
 
-static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
-		       union _cpuid4_leaf_ebx *ebx,
-		       union _cpuid4_leaf_ecx *ecx)
+static void __cpuinit
+amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+	   union _cpuid4_leaf_ebx *ebx,
+	   union _cpuid4_leaf_ecx *ecx)
 {
 	unsigned dummy;
 	unsigned line_size, lines_per_tag, assoc, size_in_kb;
@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		(ebx->split.ways_of_associativity + 1) - 1;
 }
 
-static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+{
+	if (index < 3)
+		return;
+	this_leaf->can_disable = 1;
+}
+
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned edx;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-	else
-		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+		if (boot_cpu_data.x86 >= 0x10)
+			amd_check_l3_disable(index, this_leaf);
+	} else {
+		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+	}
+
 	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */
 
 	this_leaf->eax = eax;
 	this_leaf->ebx = ebx;
 	this_leaf->ecx = ecx;
 	this_leaf->size = (ecx.split.number_of_sets + 1) *
 		(ebx.split.coherency_line_size + 1) *
 		(ebx.split.physical_line_partition + 1) *
 		(ebx.split.ways_of_associativity + 1);
 	return 0;
 }
 
@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
 }
@@ -572,7 +596,7 @@ struct _index_kobject {
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
 	}
 }
 
+#define to_object(k)	container_of(k, struct _index_kobject, kobj)
+#define to_attr(a)	container_of(a, struct _cache_attr, attr)
+
+#ifdef CONFIG_PCI
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	struct pci_dev *dev = NULL;
+	int i;
+
+	for (i = 0; i <= node; i++) {
+		do {
+			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+			if (!dev)
+				break;
+		} while (!pci_match_id(&k8_nb_id[0], dev));
+		if (!dev)
+			break;
+	}
+	return dev;
+}
+#else
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	return NULL;
+}
+#endif
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = NULL;
+	ssize_t ret = 0;
+	int i;
+
+	if (!this_leaf->can_disable)
+		return sprintf(buf, "Feature not enabled\n");
+
+	dev = get_k8_northbridge(node);
+	if (!dev) {
+		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 2; i++) {
+		unsigned int reg;
+
+		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
+
+		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
+		ret += sprintf(buf, "%sReads:  %s\tNew Entries: %s\n",
+			buf,
+			reg & 0x80000000 ? "Disabled" : "Allowed",
+			reg & 0x40000000 ? "Disabled" : "Allowed");
+		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
+			buf, (reg & 0x30000) >> 16, reg & 0xfff);
+	}
+	return ret;
+}
+
+static ssize_t
+store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
+		    size_t count)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = NULL;
+	unsigned int ret, index, val;
+
+	if (!this_leaf->can_disable)
+		return 0;
+
+	if (strlen(buf) > 15)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%x %x", &index, &val);
+	if (ret != 2)
+		return -EINVAL;
+	if (index > 1)
+		return -EINVAL;
+
+	val |= 0xc0000000;
+	dev = get_k8_northbridge(node);
+	if (!dev) {
+		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
+		return -EINVAL;
+	}
+
+	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+	wbinvd();
+	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+
+	return 1;
+}
+
 struct _cache_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct _cpuid4_info *, char *);
@@ -657,6 +774,8 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
+static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
+
 static struct attribute * default_attrs[] = {
 	&type.attr,
 	&level.attr,
@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = {
 	&size.attr,
 	&shared_cpu_map.attr,
 	&shared_cpu_list.attr,
+	&cache_disable.attr,
 	NULL
 };
 
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	ret = fattr->show ?
 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
 			buf) :
 		0;
 	return ret;
 }
 
 static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		     const char * buf, size_t count)
 {
-	return 0;
+	struct _cache_attr *fattr = to_attr(attr);
+	struct _index_kobject *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->store ?
+		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+			buf, count) :
+		0;
+	return ret;
 }
 
 static struct sysfs_ops sysfs_ops = {
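
store_cache_disable() above accepts two hex fields, an entry (0 or 1) and a value whose low bits pick the cache index to disable, and writes them into the northbridge register at 0x1BC + entry * 4. A hypothetical user-space exercise of the new attribute; the sysfs path follows the cpuX/cache/indexY layout this driver creates, with index3 assumed to be the L3 leaf:

#include <stdio.h>

int main(void)
{
	/* Hypothetical usage of the cache_disable attribute. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index3/cache_disable", "w");

	if (!f)
		return 1;
	fprintf(f, "0 abc\n");	/* entry 0, disable cache index 0xabc */
	return fclose(f) != 0;
}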
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 39e063a9a28a..923be1f4a6d0 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -57,7 +57,7 @@ atomic_t irq_mis_count;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 int timer_through_8259 __initdata;
 
@@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq)
 	return vector;
 }
 
-void setup_vector_irq(int cpu)
-{
-}
-
 static struct irq_chip ioapic_chip;
 
 #define IOAPIC_AUTO	-1
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b9950dae59b7..e63282e78864 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -103,7 +103,7 @@ int timer_through_8259 __initdata;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
+static DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -770,6 +770,19 @@ static int pin_2_irq(int idx, int apic, int pin)
 	return irq;
 }
 
+void lock_vector_lock(void)
+{
+	/* Used so that the online set of cpus does not change
+	 * during assign_irq_vector.
+	 */
+	spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+	spin_unlock(&vector_lock);
+}
+
 static int __assign_irq_vector(int irq, cpumask_t mask)
 {
 	/*
@@ -875,7 +888,7 @@ static void __clear_irq_vector(int irq)
 	cpus_clear(cfg->domain);
 }
 
-static void __setup_vector_irq(int cpu)
+void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
 	/* This function must be called with vector_lock held */
@@ -898,14 +911,6 @@ static void __setup_vector_irq(int cpu)
 	}
 }
 
-void setup_vector_irq(int cpu)
-{
-	spin_lock(&vector_lock);
-	__setup_vector_irq(smp_processor_id());
-	spin_unlock(&vector_lock);
-}
-
-
 static struct irq_chip ioapic_chip;
 #ifdef CONFIG_INTR_REMAP
 static struct irq_chip ir_ioapic_chip;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e362c6ab4d35..e5d23675bb7c 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
 	if (x86_quirks->mpc_oem_bus_info)
 		x86_quirks->mpc_oem_bus_info(m, str);
 	else
-		printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str);
+		apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
 
 #if MAX_MP_BUSSES < 256
 	if (m->mpc_busid >= MAX_MP_BUSSES) {
@@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
 
 static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
 {
-	printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
 		m->mpc_irqtype, m->mpc_irqflag & 3,
 		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
@@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
 
 static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
 {
-	printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
 		mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
 		(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
@@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
 
 static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
 {
-	printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x,"
+	apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
 		m->mpc_irqtype, m->mpc_irqflag & 3,
 		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
@@ -697,7 +697,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 	unsigned int *bp = phys_to_virt(base);
 	struct intel_mp_floating *mpf;
 
-	printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length);
+	apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
+			bp, length);
 	BUILD_BUG_ON(sizeof(*mpf) != 16);
 
 	while (length > 0) {
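
These conversions make MP-table chatter respect apic_verbosity, which the reworked "apic=" early parameter (see the apic_32.c hunk above) now controls. The gating idiom behind apic_printk() is essentially a level check in front of printk(); a sketch, which may differ in detail from the kernel's actual macro:

/* Sketch of the verbosity gate; illustrative. */
#define apic_printk(v, s, a...)			\
do {						\
	if ((v) <= apic_verbosity)		\
		printk(s, ##a);			\
} while (0)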
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b67a4b1d4eae..02d19328525d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
  * Function for kdump case. Get the tce tables from first kernel
  * by reading the contents of the base address register of calgary iommu
  */
-static void get_tce_space_from_tar()
+static void get_tce_space_from_tar(void)
 {
 	int bus;
 	void __iomem *target;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 60e8de19644b..59f07e14d083 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -604,6 +604,14 @@ void __init setup_arch(char **cmdline_p)
 	early_cpu_init();
 	early_ioremap_init();
 
+#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+	/*
+	 * Must be before kernel pagetables are setup
+	 * or fixmap area is touched.
+	 */
+	vmi_init();
+#endif
+
 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
 	screen_info = boot_params.screen_info;
 	edid_info = boot_params.edid_info;
@@ -819,14 +827,6 @@ void __init setup_arch(char **cmdline_p)
 	kvmclock_init();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be after max_low_pfn is determined, and before kernel
-	 * pagetables are setup.
-	 */
-	vmi_init();
-#endif
-
 	paravirt_pagetable_setup_start(swapper_pg_dir);
 	paging_init();
 	paravirt_pagetable_setup_done(swapper_pg_dir);
@@ -863,12 +863,6 @@ void __init setup_arch(char **cmdline_p)
 	init_apic_mappings();
 	ioapic_init_mappings();
 
-#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
-	if (def_to_bigsmp)
-		printk(KERN_WARNING "More than 8 CPUs detected and "
-			"CONFIG_X86_PC cannot handle it.\nUse "
-			"CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-#endif
 	kvm_guest_init();
 
 	e820_reserve_resources();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 626618bf2f81..04f78ab51b45 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -327,12 +327,16 @@ static void __cpuinit start_secondary(void *unused)
 	 * for which cpus receive the IPI. Holding this
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
+	 *
+	 * We need to hold vector_lock so that the set of online cpus
+	 * does not change while we are assigning vectors to cpus. Holding
+	 * this lock ensures we don't half assign or remove an irq from a cpu.
 	 */
 	ipi_call_lock_irq();
-#ifdef CONFIG_X86_IO_APIC
-	setup_vector_irq(smp_processor_id());
-#endif
+	lock_vector_lock();
+	__setup_vector_irq(smp_processor_id());
 	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_vector_lock();
 	ipi_call_unlock_irq();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
@@ -983,7 +987,17 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 	flush_tlb_all();
 	low_mappings = 1;
 
+#ifdef CONFIG_X86_PC
+	if (def_to_bigsmp && apicid > 8) {
+		printk(KERN_WARNING
+			"More than 8 CPUs detected - skipping them.\n"
+			"Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+		err = -1;
+	} else
+		err = do_boot_cpu(apicid, cpu);
+#else
 	err = do_boot_cpu(apicid, cpu);
+#endif
 
 	zap_low_mappings();
 	low_mappings = 0;
@@ -1336,7 +1350,9 @@ int __cpu_disable(void)
 	remove_siblinginfo(cpu);
 
 	/* It's now safe to remove this processor from the online map */
+	lock_vector_lock();
 	remove_cpu_from_maps(cpu);
+	unlock_vector_lock();
 	fixup_irqs(cpu_online_map);
 	return 0;
 }
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 45c27c4e2a6e..61531d5c9507 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -37,6 +37,7 @@
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
 #include <asm/kmap_types.h>
+#include <asm/setup.h>
 
 /* Convenient for calling VMI functions indirectly in the ROM */
 typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -683,7 +684,7 @@ void vmi_bringup(void)
 {
 	/* We must establish the lowmem mapping for MMU ops to work */
 	if (vmi_ops.set_linear_mapping)
-		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
+		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
 }
 
 /*
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e60944971a..9e68075544f6 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -14,6 +14,13 @@
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15#include <asm/mmx.h> 15#include <asm/mmx.h>
16 16
17#ifdef CONFIG_X86_INTEL_USERCOPY
18/*
19 * Alignment at which movsl is preferred for bulk memory copies.
20 */
21struct movsl_mask movsl_mask __read_mostly;
22#endif
23
17static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) 24static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
18{ 25{
19#ifdef CONFIG_X86_INTEL_USERCOPY 26#ifdef CONFIG_X86_INTEL_USERCOPY
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2977ea37791f..dfb932dcf136 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,6 @@
 obj-y	:= init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	   pat.o pgtable.o
+	   pat.o pgtable.o gup.o
 
-obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
 obj-$(CONFIG_X86_32)		+= pgtable_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 557b2abceef8..d50302774fe2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
207 unsigned long addr; 207 unsigned long addr;
208 int i; 208 int i;
209 209
210 if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
211 return;
212
210 pud = pud_offset(pgd, 0); 213 pud = pud_offset(pgd, 0);
211 214
212 for (addr = i = 0; i < PREALLOCATED_PMDS; 215 for (addr = i = 0; i < PREALLOCATED_PMDS;