Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |    2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   23
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                  |    5
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  179
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                   |    3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |   25
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c      |    2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c   |   12
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                 |    6
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |   39
-rw-r--r--  arch/x86/kernel/cpu/mcheck/k7.c             |   25
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.h            |    2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c         |    4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c         |   45
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c     |   21
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c             |   35
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p5.c             |    2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p6.c             |   23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/winchip.c        |    2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/amd.c              |    2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c            |    3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c          |   27
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c               |   23
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c             |  147
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.h             |    9
-rw-r--r--  arch/x86/kernel/cpu/mtrr/state.c            |    3
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c      |    1
-rw-r--r--  arch/x86/kernel/cpu/proc.c                  |    2
28 files changed, 401 insertions(+), 271 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 3e91d3ee26ec..238468ae1993 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                                     &regs[CR_ECX], &regs[CR_EDX]);
 
                 if (regs[cb->reg] & (1 << cb->bit))
-                        set_bit(cb->feature, c->x86_capability);
+                        set_cpu_cap(c, cb->feature);
         }
 }
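
Note: set_cpu_cap() is the accessor this hunk switches to. A minimal sketch of the idea, hedged; the real definition is a macro in include/asm-x86/cpufeature.h:

        /* illustrative sketch only; see cpufeature.h for the real macro */
        #define set_cpu_cap(c, bit)     set_bit(bit, (c)->x86_capability)

Funneling all capability updates through one accessor gives later patches a single place to change how x86_capability is stored.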
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1ff88c7f45cf..06fa159232fd 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_broken(void)
 
 int force_mwait __cpuinitdata;
 
+void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+        if (cpuid_eax(0x80000000) >= 0x80000007) {
+                c->x86_power = cpuid_edx(0x80000007);
+                if (c->x86_power & (1<<8))
+                        set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+        }
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
         u32 l, h;
@@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
         }
 #endif
 
+        early_init_amd(c);
+
         /*
          * FIXME: We should handle the K5 here. Set up the write
          * range and also turn on MSR 83 bits 4 and 31 (write alloc,
@@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
         }
 
-        if (cpuid_eax(0x80000000) >= 0x80000007) {
-                c->x86_power = cpuid_edx(0x80000007);
-                if (c->x86_power & (1<<8))
-                        set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-        }
-
 #ifdef CONFIG_X86_HT
         /*
          * On a AMD multi core setup the lower bits of the APIC id
@@ -295,12 +300,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                 local_apic_timer_disabled = 1;
 #endif
 
-        if (c->x86 == 0x10 && !force_mwait)
-                clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
-
         /* K6s reports MCEs but don't actually have all the MSRs */
         if (c->x86 < 6)
                 clear_bit(X86_FEATURE_MCE, c->x86_capability);
+
+        if (cpu_has_xmm)
+                set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
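
Note: early_init_amd() tests CPUID leaf 0x80000007 (advanced power management), where EDX bit 8 is AMD's invariant-TSC flag. A minimal user-space sketch of the same probe, assuming a GCC-style compiler with <cpuid.h>:

        #include <stdio.h>
        #include <cpuid.h>      /* __get_cpuid() */

        int main(void)
        {
                unsigned int eax, ebx, ecx, edx;

                /* __get_cpuid() returns 0 if the leaf is unsupported */
                if (__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                        printf("constant TSC: %s\n",
                               (edx & (1 << 8)) ? "yes" : "no");
                return 0;
        }

Hoisting the check into early_init_amd() makes CONSTANT_TSC visible during early boot, before init_amd() runs.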
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 205fd5ba57f7..9b95edcfc6ae 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -11,6 +11,7 @@
 #include <linux/utsname.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
+#include <asm/processor-flags.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
 #include <asm/paravirt.h>
@@ -35,7 +36,7 @@ __setup("mca-pentium", mca_pentium);
 static int __init no_387(char *s)
 {
         boot_cpu_data.hard_math = 0;
-        write_cr0(0xE | read_cr0());
+        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
         return 1;
 }
 
@@ -153,7 +154,7 @@ static void __init check_config(void)
          * If we configured ourselves for a TSC, we'd better have one!
          */
 #ifdef CONFIG_X86_TSC
-        if (!cpu_has_tsc && !tsc_disable)
+        if (!cpu_has_tsc)
                 panic("Kernel compiled for Pentium+, requires TSC feature!");
 #endif
 
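
Note: the no_387 change is behavior-preserving arithmetic: X86_CR0_MP is bit 1 (0x2), X86_CR0_EM is bit 2 (0x4) and X86_CR0_TS is bit 3 (0x8), so X86_CR0_TS | X86_CR0_EM | X86_CR0_MP == 0xE, exactly the old magic constant. The rewrite only makes explicit which CR0 bits are being set.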
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2fcf2051bdb..db28aa9e2f69 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -22,43 +22,48 @@
 #include "cpu.h"
 
 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
-        [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
-        [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
-        [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
-        [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
+        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
         /*
          * Segments used for calling PnP BIOS have byte granularity.
          * They code segments and data segments have fixed 64k limits,
          * the transfer segment sizes are set at run time.
          */
-        [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-        [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
-        [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
-        [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
-        [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
+        /* 32-bit code */
+        [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+        /* 16-bit code */
+        [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+        /* 16-bit data */
+        [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+        /* 16-bit data */
+        [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+        /* 16-bit data */
+        [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
         /*
          * The APM segments have byte granularity and their bases
          * are set at run time.  All have 64k limits.
          */
-        [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
+        /* 32-bit code */
+        [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
         /* 16-bit code */
-        [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
-        [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
+        [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+        /* data */
+        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
-        [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
-        [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
+        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
+        [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
 static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_fxsr __cpuinitdata;
 static int disable_x86_serial_nr __cpuinitdata = 1;
-static int disable_x86_sep __cpuinitdata;
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
-extern int disable_pse;
-
 static void __cpuinit default_init(struct cpuinfo_x86 * c)
 {
         /* Not much we can do here... */
@@ -207,16 +212,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 
 static int __init x86_fxsr_setup(char * s)
 {
-        /* Tell all the other CPUs to not use it... */
-        disable_x86_fxsr = 1;
-
-        /*
-         * ... and clear the bits early in the boot_cpu_data
-         * so that the bootup process doesn't try to do this
-         * either.
-         */
-        clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
-        clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
+        setup_clear_cpu_cap(X86_FEATURE_FXSR);
+        setup_clear_cpu_cap(X86_FEATURE_XMM);
         return 1;
 }
 __setup("nofxsr", x86_fxsr_setup);
@@ -224,7 +221,7 @@ __setup("nofxsr", x86_fxsr_setup);
 
 static int __init x86_sep_setup(char * s)
 {
-        disable_x86_sep = 1;
+        setup_clear_cpu_cap(X86_FEATURE_SEP);
         return 1;
 }
 __setup("nosep", x86_sep_setup);
@@ -281,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
                 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
         }
 }
+static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+{
+        u32 tfms, xlvl;
+        int ebx;
+
+        memset(&c->x86_capability, 0, sizeof c->x86_capability);
+        if (have_cpuid_p()) {
+                /* Intel-defined flags: level 0x00000001 */
+                if (c->cpuid_level >= 0x00000001) {
+                        u32 capability, excap;
+                        cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+                        c->x86_capability[0] = capability;
+                        c->x86_capability[4] = excap;
+                }
+
+                /* AMD-defined flags: level 0x80000001 */
+                xlvl = cpuid_eax(0x80000000);
+                if ((xlvl & 0xffff0000) == 0x80000000) {
+                        if (xlvl >= 0x80000001) {
+                                c->x86_capability[1] = cpuid_edx(0x80000001);
+                                c->x86_capability[6] = cpuid_ecx(0x80000001);
+                        }
+                }
+
+        }
+
+}
 
 /* Do minimum CPU detection early.
    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
@@ -300,6 +324,17 @@ static void __init early_cpu_detect(void)
         cpu_detect(c);
 
         get_cpu_vendor(c, 1);
+
+        switch (c->x86_vendor) {
+        case X86_VENDOR_AMD:
+                early_init_amd(c);
+                break;
+        case X86_VENDOR_INTEL:
+                early_init_intel(c);
+                break;
+        }
+
+        early_get_cap(c);
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
@@ -357,8 +392,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
                 init_scattered_cpuid_features(c);
         }
 
-        early_intel_workaround(c);
-
 #ifdef CONFIG_X86_HT
         c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
@@ -392,7 +425,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
         int i;
 
@@ -418,20 +451,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
         generic_identify(c);
 
-        printk(KERN_DEBUG "CPU: After generic identify, caps:");
-        for (i = 0; i < NCAPINTS; i++)
-                printk(" %08lx", c->x86_capability[i]);
-        printk("\n");
-
-        if (this_cpu->c_identify) {
+        if (this_cpu->c_identify)
                 this_cpu->c_identify(c);
 
-                printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-                for (i = 0; i < NCAPINTS; i++)
-                        printk(" %08lx", c->x86_capability[i]);
-                printk("\n");
-        }
-
         /*
          * Vendor-specific initialization.  In this section we
          * canonicalize the feature flags, meaning if there are
@@ -453,23 +475,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
          * we do "generic changes."
          */
 
-        /* TSC disabled? */
-        if ( tsc_disable )
-                clear_bit(X86_FEATURE_TSC, c->x86_capability);
-
-        /* FXSR disabled? */
-        if (disable_x86_fxsr) {
-                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-                clear_bit(X86_FEATURE_XMM, c->x86_capability);
-        }
-
-        /* SEP disabled? */
-        if (disable_x86_sep)
-                clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
-        if (disable_pse)
-                clear_bit(X86_FEATURE_PSE, c->x86_capability);
-
         /* If the model name is still unset, do table lookup. */
         if ( !c->x86_model_id[0] ) {
                 char *p;
@@ -482,13 +487,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                        c->x86, c->x86_model);
         }
 
-        /* Now the feature flags better reflect actual CPU features! */
-
-        printk(KERN_DEBUG "CPU: After all inits, caps:");
-        for (i = 0; i < NCAPINTS; i++)
-                printk(" %08lx", c->x86_capability[i]);
-        printk("\n");
-
         /*
          * On SMP, boot_cpu_data holds the common feature set between
          * all CPUs; so make sure that we indicate which features are
@@ -501,8 +499,14 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
         }
 
+        /* Clear all flags overriden by options */
+        for (i = 0; i < NCAPINTS; i++)
+                c->x86_capability[i] ^= cleared_cpu_caps[i];
+
         /* Init Machine Check Exception if available. */
         mcheck_init(c);
+
+        select_idle_routine(c);
 }
 
 void __init identify_boot_cpu(void)
@@ -510,7 +514,6 @@ void __init identify_boot_cpu(void)
         identify_cpu(&boot_cpu_data);
         sysenter_setup();
         enable_sep_cpu();
-        mtrr_bp_init();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
@@ -567,6 +570,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 }
 #endif
 
+static __init int setup_noclflush(char *arg)
+{
+        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+        return 1;
+}
+__setup("noclflush", setup_noclflush);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
         char *vendor = NULL;
@@ -590,6 +600,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
         printk("\n");
 }
 
+static __init int setup_disablecpuid(char *arg)
+{
+        int bit;
+        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+                setup_clear_cpu_cap(bit);
+        else
+                return 0;
+        return 1;
+}
+__setup("clearcpuid=", setup_disablecpuid);
+
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
@@ -620,21 +641,13 @@ void __init early_cpu_init(void)
         nexgen_init_cpu();
         umc_init_cpu();
         early_cpu_detect();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        /* pse is not compatible with on-the-fly unmapping,
-         * disable it even if the cpus claim to support it.
-         */
-        clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-        disable_pse = 1;
-#endif
 }
 
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
         memset(regs, 0, sizeof(struct pt_regs));
-        regs->xfs = __KERNEL_PERCPU;
+        regs->fs = __KERNEL_PERCPU;
         return regs;
 }
 
@@ -642,7 +655,7 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
  * it's on the real one. */
 void switch_to_new_gdt(void)
 {
-        struct Xgt_desc_struct gdt_descr;
+        struct desc_ptr gdt_descr;
 
         gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
         gdt_descr.size = GDT_SIZE - 1;
@@ -672,12 +685,6 @@ void __cpuinit cpu_init(void)
 
         if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-        if (tsc_disable && cpu_has_tsc) {
-                printk(KERN_NOTICE "Disabling TSC...\n");
-                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-                set_in_cr4(X86_CR4_TSD);
-        }
 
         load_idt(&idt_descr);
         switch_to_new_gdt();
@@ -691,7 +698,7 @@ void __cpuinit cpu_init(void)
                 BUG();
         enter_lazy_tlb(&init_mm, curr);
 
-        load_esp0(t, thread);
+        load_sp0(t, thread);
         set_tss_desc(cpu,t);
         load_TR_desc();
         load_LDT(&init_mm.context);
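
Note: the boot options added above (noclflush, clearcpuid=, plus the reworked nofxsr and nosep) all go through setup_clear_cpu_cap(), which pairs with the new cleared_cpu_caps[] array that identify_cpu() now XORs out of every CPU's flags. A sketch of that helper, assuming it mirrors the pattern visible here (the real macro lives in cpufeature.h):

        /* sketch: clear the bit now, and record it so identify_cpu()
         * also clears it on CPUs brought up later */
        #define setup_clear_cpu_cap(bit) do {                           \
                clear_bit(bit, boot_cpu_data.x86_capability);           \
                set_bit(bit, (unsigned long *)cleared_cpu_caps);        \
        } while (0)

Because identify_cpu() applies the override with XOR rather than AND-NOT, it assumes the recorded bit was actually set; clearing a feature the CPU never advertised would toggle it on, so these options only make sense for features the hardware reports.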
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2f6432cef6ff..ad6527a5beb1 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -24,5 +24,6 @@ extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
-extern void early_intel_workaround(struct cpuinfo_x86 *c);
+extern void early_init_intel(struct cpuinfo_x86 *c);
+extern void early_init_amd(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index fea0af0476b9..a962dcb9c408 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -67,7 +67,8 @@ struct acpi_cpufreq_data {
         unsigned int cpu_feature;
 };
 
-static struct acpi_cpufreq_data *drv_data[NR_CPUS];
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
@@ -218,14 +219,14 @@ static u32 get_cur_val(cpumask_t mask)
         if (unlikely(cpus_empty(mask)))
                 return 0;
 
-        switch (drv_data[first_cpu(mask)]->cpu_feature) {
+        switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
                 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                 break;
         case SYSTEM_IO_CAPABLE:
                 cmd.type = SYSTEM_IO_CAPABLE;
-                perf = drv_data[first_cpu(mask)]->acpi_data;
+                perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
                 cmd.addr.io.port = perf->control_register.address;
                 cmd.addr.io.bit_width = perf->control_register.bit_width;
                 break;
@@ -325,7 +326,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 #endif
 
-        retval = drv_data[cpu]->max_freq * perf_percent / 100;
+        retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
 
         put_cpu();
         set_cpus_allowed(current, saved_mask);
@@ -336,7 +337,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-        struct acpi_cpufreq_data *data = drv_data[cpu];
+        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
         unsigned int freq;
 
         dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
@@ -370,7 +371,7 @@ static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq, unsigned int relation)
 {
-        struct acpi_cpufreq_data *data = drv_data[policy->cpu];
+        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
         struct acpi_processor_performance *perf;
         struct cpufreq_freqs freqs;
         cpumask_t online_policy_cpus;
@@ -466,7 +467,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = drv_data[policy->cpu];
+        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 
         dprintk("acpi_cpufreq_verify\n");
 
@@ -570,7 +571,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 return -ENOMEM;
 
         data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
-        drv_data[cpu] = data;
+        per_cpu(drv_data, cpu) = data;
 
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -714,20 +715,20 @@ err_unreg:
         acpi_processor_unregister_performance(perf, cpu);
 err_free:
         kfree(data);
-        drv_data[cpu] = NULL;
+        per_cpu(drv_data, cpu) = NULL;
 
         return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = drv_data[policy->cpu];
+        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 
         dprintk("acpi_cpufreq_cpu_exit\n");
 
         if (data) {
                 cpufreq_frequency_table_put_attr(policy->cpu);
-                drv_data[policy->cpu] = NULL;
+                per_cpu(drv_data, policy->cpu) = NULL;
                 acpi_processor_unregister_performance(data->acpi_data,
                                                       policy->cpu);
                 kfree(data);
@@ -738,7 +739,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = drv_data[policy->cpu];
+        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 
         dprintk("acpi_cpufreq_resume\n");
 
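
Note: the drv_data conversion above is the standard NR_CPUS-array to per-CPU-variable migration (powernow-k8.c below gets the same treatment). The pattern in miniature:

        /* before: one pointer slot per possible CPU index, sized at compile time */
        static struct acpi_cpufreq_data *drv_data[NR_CPUS];
        drv_data[cpu] = data;

        /* after: a per-CPU variable, reached through the per_cpu() accessor */
        static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
        per_cpu(drv_data, cpu) = data;

Besides dropping the compile-time NR_CPUS bound, this places each CPU's slot in that CPU's own per-CPU area instead of packing all slots into one shared array.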
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 749d00cb2ebd..06fcce516d51 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -694,7 +694,7 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
         if ( acpi_bus_get_device(obj_handle, &d) ) {
                 return 0;
         }
-        *return_value = (void *)acpi_driver_data(d);
+        *return_value = acpi_driver_data(d);
         return 1;
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 99e1ef9939be..a0522735dd9d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -52,7 +52,7 @@
 /* serialize freq changes  */
 static DEFINE_MUTEX(fidvid_mutex);
 
-static struct powernow_k8_data *powernow_data[NR_CPUS];
+static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static int cpu_family = CPU_OPTERON;
 
@@ -1018,7 +1018,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
         cpumask_t oldmask = CPU_MASK_ALL;
-        struct powernow_k8_data *data = powernow_data[pol->cpu];
+        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
         u32 checkfid;
         u32 checkvid;
         unsigned int newstate;
@@ -1094,7 +1094,7 @@ err_out:
 /* Driver entry point to verify the policy and range of frequencies */
 static int powernowk8_verify(struct cpufreq_policy *pol)
 {
-        struct powernow_k8_data *data = powernow_data[pol->cpu];
+        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 
         if (!data)
                 return -EINVAL;
@@ -1202,7 +1202,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
                 data->currfid, data->currvid);
 
-        powernow_data[pol->cpu] = data;
+        per_cpu(powernow_data, pol->cpu) = data;
 
         return 0;
 
@@ -1216,7 +1216,7 @@ err_out:
 
 static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
 {
-        struct powernow_k8_data *data = powernow_data[pol->cpu];
+        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 
         if (!data)
                 return -EINVAL;
@@ -1237,7 +1237,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
         cpumask_t oldmask = current->cpus_allowed;
         unsigned int khz = 0;
 
-        data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
+        data = per_cpu(powernow_data, first_cpu(per_cpu(cpu_core_map, cpu)));
 
         if (!data)
                 return -EINVAL;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 88d66fb8411d..404a6a2d4016 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -5,6 +5,7 @@
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/processor-cyrix.h>
+#include <asm/processor-flags.h>
 #include <asm/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
@@ -126,15 +127,12 @@ static void __cpuinit set_cx86_reorder(void)
 
 static void __cpuinit set_cx86_memwb(void)
 {
-        u32 cr0;
-
         printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
         /* CCR2 bit 2: unlock NW bit */
         setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
         /* set 'Not Write-through' */
-        cr0 = 0x20000000;
-        write_cr0(read_cr0() | cr0);
+        write_cr0(read_cr0() | X86_CR0_NW);
         /* CCR2 bit 2: lock NW bit and set WT1 */
         setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
 }
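
Note: X86_CR0_NW is CR0 bit 29, so 1 << 29 == 0x20000000, exactly the magic value the deleted cr0 local held; like the bugs.c change above and the mtrr changes below, this renames the constant without altering the bit pattern written to CR0.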
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cc8c501b9f39..d1c372b018db 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -11,6 +11,8 @@
 #include <asm/pgtable.h>
 #include <asm/msr.h>
 #include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/ds.h>
 
 #include "cpu.h"
 
@@ -27,13 +29,14 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
+void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-        if (c->x86_vendor != X86_VENDOR_INTEL)
-                return;
         /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
         if (c->x86 == 15 && c->x86_cache_alignment == 64)
                 c->x86_cache_alignment = 128;
+        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+                (c->x86 == 0x6 && c->x86_model >= 0x0e))
+                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 }
 
 /*
@@ -113,6 +116,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         unsigned int l2 = 0;
         char *p = NULL;
 
+        early_init_intel(c);
+
 #ifdef CONFIG_X86_F00F_BUG
         /*
          * All current models of Pentium and Pentium with MMX technology CPUs
@@ -132,7 +137,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         }
 #endif
 
-        select_idle_routine(c);
         l2 = init_intel_cacheinfo(c);
         if (c->cpuid_level > 9 ) {
                 unsigned eax = cpuid_eax(10);
@@ -201,16 +205,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         }
 #endif
 
+        if (cpu_has_xmm2)
+                set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
         if (c->x86 == 15) {
                 set_bit(X86_FEATURE_P4, c->x86_capability);
-                set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
         }
         if (c->x86 == 6)
                 set_bit(X86_FEATURE_P3, c->x86_capability);
-        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-                (c->x86 == 0x6 && c->x86_model >= 0x0e))
-                set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-
         if (cpu_has_ds) {
                 unsigned int l1;
                 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
@@ -219,6 +220,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                 if (!(l1 & (1<<12)))
                         set_bit(X86_FEATURE_PEBS, c->x86_capability);
         }
+
+        if (cpu_has_bts)
+                ds_init_intel(c);
 }
 
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
@@ -342,5 +346,22 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
 EXPORT_SYMBOL(cmpxchg_386_u32);
 #endif
 
+#ifndef CONFIG_X86_CMPXCHG64
+unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
+{
+        u64 prev;
+        unsigned long flags;
+
+        /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
+        local_irq_save(flags);
+        prev = *(u64 *)ptr;
+        if (prev == old)
+                *(u64 *)ptr = new;
+        local_irq_restore(flags);
+        return prev;
+}
+EXPORT_SYMBOL(cmpxchg_486_u64);
+#endif
+
 // arch_initcall(intel_cpu_init);
 
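
Note: cmpxchg_486_u64() emulates cmpxchg8b by disabling local interrupts around the read-compare-write, which is atomic only against the local CPU; hence the "unsuitable for SMP" comment, acceptable because 386/486-class machines are uniprocessor. A hypothetical caller would still use the usual retry-loop shape:

        /* sketch of a caller; 'counter' is a made-up example variable */
        static u64 counter;

        static void counter_inc(void)
        {
                u64 old, new;

                do {
                        old = counter;
                        new = old + 1;
                } while (cmpxchg_486_u64(&counter, old, new) != old);
        }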
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
index eef63e3630c2..e633c9c2b764 100644
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ b/arch/x86/kernel/cpu/mcheck/k7.c
@@ -16,7 +16,7 @@
 #include "mce.h"
 
 /* Machine Check Handler For AMD Athlon/Duron */
-static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
+static void k7_machine_check(struct pt_regs * regs, long error_code)
 {
         int recover=1;
         u32 alow, ahigh, high, low;
@@ -27,29 +27,32 @@ static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
         if (mcgstl & (1<<0))    /* Recoverable ? */
                 recover=0;
 
-        printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+        printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
                 smp_processor_id(), mcgsth, mcgstl);
 
-        for (i=1; i<nr_mce_banks; i++) {
-                rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
+        for (i = 1; i < nr_mce_banks; i++) {
+                rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
                 if (high&(1<<31)) {
+                        char misc[20];
+                        char addr[24];
+                        misc[0] = addr[0] = '\0';
                         if (high & (1<<29))
                                 recover |= 1;
                         if (high & (1<<25))
                                 recover |= 2;
-                        printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
                         high &= ~(1<<31);
                         if (high & (1<<27)) {
-                                rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-                                printk ("[%08x%08x]", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+                                snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
                         }
                         if (high & (1<<26)) {
-                                rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-                                printk (" at %08x%08x", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+                                snprintf(addr, 24, " at %08x%08x", ahigh, alow);
                         }
-                        printk ("\n");
+                        printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
+                                smp_processor_id(), i, high, low, misc, addr);
                         /* Clear it */
-                        wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
+                        wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
                         /* Serialize */
                         wmb();
                         add_taint(TAINT_MACHINE_CHECK);
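
Note: formatting the optional "[misc]" and " at address" parts into local buffers and emitting a single printk() per bank keeps each record on one log line, so interleaved output from other CPUs (or log prefixes such as timestamps) can no longer split a record. The buffer sizes check out: "[%08x%08x]" expands to 18 characters plus the NUL (fits in 20), and " at %08x%08x" to 20 plus the NUL (fits in 24). The identical rewrite is applied to p4.c and p6.c below.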
diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
index 81fb6e2d35f3..ae9f628838f1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.h
+++ b/arch/x86/kernel/cpu/mcheck/mce.h
@@ -8,7 +8,7 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
 void winchip_mcheck_init(struct cpuinfo_x86 *c);
 
 /* Call the installed machine check handler for this CPU setup. */
-extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
 
 extern int nr_mce_banks;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 34c781eddee4..a5182dcd94ae 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -22,13 +22,13 @@ int nr_mce_banks;
 EXPORT_SYMBOL_GPL(nr_mce_banks);        /* non-fatal.o */
 
 /* Handle unconfigured int18 (should never happen) */
-static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_code)
+static void unexpected_machine_check(struct pt_regs * regs, long error_code)
 {
         printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id());
 }
 
 /* Call the installed machine check handler for this CPU setup. */
-void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
+void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
 
 /* This has to be run for each processor */
 void mcheck_init(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 242e8668dbeb..9a699ed03598 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -63,7 +63,7 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
  * separate MCEs from kernel messages to avoid bogus bug reports.
  */
 
-struct mce_log mcelog = {
+static struct mce_log mcelog = {
         MCE_LOG_SIGNATURE,
         MCE_LOG_LEN,
 };
@@ -80,7 +80,7 @@ void mce_log(struct mce *mce)
         /* When the buffer fills up discard new entries. Assume
            that the earlier errors are the more interesting. */
         if (entry >= MCE_LOG_LEN) {
-                set_bit(MCE_OVERFLOW, &mcelog.flags);
+                set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
                 return;
         }
         /* Old left over entry. Skip. */
@@ -110,12 +110,12 @@ static void print_mce(struct mce *m)
                KERN_EMERG
                "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
                m->cpu, m->mcgstatus, m->bank, m->status);
-        if (m->rip) {
+        if (m->ip) {
                 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
                        !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
-                       m->cs, m->rip);
+                       m->cs, m->ip);
                 if (m->cs == __KERNEL_CS)
-                        print_symbol("{%s}", m->rip);
+                        print_symbol("{%s}", m->ip);
                 printk("\n");
         }
         printk(KERN_EMERG "TSC %Lx ", m->tsc);
@@ -156,16 +156,16 @@ static int mce_available(struct cpuinfo_x86 *c)
 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 {
         if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
-                m->rip = regs->rip;
+                m->ip = regs->ip;
                 m->cs = regs->cs;
         } else {
-                m->rip = 0;
+                m->ip = 0;
                 m->cs = 0;
         }
         if (rip_msr) {
                 /* Assume the RIP in the MSR is exact. Is this true? */
                 m->mcgstatus |= MCG_STATUS_EIPV;
-                rdmsrl(rip_msr, m->rip);
+                rdmsrl(rip_msr, m->ip);
                 m->cs = 0;
         }
 }
@@ -192,10 +192,10 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 
         atomic_inc(&mce_entry);
 
-        if (regs)
-                notify_die(DIE_NMI, "machine check", regs, error_code, 18,
-                           SIGKILL);
-        if (!banks)
+        if ((regs
+             && notify_die(DIE_NMI, "machine check", regs, error_code,
+                           18, SIGKILL) == NOTIFY_STOP)
+            || !banks)
                 goto out2;
 
         memset(&m, 0, sizeof(struct mce));
@@ -288,7 +288,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
          * instruction which caused the MCE.
          */
         if (m.mcgstatus & MCG_STATUS_EIPV)
-                user_space = panicm.rip && (panicm.cs & 3);
+                user_space = panicm.ip && (panicm.cs & 3);
 
         /*
          * If we know that the error was in user space, send a
@@ -564,7 +564,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                         loff_t *off)
 {
         unsigned long *cpu_tsc;
-        static DECLARE_MUTEX(mce_read_sem);
+        static DEFINE_MUTEX(mce_read_mutex);
         unsigned next;
         char __user *buf = ubuf;
         int i, err;
@@ -573,12 +573,12 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
         if (!cpu_tsc)
                 return -ENOMEM;
 
-        down(&mce_read_sem);
+        mutex_lock(&mce_read_mutex);
         next = rcu_dereference(mcelog.next);
 
         /* Only supports full reads right now */
         if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
-                up(&mce_read_sem);
+                mutex_unlock(&mce_read_mutex);
                 kfree(cpu_tsc);
                 return -EINVAL;
         }
@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                         memset(&mcelog.entry[i], 0, sizeof(struct mce));
                 }
         }
-        up(&mce_read_sem);
+        mutex_unlock(&mce_read_mutex);
         kfree(cpu_tsc);
         return err ? -EFAULT : buf - ubuf;
 }
@@ -634,8 +634,7 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
         return 0;
 }
 
-static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
-                     unsigned long arg)
+static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
         int __user *p = (int __user *)arg;
 
@@ -664,7 +663,7 @@ static const struct file_operations mce_chrdev_ops = {
         .release = mce_release,
         .read = mce_read,
         .poll = mce_poll,
-        .ioctl = mce_ioctl,
+        .unlocked_ioctl = mce_ioctl,
 };
 
 static struct miscdevice mce_log_device = {
@@ -855,8 +854,8 @@ static void mce_remove_device(unsigned int cpu)
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
+                                      unsigned long action, void *hcpu)
 {
         unsigned int cpu = (unsigned long)hcpu;
 
@@ -873,7 +872,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
         return NOTIFY_OK;
 }
 
-static struct notifier_block mce_cpu_notifier = {
+static struct notifier_block mce_cpu_notifier __cpuinitdata = {
         .notifier_call = mce_cpu_callback,
 };
 
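
Note: two API migrations in this file. DECLARE_MUTEX(), despite its name, declared a counting semaphore; DEFINE_MUTEX() produces a real struct mutex with debugging and lockdep support. And switching from .ioctl to .unlocked_ioctl means the handler runs without the Big Kernel Lock, which is why it loses the inode argument and returns long. The locking conversion pattern:

        static DEFINE_MUTEX(mce_read_mutex);    /* was: static DECLARE_MUTEX(mce_read_sem); */
        mutex_lock(&mce_read_mutex);            /* was: down(&mce_read_sem); */
        mutex_unlock(&mce_read_mutex);          /* was: up(&mce_read_sem); */

Presumably safe here because mce_ioctl() does not rely on BKL serialization against the other mce file operations.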
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 753588755fee..32671da8184e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -118,6 +118,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
         unsigned int bank, block;
         unsigned int cpu = smp_processor_id();
+        u8 lvt_off;
         u32 low = 0, high = 0, address = 0;
 
         for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -153,14 +154,13 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
                 if (shared_bank[bank] && c->cpu_core_id)
                         break;
 #endif
+                lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
+                                               APIC_EILVT_MSG_FIX, 0);
+
                 high &= ~MASK_LVTOFF_HI;
-                high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
+                high |= lvt_off << 20;
                 wrmsr(address, low, high);
 
-                setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
-                                        THRESHOLD_APIC_VECTOR,
-                                        K8_APIC_EXT_INT_MSG_FIX, 0);
-
                 threshold_defaults.address = address;
                 threshold_restart_bank(&threshold_defaults, 0, 0);
         }
@@ -450,7 +450,8 @@ recurse:
         if (err)
                 goto out_free;
 
-        kobject_uevent(&b->kobj, KOBJ_ADD);
+        if (b)
+                kobject_uevent(&b->kobj, KOBJ_ADD);
 
         return err;
 
@@ -554,7 +555,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
         int err = 0;
 
         for (bank = 0; bank < NR_BANKS; ++bank) {
-                if (!(per_cpu(bank_map, cpu) & 1 << bank))
+                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                         continue;
                 err = threshold_create_bank(cpu, bank);
                 if (err)
@@ -637,14 +638,14 @@ static void threshold_remove_device(unsigned int cpu)
         unsigned int bank;
 
         for (bank = 0; bank < NR_BANKS; ++bank) {
-                if (!(per_cpu(bank_map, cpu) & 1 << bank))
+                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                         continue;
                 threshold_remove_bank(cpu, bank);
         }
 }
 
 /* get notified when a cpu comes on/off */
-static int threshold_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
                                             unsigned long action, void *hcpu)
 {
         /* cpu was unsigned int to begin with */
@@ -669,7 +670,7 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block threshold_cpu_notifier = {
+static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
         .notifier_call = threshold_cpu_callback,
 };
 
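
Note: the bank_map change is purely cosmetic: << binds tighter than & in C, so per_cpu(bank_map, cpu) & 1 << bank already parsed as ... & (1 << bank); the added parentheses help the reader, not the compiler. A quick self-contained check:

        #include <assert.h>

        int main(void)
        {
                unsigned int map = 0x5, bank = 2;

                /* << has higher precedence than &, so both spellings agree */
                assert((map & 1 << bank) == (map & (1 << bank)));
                return 0;
        }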
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index be4dabfee1f5..cb03345554a5 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -57,7 +57,7 @@ static void intel_thermal_interrupt(struct pt_regs *regs)
 /* Thermal interrupt handler for this CPU setup */
 static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
 
-fastcall void smp_thermal_interrupt(struct pt_regs *regs)
+void smp_thermal_interrupt(struct pt_regs *regs)
 {
         irq_enter();
         vendor_thermal_interrupt(regs);
@@ -141,7 +141,7 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
         rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
 }
 
-static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
+static void intel_machine_check(struct pt_regs * regs, long error_code)
 {
         int recover=1;
         u32 alow, ahigh, high, low;
@@ -152,38 +152,41 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
         if (mcgstl & (1<<0))    /* Recoverable ? */
                 recover=0;
 
-        printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+        printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
                 smp_processor_id(), mcgsth, mcgstl);
 
         if (mce_num_extended_msrs > 0) {
                 struct intel_mce_extended_msrs dbg;
                 intel_get_extended_msrs(&dbg);
-                printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
-                        smp_processor_id(), dbg.eip, dbg.eflags);
-                printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
-                        dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
-                printk (KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
-                        dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
+                printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
+                        "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
+                        "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
+                        smp_processor_id(), dbg.eip, dbg.eflags,
+                        dbg.eax, dbg.ebx, dbg.ecx, dbg.edx,
+                        dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
         }
 
-        for (i=0; i<nr_mce_banks; i++) {
-                rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
+        for (i = 0; i < nr_mce_banks; i++) {
+                rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
                 if (high & (1<<31)) {
+                        char misc[20];
+                        char addr[24];
+                        misc[0] = addr[0] = '\0';
                         if (high & (1<<29))
                                 recover |= 1;
                         if (high & (1<<25))
                                 recover |= 2;
-                        printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
                         high &= ~(1<<31);
                         if (high & (1<<27)) {
-                                rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-                                printk ("[%08x%08x]", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+                                snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
                         }
                         if (high & (1<<26)) {
-                                rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-                                printk (" at %08x%08x", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+                                snprintf(addr, 24, " at %08x%08x", ahigh, alow);
                         }
-                        printk ("\n");
+                        printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
+                                smp_processor_id(), i, high, low, misc, addr);
                 }
         }
 
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 94bc43d950cf..a18310aaae0c 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -16,7 +16,7 @@
 #include "mce.h"
 
 /* Machine check handler for Pentium class Intel */
-static fastcall void pentium_machine_check(struct pt_regs * regs, long error_code)
+static void pentium_machine_check(struct pt_regs * regs, long error_code)
 {
         u32 loaddr, hi, lotype;
         rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
index deeae42ce199..74342604d30e 100644
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ b/arch/x86/kernel/cpu/mcheck/p6.c
@@ -16,7 +16,7 @@
 #include "mce.h"
 
 /* Machine Check Handler For PII/PIII */
-static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
+static void intel_machine_check(struct pt_regs * regs, long error_code)
 {
         int recover=1;
         u32 alow, ahigh, high, low;
@@ -27,27 +27,30 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
         if (mcgstl & (1<<0))    /* Recoverable ? */
                 recover=0;
 
-        printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+        printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
                 smp_processor_id(), mcgsth, mcgstl);
 
-        for (i=0; i<nr_mce_banks; i++) {
-                rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
+        for (i = 0; i < nr_mce_banks; i++) {
+                rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
                 if (high & (1<<31)) {
+                        char misc[20];
+                        char addr[24];
+                        misc[0] = addr[0] = '\0';
                         if (high & (1<<29))
                                 recover |= 1;
                         if (high & (1<<25))
                                 recover |= 2;
-                        printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
                         high &= ~(1<<31);
                         if (high & (1<<27)) {
-                                rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-                                printk ("[%08x%08x]", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+                                snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
                         }
                         if (high & (1<<26)) {
-                                rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-                                printk (" at %08x%08x", ahigh, alow);
+                                rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+                                snprintf(addr, 24, " at %08x%08x", ahigh, alow);
                         }
-                        printk ("\n");
+                        printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
+                                smp_processor_id(), i, high, low, misc, addr);
                 }
         }
 
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 9e424b6c293d..3d428d5afc52 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -15,7 +15,7 @@
15#include "mce.h" 15#include "mce.h"
16 16
17/* Machine check handler for WinChip C6 */ 17/* Machine check handler for WinChip C6 */
18static fastcall void winchip_machine_check(struct pt_regs * regs, long error_code) 18static void winchip_machine_check(struct pt_regs * regs, long error_code)
19{ 19{
20 printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); 20 printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
21 add_taint(TAINT_MACHINE_CHECK); 21 add_taint(TAINT_MACHINE_CHECK);
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index 0949cdbf848a..ee2331b0e58f 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -53,8 +53,6 @@ static void amd_set_mtrr(unsigned int reg, unsigned long base,
53 <base> The base address of the region. 53 <base> The base address of the region.
54 <size> The size of the region. If this is 0 the region is disabled. 54 <size> The size of the region. If this is 0 the region is disabled.
55 <type> The type of the region. 55 <type> The type of the region.
56 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
57 be done externally.
58 [RETURNS] Nothing. 56 [RETURNS] Nothing.
59*/ 57*/
60{ 58{
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 9964be3de2b7..8e139c70f888 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -4,6 +4,7 @@
4#include <asm/msr.h> 4#include <asm/msr.h>
5#include <asm/io.h> 5#include <asm/io.h>
6#include <asm/processor-cyrix.h> 6#include <asm/processor-cyrix.h>
7#include <asm/processor-flags.h>
7#include "mtrr.h" 8#include "mtrr.h"
8 9
9int arr3_protected; 10int arr3_protected;
@@ -142,7 +143,7 @@ static void prepare_set(void)
142 143
143 /* Disable and flush caches. Note that wbinvd flushes the TLBs as 144 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
144 a side-effect */ 145 a side-effect */
145 cr0 = read_cr0() | 0x40000000; 146 cr0 = read_cr0() | X86_CR0_CD;
146 wbinvd(); 147 wbinvd();
147 write_cr0(cr0); 148 write_cr0(cr0);
148 wbinvd(); 149 wbinvd();
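The literal 0x40000000 being replaced here is bit 30 of CR0, the cache-disable (CD) flag, which <asm/processor-flags.h> names X86_CR0_CD; with NW (bit 29) left clear this enters the no-fill cache mode used while the ranges are rewritten. The equivalence, as a one-line check:

#include <assert.h>

#define X86_CR0_CD (1UL << 30)	/* CR0 cache-disable flag, == 0x40000000 */

int main(void)
{
	assert(X86_CR0_CD == 0x40000000UL);
	return 0;
}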
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 992f08dfbb6c..103d61a59b19 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -9,11 +9,12 @@
9#include <asm/msr.h> 9#include <asm/msr.h>
10#include <asm/system.h> 10#include <asm/system.h>
11#include <asm/cpufeature.h> 11#include <asm/cpufeature.h>
12#include <asm/processor-flags.h>
12#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
13#include "mtrr.h" 14#include "mtrr.h"
14 15
15struct mtrr_state { 16struct mtrr_state {
16 struct mtrr_var_range *var_ranges; 17 struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
17 mtrr_type fixed_ranges[NUM_FIXED_RANGES]; 18 mtrr_type fixed_ranges[NUM_FIXED_RANGES];
18 unsigned char enabled; 19 unsigned char enabled;
19 unsigned char have_fixed; 20 unsigned char have_fixed;
@@ -85,12 +86,6 @@ void __init get_mtrr_state(void)
85 struct mtrr_var_range *vrs; 86 struct mtrr_var_range *vrs;
86 unsigned lo, dummy; 87 unsigned lo, dummy;
87 88
88 if (!mtrr_state.var_ranges) {
89 mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
90 GFP_KERNEL);
91 if (!mtrr_state.var_ranges)
92 return;
93 }
94 vrs = mtrr_state.var_ranges; 89 vrs = mtrr_state.var_ranges;
95 90
96 rdmsr(MTRRcap_MSR, lo, dummy); 91 rdmsr(MTRRcap_MSR, lo, dummy);
@@ -188,7 +183,7 @@ static inline void k8_enable_fixed_iorrs(void)
188 * \param changed pointer which indicates whether the MTRR needed to be changed 183 * \param changed pointer which indicates whether the MTRR needed to be changed
189 * \param msrwords pointer to the MSR values which the MSR should have 184 * \param msrwords pointer to the MSR values which the MSR should have
190 */ 185 */
191static void set_fixed_range(int msr, int * changed, unsigned int * msrwords) 186static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
192{ 187{
193 unsigned lo, hi; 188 unsigned lo, hi;
194 189
@@ -200,7 +195,7 @@ static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
200 ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK)) 195 ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
201 k8_enable_fixed_iorrs(); 196 k8_enable_fixed_iorrs();
202 mtrr_wrmsr(msr, msrwords[0], msrwords[1]); 197 mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
203 *changed = TRUE; 198 *changed = true;
204 } 199 }
205} 200}
206 201
@@ -260,7 +255,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
260static int set_fixed_ranges(mtrr_type * frs) 255static int set_fixed_ranges(mtrr_type * frs)
261{ 256{
262 unsigned long long *saved = (unsigned long long *) frs; 257 unsigned long long *saved = (unsigned long long *) frs;
263 int changed = FALSE; 258 bool changed = false;
264 int block=-1, range; 259 int block=-1, range;
265 260
266 while (fixed_range_blocks[++block].ranges) 261 while (fixed_range_blocks[++block].ranges)
@@ -273,17 +268,17 @@ static int set_fixed_ranges(mtrr_type * frs)
273 268
274/* Set the MSR pair relating to a var range. Returns TRUE if 269/* Set the MSR pair relating to a var range. Returns TRUE if
275 changes are made */ 270 changes are made */
276static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) 271static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
277{ 272{
278 unsigned int lo, hi; 273 unsigned int lo, hi;
279 int changed = FALSE; 274 bool changed = false;
280 275
281 rdmsr(MTRRphysBase_MSR(index), lo, hi); 276 rdmsr(MTRRphysBase_MSR(index), lo, hi);
282 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) 277 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
283 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != 278 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
284 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { 279 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
285 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); 280 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
286 changed = TRUE; 281 changed = true;
287 } 282 }
288 283
289 rdmsr(MTRRphysMask_MSR(index), lo, hi); 284 rdmsr(MTRRphysMask_MSR(index), lo, hi);
@@ -292,7 +287,7 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
292 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != 287 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
293 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { 288 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
294 mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); 289 mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
295 changed = TRUE; 290 changed = true;
296 } 291 }
297 return changed; 292 return changed;
298} 293}
@@ -350,7 +345,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
350 spin_lock(&set_atomicity_lock); 345 spin_lock(&set_atomicity_lock);
351 346
352 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ 347 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
353 cr0 = read_cr0() | 0x40000000; /* set CD flag */ 348 cr0 = read_cr0() | X86_CR0_CD;
354 write_cr0(cr0); 349 write_cr0(cr0);
355 wbinvd(); 350 wbinvd();
356 351
@@ -417,8 +412,6 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
417 <base> The base address of the region. 412 <base> The base address of the region.
418 <size> The size of the region. If this is 0 the region is disabled. 413 <size> The size of the region. If this is 0 the region is disabled.
419 <type> The type of the region. 414 <type> The type of the region.
420 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
421 be done externally.
422 [RETURNS] Nothing. 415 [RETURNS] Nothing.
423*/ 416*/
424{ 417{
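With the TRUE/FALSE macros gone from mtrr.h, the changed-flag plumbing in generic.c uses bool from <linux/types.h> throughout. The shape of the pattern, reduced to a sketch (update_if_changed is a made-up name):

#include <linux/types.h>

/* Write val only if it differs from the current contents, reporting
 * whether anything changed through a bool out-parameter. */
static void update_if_changed(unsigned int *reg, unsigned int val,
			      bool *changed)
{
	if (*reg != val) {
		*reg = val;
		*changed = true;
	}
}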
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index c7d8f1756745..91e150acb46c 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -11,10 +11,6 @@
11#include <asm/mtrr.h> 11#include <asm/mtrr.h>
12#include "mtrr.h" 12#include "mtrr.h"
13 13
14/* RED-PEN: this is accessed without any locking */
15extern unsigned int *usage_table;
16
17
18#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) 14#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
19 15
20static const char *const mtrr_strings[MTRR_NUM_TYPES] = 16static const char *const mtrr_strings[MTRR_NUM_TYPES] =
@@ -37,7 +33,7 @@ const char *mtrr_attrib_to_str(int x)
37 33
38static int 34static int
39mtrr_file_add(unsigned long base, unsigned long size, 35mtrr_file_add(unsigned long base, unsigned long size,
40 unsigned int type, char increment, struct file *file, int page) 36 unsigned int type, bool increment, struct file *file, int page)
41{ 37{
42 int reg, max; 38 int reg, max;
43 unsigned int *fcount = FILE_FCOUNT(file); 39 unsigned int *fcount = FILE_FCOUNT(file);
@@ -55,7 +51,7 @@ mtrr_file_add(unsigned long base, unsigned long size,
55 base >>= PAGE_SHIFT; 51 base >>= PAGE_SHIFT;
56 size >>= PAGE_SHIFT; 52 size >>= PAGE_SHIFT;
57 } 53 }
58 reg = mtrr_add_page(base, size, type, 1); 54 reg = mtrr_add_page(base, size, type, true);
59 if (reg >= 0) 55 if (reg >= 0)
60 ++fcount[reg]; 56 ++fcount[reg];
61 return reg; 57 return reg;
@@ -141,7 +137,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
141 size >>= PAGE_SHIFT; 137 size >>= PAGE_SHIFT;
142 err = 138 err =
143 mtrr_add_page((unsigned long) base, (unsigned long) size, i, 139 mtrr_add_page((unsigned long) base, (unsigned long) size, i,
144 1); 140 true);
145 if (err < 0) 141 if (err < 0)
146 return err; 142 return err;
147 return len; 143 return len;
@@ -217,7 +213,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
217 if (!capable(CAP_SYS_ADMIN)) 213 if (!capable(CAP_SYS_ADMIN))
218 return -EPERM; 214 return -EPERM;
219 err = 215 err =
220 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 216 mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
221 file, 0); 217 file, 0);
222 break; 218 break;
223 case MTRRIOC_SET_ENTRY: 219 case MTRRIOC_SET_ENTRY:
@@ -226,7 +222,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
226#endif 222#endif
227 if (!capable(CAP_SYS_ADMIN)) 223 if (!capable(CAP_SYS_ADMIN))
228 return -EPERM; 224 return -EPERM;
229 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); 225 err = mtrr_add(sentry.base, sentry.size, sentry.type, false);
230 break; 226 break;
231 case MTRRIOC_DEL_ENTRY: 227 case MTRRIOC_DEL_ENTRY:
232#ifdef CONFIG_COMPAT 228#ifdef CONFIG_COMPAT
@@ -270,7 +266,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
270 if (!capable(CAP_SYS_ADMIN)) 266 if (!capable(CAP_SYS_ADMIN))
271 return -EPERM; 267 return -EPERM;
272 err = 268 err =
273 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 269 mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
274 file, 1); 270 file, 1);
275 break; 271 break;
276 case MTRRIOC_SET_PAGE_ENTRY: 272 case MTRRIOC_SET_PAGE_ENTRY:
@@ -279,7 +275,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
279#endif 275#endif
280 if (!capable(CAP_SYS_ADMIN)) 276 if (!capable(CAP_SYS_ADMIN))
281 return -EPERM; 277 return -EPERM;
282 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); 278 err =
279 mtrr_add_page(sentry.base, sentry.size, sentry.type, false);
283 break; 280 break;
284 case MTRRIOC_DEL_PAGE_ENTRY: 281 case MTRRIOC_DEL_PAGE_ENTRY:
285#ifdef CONFIG_COMPAT 282#ifdef CONFIG_COMPAT
@@ -396,7 +393,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
396 for (i = 0; i < max; i++) { 393 for (i = 0; i < max; i++) {
397 mtrr_if->get(i, &base, &size, &type); 394 mtrr_if->get(i, &base, &size, &type);
398 if (size == 0) 395 if (size == 0)
399 usage_table[i] = 0; 396 mtrr_usage_table[i] = 0;
400 else { 397 else {
401 if (size < (0x100000 >> PAGE_SHIFT)) { 398 if (size < (0x100000 >> PAGE_SHIFT)) {
402 /* less than 1MB */ 399 /* less than 1MB */
@@ -410,7 +407,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
410 len += seq_printf(seq, 407 len += seq_printf(seq,
411 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", 408 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
412 i, base, base >> (20 - PAGE_SHIFT), size, factor, 409 i, base, base >> (20 - PAGE_SHIFT), size, factor,
413 mtrr_attrib_to_str(type), usage_table[i]); 410 mtrr_attrib_to_str(type), mtrr_usage_table[i]);
414 } 411 }
415 } 412 }
416 return 0; 413 return 0;
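Callers of mtrr_add()/mtrr_add_page() now pass a bool for increment, as the conversions above show. A hypothetical in-kernel call under the new signature (the framebuffer names are illustrative, not from this patch):

#include <linux/kernel.h>
#include <asm/mtrr.h>

/* Mark a (hypothetical) framebuffer range write-combining; increment is
 * now bool, so callers say true/false instead of 1/0. */
static void fb_enable_wc(unsigned long fb_base, unsigned long fb_len)
{
	int reg = mtrr_add(fb_base, fb_len, MTRR_TYPE_WRCOMB, true);

	if (reg < 0)
		printk(KERN_INFO "fb: no write-combining MTRR available\n");
}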
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index beb45c9c0835..715919582657 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -38,8 +38,8 @@
38#include <linux/cpu.h> 38#include <linux/cpu.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40 40
41#include <asm/e820.h>
41#include <asm/mtrr.h> 42#include <asm/mtrr.h>
42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/msr.h> 45#include <asm/msr.h>
@@ -47,7 +47,7 @@
47 47
48u32 num_var_ranges = 0; 48u32 num_var_ranges = 0;
49 49
50unsigned int *usage_table; 50unsigned int mtrr_usage_table[MAX_VAR_RANGES];
51static DEFINE_MUTEX(mtrr_mutex); 51static DEFINE_MUTEX(mtrr_mutex);
52 52
53u64 size_or_mask, size_and_mask; 53u64 size_or_mask, size_and_mask;
@@ -121,13 +121,8 @@ static void __init init_table(void)
121 int i, max; 121 int i, max;
122 122
123 max = num_var_ranges; 123 max = num_var_ranges;
124 if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
125 == NULL) {
126 printk(KERN_ERR "mtrr: could not allocate\n");
127 return;
128 }
129 for (i = 0; i < max; i++) 124 for (i = 0; i < max; i++)
130 usage_table[i] = 1; 125 mtrr_usage_table[i] = 1;
131} 126}
132 127
133struct set_mtrr_data { 128struct set_mtrr_data {
@@ -311,7 +306,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
311 */ 306 */
312 307
313int mtrr_add_page(unsigned long base, unsigned long size, 308int mtrr_add_page(unsigned long base, unsigned long size,
314 unsigned int type, char increment) 309 unsigned int type, bool increment)
315{ 310{
316 int i, replace, error; 311 int i, replace, error;
317 mtrr_type ltype; 312 mtrr_type ltype;
@@ -383,7 +378,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
383 goto out; 378 goto out;
384 } 379 }
385 if (increment) 380 if (increment)
386 ++usage_table[i]; 381 ++mtrr_usage_table[i];
387 error = i; 382 error = i;
388 goto out; 383 goto out;
389 } 384 }
@@ -391,13 +386,15 @@ int mtrr_add_page(unsigned long base, unsigned long size,
391 i = mtrr_if->get_free_region(base, size, replace); 386 i = mtrr_if->get_free_region(base, size, replace);
392 if (i >= 0) { 387 if (i >= 0) {
393 set_mtrr(i, base, size, type); 388 set_mtrr(i, base, size, type);
394 if (likely(replace < 0)) 389 if (likely(replace < 0)) {
395 usage_table[i] = 1; 390 mtrr_usage_table[i] = 1;
396 else { 391 } else {
397 usage_table[i] = usage_table[replace] + !!increment; 392 mtrr_usage_table[i] = mtrr_usage_table[replace];
393 if (increment)
394 mtrr_usage_table[i]++;
398 if (unlikely(replace != i)) { 395 if (unlikely(replace != i)) {
399 set_mtrr(replace, 0, 0, 0); 396 set_mtrr(replace, 0, 0, 0);
400 usage_table[replace] = 0; 397 mtrr_usage_table[replace] = 0;
401 } 398 }
402 } 399 }
403 } else 400 } else
@@ -460,7 +457,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
460 457
461int 458int
462mtrr_add(unsigned long base, unsigned long size, unsigned int type, 459mtrr_add(unsigned long base, unsigned long size, unsigned int type,
463 char increment) 460 bool increment)
464{ 461{
465 if (mtrr_check(base, size)) 462 if (mtrr_check(base, size))
466 return -EINVAL; 463 return -EINVAL;
@@ -527,11 +524,11 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
527 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); 524 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
528 goto out; 525 goto out;
529 } 526 }
530 if (usage_table[reg] < 1) { 527 if (mtrr_usage_table[reg] < 1) {
531 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); 528 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
532 goto out; 529 goto out;
533 } 530 }
534 if (--usage_table[reg] < 1) 531 if (--mtrr_usage_table[reg] < 1)
535 set_mtrr(reg, 0, 0, 0); 532 set_mtrr(reg, 0, 0, 0);
536 error = reg; 533 error = reg;
537 out: 534 out:
@@ -591,16 +588,11 @@ struct mtrr_value {
591 unsigned long lsize; 588 unsigned long lsize;
592}; 589};
593 590
594static struct mtrr_value * mtrr_state; 591static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
595 592
596static int mtrr_save(struct sys_device * sysdev, pm_message_t state) 593static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
597{ 594{
598 int i; 595 int i;
599 int size = num_var_ranges * sizeof(struct mtrr_value);
600
601 mtrr_state = kzalloc(size,GFP_ATOMIC);
602 if (!mtrr_state)
603 return -ENOMEM;
604 596
605 for (i = 0; i < num_var_ranges; i++) { 597 for (i = 0; i < num_var_ranges; i++) {
606 mtrr_if->get(i, 598 mtrr_if->get(i,
@@ -622,7 +614,6 @@ static int mtrr_restore(struct sys_device * sysdev)
622 mtrr_state[i].lsize, 614 mtrr_state[i].lsize,
623 mtrr_state[i].ltype); 615 mtrr_state[i].ltype);
624 } 616 }
625 kfree(mtrr_state);
626 return 0; 617 return 0;
627} 618}
628 619
@@ -633,6 +624,112 @@ static struct sysdev_driver mtrr_sysdev_driver = {
633 .resume = mtrr_restore, 624 .resume = mtrr_restore,
634}; 625};
635 626
627static int disable_mtrr_trim;
628
629static int __init disable_mtrr_trim_setup(char *str)
630{
631 disable_mtrr_trim = 1;
632 return 0;
633}
634early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
635
636/*
637 * Newer AMD K8s and later CPUs have magic bits in the SYSCFG MSR that
638 * force WB for memory above 4GB. Check for that here.
639 * Note this won't check whether the MTRRs below 4GB (where the magic bits
640 * don't apply) are wrong, but so far we don't know of any such case in the wild.
641 */
642#define Tom2Enabled (1U << 21)
643#define Tom2ForceMemTypeWB (1U << 22)
644
645static __init int amd_special_default_mtrr(void)
646{
647 u32 l, h;
648
649 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
650 return 0;
651 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
652 return 0;
653 /* In case some hypervisor doesn't pass SYSCFG through */
654 if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
655 return 0;
656 /*
657 * Memory between 4GB and top of mem is forced WB by this magic bit.
658 * Reserved before K8RevF, but should be zero there.
659 */
660 if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
661 (Tom2Enabled | Tom2ForceMemTypeWB))
662 return 1;
663 return 0;
664}
665
666/**
667 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
668 *
669 * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
670 * memory configurations. This routine checks that the highest MTRR matches
671 * the end of memory, to make sure that MTRRs of write-back type cover
672 * all of the memory the kernel intends to use. If not, it trims any
673 * memory off the end by adjusting end_pfn, removing it from the kernel's
674 * allocation pools and warning the user with an obnoxious message.
675 */
676int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
677{
678 unsigned long i, base, size, highest_addr = 0, def, dummy;
679 mtrr_type type;
680 u64 trim_start, trim_size;
681
682 /*
683 * Make sure we only trim uncachable memory on machines that
684 * support the Intel MTRR architecture:
685 */
686 if (!is_cpu(INTEL) || disable_mtrr_trim)
687 return 0;
688 rdmsr(MTRRdefType_MSR, def, dummy);
689 def &= 0xff;
690 if (def != MTRR_TYPE_UNCACHABLE)
691 return 0;
692
693 if (amd_special_default_mtrr())
694 return 0;
695
696 /* Find highest cached pfn */
697 for (i = 0; i < num_var_ranges; i++) {
698 mtrr_if->get(i, &base, &size, &type);
699 if (type != MTRR_TYPE_WRBACK)
700 continue;
701 base <<= PAGE_SHIFT;
702 size <<= PAGE_SHIFT;
703 if (highest_addr < base + size)
704 highest_addr = base + size;
705 }
706
707	/* kvm/qemu don't set the MTRRs up correctly; don't trim all of memory away */
708 if (!highest_addr) {
709 printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
710 WARN_ON(1);
711 return 0;
712 }
713
714 if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
715 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
716 " all of memory, losing %LdMB of RAM.\n",
717 (((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
718
719 WARN_ON(1);
720
721 printk(KERN_INFO "update e820 for mtrr\n");
722 trim_start = highest_addr;
723 trim_size = end_pfn;
724 trim_size <<= PAGE_SHIFT;
725 trim_size -= trim_start;
726 add_memory_region(trim_start, trim_size, E820_RESERVED);
727 update_e820();
728 return 1;
729 }
730
731 return 0;
732}
636 733
637/** 734/**
638 * mtrr_bp_init - initialize mtrrs on the boot CPU 735 * mtrr_bp_init - initialize mtrrs on the boot CPU
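The trim decision in mtrr_trim_uncached_memory() is pure pfn arithmetic: the highest write-back-covered byte is compared against end_pfn, and the difference becomes the reserved e820 region. A standalone sketch of that arithmetic (sample values made up; PAGE_SHIFT of 12, i.e. 4KB pages, assumed):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB pages, as on x86 */

int main(void)
{
	unsigned long end_pfn = 0x100000;		 /* 4GB of RAM */
	unsigned long long highest_addr = 0xfff00000ULL; /* end of WB coverage */

	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
		unsigned long long trim_start = highest_addr;
		unsigned long long trim_size =
			((unsigned long long)end_pfn << PAGE_SHIFT) - trim_start;
		printf("losing %lluMB of RAM\n", trim_size >> 20); /* 1MB here */
	}
	return 0;
}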
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 289dfe6030e3..fb74a2c20814 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -2,10 +2,8 @@
2 * local mtrr defines. 2 * local mtrr defines.
3 */ 3 */
4 4
5#ifndef TRUE 5#include <linux/types.h>
6#define TRUE 1 6#include <linux/stddef.h>
7#define FALSE 0
8#endif
9 7
10#define MTRRcap_MSR 0x0fe 8#define MTRRcap_MSR 0x0fe
11#define MTRRdefType_MSR 0x2ff 9#define MTRRdefType_MSR 0x2ff
@@ -14,6 +12,7 @@
14#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) 12#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
15 13
16#define NUM_FIXED_RANGES 88 14#define NUM_FIXED_RANGES 88
15#define MAX_VAR_RANGES 256
17#define MTRRfix64K_00000_MSR 0x250 16#define MTRRfix64K_00000_MSR 0x250
18#define MTRRfix16K_80000_MSR 0x258 17#define MTRRfix16K_80000_MSR 0x258
19#define MTRRfix16K_A0000_MSR 0x259 18#define MTRRfix16K_A0000_MSR 0x259
@@ -34,6 +33,8 @@
34 an 8 bit field: */ 33 an 8 bit field: */
35typedef u8 mtrr_type; 34typedef u8 mtrr_type;
36 35
36extern unsigned int mtrr_usage_table[MAX_VAR_RANGES];
37
37struct mtrr_ops { 38struct mtrr_ops {
38 u32 vendor; 39 u32 vendor;
39 u32 use_intel_if; 40 u32 use_intel_if;
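MAX_VAR_RANGES (256) only bounds the static tables that replace the old kmalloc()ed ones; the number of variable ranges actually present still comes from the VCNT field of MTRRcap at boot (num_var_ranges). A sketch of that read, assuming kernel context where rdmsr() fills lo/hi in place:

#include <asm/msr.h>

#define MTRRcap_MSR 0x0fe

/* VCNT, the variable-range count, is the low 8 bits of MTRRcap. */
static unsigned int mtrr_var_range_count(void)
{
	unsigned int lo, hi;

	rdmsr(MTRRcap_MSR, lo, hi);
	return lo & 0xff;
}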
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index 49e20c2afcdf..9f8ba923d1c9 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -4,6 +4,7 @@
4#include <asm/mtrr.h> 4#include <asm/mtrr.h>
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include <asm/processor-cyrix.h> 6#include <asm/processor-cyrix.h>
7#include <asm/processor-flags.h>
7#include "mtrr.h" 8#include "mtrr.h"
8 9
9 10
@@ -25,7 +26,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
25 26
26 /* Disable and flush caches. Note that wbinvd flushes the TLBs as 27 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
27 a side-effect */ 28 a side-effect */
28 cr0 = read_cr0() | 0x40000000; 29 cr0 = read_cr0() | X86_CR0_CD;
29 wbinvd(); 30 wbinvd();
30 write_cr0(cr0); 31 write_cr0(cr0);
31 wbinvd(); 32 wbinvd();
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index c02541e6e653..9b838324b818 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -167,7 +167,6 @@ void release_evntsel_nmi(unsigned int msr)
167 clear_bit(counter, evntsel_nmi_owner); 167 clear_bit(counter, evntsel_nmi_owner);
168} 168}
169 169
170EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
171EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); 170EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
172EXPORT_SYMBOL(reserve_perfctr_nmi); 171EXPORT_SYMBOL(reserve_perfctr_nmi);
173EXPORT_SYMBOL(release_perfctr_nmi); 172EXPORT_SYMBOL(release_perfctr_nmi);
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3900e46d66db..028213260148 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -188,7 +188,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
188static void c_stop(struct seq_file *m, void *v) 188static void c_stop(struct seq_file *m, void *v)
189{ 189{
190} 190}
191struct seq_operations cpuinfo_op = { 191const struct seq_operations cpuinfo_op = {
192 .start = c_start, 192 .start = c_start,
193 .next = c_next, 193 .next = c_next,
194 .stop = c_stop, 194 .stop = c_stop,
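Constifying cpuinfo_op lets the compiler place the function-pointer table in read-only data. A table of the same shape, with hypothetical handlers (the example_* names are made up for illustration):

#include <linux/seq_file.h>

static void *example_start(struct seq_file *m, loff_t *pos);
static void *example_next(struct seq_file *m, void *v, loff_t *pos);
static void example_stop(struct seq_file *m, void *v);
static int example_show(struct seq_file *m, void *v);

/* const: the ops table can live in .rodata */
static const struct seq_operations example_op = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};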