author    Thomas Gleixner <tglx@linutronix.de>  2008-05-08 03:18:43 -0400
committer Ingo Molnar <mingo@elte.hu>           2008-05-08 09:43:51 -0400
commit    8d4a4300854f3971502e81dacd930704cb88f606 (patch)
tree      d091b49851e60af1530dd3d7cd54057f98d48ffb /arch
parent    cb3f43b22bbb5ddbf6ce3e2bac40ce6eba30aba0 (diff)
x86: cleanup PAT cpu validation
Move the scattered checks for PAT support to a single function. It is moved to addon_cpuid_features.c as this file is shared between 32 and 64 bit.

Remove the manipulation of the PAT feature bit and just disable PAT in the PAT layer, based on the PAT bit provided by the CPU and the current CPU version/model white list.

Change the boot CPU check so it works on Voyager somewhere in the future as well :) Also panic when a secondary CPU has PAT disabled but the primary one has already switched to PAT. We have no way to undo that.

The white list is kept for now to ensure that we can rely on known to work CPU types and concentrate on the software induced problems instead of fighting CPU errata and subtle wreckage caused by not yet verified CPUs. Once the PAT code has stabilized enough, we can remove the white list and open the can of worms.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
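The resulting flow is easiest to see end to end: validate_pat_support() applies the vendor white list early and calls pat_disable() if the CPU is unknown or lacks the PAT bit, and pat_init() then only programs the PAT MSR while pat_wc_enabled is still set, refusing to continue if a secondary CPU turns out to lack PAT after the boot CPU has already switched over. Below is a minimal, self-contained user-space sketch of that control flow, not kernel code: struct cpuinfo, the VENDOR_* constants and the abort() call are simplified stand-ins for cpuinfo_x86, X86_VENDOR_* and BUG_ON() as used in the patch.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's per-CPU data. */
struct cpuinfo { int vendor; int family; int model; int has_pat; };
enum { VENDOR_AMD, VENDOR_INTEL, VENDOR_OTHER };

static int pat_wc_enabled = 1;             /* mirrors pat_wc_enabled in pat.c */
static unsigned long long boot_pat_state;  /* non-zero once the boot CPU ran pat_init() */

static void pat_disable(const char *reason)
{
	pat_wc_enabled = 0;
	printf("%s\n", reason);
}

/* Mirrors the white list in validate_pat_support(): known-good CPUs keep PAT. */
static void validate_pat_support(struct cpuinfo *c)
{
	switch (c->vendor) {
	case VENDOR_AMD:
		if (c->family >= 0xf && c->family <= 0x11)
			return;
		break;
	case VENDOR_INTEL:
		if (c->family == 0xf || (c->family == 6 && c->model >= 15))
			return;
		break;
	}
	pat_disable(c->has_pat ?
		    "PAT disabled. Not yet verified on this CPU type." :
		    "PAT not supported by CPU.");
}

/* Mirrors the reworked pat_init(): bail out early when PAT was disabled,
 * and give up if a secondary CPU lacks PAT after the boot CPU has already
 * switched over (boot_pat_state != 0). */
static void pat_init(struct cpuinfo *c)
{
	if (!pat_wc_enabled)
		return;
	if (!c->has_pat) {
		fprintf(stderr, "PAT enabled, but CPU feature cleared\n");
		if (boot_pat_state)
			abort();        /* stands in for BUG_ON(boot_pat_state) */
	}
	if (!boot_pat_state)
		boot_pat_state = 1;     /* boot CPU records the pre-PAT state */
	printf("PAT programmed on this CPU\n");
}

int main(void)
{
	struct cpuinfo boot = { VENDOR_INTEL, 6, 23, 1 };
	validate_pat_support(&boot);    /* early_cpu_init() path */
	pat_init(&boot);                /* boot CPU */
	struct cpuinfo ap = boot;       /* a secondary CPU */
	pat_init(&ap);
	return 0;
}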
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 21
-rw-r--r--  arch/x86/kernel/cpu/common.c               | 27
-rw-r--r--  arch/x86/kernel/setup_64.c                 |  9
-rw-r--r--  arch/x86/mm/pat.c                          | 50
4 files changed, 47 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 238468ae1993..c2e1ce33c7cb 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -6,6 +6,7 @@
 
 #include <linux/cpu.h>
 
+#include <asm/pat.h>
 #include <asm/processor.h>
 
 struct cpuid_bit {
@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, cb->feature);
 	}
 }
+
+#ifdef CONFIG_X86_PAT
+void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			return;
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			return;
+		break;
+	}
+
+	pat_disable(cpu_has_pat ?
+		 "PAT disabled. Not yet verified on this CPU type." :
+		 "PAT not supported by CPU.");
+}
+#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 35b4f6a9c8ef..d0463a946247 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -12,6 +12,7 @@
 #include <asm/mmu_context.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/pat.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
 	}
 
-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	}
-
 }
 
 /*
@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		init_scattered_cpuid_features(c);
 	}
 
-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	}
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -651,6 +627,7 @@ void __init early_cpu_init(void)
 		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
 
 	early_cpu_detect();
+	validate_pat_support(&boot_cpu_data);
 }
 
 /* Make sure %fs is initialized properly in idle threads */
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 22c14e21c97c..80d80fab7006 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -70,6 +70,7 @@
 #include <asm/ds.h>
 #include <asm/topology.h>
 #include <asm/trampoline.h>
+#include <asm/pat.h>
 
 #include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
@@ -1063,25 +1064,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
-
-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
 	switch (c->x86_vendor) {
 	case X86_VENDOR_AMD:
 		early_init_amd(c);
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
 		break;
 	case X86_VENDOR_INTEL:
 		early_init_intel(c);
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
 		break;
 	case X86_VENDOR_CENTAUR:
 		early_init_centaur(c);
 		break;
 	}
 
+	validate_pat_support(c);
 }
 
 /*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 277446cd30b6..60adbe22efa0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -25,31 +25,24 @@
 #include <asm/mtrr.h>
 #include <asm/io.h>
 
-int pat_wc_enabled = 1;
+#ifdef CONFIG_X86_PAT
+int __read_mostly pat_wc_enabled = 1;
 
-static u64 __read_mostly boot_pat_state;
-
-static int nopat(char *str)
+void __init pat_disable(char *reason)
 {
 	pat_wc_enabled = 0;
-	printk(KERN_INFO "x86: PAT support disabled.\n");
-
-	return 0;
+	printk(KERN_INFO "%s\n", reason);
 }
-early_param("nopat", nopat);
 
-static int pat_known_cpu(void)
+static int nopat(char *str)
 {
-	if (!pat_wc_enabled)
-		return 0;
-
-	if (cpu_has_pat)
-		return 1;
-
-	pat_wc_enabled = 0;
-	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+	pat_disable("PAT support disabled.");
 	return 0;
 }
+early_param("nopat", nopat);
+#endif
+
+static u64 __read_mostly boot_pat_state;
 
 enum {
 	PAT_UC = 0,		/* uncached */
@@ -66,17 +59,19 @@ void pat_init(void)
 {
 	u64 pat;
 
-#ifndef CONFIG_X86_PAT
-	nopat(NULL);
-#endif
-
-	/* Boot CPU enables PAT based on CPU feature */
-	if (!smp_processor_id() && !pat_known_cpu())
+	if (!pat_wc_enabled)
 		return;
 
-	/* APs enable PAT iff boot CPU has enabled it before */
-	if (smp_processor_id() && !pat_wc_enabled)
-		return;
+	/* Paranoia check. */
+	if (!cpu_has_pat) {
+		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+		/*
+		 * Panic if this happens on the secondary CPU, and we
+		 * switched to PAT on the boot CPU. We have no way to
+		 * undo PAT.
+		 */
+		BUG_ON(boot_pat_state);
+	}
 
 	/* Set PWT to Write-Combining. All other bits stay the same */
 	/*
@@ -95,9 +90,8 @@ void pat_init(void)
 	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
 
 	/* Boot CPU check */
-	if (!smp_processor_id()) {
+	if (!boot_pat_state)
 		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
-	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",