author     Ingo Molnar <mingo@elte.hu>  2009-02-24 15:52:27 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-02-24 15:52:27 -0500
commit     87b203079ed949de52f0d92aeae20e5e0116c12f (patch)
tree       1878756f936963822ed2d51a15db1da5814973e7 /arch/x86/kernel/cpu
parent     58105ef1857112a186696c9b8957020090226a28 (diff)
parent     a852cbfaaf8122827602027b1614971cfd832304 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c        | 54
-rw-r--r--  arch/x86/kernel/cpu/amd.c                         |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c                      | 99
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c        |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         | 40
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  6
-rw-r--r--  arch/x86/kernel/cpu/intel.c                       | 17
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c               |  7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c           |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c         | 10
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c                   |  4
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c            |  2
12 files changed, 133 insertions, 116 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d..6882a735d9c 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/apic.h>
 
 struct cpuid_bit {
 	u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
 						 & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-	/*
-	 * Reinit the apicid, now that we have extended initial_apicid.
-	 */
-	c->apicid = phys_pkg_id(0);
-#endif
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
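
The detect_extended_topology() hunks above decode one APIC ID into SMT, core and package fields using CPUID leaf 0xB mask widths. A standalone sketch of that mask arithmetic, with illustrative widths and APIC ID (not values from the patch; the kernel applies the shifts through apic->phys_pkg_id()):

#include <stdio.h>

int main(void)
{
	unsigned int ht_mask_width = 1;		/* bits for SMT siblings */
	unsigned int core_plus_mask_width = 3;	/* bits for core + SMT   */
	unsigned int apicid = 0x35;		/* hypothetical APIC ID  */

	/* same expression as in the hunk above */
	unsigned int core_select_mask =
		(~(-1 << core_plus_mask_width)) >> ht_mask_width;

	unsigned int smt_id  = apicid & ((1 << ht_mask_width) - 1);
	unsigned int core_id = (apicid >> ht_mask_width) & core_select_mask;
	unsigned int pkg_id  = apicid >> core_plus_mask_width;

	/* 0x35 = 0b110101 -> smt 1, core 2, package 6 */
	printf("smt=%u core=%u pkg=%u\n", smt_id, core_id, pkg_id);
	return 0;
}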
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa91..25423a5b80e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,8 +12,6 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 260fe4cb2c8..826d5c87627 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -23,11 +23,9 @@
 #include <asm/smp.h>
 #include <asm/cpu.h>
 #include <asm/cpumask.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
@@ -226,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_extended_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -407,11 +448,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	}
 
 	index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-	c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -419,13 +456,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-#ifdef CONFIG_X86_64
-		c->cpu_core_id = phys_pkg_id(index_msb) &
-					       ((1 << core_bits) - 1);
-#else
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
-#endif
 	}
 
 out:
@@ -594,11 +626,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -661,7 +692,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 		c->apicid = c->initial_apicid;
 # endif
@@ -708,7 +739,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -732,6 +763,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -1015,7 +1049,7 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	check_efer();
-	if (cpu != 0 && x2apic)
+	if (cpu != 0)
 		enable_x2apic();
 
 	/*
@@ -1062,22 +1096,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-		/*
-		 * Clear all 6 debug registers:
-		 */
-
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
 		set_debugreg(0UL, 0);
 		set_debugreg(0UL, 1);
 		set_debugreg(0UL, 2);
 		set_debugreg(0UL, 3);
 		set_debugreg(0UL, 6);
 		set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
 	}
-#endif
 
 	fpu_init();
 
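
The filter_cpuid_features() hunk above relies on a sign trick: basic CPUID levels are small positive numbers, while extended levels (0x80000000 and up) turn negative when viewed as s32, so the sign of a table entry selects which maximum-level field to compare against, and extended levels compare unsigned. A minimal userspace check of that predicate, with made-up level values (not kernel code):

#include <stdio.h>
#include <stdint.h>

static int level_missing(uint32_t level, int32_t cpuid_level,
			 uint32_t extended_cpuid_level)
{
	return (int32_t)level < 0 ?
	       level > extended_cpuid_level :	/* unsigned compare */
	       (int32_t)level > cpuid_level;	/* signed compare   */
}

int main(void)
{
	/* basic leaf 0xd on a CPU capped at basic level 0x5 -> missing */
	printf("%d\n", level_missing(0x0000000d, 0x5, 0x80000008));
	/* extended leaf 0x8000000a vs. max extended 0x80000008 -> missing */
	printf("%d\n", level_missing(0x8000000au, 0x5, 0x80000008));
	/* cpuid_level of -1 means "unavailable": basic leaves all missing */
	printf("%d\n", level_missing(0x00000005, -1, 0));
	return 0;
}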
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c2f930d8664..41ab3f064cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 	}
 	/* Enable Enhanced PowerSaver */
 	rdmsrl(MSR_IA32_MISC_ENABLE, val);
-	if (!(val & 1 << 16)) {
-		val |= 1 << 16;
+	if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
 		wrmsrl(MSR_IA32_MISC_ENABLE, val);
 		/* Can be locked at 0 */
 		rdmsrl(MSR_IA32_MISC_ENABLE, val);
-		if (!(val & 1 << 16)) {
+		if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
 			printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
 			return -ENODEV;
 		}
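
This hunk, and several below, replace raw MSR_IA32_MISC_ENABLE bit tests with named constants. For reference, the bit positions implied by the substitutions in this merge (the canonical definitions live in the kernel's MSR headers; these are transcribed from the hunks themselves, not authoritative):

#define MSR_IA32_MISC_ENABLE_TM1			(1ULL << 3)
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE		(1ULL << 9)
#define MSR_IA32_MISC_ENABLE_TM2			(1ULL << 13)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP		(1ULL << 16)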
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea1..6428aa17b40 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1142,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	rc = powernow_k8_cpu_init_acpi(data);
-	if (rc) {
+	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1161,19 +1175,28 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 			"ACPI maintainers and complain to your BIOS "
 			"vendor.\n");
 #endif
-		goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 			       "CPU other than CPU0. Complain to your BIOS "
 			       "vendor.\n");
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			 ((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1227,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-	    + (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
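
The powernow-k8 hunks above compute cpuinfo.transition_latency two ways: a crude PSB-based guess, or the worst-case latency over the ACPI _PSS states, both converted from microseconds to nanoseconds. A worked example with hypothetical rvo/vstable/irt and _PSS values (VST_UNITS_20US is assumed to be 20, the driver's 20 us voltage-stabilization unit):

#include <stdio.h>

#define VST_UNITS_20US 20	/* assumption: stabilization time in 20 us units */

int main(void)
{
	/* PSB path: crude guess, usecs * 1000 -> nanoseconds */
	unsigned int rvo = 2, vstable = 5, irt = 3;
	unsigned int psb_ns = (((rvo + 8) * vstable * VST_UNITS_20US) +
			       ((1 << irt) * 30)) * 1000;

	/* ACPI path: worst (transition + bus master) latency wins */
	unsigned int trans[] = { 100, 150 }, bus[] = { 20, 30 };
	unsigned int i, max = 0;
	for (i = 0; i < 2; i++)
		if (trans[i] + bus[i] > max)
			max = trans[i] + bus[i];

	/* prints: psb guess: 1240000 ns, acpi: 180000 ns */
	printf("psb guess: %u ns, acpi: %u ns\n", psb_ns, 1000 * max);
	return 0;
}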
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f08998278a3..c9f1fdc0283 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	   enable it if not. */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 
-	if (!(l & (1<<16))) {
-		l |= (1<<16);
+	if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
 		dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
 		wrmsr(MSR_IA32_MISC_ENABLE, l, h);
 
 		/* check to see if it stuck */
 		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-		if (!(l & (1<<16))) {
+		if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
 			printk(KERN_INFO PFX
 				"couldn't enable Enhanced SpeedStep\n");
 			return -ENODEV;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ad..25c559ba8d5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,7 +24,6 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
 #endif
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -63,6 +62,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 * may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
@@ -135,10 +146,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-		if ((lo & (1<<9)) == 0) {
+		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
 			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-			lo |= (1<<9);	/* Disable hw prefetching */
+			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
 			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
 		}
 	}
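
The early_init_intel() hunk above replaces the old vendor-wide validate_pat_support() with a single family/model gate: PAT stays available unless the CPU is family 6 below model 15. A quick illustrative check of that predicate (not kernel code):

#include <stdio.h>

static int pat_kept(unsigned int family, unsigned int model)
{
	/* same condition as the hunk, inverted */
	return !(family == 6 && model < 15);
}

int main(void)
{
	printf("Pentium M (6,13): %d\n", pat_kept(6, 13));	/* 0: erratum  */
	printf("Core 2    (6,15): %d\n", pat_kept(6, 15));	/* 1: keeps PAT */
	printf("P4       (15, 2): %d\n", pat_kept(15, 2));	/* 1: keeps PAT */
	return 0;
}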
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 1c838032fd3..fe79985ce0f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 		 * If we know that the error was in user space, send a
 		 * SIGBUS. Otherwise, panic if tolerance is low.
 		 *
-		 * do_exit() takes an awful lot of locks and has a slight
+		 * force_sig() takes an awful lot of locks and has a slight
 		 * risk of deadlocking.
 		 */
 		if (user_space) {
-			do_exit(SIGBUS);
+			force_sig(SIGBUS, current);
 		} else if (panic_on_oops || tolerant < 2) {
 			mce_panic("Uncorrected machine check",
 			    &panicm, mcestart);
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 
 }
 
-static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable);
 static int mce_resume(struct sys_device *dev)
 {
 	mce_init(NULL);
+	mce_cpu_features(&current_cpu_data);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 4772e91e824..9817506dd46 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
-void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
 	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 5e8c79e748a..aa5e287c98e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -31,7 +31,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	irq_exit();
 }
 
-static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int tm2 = 0;
@@ -49,13 +49,13 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
-	if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
+	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG
 		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
 		return;
 	}
 
-	if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
+	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
 		tm2 = 1;
 
 	if (h & APIC_VECTOR_MASK) {
@@ -73,7 +73,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
 
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -85,7 +85,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
-void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 }
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 9b60fce09f7..f53bdcbaf38 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
-	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
+	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
 		       cpu);
 		return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	vendor_thermal_interrupt = intel_thermal_interrupt;
 
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 9abd48b2267..f6c70a164e3 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -19,7 +19,7 @@
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
 
-#include <asm/apic.h>
+#include <asm/genapic.h>
 #include <asm/intel_arch_perfmon.h>
 
 struct nmi_watchdog_ctlblk {