author    Andi Kleen <ak@suse.de>    2008-01-30 07:32:40 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-01-30 07:32:40 -0500
commit    2b16a2353814a513cdb5c5c739b76a19d7ea39ce (patch)
tree      caee8a54a0e85b4839078286114f4c9f920ac89e /arch
parent    68071a96655c883b316da9ef497f6dec8953529f (diff)
x86: move X86_FEATURE_CONSTANT_TSC into early cpu feature detection
The next patch needs this in time_init(), which runs early. This also includes a minor fix on i386 so that early_intel_workarounds() [now renamed early_init_intel()] really executes early, as the comments say.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
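For context, the feature bit being moved corresponds to CPUID leaf 0x80000007, where EDX bit 8 advertises a constant (invariant) TSC; the kernel caches that EDX value in c->x86_power. Below is a minimal, hypothetical user-space sketch of the same probe, using the GCC/Clang <cpuid.h> helpers instead of the kernel's cpuid_eax()/cpuid_edx() wrappers; the program and its output strings are illustrative only and not part of the patch.

/*
 * Sketch: check CPUID leaf 0x80000007, EDX bit 8 (constant/invariant TSC),
 * i.e. the bit the patch tests via c->x86_power & (1<<8).
 * Build with: cc -o tsc_check tsc_check.c
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Highest supported extended leaf, mirrors cpuid_eax(0x80000000). */
	unsigned int max_ext = __get_cpuid_max(0x80000000, NULL);

	if (max_ext >= 0x80000007 &&
	    __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx) &&
	    (edx & (1u << 8)))
		printf("constant TSC advertised (CPUID 0x80000007 EDX bit 8)\n");
	else
		printf("no constant TSC flag\n");

	return 0;
}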
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/amd.c    | 17
-rw-r--r--  arch/x86/kernel/cpu/common.c | 11
-rw-r--r--  arch/x86/kernel/cpu/cpu.h    |  3
-rw-r--r--  arch/x86/kernel/cpu/intel.c  | 13
-rw-r--r--  arch/x86/kernel/setup_64.c   | 39
5 files changed, 59 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index aaa8101d3d80..cd2fe15ff4b5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_broken(void)
 
 int force_mwait __cpuinitdata;
 
+void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+	if (cpuid_eax(0x80000000) >= 0x80000007) {
+		c->x86_power = cpuid_edx(0x80000007);
+		if (c->x86_power & (1<<8))
+			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+	}
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	}
 #endif
 
+	early_init_amd(c);
+
 	/*
 	 * FIXME: We should handle the K5 here. Set up the write
 	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
@@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
 	}
 
-	if (cpuid_eax(0x80000000) >= 0x80000007) {
-		c->x86_power = cpuid_edx(0x80000007);
-		if (c->x86_power & (1<<8))
-			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-	}
-
 #ifdef CONFIG_X86_HT
 	/*
 	 * On a AMD multi core setup the lower bits of the APIC id
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e48832a6c2a9..dbb9142a8241 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -307,6 +307,15 @@ static void __init early_cpu_detect(void)
 	cpu_detect(c);
 
 	get_cpu_vendor(c, 1);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		early_init_amd(c);
+		break;
+	case X86_VENDOR_INTEL:
+		early_init_intel(c);
+		break;
+	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
@@ -364,8 +373,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		init_scattered_cpuid_features(c);
 	}
 
-	early_intel_workaround(c);
-
 #ifdef CONFIG_X86_HT
 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2f6432cef6ff..ad6527a5beb1 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -24,5 +24,6 @@ extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
-extern void early_intel_workaround(struct cpuinfo_x86 *c);
+extern void early_init_intel(struct cpuinfo_x86 *c);
+extern void early_init_amd(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5731de3e1bd1..f1136115279a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,13 +29,14 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
+void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor != X86_VENDOR_INTEL)
-		return;
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 }
 
 /*
@@ -115,6 +116,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	unsigned int l2 = 0;
 	char *p = NULL;
 
+	early_init_intel(c);
+
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
@@ -210,10 +213,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	}
 	if (c->x86 == 6)
 		set_bit(X86_FEATURE_P3, c->x86_capability);
-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-
 	if (cpu_has_ds) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 8618178db842..3cae326093cb 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -544,9 +544,6 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 			c->x86_cache_size, ecx & 0xFF);
 	}
-
-	if (n >= 0x80000007)
-		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
 	if (n >= 0x80000008) {
 		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
 		c->x86_virt_bits = (eax >> 8) & 0xff;
@@ -624,7 +621,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned bits, ecx;
@@ -682,6 +679,15 @@ static __cpuinit int amd_apic_timer_broken(void)
 	return 0;
 }
 
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+	early_init_amd_mc(c);
+
+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+	if (c->x86_power & (1<<8))
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	unsigned level;
@@ -731,10 +737,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	}
 	display_cacheinfo(c);
 
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
 	/* Multi core CPU? */
 	if (c->extended_cpuid_level >= 0x80000008)
 		amd_detect_cmp(c);
@@ -845,6 +847,13 @@ static void srat_detect_node(void)
 #endif
 }
 
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+{
+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+}
+
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
@@ -1056,6 +1065,20 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
+
+	c->extended_cpuid_level = cpuid_eax(0x80000000);
+
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		early_init_amd(c);
+		break;
+	case X86_VENDOR_INTEL:
+		early_init_intel(c);
+		break;
+	}
 }
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)