author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-09-09 19:40:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-10 02:21:03 -0400
commit		185f3b9da24c09c26b784591ed354fe57998a7b1 (patch)
tree		7bfad56989207414b752ea212ce2e705aeceb8c7 /arch/x86/kernel/cpu/intel.c
parent		81faaae45701484bd7368336e02f2a846153b22f (diff)
x86: make intel.c have 64-bit support code
Prepare for unification of the 32-bit and 64-bit Intel CPU setup code.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--	arch/x86/kernel/cpu/intel.c	119
1 file changed, 83 insertions(+), 36 deletions(-)
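The approach taken here: rather than keeping the 64-bit logic in a separate file, the 64-bit-only paths move into the shared intel.c behind CONFIG_X86_64 guards, so one function body carries both variants and the two files can later be unified. A minimal sketch of that pattern in the style of early_init_intel() below, assuming simplified stand-ins for struct cpuinfo_x86 and the kernel's feature-flag helpers (all names here are illustrative, not the kernel's):

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct cpuinfo_x86. */
struct cpuinfo {
	int family;
	int cache_alignment;
	int sysenter32;
};

static void early_init_intel_sketch(struct cpuinfo *c)
{
#ifdef CONFIG_X86_64
	/* 64-bit-only path: SYSENTER is usable by 32-bit compat tasks. */
	c->sysenter32 = 1;
#else
	/* 32-bit-only quirk: Netburst reports a 64-byte clflush size
	   but does I/O in 128-byte chunks. */
	if (c->family == 15 && c->cache_alignment == 64)
		c->cache_alignment = 128;
#endif
}

int main(void)
{
	struct cpuinfo c = { .family = 15, .cache_alignment = 64 };

	early_init_intel_sketch(&c);
	printf("cache alignment: %d\n", c.cache_alignment);
	return 0;
}

Built without -DCONFIG_X86_64 this prints 128 (the Netburst quirk fires); built with it, the 64-bit branch runs instead. The same guard-in-one-file structure is what the hunks below apply to the real early_init_intel() and init_intel().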
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a66989586a84..365a008080c2 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -15,6 +15,11 @@
 #include <asm/ds.h>
 #include <asm/bugs.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/topology.h>
+#include <asm/numa_64.h>
+#endif
+
 #include "cpu.h"
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -25,14 +30,20 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
-	if (c->x86 == 15 && c->x86_cache_alignment == 64)
-		c->x86_cache_alignment = 128;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#else
+	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+		c->x86_cache_alignment = 128;
+#endif
 }
 
+#ifdef CONFIG_X86_32
 /*
  * Early probe support logic for ppro memory erratum #50
  *
@@ -73,6 +84,40 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 }
 
 
+
+#ifdef CONFIG_X86_F00F_BUG
+static void __cpuinit trap_init_f00f_bug(void)
+{
+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+	/*
+	 * Update the IDT descriptor and reload the IDT so that
+	 * it uses the read-only mapped virtual address.
+	 */
+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+	load_idt(&idt_descr);
+}
+#endif
+#endif
+
+static void __cpuinit srat_detect_node(void)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	unsigned node;
+	int cpu = smp_processor_id();
+	int apicid = hard_smp_processor_id();
+
+	/* Don't do the funky fallback heuristics the AMD version employs
+	   for now. */
+	node = apicid_to_node[apicid];
+	if (node == NUMA_NO_NODE || !node_online(node))
+		node = first_node(node_online_map);
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
 /*
  * find out the number of processor cores on the die
  */
@@ -91,20 +136,6 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-#ifdef CONFIG_X86_F00F_BUG
-static void __cpuinit trap_init_f00f_bug(void)
-{
-	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-	/*
-	 * Update the IDT descriptor and reload the IDT so that
-	 * it uses the read-only mapped virtual address.
-	 */
-	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-	load_idt(&idt_descr);
-}
-#endif
-
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
@@ -139,6 +170,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
+#ifdef CONFIG_X86_32
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
@@ -176,18 +208,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	if (p)
 		strcpy(c->x86_model_id, p);
 
-	detect_extended_topology(c);
-
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-		detect_ht(c);
-	}
-
-	/* Work around errata */
 	Intel_errata_workarounds(c);
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
@@ -208,12 +228,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	}
 #endif
 
+#endif
+
 	if (cpu_has_xmm2)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	if (c->x86 == 15)
-		set_cpu_cap(c, X86_FEATURE_P4);
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_P3);
 	if (cpu_has_ds) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
@@ -224,6 +242,17 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+#ifdef CONFIG_X86_64
+	if (c->x86 == 15)
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
+	if (c->x86 == 15)
+		set_cpu_cap(c, X86_FEATURE_P4);
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_P3);
+
 	if (cpu_has_bts)
 		ptrace_bts_init_intel(c);
 
@@ -240,8 +269,25 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
 	numaq_tsc_disable();
 #endif
+#endif
+
+	detect_extended_topology(c);
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
+	/* Work around errata */
+	srat_detect_node();
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
@@ -254,10 +300,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 		size = 256;
 	return size;
 }
+#endif
 
 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
 		  {
@@ -307,13 +355,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 		  }
 		},
 	},
+	.c_size_cache	= intel_size_cache,
+#endif
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
-	.c_size_cache	= intel_size_cache,
 	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
 cpu_dev_register(intel_cpu_dev);
 
-/* arch_initcall(intel_cpu_init); */
-
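A note on the newly added srat_detect_node(): it maps the CPU's APIC ID to a NUMA node via the SRAT-derived apicid_to_node[] table, and falls back to the first online node when the table has no usable entry. A standalone sketch of that fallback rule, with hypothetical stand-ins for the kernel's apicid_to_node[] table, node_online() and first_node():

#include <stdio.h>

#define NUMA_NO_NODE	(-1)
#define MAX_APICS	256

/* Hypothetical stand-ins for the kernel's SRAT-derived state:
   bit n of node_online_map set means node n is online. */
static int apicid_to_node[MAX_APICS];
static unsigned long node_online_map = 0x1;

static int node_online(int node)
{
	return node >= 0 && ((node_online_map >> node) & 1);
}

static int first_node(unsigned long map)
{
	int n;

	for (n = 0; n < (int)(8 * sizeof(map)); n++)
		if ((map >> n) & 1)
			return n;
	return NUMA_NO_NODE;
}

/* Same rule as the patch: trust the SRAT mapping when it names an
   online node, otherwise fall back to the first online node. */
static int srat_detect_node_sketch(int apicid)
{
	int node = apicid_to_node[apicid];

	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	return node;
}

int main(void)
{
	int apicid;

	for (apicid = 0; apicid < MAX_APICS; apicid++)
		apicid_to_node[apicid] = NUMA_NO_NODE;	/* no SRAT entry */

	printf("APIC 0 -> node %d\n", srat_detect_node_sketch(0));
	return 0;
}

With no SRAT entry this prints node 0, matching the patch's behavior of parking CPUs on the first online node rather than guessing (the comment in the patch notes it deliberately skips the AMD code's fallback heuristics).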