diff options
author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-09-04 15:09:44 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-09-04 15:09:44 -0400 |
commit | 3da99c97763703b23cbf2bd6e96252256d4e4617 (patch) | |
tree | 6253c27954b2a148075d8274257d67582fca3ac9 /arch/x86/kernel/cpu/common.c | |
parent | 5031088dbc0cd30e893eb27d53f0e0eee6eb1c00 (diff) |
x86: make (early)_identify_cpu more similar between 32-bit and 64-bit
1. add extended_cpuid_level for 32bit
2. add generic_identify for 64bit
3. add early_identify_cpu for 32bit
4. early_identify_cpu is no longer called by identify_cpu
5. remove early in get_cpu_vendor for 32bit
6. add get_cpu_cap
7. add cpu_detect for 64bit
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 138 |
1 files changed, 53 insertions, 85 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8aab8517642..96e1b8698d3 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -96,7 +96,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
96 | unsigned int *v; | 96 | unsigned int *v; |
97 | char *p, *q; | 97 | char *p, *q; |
98 | 98 | ||
99 | if (cpuid_eax(0x80000000) < 0x80000004) | 99 | if (c->extended_cpuid_level < 0x80000004) |
100 | return 0; | 100 | return 0; |
101 | 101 | ||
102 | v = (unsigned int *) c->x86_model_id; | 102 | v = (unsigned int *) c->x86_model_id; |
@@ -125,7 +125,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
125 | { | 125 | { |
126 | unsigned int n, dummy, ecx, edx, l2size; | 126 | unsigned int n, dummy, ecx, edx, l2size; |
127 | 127 | ||
128 | n = cpuid_eax(0x80000000); | 128 | n = c->extended_cpuid_level; |
129 | 129 | ||
130 | if (n >= 0x80000005) { | 130 | if (n >= 0x80000005) { |
131 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | 131 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); |
@@ -186,7 +186,7 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | |||
186 | } | 186 | } |
187 | 187 | ||
188 | 188 | ||
189 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 189 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
190 | { | 190 | { |
191 | char *v = c->x86_vendor_id; | 191 | char *v = c->x86_vendor_id; |
192 | int i; | 192 | int i; |
@@ -198,8 +198,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | |||
198 | (cpu_devs[i]->c_ident[1] && | 198 | (cpu_devs[i]->c_ident[1] && |
199 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 199 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
200 | c->x86_vendor = i; | 200 | c->x86_vendor = i; |
201 | if (!early) | 201 | this_cpu = cpu_devs[i]; |
202 | this_cpu = cpu_devs[i]; | ||
203 | return; | 202 | return; |
204 | } | 203 | } |
205 | } | 204 | } |
@@ -284,34 +283,30 @@ void __init cpu_detect(struct cpuinfo_x86 *c) | |||
284 | } | 283 | } |
285 | } | 284 | } |
286 | } | 285 | } |
287 | static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | 286 | |
287 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | ||
288 | { | 288 | { |
289 | u32 tfms, xlvl; | 289 | u32 tfms, xlvl; |
290 | unsigned int ebx; | 290 | u32 ebx; |
291 | 291 | ||
292 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 292 | /* Intel-defined flags: level 0x00000001 */ |
293 | if (have_cpuid_p()) { | 293 | if (c->cpuid_level >= 0x00000001) { |
294 | /* Intel-defined flags: level 0x00000001 */ | 294 | u32 capability, excap; |
295 | if (c->cpuid_level >= 0x00000001) { | 295 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
296 | u32 capability, excap; | 296 | c->x86_capability[0] = capability; |
297 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 297 | c->x86_capability[4] = excap; |
298 | c->x86_capability[0] = capability; | 298 | } |
299 | c->x86_capability[4] = excap; | ||
300 | } | ||
301 | 299 | ||
302 | /* AMD-defined flags: level 0x80000001 */ | 300 | /* AMD-defined flags: level 0x80000001 */ |
303 | xlvl = cpuid_eax(0x80000000); | 301 | xlvl = cpuid_eax(0x80000000); |
304 | if ((xlvl & 0xffff0000) == 0x80000000) { | 302 | c->extended_cpuid_level = xlvl; |
305 | if (xlvl >= 0x80000001) { | 303 | if ((xlvl & 0xffff0000) == 0x80000000) { |
306 | c->x86_capability[1] = cpuid_edx(0x80000001); | 304 | if (xlvl >= 0x80000001) { |
307 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 305 | c->x86_capability[1] = cpuid_edx(0x80000001); |
308 | } | 306 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
309 | } | 307 | } |
310 | |||
311 | } | 308 | } |
312 | |||
313 | } | 309 | } |
314 | |||
315 | /* | 310 | /* |
316 | * Do minimum CPU detection early. | 311 | * Do minimum CPU detection early. |
317 | * Fields really needed: vendor, cpuid_level, family, model, mask, | 312 | * Fields really needed: vendor, cpuid_level, family, model, mask, |
@@ -321,25 +316,29 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | |||
321 | * WARNING: this function is only called on the BP. Don't add code here | 316 | * WARNING: this function is only called on the BP. Don't add code here |
322 | * that is supposed to run on all CPUs. | 317 | * that is supposed to run on all CPUs. |
323 | */ | 318 | */ |
324 | static void __init early_cpu_detect(void) | 319 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
325 | { | 320 | { |
326 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
327 | |||
328 | c->x86_cache_alignment = 32; | 321 | c->x86_cache_alignment = 32; |
329 | c->x86_clflush_size = 32; | 322 | c->x86_clflush_size = 32; |
330 | 323 | ||
331 | if (!have_cpuid_p()) | 324 | if (!have_cpuid_p()) |
332 | return; | 325 | return; |
333 | 326 | ||
327 | c->extended_cpuid_level = 0; | ||
328 | |||
329 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
330 | |||
334 | cpu_detect(c); | 331 | cpu_detect(c); |
335 | 332 | ||
336 | get_cpu_vendor(c, 1); | 333 | get_cpu_vendor(c); |
337 | 334 | ||
338 | early_get_cap(c); | 335 | get_cpu_cap(c); |
339 | 336 | ||
340 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 337 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
341 | cpu_devs[c->x86_vendor]->c_early_init) | 338 | cpu_devs[c->x86_vendor]->c_early_init) |
342 | cpu_devs[c->x86_vendor]->c_early_init(c); | 339 | cpu_devs[c->x86_vendor]->c_early_init(c); |
340 | |||
341 | validate_pat_support(c); | ||
343 | } | 342 | } |
344 | 343 | ||
345 | /* | 344 | /* |
@@ -373,60 +372,32 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
373 | 372 | ||
374 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 373 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
375 | { | 374 | { |
376 | u32 tfms, xlvl; | 375 | if (!have_cpuid_p()) |
377 | unsigned int ebx; | 376 | return; |
378 | 377 | ||
379 | if (have_cpuid_p()) { | 378 | c->extended_cpuid_level = 0; |
380 | /* Get vendor name */ | 379 | |
381 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 380 | cpu_detect(c); |
382 | (unsigned int *)&c->x86_vendor_id[0], | 381 | |
383 | (unsigned int *)&c->x86_vendor_id[8], | 382 | get_cpu_vendor(c); |
384 | (unsigned int *)&c->x86_vendor_id[4]); | 383 | |
385 | 384 | get_cpu_cap(c); | |
386 | get_cpu_vendor(c, 0); | 385 | |
387 | /* Initialize the standard set of capabilities */ | 386 | if (c->cpuid_level >= 0x00000001) { |
388 | /* Note that the vendor-specific code below might override */ | 387 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; |
389 | /* Intel-defined flags: level 0x00000001 */ | ||
390 | if (c->cpuid_level >= 0x00000001) { | ||
391 | u32 capability, excap; | ||
392 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | ||
393 | c->x86_capability[0] = capability; | ||
394 | c->x86_capability[4] = excap; | ||
395 | c->x86 = (tfms >> 8) & 15; | ||
396 | c->x86_model = (tfms >> 4) & 15; | ||
397 | if (c->x86 == 0xf) | ||
398 | c->x86 += (tfms >> 20) & 0xff; | ||
399 | if (c->x86 >= 0x6) | ||
400 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
401 | c->x86_mask = tfms & 15; | ||
402 | c->initial_apicid = (ebx >> 24) & 0xFF; | ||
403 | #ifdef CONFIG_X86_HT | 388 | #ifdef CONFIG_X86_HT |
404 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | 389 | c->apicid = phys_pkg_id(c->initial_apicid, 0); |
405 | c->phys_proc_id = c->initial_apicid; | 390 | c->phys_proc_id = c->initial_apicid; |
406 | #else | 391 | #else |
407 | c->apicid = c->initial_apicid; | 392 | c->apicid = c->initial_apicid; |
408 | #endif | 393 | #endif |
409 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | 394 | } |
410 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; | ||
411 | } else { | ||
412 | /* Have CPUID level 0 only - unheard of */ | ||
413 | c->x86 = 4; | ||
414 | } | ||
415 | 395 | ||
416 | /* AMD-defined flags: level 0x80000001 */ | 396 | if (c->extended_cpuid_level >= 0x80000004) |
417 | xlvl = cpuid_eax(0x80000000); | 397 | get_model_name(c); /* Default name */ |
418 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
419 | if (xlvl >= 0x80000001) { | ||
420 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
421 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
422 | } | ||
423 | if (xlvl >= 0x80000004) | ||
424 | get_model_name(c); /* Default name */ | ||
425 | } | ||
426 | 398 | ||
427 | init_scattered_cpuid_features(c); | 399 | init_scattered_cpuid_features(c); |
428 | detect_nopl(c); | 400 | detect_nopl(c); |
429 | } | ||
430 | } | 401 | } |
431 | 402 | ||
432 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 403 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
@@ -651,13 +622,10 @@ void __init early_cpu_init(void) | |||
651 | { | 622 | { |
652 | struct cpu_vendor_dev *cvdev; | 623 | struct cpu_vendor_dev *cvdev; |
653 | 624 | ||
654 | for (cvdev = __x86cpuvendor_start ; | 625 | for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++) |
655 | cvdev < __x86cpuvendor_end ; | ||
656 | cvdev++) | ||
657 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | 626 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; |
658 | 627 | ||
659 | early_cpu_detect(); | 628 | early_identify_cpu(&boot_cpu_data); |
660 | validate_pat_support(&boot_cpu_data); | ||
661 | } | 629 | } |
662 | 630 | ||
663 | /* Make sure %fs is initialized properly in idle threads */ | 631 | /* Make sure %fs is initialized properly in idle threads */ |