| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-04-19 12:17:34 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-04-19 12:17:34 -0400 |
| commit | cf816ecb533ab96b883dfdc0db174598b5b5c4d2 (patch) | |
| tree | 1b7705db288ae2917105e624b01fdf81e0882bf1 /arch/x86/kernel/cpu/common.c | |
| parent | adf6d34e460387ee3e8f1e1875d52bff51212c7d (diff) | |
| parent | 15f7d677ccff6f0f5de8a1ee43a792567e9f9de9 (diff) | |
Merge branch 'merge-fixes' into devel
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	180
1 file changed, 99 insertions(+), 81 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a38aafaefc23..d999d7833bc2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -62,9 +62,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 * c)
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
 	/* Not much we can do here... */
 	/* Check if at least it has cpuid */
@@ -81,11 +81,11 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init = default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
-	get_option (&str, &cachesize_override);
+	get_option(&str, &cachesize_override);
 	return 1;
 }
 __setup("cachesize=", cachesize_setup);
@@ -107,12 +107,12 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	/* Intel chips right-justify this string for some dumb reason;
 	   undo that brain damage */
 	p = q = &c->x86_model_id[0];
-	while ( *p == ' ' )
+	while (*p == ' ')
 		p++;
-	if ( p != q ) {
-		while ( *p )
+	if (p != q) {
+		while (*p)
 			*q++ = *p++;
-		while ( q <= &c->x86_model_id[48] )
+		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 
@@ -130,7 +130,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24)+(edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
@@ -138,16 +138,16 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	ecx = cpuid_ecx(0x80000006);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
 	if (this_cpu->c_size_cache)
-		l2size = this_cpu->c_size_cache(c,l2size);
+		l2size = this_cpu->c_size_cache(c, l2size);
 
 	/* Allow user to override all this if necessary. */
 	if (cachesize_override != -1)
 		l2size = cachesize_override;
 
-	if ( l2size == 0 )
+	if (l2size == 0)
 		return;		/* Again, no L2 cache is possible */
 
 	c->x86_cache_size = l2size;
@@ -156,16 +156,19 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		       l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
 
 /* Look up CPU names by table lookup. */
 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
-	if ( c->x86_model >= 16 )
+	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
 
 	if (!this_cpu)
@@ -190,9 +193,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (cpu_devs[i]) {
-			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 				c->x86_vendor = i;
 				if (!early)
 					this_cpu = cpu_devs[i];
@@ -210,7 +213,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 }
 
 
-static int __init x86_fxsr_setup(char * s)
+static int __init x86_fxsr_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_FXSR);
 	setup_clear_cpu_cap(X86_FEATURE_XMM);
@@ -219,7 +222,7 @@ static int __init x86_fxsr_setup(char * s)
 __setup("nofxsr", x86_fxsr_setup);
 
 
-static int __init x86_sep_setup(char * s)
+static int __init x86_sep_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SEP);
 	return 1;
@@ -306,14 +309,30 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
 	}
 
-}
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
+}
 
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.
+ * The others are not touched to avoid unwanted side effects.
+ *
+ * WARNING: this function is only called on the BP.  Don't add code here
+ * that is supposed to run on all CPUs.
+ */
 static void __init early_cpu_detect(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -328,19 +347,14 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		early_init_amd(c);
-		break;
-	case X86_VENDOR_INTEL:
-		early_init_intel(c);
-		break;
-	}
+	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+	    cpu_devs[c->x86_vendor]->c_early_init)
+		cpu_devs[c->x86_vendor]->c_early_init(c);
 
 	early_get_cap(c);
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	unsigned int ebx;
@@ -351,13 +365,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		      (unsigned int *)&c->x86_vendor_id[0],
 		      (unsigned int *)&c->x86_vendor_id[8],
 		      (unsigned int *)&c->x86_vendor_id[4]);
 
 		get_cpu_vendor(c, 0);
 		/* Initialize the standard set of capabilities */
 		/* Note that the vendor-specific code below might override */
-
 		/* Intel-defined flags: level 0x00000001 */
-		if ( c->cpuid_level >= 0x00000001 ) {
+		if (c->cpuid_level >= 0x00000001) {
 			u32 capability, excap;
 			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 			c->x86_capability[0] = capability;
@@ -369,12 +382,14 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
 			c->x86_mask = tfms & 15;
+			c->initial_apicid = (ebx >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+			c->apicid = phys_pkg_id(c->initial_apicid, 0);
+			c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = (ebx >> 24) & 0xFF;
+			c->apicid = c->initial_apicid;
 #endif
-			if (c->x86_capability[0] & (1<<19))
+			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
 				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 		} else {
 			/* Have CPUID level 0 only - unheard of */
@@ -383,33 +398,42 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 
 		/* AMD-defined flags: level 0x80000001 */
 		xlvl = cpuid_eax(0x80000000);
-		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-			if ( xlvl >= 0x80000001 ) {
+		if ((xlvl & 0xffff0000) == 0x80000000) {
+			if (xlvl >= 0x80000001) {
 				c->x86_capability[1] = cpuid_edx(0x80000001);
 				c->x86_capability[6] = cpuid_ecx(0x80000001);
 			}
-			if ( xlvl >= 0x80000004 )
+			if (xlvl >= 0x80000004)
 				get_model_name(c); /* Default name */
 		}
 
 		init_scattered_cpuid_features(c);
 	}
 
-#ifdef CONFIG_X86_HT
-	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
 		/* Disable processor serial number */
-		unsigned long lo,hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_bit(X86_FEATURE_PN, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_PN);
 
 		/* Disabling the serial number may affect the cpuid level */
 		c->cpuid_level = cpuid_eax(0);
@@ -444,9 +468,11 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
-		/* First of all, decide if this is a 486 or higher */
-		/* It's a 486 if we can modify the AC flag */
-		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+		/*
+		 * First of all, decide if this is a 486 or higher
+		 * It's a 486 if we can modify the AC flag
+		 */
+		if (flag_is_changeable_p(X86_EFLAGS_AC))
 			c->x86 = 4;
 		else
 			c->x86 = 3;
@@ -479,10 +505,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 
 	/* If the model name is still unset, do table lookup. */
-	if ( !c->x86_model_id[0] ) {
+	if (!c->x86_model_id[0]) {
 		char *p;
 		p = table_lookup_model(c);
-		if ( p )
+		if (p)
 			strcpy(c->x86_model_id, p);
 		else
 			/* Last resort... */
@@ -496,9 +522,9 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * common between the CPUs.  The first time this routine gets
 	 * executed, c == &boot_cpu_data.
 	 */
-	if ( c != &boot_cpu_data ) {
+	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for ( i = 0 ; i < NCAPINTS ; i++ )
+		for (i = 0 ; i < NCAPINTS ; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -542,7 +568,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1 ) {
+	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
 			printk(KERN_WARNING "CPU: Unsupported number of the "
@@ -552,7 +578,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		}
 
 		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
 
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
@@ -563,7 +589,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
 
 		if (c->x86_max_cores > 1)
@@ -597,7 +623,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	else
 		printk("%s", c->x86_model_id);
 
-	if (c->x86_mask || c->cpuid_level >= 0) 
+	if (c->x86_mask || c->cpuid_level >= 0)
 		printk(" stepping %02x\n", c->x86_mask);
 	else
 		printk("\n");
@@ -616,23 +642,15 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-	intel_cpu_init();
-	cyrix_init_cpu();
-	nsc_init_cpu();
-	amd_init_cpu();
-	centaur_init_cpu();
-	transmeta_init_cpu();
-	nexgen_init_cpu();
-	umc_init_cpu();
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start ;
+	     cvdev < __x86cpuvendor_end   ;
+	     cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
 	early_cpu_detect();
 }
 
@@ -666,7 +684,7 @@ void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -692,7 +710,7 @@ void __cpuinit cpu_init(void)
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
-	set_tss_desc(cpu,t);
+	set_tss_desc(cpu, t);
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
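
For context on the new early_cpu_init(): instead of calling each vendor's init function by name, it now walks a table of entries placed between __x86cpuvendor_start[] and __x86cpuvendor_end[]. Below is a minimal sketch of what the registration side could look like. The struct layout is inferred from the cvdev->vendor / cvdev->cpu_dev accesses in the hunk above; the macro name, the linker-section name, and the AMD example are illustrative assumptions, not taken from this commit.

```c
/* Sketch only: one table entry per vendor, collected into a dedicated
 * linker section bounded by __x86cpuvendor_start[] / __x86cpuvendor_end[]. */
struct cpu_vendor_dev {
	int vendor;			/* X86_VENDOR_* index into cpu_devs[] */
	struct cpu_dev *cpu_dev;	/* vendor-specific callbacks */
};

/* Hypothetical registration macro: emits a static table entry into the
 * section that early_cpu_init() iterates over. */
#define cpu_vendor_dev_register(vendor_id, dev)				\
	static struct cpu_vendor_dev __cpu_vendor_dev_##vendor_id	\
	__attribute__((__section__(".x86cpuvendor.init"), __used__)) =	\
	{ vendor_id, dev }

/* A vendor file would then only need to describe itself, e.g.: */
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_early_init	= early_init_amd,	/* picked up by early_cpu_detect() */
	.c_init		= init_amd,
};
cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
```

The effect, as the diff shows, is that common.c no longer needs a hard-coded list of intel_cpu_init()/amd_init_cpu()/... calls: adding a vendor becomes a matter of adding one self-registering entry in that vendor's own file.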