author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-09-04 15:09:44 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-04 15:09:44 -0400
commit		3da99c97763703b23cbf2bd6e96252256d4e4617 (patch)
tree		6253c27954b2a148075d8274257d67582fca3ac9 /arch/x86/kernel
parent		5031088dbc0cd30e893eb27d53f0e0eee6eb1c00 (diff)
x86: make (early)_identify_cpu more the same between 32bit and 64 bit
1. add extended_cpuid_level for 32bit
2. add generic_identify for 64bit
3. add early_identify_cpu for 32bit
4. early_identify_cpu is no longer called by identify_cpu
5. remove the "early" parameter from get_cpu_vendor for 32bit
6. add get_cpu_cap
7. add cpu_detect for 64bit

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
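For reference, the CPUID protocol these helpers wrap is easy to exercise from user space. The sketch below is not part of the patch; it uses GCC/Clang's <cpuid.h> helper instead of the kernel's cpuid() wrapper. It reads leaf 0x80000000 to obtain the maximum extended leaf (the value the patch now caches in c->extended_cpuid_level) and applies the same ">= 0x80000004" guard that get_model_name() uses before reading the brand string.

/*
 * Standalone user-space sketch (not part of the patch): mirrors the
 * extended-leaf check that get_cpu_cap()/get_model_name() rely on.
 * Build with: cc -O2 -o cpuid_demo cpuid_demo.c
 */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, xlvl;
	char brand[49] = { 0 };

	/* Leaf 0x80000000: EAX holds the highest supported extended leaf. */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx))
		return 1;
	xlvl = eax;
	printf("extended cpuid level: 0x%08x\n", xlvl);

	/* Same guard the patch adds to get_model_name(). */
	if (xlvl < 0x80000004)
		return 0;

	/* Leaves 0x80000002..0x80000004 each return 16 bytes of the name. */
	for (unsigned int i = 0; i < 3; i++) {
		__get_cpuid(0x80000002 + i, &eax, &ebx, &ecx, &edx);
		memcpy(brand + 16 * i + 0,  &eax, 4);
		memcpy(brand + 16 * i + 4,  &ebx, 4);
		memcpy(brand + 16 * i + 8,  &ecx, 4);
		memcpy(brand + 16 * i + 12, &edx, 4);
	}
	printf("model name: %s\n", brand);
	return 0;
}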
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/common.c	138
-rw-r--r--	arch/x86/kernel/cpu/common_64.c	139
2 files changed, 141 insertions(+), 136 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8aab8517642..96e1b8698d3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -96,7 +96,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -125,7 +125,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
@@ -186,7 +186,7 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -198,8 +198,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 			c->x86_vendor = i;
-			if (!early)
-				this_cpu = cpu_devs[i];
+			this_cpu = cpu_devs[i];
 			return;
 		}
 	}
@@ -284,34 +283,30 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-		}
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-		}
-
-	}
-
-}
-
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+}
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +316,29 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
  * WARNING: this function is only called on the BP. Don't add code here
  * that is supposed to run on all CPUs.
  */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
 	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
 
 	if (!have_cpuid_p())
 		return;
 
+	c->extended_cpuid_level = 0;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
 	cpu_detect(c);
 
-	get_cpu_vendor(c, 1);
+	get_cpu_vendor(c);
 
-	early_get_cap(c);
+	get_cpu_cap(c);
 
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+
+	validate_pat_support(c);
 }
 
 /*
@@ -373,60 +372,32 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
-
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
+	if (!have_cpuid_p())
+		return;
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = c->initial_apicid;
+		c->apicid = c->initial_apicid;
 #endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
+	}
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
-		}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
-		init_scattered_cpuid_features(c);
-		detect_nopl(c);
-	}
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -651,13 +622,10 @@ void __init early_cpu_init(void)
 {
 	struct cpu_vendor_dev *cvdev;
 
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
 		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
 
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /* Make sure %fs is initialized properly in idle threads */
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 699ecbfb63e..28719fe0794 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -195,6 +195,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
 }
 
 static void __init early_cpu_support_print(void)
@@ -249,56 +250,18 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-{
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
-	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 	      (unsigned int *)&c->x86_vendor_id[0],
 	      (unsigned int *)&c->x86_vendor_id[8],
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c);
-
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
+		u32 junk, tfms, cap0, misc;
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
@@ -306,17 +269,32 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
+		if (cap0 & (1<<19))
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
 		c->x86 = 4;
 	}
+}
+
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+{
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Initialize the standard set of capabilities */
+	/* Note that the vendor-specific code below might override */
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
-#ifdef CONFIG_SMP
-	c->phys_proc_id = c->initial_apicid;
-#endif
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
@@ -325,8 +303,6 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 			c->x86_capability[1] = cpuid_edx(0x80000001);
 			c->x86_capability[6] = cpuid_ecx(0x80000001);
 		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
 	}
 
 	/* Transmeta-defined flags: level 0x80860001 */
@@ -346,8 +322,26 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+}
 
-	detect_nopl(c);
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
@@ -356,6 +350,39 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
+	early_cpu_support_print();
+	early_identify_cpu(&boot_cpu_data);
+}
+
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+{
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
+#ifdef CONFIG_SMP
+	c->phys_proc_id = c->initial_apicid;
+#endif
+
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
+
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -363,9 +390,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);
 