Diffstat (limited to 'arch/x86/kernel/setup_64.c')
 arch/x86/kernel/setup_64.c | 555
 1 file changed, 309 insertions(+), 246 deletions(-)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 30d94d1d5f5f..77fb87bf6e5a 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -30,6 +30,7 @@
 #include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/pci.h>
+#include <linux/efi.h>
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/edd.h>
@@ -39,10 +40,13 @@
 #include <linux/dmi.h>
 #include <linux/dma-mapping.h>
 #include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/init_ohci1394_dma.h>
 
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <asm/vsyscall.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/msr.h>
@@ -50,6 +54,7 @@
 #include <video/edid.h>
 #include <asm/e820.h>
 #include <asm/dma.h>
+#include <asm/gart.h>
 #include <asm/mpspec.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
@@ -59,6 +64,15 @@
 #include <asm/sections.h>
 #include <asm/dmi.h>
 #include <asm/cacheflush.h>
+#include <asm/mce.h>
+#include <asm/ds.h>
+#include <asm/topology.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define ARCH_SETUP
+#endif
 
 /*
  * Machine setup..
@@ -67,6 +81,8 @@
 struct cpuinfo_x86 boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
 unsigned long mmu_cr4_features;
 
 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
@@ -76,7 +92,7 @@ unsigned long saved_video_mode;
 
 int force_mwait __cpuinitdata;
 
-/* 
+/*
  * Early DMI memory
  */
 int dmi_alloc_index;
@@ -122,25 +138,27 @@ struct resource standard_io_resources[] = {
 
 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
 
-struct resource data_resource = {
+static struct resource data_resource = {
 	.name = "Kernel data",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
-struct resource code_resource = {
+static struct resource code_resource = {
 	.name = "Kernel code",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
-struct resource bss_resource = {
+static struct resource bss_resource = {
 	.name = "Kernel bss",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
 
+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
+
 #ifdef CONFIG_PROC_VMCORE
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel. This option will be passed
@@ -166,12 +184,12 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
 	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
 	if (bootmap == -1L)
-		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
 	e820_register_active_regions(0, start_pfn, end_pfn);
 	free_bootmem_with_active_regions(0, end_pfn);
 	reserve_bootmem(bootmap, bootmap_size);
 }
 #endif
 
 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
@@ -205,7 +223,8 @@ static void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
+	free_mem =
+		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
 
 	ret = parse_crashkernel(boot_command_line, free_mem,
 			&crash_size, &crash_base);
@@ -229,33 +248,21 @@ static inline void __init reserve_crashkernel(void)
 {}
 #endif
 
-#define EBDA_ADDR_POINTER 0x40E
-
-unsigned __initdata ebda_addr;
-unsigned __initdata ebda_size;
-
-static void discover_ebda(void)
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) __init memory_setup(void)
 {
-	/*
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E
-	 */
-	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-	ebda_addr <<= 4;
-
-	ebda_size = *(unsigned short *)__va(ebda_addr);
-
-	/* Round EBDA up to pages */
-	if (ebda_size == 0)
-		ebda_size = 1;
-	ebda_size <<= 10;
-	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-	if (ebda_size > 64*1024)
-		ebda_size = 64*1024;
+	machine_specific_memory_setup();
 }
 
+/*
+ * setup_arch - architecture-specific boot-time initializations
+ *
+ * Note: On x86_64, fixmaps are ready for use even before this is called.
+ */
 void __init setup_arch(char **cmdline_p)
 {
+	unsigned i;
+
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 
 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
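
A note on the hunk above (editorial sketch, not part of the commit): the new memory_setup() is declared with GCC weak linkage, so this default definition is used unless a stronger definition is linked in (the paravirt case). A minimal standalone model of that pattern, where the function body is a stand-in rather than kernel code:

#include <stdio.h>

/* Default (weak) definition; any non-weak definition elsewhere wins at link time. */
void __attribute__((weak)) memory_setup(void)
{
	puts("default: machine_specific_memory_setup()");
}

int main(void)
{
	memory_setup();	/* resolves to the strong override when one is linked in */
	return 0;
}

Compiling another object file that defines a non-weak memory_setup() and linking it in silently replaces the default, which is exactly how the paravirt build supplies its own implementation.
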
@@ -269,7 +276,15 @@ void __init setup_arch(char **cmdline_p)
 	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
 #endif
-	setup_memory_region();
+#ifdef CONFIG_EFI
+	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+		     "EL64", 4))
+		efi_enabled = 1;
+#endif
+
+	ARCH_SETUP
+
+	memory_setup();
 	copy_edd();
 
 	if (!boot_params.hdr.root_flags)
@@ -293,27 +308,47 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	if (init_ohci1394_dma_early)
+		init_ohci1394_dma_on_all_controllers();
+#endif
+
 	finish_e820_parsing();
 
+	early_gart_iommu_check();
+
 	e820_register_active_regions(0, 0, -1UL);
 	/*
 	 * partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
 	end_pfn = e820_end_of_ram();
+	/* update e820 for memory not covered by WB MTRRs */
+	mtrr_bp_init();
+	if (mtrr_trim_uncached_memory(end_pfn)) {
+		e820_register_active_regions(0, 0, -1UL);
+		end_pfn = e820_end_of_ram();
+	}
+
 	num_physpages = end_pfn;
 
 	check_efer();
 
-	discover_ebda();
-
 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+	if (efi_enabled)
+		efi_init();
 
 	dmi_scan_machine();
 
+	io_delay_init();
+
 #ifdef CONFIG_SMP
-	/* setup to use the static apicid table during kernel startup */
-	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
+	/* setup to use the early static init tables during kernel startup */
+	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
+	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
+#endif
 #endif
 
 #ifdef CONFIG_ACPI
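
The MTRR addition in the hunk above has an ordering point worth spelling out: if mtrr_trim_uncached_memory() shrinks the e820 map, the active regions and end_pfn must both be recomputed from the trimmed map. A hedged userspace sketch of that recompute-on-trim shape, with all helpers as stand-ins rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

static unsigned long ram_pfns;	/* stand-in for the e820-derived page count */

static unsigned long e820_end_of_ram(void) { return ram_pfns; }

/* Pretend the top 16 MB of RAM had no write-back MTRR coverage. */
static bool mtrr_trim_uncached_memory(unsigned long end_pfn)
{
	ram_pfns = end_pfn - (16u << (20 - 12));
	return true;	/* something was trimmed */
}

int main(void)
{
	unsigned long end_pfn;

	ram_pfns = 1ul << (32 - 12);	/* 4 GB in 4 KB pages */
	end_pfn = e820_end_of_ram();
	if (mtrr_trim_uncached_memory(end_pfn))
		end_pfn = e820_end_of_ram();	/* re-derive after the trim */
	printf("end_pfn = %lu\n", end_pfn);
	return 0;
}
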
@@ -340,48 +375,26 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifdef CONFIG_NUMA
 	numa_initmem_init(0, end_pfn);
 #else
 	contig_initmem_init(0, end_pfn);
 #endif
 
-	/* Reserve direct mapping */
-	reserve_bootmem_generic(table_start << PAGE_SHIFT,
-				(table_end - table_start) << PAGE_SHIFT);
-
-	/* reserve kernel */
-	reserve_bootmem_generic(__pa_symbol(&_text),
-				__pa_symbol(&_end) - __pa_symbol(&_text));
+	early_res_to_bootmem();
 
+#ifdef CONFIG_ACPI_SLEEP
 	/*
-	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-	 * enabling clean reboots, SMP operation, laptop functions.
+	 * Reserve low memory region for sleep support.
 	 */
-	reserve_bootmem_generic(0, PAGE_SIZE);
-
-	/* reserve ebda region */
-	if (ebda_addr)
-		reserve_bootmem_generic(ebda_addr, ebda_size);
-#ifdef CONFIG_NUMA
-	/* reserve nodemap region */
-	if (nodemap_addr)
-		reserve_bootmem_generic(nodemap_addr, nodemap_size);
+	acpi_reserve_bootmem();
 #endif
 
-#ifdef CONFIG_SMP
-	/* Reserve SMP trampoline */
-	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
-#endif
+	if (efi_enabled)
+		efi_reserve_bootmem();
 
-#ifdef CONFIG_ACPI_SLEEP
 	/*
-	 * Reserve low memory region for sleep support.
+	 * Find and reserve possible boot-time SMP configuration:
 	 */
-	acpi_reserve_bootmem();
-#endif
-	/*
-	 * Find and reserve possible boot-time SMP configuration:
-	 */
 	find_smp_config();
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
@@ -395,6 +408,8 @@ void __init setup_arch(char **cmdline_p)
 		initrd_start = ramdisk_image + PAGE_OFFSET;
 		initrd_end = initrd_start+ramdisk_size;
 	} else {
+		/* Assumes everything on node 0 */
+		free_bootmem(ramdisk_image, ramdisk_size);
 		printk(KERN_ERR "initrd extends beyond end of memory "
 		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
 		       ramdisk_end, end_of_mem);
@@ -404,17 +419,10 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	reserve_crashkernel();
 	paging_init();
+	map_vsyscall();
 
-#ifdef CONFIG_PCI
 	early_quirks();
-#endif
 
-	/*
-	 * set this early, so we dont allocate cpu0
-	 * if MADT list doesnt list BSP first
-	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
-	 */
-	cpu_set(0, cpu_present_map);
 #ifdef CONFIG_ACPI
 	/*
 	 * Read APIC and some other early information from ACPI tables.
@@ -430,25 +438,24 @@ void __init setup_arch(char **cmdline_p)
 	if (smp_found_config)
 		get_smp_config();
 	init_apic_mappings();
+	ioapic_init_mappings();
 
 	/*
 	 * We trust e820 completely. No explicit ROM probing in memory.
 	 */
-	e820_reserve_resources();
+	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
 	e820_mark_nosave_regions();
 
-	{
-		unsigned i;
-		/* request I/O space for devices used on all i[345]86 PCs */
-		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-			request_resource(&ioport_resource, &standard_io_resources[i]);
-	}
+	/* request I/O space for devices used on all i[345]86 PCs */
+	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+		request_resource(&ioport_resource, &standard_io_resources[i]);
 
 	e820_setup_gap();
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
-	conswitchp = &vga_con;
+	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+		conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
 	conswitchp = &dummy_con;
 #endif
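
On the VGA-console guard just above (editorial sketch): on EFI systems the legacy VGA window at physical 0xa0000 may be plain conventional RAM, in which case vga_con must not claim it. A boolean model of the condition, where the memory-type lookup is a placeholder rather than the kernel's efi_mem_type():

#include <stdbool.h>
#include <stdio.h>

enum { EFI_CONVENTIONAL_MEMORY = 7 };	/* EfiConventionalMemory in the EFI spec */

int main(void)
{
	bool efi_enabled = true;
	int mem_type_at_a0000 = EFI_CONVENTIONAL_MEMORY;	/* pretend query result */
	bool use_vga_con =
		!efi_enabled || mem_type_at_a0000 != EFI_CONVENTIONAL_MEMORY;

	printf("conswitchp = %s\n", use_vga_con ? "&vga_con" : "(unset)");
	return 0;
}
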
@@ -479,9 +486,10 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
+		       "D cache %dK (%d bytes/line)\n",
+		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
@@ -495,11 +503,8 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 	       c->x86_cache_size, ecx & 0xFF);
 	}
-
-	if (n >= 0x80000007)
-		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
 	if (n >= 0x80000008) {
 		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
@@ -508,14 +513,15 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 static int nearby_node(int apicid)
 {
-	int i;
+	int i, node;
+
 	for (i = apicid - 1; i >= 0; i--) {
-		int node = apicid_to_node[i];
+		node = apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		int node = apicid_to_node[i];
+		node = apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
@@ -527,7 +533,7 @@ static int nearby_node(int apicid)
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
  */
-static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned bits;
@@ -536,7 +542,54 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 	int node = 0;
 	unsigned apicid = hard_smp_processor_id();
 #endif
-	unsigned ecx = cpuid_ecx(0x80000008);
+	bits = c->x86_coreid_bits;
+
+	/* Low order bits define the core id (index of core in socket) */
+	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
+	/* Convert the APIC ID into the socket ID */
+	c->phys_proc_id = phys_pkg_id(bits);
+
+#ifdef CONFIG_NUMA
+	node = c->phys_proc_id;
+	if (apicid_to_node[apicid] != NUMA_NO_NODE)
+		node = apicid_to_node[apicid];
+	if (!node_online(node)) {
+		/* Two possibilities here:
+		   - The CPU is missing memory and no node was created.
+		     In that case try picking one from a nearby CPU
+		   - The APIC IDs differ from the HyperTransport node IDs
+		     which the K8 northbridge parsing fills in.
+		     Assume they are all increased by a constant offset,
+		     but in the same order as the HT nodeids.
+		     If that doesn't result in a usable node fall back to the
+		     path for the previous case. */
+
+		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
+
+		if (ht_nodeid >= 0 &&
+		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = apicid_to_node[ht_nodeid];
+		/* Pick a nearby node */
+		if (!node_online(node))
+			node = nearby_node(apicid);
+	}
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+#endif
+}
+
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned bits, ecx;
+
+	/* Multi core CPU? */
+	if (c->extended_cpuid_level < 0x80000008)
+		return;
+
+	ecx = cpuid_ecx(0x80000008);
 
 	c->x86_max_cores = (ecx & 0xff) + 1;
 
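
What the reworked amd_detect_cmp() computes, in isolation (editorial sketch with made-up values): given x86_coreid_bits = log2(cores per package), the low bits of the initial APIC id select the core within a socket, and the remaining high bits identify the socket:

#include <stdio.h>

int main(void)
{
	unsigned bits = 2;	/* x86_coreid_bits, derived from CPUID 0x80000008 */
	unsigned apicid = 0xb;	/* example initial APIC id */
	unsigned core_id = apicid & ((1u << bits) - 1);	/* core within the socket */
	unsigned pkg_id = apicid >> bits;		/* socket, as phys_pkg_id() */

	printf("apicid %#x -> package %u, core %u\n", apicid, pkg_id, core_id);
	return 0;
}
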
@@ -549,37 +602,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 		bits++;
 	}
 
-	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-	/* Convert the APIC ID into the socket ID */
-	c->phys_proc_id = phys_pkg_id(bits);
-
-#ifdef CONFIG_NUMA
-	node = c->phys_proc_id;
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
-	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case. */
-		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
-		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
-		/* Pick a nearby node */
-		if (!node_online(node))
-			node = nearby_node(apicid);
-	}
-	numa_set_node(cpu, node);
+	c->x86_coreid_bits = bits;
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
 #endif
 }
 
@@ -595,8 +619,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 /* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
 static __cpuinit int amd_apic_timer_broken(void)
 {
-	u32 lo, hi;
-	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+
 	switch (eax & CPUID_XFAM) {
 	case CPUID_XFAM_K8:
 		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
@@ -614,6 +638,15 @@ static __cpuinit int amd_apic_timer_broken(void)
 	return 0;
 }
 
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+	early_init_amd_mc(c);
+
+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+	if (c->x86_power & (1<<8))
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	unsigned level;
@@ -624,7 +657,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/*
 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
-	 * 
+	 *
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
@@ -637,35 +670,32 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, &c->x86_capability);
+	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
 
 	/* On C+ stepping K8 rep microcode works well for copy/memset */
 	level = cpuid_eax(1);
-	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
+			     level >= 0x0f58))
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	level = get_model_name(c);
 	if (!level) {
 		switch (c->x86) {
 		case 15:
 			/* Should distinguish Models here, but this is only
 			   a fallback anyways. */
 			strcpy(c->x86_model_id, "Hammer");
 			break;
 		}
 	}
 	display_cacheinfo(c);
 
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-
 	/* Multi core CPU? */
 	if (c->extended_cpuid_level >= 0x80000008)
 		amd_detect_cmp(c);
@@ -677,41 +707,38 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 
 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_K8, &c->x86_capability);
-
-	/* RDTSC can be speculated around */
-	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 
-	/* Family 10 doesn't support C states in MWAIT so don't use it */
-	if (c->x86 == 0x10 && !force_mwait)
-		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
+	/* MFENCE stops RDTSC speculation */
+	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 
 	if (amd_apic_timer_broken())
 		disable_apic_timer = 1;
 }
 
-static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1 ) {
+	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of "
+				"siblings %d", smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -721,7 +748,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-	index_msb = get_count_order(smp_num_siblings) ;
+	index_msb = get_count_order(smp_num_siblings);
 
 	core_bits = get_count_order(c->x86_max_cores);
 
@@ -730,8 +757,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	}
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
 	}
 
 #endif
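
For reference, the sibling arithmetic used by detect_ht() above: CPUID leaf 1 reports logical processors per package, which is split into thread and core index fields with get_count_order(), i.e. ceil(log2(n)). A userspace rendering (editorial sketch) with an inline reimplementation of that helper:

#include <stdio.h>

static int get_count_order(unsigned int n)	/* ceil(log2(n)) for n >= 1 */
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned smp_num_siblings = 4;	/* logical CPUs per package (CPUID.1 EBX[23:16]) */
	unsigned x86_max_cores = 2;	/* cores per package */
	unsigned threads = smp_num_siblings / x86_max_cores;
	int index_msb = get_count_order(threads);
	int core_bits = get_count_order(x86_max_cores);

	printf("thread field: %d bit(s), core field: %d bit(s)\n",
	       index_msb, core_bits);
	return 0;
}
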
@@ -773,28 +802,39 @@ static void srat_detect_node(void)
 #endif
 }
 
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+{
+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+}
+
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned n;
 
 	init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9 ) {
+	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	if (cpu_has_ds) {
 		unsigned int l1, l2;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
+
+	if (cpu_has_bts)
+		ds_init_intel(c);
+
 	n = c->extended_cpuid_level;
 	if (n >= 0x80000008) {
 		unsigned eax = cpuid_eax(0x80000008);
@@ -811,14 +851,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	if (c->x86 == 6)
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-	if (c->x86 == 15)
-		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-	else
-		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-	c->x86_max_cores = intel_num_cpu_cores(c);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+	c->x86_max_cores = intel_num_cpu_cores(c);
 
 	srat_detect_node();
 }
@@ -835,18 +872,12 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 }
 
-struct cpu_model_info {
-	int vendor;
-	int family;
-	char *model_names[16];
-};
-
 /* Do some early cpuid on the boot CPU to get some parameter that are
    needed before check_bugs. Everything advanced is in identify_cpu
    below. */
-void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	u32 tfms;
+	u32 tfms, xlvl;
 
 	c->loops_per_jiffy = loops_per_jiffy;
 	c->x86_cache_size = -1;
@@ -857,6 +888,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_clflush_size = 64;
 	c->x86_cache_alignment = c->x86_clflush_size;
 	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
 	c->extended_cpuid_level = 0;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
@@ -865,7 +897,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[0],
 	      (unsigned int *)&c->x86_vendor_id[8],
 	      (unsigned int *)&c->x86_vendor_id[4]);
-	
+
 	get_cpu_vendor(c);
 
 	/* Initialize the standard set of capabilities */
@@ -883,7 +915,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (c->x86_capability[0] & (1<<19)) 
+		if (c->x86_capability[0] & (1<<19))
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
@@ -893,18 +925,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
-}
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-{
-	int i;
-	u32 xlvl;
-
-	early_identify_cpu(c);
-
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
@@ -925,6 +945,30 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_capability[2] = cpuid_edx(0x80860001);
 	}
 
+	c->extended_cpuid_level = cpuid_eax(0x80000000);
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		early_init_amd(c);
+		break;
+	case X86_VENDOR_INTEL:
+		early_init_intel(c);
+		break;
+	}
+
+}
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+{
+	int i;
+
+	early_identify_cpu(c);
+
 	init_scattered_cpuid_features(c);
 
 	c->apicid = phys_pkg_id(0);
@@ -954,8 +998,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		break;
 	}
 
-	select_idle_routine(c);
-	detect_ht(c);
+	detect_ht(c);
 
 	/*
 	 * On SMP, boot_cpu_data holds the common feature set between
@@ -965,32 +1008,56 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
+	/* Clear all flags overriden by options */
+	for (i = 0; i < NCAPINTS; i++)
+		c->x86_capability[i] ^= cleared_cpu_caps[i];
+
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	select_idle_routine(c);
+
 	if (c != &boot_cpu_data)
 		mtrr_ap_init();
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
+
+}
+
+static __init int setup_noclflush(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+	return 1;
 }
-
+__setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
-		printk("%s", c->x86_model_id);
+		printk(KERN_INFO "%s", c->x86_model_id);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 }
 
+static __init int setup_disablecpuid(char *arg)
+{
+	int bit;
+	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+		setup_clear_cpu_cap(bit);
+	else
+		return 0;
+	return 1;
+}
+__setup("clearcpuid=", setup_disablecpuid);
+
 /*
  * Get CPU information for use by the procfs.
  */
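
How the new noclflush/clearcpuid= machinery above fits together, as an editorial sketch: the __setup handlers record bits in cleared_cpu_caps[], and identify_cpu() later XORs those words into each CPU's capability array, turning off bits that were set. This toy version mirrors that XOR (values are examples only):

#include <stdio.h>

#define NCAPINTS 8	/* capability words, as in the kernel */

static unsigned int cleared_cpu_caps[NCAPINTS];

static void setup_clear_cpu_cap(int bit)
{
	cleared_cpu_caps[bit / 32] |= 1u << (bit % 32);
}

int main(void)
{
	unsigned int x86_capability[NCAPINTS] = { 0x000a094b };	/* example word 0 */
	int i;

	setup_clear_cpu_cap(19);	/* e.g. X86_FEATURE_CLFLSH = word 0, bit 19 */
	for (i = 0; i < NCAPINTS; i++)
		x86_capability[i] ^= cleared_cpu_caps[i];	/* XOR turns off bits that were set */
	printf("word 0: %#x\n", x86_capability[0]);
	return 0;
}

Note that XOR clears a bit only if the CPU actually reported it; a bit requested for clearing but never set would be toggled on, so the option is only meaningful for advertised features.
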
@@ -998,9 +1065,9 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
-	int cpu = 0;
+	int cpu = 0, i;
 
 	/*
 	 * These flag bits must match the definitions in <asm/cpufeature.h>.
 	 * NULL means this bit is undefined or reserved; either way it doesn't
 	 * have meaning as far as Linux is concerned. Note that it's important
@@ -1010,10 +1077,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	 */
 	static const char *const x86_cap_flags[] = {
 		/* Intel-defined */
-	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
+		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
 
 		/* AMD-defined */
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1080,34 +1147,35 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	cpu = c->cpu_index;
 #endif
 
-	seq_printf(m,"processor\t: %u\n"
+	seq_printf(m, "processor\t: %u\n"
 		   "vendor_id\t: %s\n"
 		   "cpu family\t: %d\n"
 		   "model\t\t: %d\n"
 		   "model name\t: %s\n",
 		   (unsigned)cpu,
 		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
 		   c->x86,
 		   (int)c->x86_model,
 		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
 	if (c->x86_mask || c->cpuid_level >= 0)
 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
 	else
 		seq_printf(m, "stepping\t: unknown\n");
 
-	if (cpu_has(c,X86_FEATURE_TSC)) {
+	if (cpu_has(c, X86_FEATURE_TSC)) {
 		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
+
 		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
 			   freq / 1000, (freq % 1000));
 	}
 
 	/* Cache size */
 	if (c->x86_cache_size >= 0)
 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
 
 #ifdef CONFIG_SMP
 	if (smp_num_siblings * c->x86_max_cores > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
@@ -1116,48 +1184,43 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	}
 #endif
 
 	seq_printf(m,
 		"fpu\t\t: yes\n"
 		"fpu_exception\t: yes\n"
 		"cpuid level\t: %d\n"
 		"wp\t\t: yes\n"
 		"flags\t\t:",
 		c->cpuid_level);
 
-	{
-		int i;
-		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-				seq_printf(m, " %s", x86_cap_flags[i]);
-	}
-
+	for (i = 0; i < 32*NCAPINTS; i++)
+		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
+			seq_printf(m, " %s", x86_cap_flags[i]);
+
 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
 		   c->loops_per_jiffy/(500000/HZ),
 		   (c->loops_per_jiffy/(5000/HZ)) % 100);
 
 	if (c->x86_tlbsize > 0)
 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
 	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
 
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
 		   c->x86_phys_bits, c->x86_virt_bits);
 
 	seq_printf(m, "power management:");
-	{
-		unsigned i;
-		for (i = 0; i < 32; i++)
-			if (c->x86_power & (1 << i)) {
-				if (i < ARRAY_SIZE(x86_power_flags) &&
-				    x86_power_flags[i])
-					seq_printf(m, "%s%s",
-						   x86_power_flags[i][0]?" ":"",
-						   x86_power_flags[i]);
-				else
-					seq_printf(m, " [%d]", i);
-			}
-	}
+	for (i = 0; i < 32; i++) {
+		if (c->x86_power & (1 << i)) {
+			if (i < ARRAY_SIZE(x86_power_flags) &&
+			    x86_power_flags[i])
+				seq_printf(m, "%s%s",
+					   x86_power_flags[i][0]?" ":"",
+					   x86_power_flags[i]);
+			else
+				seq_printf(m, " [%d]", i);
+		}
 	}
 
 	seq_printf(m, "\n\n");
@@ -1184,8 +1247,8 @@ static void c_stop(struct seq_file *m, void *v)
 {
 }
 
-struct seq_operations cpuinfo_op = {
-	.start =c_start,
+const struct seq_operations cpuinfo_op = {
+	.start = c_start,
 	.next = c_next,
 	.stop = c_stop,
 	.show = show_cpuinfo,