Diffstat (limited to 'arch/x86/kernel/setup_64.c')

-rw-r--r--	arch/x86/kernel/setup_64.c	624
1 file changed, 306 insertions, 318 deletions
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 30d94d1d5f5f..c8939dfddfba 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -30,6 +30,7 @@
 #include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/pci.h>
+#include <linux/efi.h>
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/edd.h>
@@ -39,10 +40,13 @@
 #include <linux/dmi.h>
 #include <linux/dma-mapping.h>
 #include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/init_ohci1394_dma.h>
 
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <asm/vsyscall.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/msr.h>
@@ -50,6 +54,7 @@
 #include <video/edid.h>
 #include <asm/e820.h>
 #include <asm/dma.h>
+#include <asm/gart.h>
 #include <asm/mpspec.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
@@ -59,6 +64,15 @@
 #include <asm/sections.h>
 #include <asm/dmi.h>
 #include <asm/cacheflush.h>
+#include <asm/mce.h>
+#include <asm/ds.h>
+#include <asm/topology.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define ARCH_SETUP
+#endif
 
 /*
  * Machine setup..
@@ -67,6 +81,8 @@
 struct cpuinfo_x86 boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
 unsigned long mmu_cr4_features;
 
 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
@@ -76,7 +92,7 @@ unsigned long saved_video_mode;
 
 int force_mwait __cpuinitdata;
 
-/* 
+/*
  * Early DMI memory
  */
 int dmi_alloc_index;
@@ -122,25 +138,27 @@ struct resource standard_io_resources[] = {
 
 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
 
-struct resource data_resource = {
+static struct resource data_resource = {
 	.name = "Kernel data",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
-struct resource code_resource = {
+static struct resource code_resource = {
 	.name = "Kernel code",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
-struct resource bss_resource = {
+static struct resource bss_resource = {
 	.name = "Kernel bss",
 	.start = 0,
 	.end = 0,
 	.flags = IORESOURCE_RAM,
 };
 
+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
+
 #ifdef CONFIG_PROC_VMCORE
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel. This option will be passed
@@ -164,14 +182,15 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 	unsigned long bootmap_size, bootmap;
 
 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+				 PAGE_SIZE);
 	if (bootmap == -1L)
-		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
 	e820_register_active_regions(0, start_pfn, end_pfn);
 	free_bootmem_with_active_regions(0, end_pfn);
 	reserve_bootmem(bootmap, bootmap_size);
 }
 #endif
 
 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
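The bootmap sizing above hinges on the bootmem allocator keeping one bit of state per page frame, rounded up to whole pages; the new find_e820_area() argument simply makes the PAGE_SIZE alignment of that bitmap explicit. A standalone sketch of the sizing arithmetic (the constants and helper name mirror kernel conventions, but this is an illustration, not the kernel implementation):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* One bit per page frame, rounded up to whole pages --
 * the same contract bootmem_bootmap_pages() provides. */
static unsigned long bootmap_pages(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long end_pfn = 1UL << 20;	/* 4 GB worth of 4K pages */
	printf("bootmap: %lu pages (%lu bytes)\n",
	       bootmap_pages(end_pfn),
	       bootmap_pages(end_pfn) << PAGE_SHIFT);
	return 0;
}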
@@ -205,7 +224,8 @@ static void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
+	free_mem =
+		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
 
 	ret = parse_crashkernel(boot_command_line, free_mem,
 			&crash_size, &crash_base);
@@ -229,33 +249,21 @@ static inline void __init reserve_crashkernel(void)
 {}
 #endif
 
-#define EBDA_ADDR_POINTER 0x40E
-
-unsigned __initdata ebda_addr;
-unsigned __initdata ebda_size;
-
-static void discover_ebda(void)
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) __init memory_setup(void)
 {
-	/*
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E
-	 */
-	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-	ebda_addr <<= 4;
-
-	ebda_size = *(unsigned short *)__va(ebda_addr);
-
-	/* Round EBDA up to pages */
-	if (ebda_size == 0)
-		ebda_size = 1;
-	ebda_size <<= 10;
-	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-	if (ebda_size > 64*1024)
-		ebda_size = 64*1024;
+	machine_specific_memory_setup();
 }
 
+/*
+ * setup_arch - architecture-specific boot-time initializations
+ *
+ * Note: On x86_64, fixmaps are ready for use even before this is called.
+ */
 void __init setup_arch(char **cmdline_p)
 {
+	unsigned i;
+
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 
 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
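The hunk above also introduces memory_setup() as a weak symbol: paravirt builds link in their own definition, everyone else gets the default that calls machine_specific_memory_setup(). A minimal, compilable sketch of the weak-override pattern (GCC/Clang; the function names here are illustrative, not the kernel's):

#include <stdio.h>

/* Default definition; the weak attribute lets another
 * translation unit override it at link time. */
void __attribute__((weak)) memory_setup(void)
{
	puts("default memory_setup");
}

/* If another object file defines a non-weak memory_setup(),
 * the linker silently picks that one instead. */
int main(void)
{
	memory_setup();
	return 0;
}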
@@ -269,7 +277,15 @@ void __init setup_arch(char **cmdline_p)
 	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
 #endif
-	setup_memory_region();
+#ifdef CONFIG_EFI
+	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+		     "EL64", 4))
+		efi_enabled = 1;
+#endif
+
+	ARCH_SETUP
+
+	memory_setup();
 	copy_edd();
 
 	if (!boot_params.hdr.root_flags)
@@ -293,27 +309,47 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	if (init_ohci1394_dma_early)
+		init_ohci1394_dma_on_all_controllers();
+#endif
+
 	finish_e820_parsing();
 
+	early_gart_iommu_check();
+
 	e820_register_active_regions(0, 0, -1UL);
 	/*
 	 * partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
 	end_pfn = e820_end_of_ram();
+	/* update e820 for memory not covered by WB MTRRs */
+	mtrr_bp_init();
+	if (mtrr_trim_uncached_memory(end_pfn)) {
+		e820_register_active_regions(0, 0, -1UL);
+		end_pfn = e820_end_of_ram();
+	}
+
 	num_physpages = end_pfn;
 
 	check_efer();
 
-	discover_ebda();
-
 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+	if (efi_enabled)
+		efi_init();
 
 	dmi_scan_machine();
 
+	io_delay_init();
+
 #ifdef CONFIG_SMP
-	/* setup to use the static apicid table during kernel startup */
-	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
+	/* setup to use the early static init tables during kernel startup */
+	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
+	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
+#endif
 #endif
 
 #ifdef CONFIG_ACPI
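Note the shape of the new MTRR handling above: mtrr_trim_uncached_memory() returns nonzero when it had to clip the e820 map, and only then are the active regions re-registered and end_pfn recomputed. A toy model of that trim-then-recompute flow (all functions and numbers below are hypothetical stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for e820_end_of_ram() and mtrr_trim_uncached_memory():
 * trim_uncached() returns true when it shrank the usable map,
 * forcing exactly one recomputation. */
static unsigned long highest_pfn = 1048576;	/* pretend 4 GB */

static unsigned long end_of_ram(void) { return highest_pfn; }

static bool trim_uncached(unsigned long end_pfn)
{
	if (end_pfn > 786432) {		/* pretend MTRRs cover only 3 GB */
		highest_pfn = 786432;
		return true;		/* map changed, caller must redo */
	}
	return false;
}

int main(void)
{
	unsigned long end_pfn = end_of_ram();
	if (trim_uncached(end_pfn))
		end_pfn = end_of_ram();	/* recompute after the trim */
	printf("end_pfn = %lu\n", end_pfn);
	return 0;
}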
@@ -340,48 +376,26 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifdef CONFIG_NUMA
 	numa_initmem_init(0, end_pfn);
 #else
 	contig_initmem_init(0, end_pfn);
 #endif
 
-	/* Reserve direct mapping */
-	reserve_bootmem_generic(table_start << PAGE_SHIFT,
-				(table_end - table_start) << PAGE_SHIFT);
-
-	/* reserve kernel */
-	reserve_bootmem_generic(__pa_symbol(&_text),
-				__pa_symbol(&_end) - __pa_symbol(&_text));
+	early_res_to_bootmem();
 
+#ifdef CONFIG_ACPI_SLEEP
 	/*
-	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-	 * enabling clean reboots, SMP operation, laptop functions.
+	 * Reserve low memory region for sleep support.
 	 */
-	reserve_bootmem_generic(0, PAGE_SIZE);
-
-	/* reserve ebda region */
-	if (ebda_addr)
-		reserve_bootmem_generic(ebda_addr, ebda_size);
-#ifdef CONFIG_NUMA
-	/* reserve nodemap region */
-	if (nodemap_addr)
-		reserve_bootmem_generic(nodemap_addr, nodemap_size);
+	acpi_reserve_bootmem();
 #endif
 
-#ifdef CONFIG_SMP
-	/* Reserve SMP trampoline */
-	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
-#endif
+	if (efi_enabled)
+		efi_reserve_bootmem();
 
-#ifdef CONFIG_ACPI_SLEEP
 	/*
-	 * Reserve low memory region for sleep support.
+	 * Find and reserve possible boot-time SMP configuration:
 	 */
-	acpi_reserve_bootmem();
-#endif
-	/*
-	 * Find and reserve possible boot-time SMP configuration:
-	 */
 	find_smp_config();
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
@@ -395,6 +409,8 @@ void __init setup_arch(char **cmdline_p)
 			initrd_start = ramdisk_image + PAGE_OFFSET;
 			initrd_end = initrd_start+ramdisk_size;
 		} else {
+			/* Assumes everything on node 0 */
+			free_bootmem(ramdisk_image, ramdisk_size);
 			printk(KERN_ERR "initrd extends beyond end of memory "
 			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
 			       ramdisk_end, end_of_mem);
@@ -404,17 +420,10 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	reserve_crashkernel();
 	paging_init();
+	map_vsyscall();
 
-#ifdef CONFIG_PCI
 	early_quirks();
-#endif
 
-	/*
-	 * set this early, so we dont allocate cpu0
-	 * if MADT list doesnt list BSP first
-	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
-	 */
-	cpu_set(0, cpu_present_map);
 #ifdef CONFIG_ACPI
 	/*
 	 * Read APIC and some other early information from ACPI tables.
@@ -430,25 +439,24 @@ void __init setup_arch(char **cmdline_p)
 	if (smp_found_config)
 		get_smp_config();
 	init_apic_mappings();
+	ioapic_init_mappings();
 
 	/*
 	 * We trust e820 completely. No explicit ROM probing in memory.
 	 */
-	e820_reserve_resources();
+	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
 	e820_mark_nosave_regions();
 
-	{
-	unsigned i;
 	/* request I/O space for devices used on all i[345]86 PCs */
 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
 		request_resource(&ioport_resource, &standard_io_resources[i]);
-	}
 
 	e820_setup_gap();
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
-	conswitchp = &vga_con;
+	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+		conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
 	conswitchp = &dummy_con;
 #endif
@@ -479,9 +487,10 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
+		       "D cache %dK (%d bytes/line)\n",
+		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
@@ -495,11 +504,8 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 		c->x86_cache_size, ecx & 0xFF);
 	}
-
-	if (n >= 0x80000007)
-		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
 	if (n >= 0x80000008) {
 		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
@@ -508,14 +514,15 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 static int nearby_node(int apicid)
 {
-	int i;
+	int i, node;
+
 	for (i = apicid - 1; i >= 0; i--) {
-		int node = apicid_to_node[i];
+		node = apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		int node = apicid_to_node[i];
+		node = apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
@@ -527,7 +534,7 @@ static int nearby_node(int apicid)
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
  */
-static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned bits;
@@ -536,7 +543,54 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 	int node = 0;
 	unsigned apicid = hard_smp_processor_id();
 #endif
-	unsigned ecx = cpuid_ecx(0x80000008);
+	bits = c->x86_coreid_bits;
+
+	/* Low order bits define the core id (index of core in socket) */
+	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
+	/* Convert the APIC ID into the socket ID */
+	c->phys_proc_id = phys_pkg_id(bits);
+
+#ifdef CONFIG_NUMA
+	node = c->phys_proc_id;
+	if (apicid_to_node[apicid] != NUMA_NO_NODE)
+		node = apicid_to_node[apicid];
+	if (!node_online(node)) {
+		/* Two possibilities here:
+		   - The CPU is missing memory and no node was created.
+		     In that case try picking one from a nearby CPU
+		   - The APIC IDs differ from the HyperTransport node IDs
+		     which the K8 northbridge parsing fills in.
+		     Assume they are all increased by a constant offset,
+		     but in the same order as the HT nodeids.
+		     If that doesn't result in a usable node fall back to the
+		     path for the previous case. */
+
+		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
+
+		if (ht_nodeid >= 0 &&
+		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = apicid_to_node[ht_nodeid];
+		/* Pick a nearby node */
+		if (!node_online(node))
+			node = nearby_node(apicid);
+	}
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+#endif
+}
+
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned bits, ecx;
+
+	/* Multi core CPU? */
+	if (c->extended_cpuid_level < 0x80000008)
+		return;
+
+	ecx = cpuid_ecx(0x80000008);
 
 	c->x86_max_cores = (ecx & 0xff) + 1;
 
@@ -549,37 +603,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 		bits++;
 	}
 
-	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-	/* Convert the APIC ID into the socket ID */
-	c->phys_proc_id = phys_pkg_id(bits);
-
-#ifdef CONFIG_NUMA
-	node = c->phys_proc_id;
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
-	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case. */
-		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
-		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
-		/* Pick a nearby node */
-		if (!node_online(node))
-			node = nearby_node(apicid);
-	}
-	numa_set_node(cpu, node);
+	c->x86_coreid_bits = bits;
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
 #endif
 }
 
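The relocated core-id code splits the initial APIC ID on a cached bit count: the low x86_coreid_bits select the core within the package, and the remaining high bits (what phys_pkg_id() extracts) select the package. A small userspace sketch of that split (names are illustrative):

#include <stdio.h>

/* Split an APIC ID into (package, core) given the number of
 * low-order core-id bits, as amd_detect_cmp() does above. */
static void split_apicid(unsigned apicid, unsigned coreid_bits,
			 unsigned *pkg, unsigned *core)
{
	*core = apicid & ((1u << coreid_bits) - 1);	/* low bits */
	*pkg  = apicid >> coreid_bits;			/* high bits */
}

int main(void)
{
	unsigned pkg, core;
	split_apicid(5, 1, &pkg, &core);	/* dual-core K8, APIC ID 5 */
	printf("package %u, core %u\n", pkg, core);	/* package 2, core 1 */
	return 0;
}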
@@ -595,8 +620,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 /* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
 static __cpuinit int amd_apic_timer_broken(void)
 {
-	u32 lo, hi;
-	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+
 	switch (eax & CPUID_XFAM) {
 	case CPUID_XFAM_K8:
 		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
@@ -614,6 +639,15 @@ static __cpuinit int amd_apic_timer_broken(void)
 	return 0;
 }
 
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+	early_init_amd_mc(c);
+
+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+	if (c->x86_power & (1<<8))
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	unsigned level;
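early_init_amd() keys the constant-TSC flag off bit 8 of CPUID leaf 0x80000007 EDX, which the early path caches in c->x86_power. The same bit can be probed from userspace with GCC's cpuid.h (x86 only; a sketch, not the kernel's code path):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	/* Leaf 0x80000007: Advanced Power Management information.
	 * EDX bit 8 = invariant ("constant") TSC. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("constant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}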
@@ -624,7 +658,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/*
 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
-	 * 
+	 *
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
@@ -637,35 +671,32 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, &c->x86_capability);
+	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
 
 	/* On C+ stepping K8 rep microcode works well for copy/memset */
 	level = cpuid_eax(1);
-	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
+			     level >= 0x0f58))
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	level = get_model_name(c);
 	if (!level) {
 		switch (c->x86) {
 		case 15:
 			/* Should distinguish Models here, but this is only
 			   a fallback anyways. */
 			strcpy(c->x86_model_id, "Hammer");
 			break;
 		}
 	}
 	display_cacheinfo(c);
 
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-
 	/* Multi core CPU? */
 	if (c->extended_cpuid_level >= 0x80000008)
 		amd_detect_cmp(c);
@@ -677,41 +708,38 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 
 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_K8, &c->x86_capability);
-
-	/* RDTSC can be speculated around */
-	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 
-	/* Family 10 doesn't support C states in MWAIT so don't use it */
-	if (c->x86 == 0x10 && !force_mwait)
-		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
+	/* MFENCE stops RDTSC speculation */
+	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 
 	if (amd_apic_timer_broken())
 		disable_apic_timer = 1;
 }
 
-static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1 ) {
+	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of "
+			       "siblings %d", smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -721,7 +749,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-	index_msb = get_count_order(smp_num_siblings) ;
+	index_msb = get_count_order(smp_num_siblings);
 
 	core_bits = get_count_order(c->x86_max_cores);
 
@@ -730,8 +758,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	}
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
 	}
 
 #endif
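detect_ht() divides the CPUID sibling count by the core count and then asks get_count_order() — effectively ceil(log2(n)) — how many APIC-ID bits each level consumes. A sketch of that helper's contract (hand-rolled here for illustration, not the kernel's implementation):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1 -- the contract of the kernel's
 * get_count_order() used by detect_ht() above. */
static int count_order(unsigned n)
{
	int order = 0;
	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* 2 threads/core, 4 cores: 1 bit for threads, 2 for cores */
	printf("thread bits: %d\n", count_order(2));
	printf("core bits:   %d\n", count_order(4));
	return 0;
}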
@@ -773,28 +803,39 @@ static void srat_detect_node(void)
 #endif
 }
 
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+{
+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+}
+
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned n;
 
 	init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9 ) {
+	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	if (cpu_has_ds) {
 		unsigned int l1, l2;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
+
+	if (cpu_has_bts)
+		ds_init_intel(c);
+
 	n = c->extended_cpuid_level;
 	if (n >= 0x80000008) {
 		unsigned eax = cpuid_eax(0x80000008);
@@ -811,14 +852,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	c->x86_cache_alignment = c->x86_clflush_size * 2;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	if (c->x86 == 6)
-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-	if (c->x86 == 15)
-		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-	else
-		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-	c->x86_max_cores = intel_num_cpu_cores(c);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+	c->x86_max_cores = intel_num_cpu_cores(c);
 
 	srat_detect_node();
 }
@@ -835,18 +873,12 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 }
 
-struct cpu_model_info {
-	int vendor;
-	int family;
-	char *model_names[16];
-};
-
 /* Do some early cpuid on the boot CPU to get some parameter that are
    needed before check_bugs. Everything advanced is in identify_cpu
    below. */
-void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	u32 tfms;
+	u32 tfms, xlvl;
 
 	c->loops_per_jiffy = loops_per_jiffy;
 	c->x86_cache_size = -1;
@@ -857,6 +889,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_clflush_size = 64;
 	c->x86_cache_alignment = c->x86_clflush_size;
 	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
 	c->extended_cpuid_level = 0;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
@@ -865,7 +898,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[0],
 	      (unsigned int *)&c->x86_vendor_id[8],
 	      (unsigned int *)&c->x86_vendor_id[4]);
- 
+
 	get_cpu_vendor(c);
 
 	/* Initialize the standard set of capabilities */
@@ -883,7 +916,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (c->x86_capability[0] & (1<<19)) 
+		if (c->x86_capability[0] & (1<<19))
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
@@ -893,18 +926,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
-}
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-{
-	int i;
-	u32 xlvl;
-
-	early_identify_cpu(c);
-
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
@@ -925,6 +946,30 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_capability[2] = cpuid_edx(0x80860001);
 	}
 
+	c->extended_cpuid_level = cpuid_eax(0x80000000);
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		early_init_amd(c);
+		break;
+	case X86_VENDOR_INTEL:
+		early_init_intel(c);
+		break;
+	}
+
+}
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+{
+	int i;
+
+	early_identify_cpu(c);
+
 	init_scattered_cpuid_features(c);
 
 	c->apicid = phys_pkg_id(0);
@@ -954,8 +999,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		break;
 	}
 
-	select_idle_routine(c);
-	detect_ht(c);
+	detect_ht(c);
 
 	/*
 	 * On SMP, boot_cpu_data holds the common feature set between
@@ -965,31 +1009,55 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
+	/* Clear all flags overriden by options */
+	for (i = 0; i < NCAPINTS; i++)
+		c->x86_capability[i] ^= cleared_cpu_caps[i];
+
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	select_idle_routine(c);
+
 	if (c != &boot_cpu_data)
 		mtrr_ap_init();
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
+
 }
 
+static __init int setup_noclflush(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+	return 1;
+}
+__setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
-		printk("%s", c->x86_model_id);
+		printk(KERN_INFO "%s", c->x86_model_id);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+}
+
+static __init int setup_disablecpuid(char *arg)
+{
+	int bit;
+	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+		setup_clear_cpu_cap(bit);
+	else
+		return 0;
+	return 1;
 }
+__setup("clearcpuid=", setup_disablecpuid);
 
 /*
  * Get CPU information for use by the procfs.
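The new boot options tie into cleared_cpu_caps[]: setup_clear_cpu_cap() records which feature bits were forced off, and identify_cpu() XORs that mask out of x86_capability[] on every CPU, so "noclflush" and "clearcpuid=" stick across secondary-CPU bringup. A compact sketch of that bookkeeping (array width and values are illustrative; XOR works because the masked bits are known to be set):

#include <stdio.h>

#define NCAPINTS 8

static unsigned caps[NCAPINTS];		/* bits actually detected */
static unsigned cleared[NCAPINTS];	/* bits forced off by options */

static void setup_clear_cap(int bit)
{
	cleared[bit / 32] |= 1u << (bit % 32);
}

int main(void)
{
	caps[0] = 0x00080019;	/* pretend CPUID set these bits */
	setup_clear_cap(19);	/* e.g. "noclflush": CLFLSH is bit 19 */

	/* identify_cpu() applies the overrides per CPU: */
	for (int i = 0; i < NCAPINTS; i++)
		caps[i] ^= cleared[i];

	printf("caps[0] = %#x\n", caps[0]);	/* 0x19: bit 19 gone */
	return 0;
}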
@@ -998,116 +1066,41 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
-	int cpu = 0;
-
-	/*
-	 * These flag bits must match the definitions in <asm/cpufeature.h>.
-	 * NULL means this bit is undefined or reserved; either way it doesn't
-	 * have meaning as far as Linux is concerned. Note that it's important
-	 * to realize there is a difference between this table and CPUID -- if
-	 * applications want to get the raw CPUID data, they should access
-	 * /dev/cpu/<cpu_nr>/cpuid instead.
-	 */
-	static const char *const x86_cap_flags[] = {
-		/* Intel-defined */
-		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
-
-		/* AMD-defined */
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
-		"3dnowext", "3dnow",
-
-		/* Transmeta-defined */
-		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-		/* Other (Linux-defined) */
-		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
-		NULL, NULL, NULL, NULL,
-		"constant_tsc", "up", NULL, "arch_perfmon",
-		"pebs", "bts", NULL, "sync_rdtsc",
-		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-		/* Intel-defined (#2) */
-		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-		/* VIA/Cyrix/Centaur-defined */
-		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-		/* AMD-defined (#2) */
-		"lahf_lm", "cmp_legacy", "svm", "extapic",
-		"cr8_legacy", "abm", "sse4a", "misalignsse",
-		"3dnowprefetch", "osvw", "ibs", "sse5",
-		"skinit", "wdt", NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-		/* Auxiliary (Linux-defined) */
-		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	};
-	static const char *const x86_power_flags[] = {
-		"ts",	/* temperature sensor */
-		"fid",	/* frequency id control */
-		"vid",	/* voltage id control */
-		"ttp",	/* thermal trip */
-		"tm",
-		"stc",
-		"100mhzsteps",
-		"hwpstate",
-		"",	/* tsc invariant mapped to constant_tsc */
-		/* nothing */
-	};
-
+	int cpu = 0, i;
 
 #ifdef CONFIG_SMP
 	cpu = c->cpu_index;
 #endif
 
-	seq_printf(m,"processor\t: %u\n"
+	seq_printf(m, "processor\t: %u\n"
 		   "vendor_id\t: %s\n"
 		   "cpu family\t: %d\n"
 		   "model\t\t: %d\n"
 		   "model name\t: %s\n",
 		   (unsigned)cpu,
 		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
 		   c->x86,
 		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
 	if (c->x86_mask || c->cpuid_level >= 0)
 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
 	else
 		seq_printf(m, "stepping\t: unknown\n");
 
-	if (cpu_has(c,X86_FEATURE_TSC)) {
+	if (cpu_has(c, X86_FEATURE_TSC)) {
 		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
+
 		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
 			   freq / 1000, (freq % 1000));
 	}
 
 	/* Cache size */
 	if (c->x86_cache_size >= 0)
 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
 
 #ifdef CONFIG_SMP
 	if (smp_num_siblings * c->x86_max_cores > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
@@ -1116,48 +1109,43 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	}
 #endif
 
 	seq_printf(m,
 		   "fpu\t\t: yes\n"
 		   "fpu_exception\t: yes\n"
 		   "cpuid level\t: %d\n"
 		   "wp\t\t: yes\n"
 		   "flags\t\t:",
 		   c->cpuid_level);
 
-	{
-		int i;
-		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-				seq_printf(m, " %s", x86_cap_flags[i]);
-	}
-
+	for (i = 0; i < 32*NCAPINTS; i++)
+		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
+			seq_printf(m, " %s", x86_cap_flags[i]);
+
 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
 		   c->loops_per_jiffy/(500000/HZ),
 		   (c->loops_per_jiffy/(5000/HZ)) % 100);
 
 	if (c->x86_tlbsize > 0)
 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
 	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
 
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
 		   c->x86_phys_bits, c->x86_virt_bits);
 
 	seq_printf(m, "power management:");
-	{
-		unsigned i;
-		for (i = 0; i < 32; i++)
-			if (c->x86_power & (1 << i)) {
-				if (i < ARRAY_SIZE(x86_power_flags) &&
-					x86_power_flags[i])
-					seq_printf(m, "%s%s",
-						x86_power_flags[i][0]?" ":"",
-						x86_power_flags[i]);
-				else
-					seq_printf(m, " [%d]", i);
-			}
-	}
+	for (i = 0; i < 32; i++) {
+		if (c->x86_power & (1 << i)) {
+			if (i < ARRAY_SIZE(x86_power_flags) &&
+			    x86_power_flags[i])
+				seq_printf(m, "%s%s",
+					   x86_power_flags[i][0]?" ":"",
+					   x86_power_flags[i]);
+			else
+				seq_printf(m, " [%d]", i);
+		}
+	}
 
 	seq_printf(m, "\n\n");
@@ -1184,8 +1172,8 @@ static void c_stop(struct seq_file *m, void *v)
 {
 }
 
-struct seq_operations cpuinfo_op = {
-	.start =c_start,
+const struct seq_operations cpuinfo_op = {
+	.start = c_start,
 	.next = c_next,
 	.stop = c_stop,
 	.show = show_cpuinfo,