Diffstat (limited to 'arch/x86/kernel/setup_64.c')
-rw-r--r--   arch/x86/kernel/setup_64.c   1199
1 file changed, 0 insertions, 1199 deletions
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
deleted file mode 100644
index 524b6850b2c0..000000000000
--- a/arch/x86/kernel/setup_64.c
+++ /dev/null
@@ -1,1199 +0,0 @@
/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>
#include <asm/trampoline.h>
#include <asm/pat.h>

#include <mach_apic.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif

/*
 * Machine setup.
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x60,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x64, .end = 0x64,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
        .name = "Kernel bss",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of the ELF core header stored
 * by the crashed kernel. This option is passed by the kexec loader
 * to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
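
/*
 * Usage sketch (hypothetical values): a kexec loader might append
 * "elfcorehdr=0x2000000" to the capture kernel's command line; since
 * memparse() also accepts K/M/G suffixes, "elfcorehdr=32M" would parse
 * to the same physical address.
 */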

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif
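
/*
 * The bootmem bitmap above tracks one bit per page frame, so on a
 * hypothetical 4 GiB machine (end_pfn = 1 << 20 with 4 KiB pages)
 * bootmem_bootmap_pages() asks for a 128 KiB (32-page) bitmap, which
 * is then carved out of an e820-usable region and reserved last so it
 * does not hand itself out as free memory.
 */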

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base <= 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "you have to specify a base address\n");
                        return;
                }

                if (reserve_bootmem(crash_base, crash_size,
                                    BOOTMEM_EXCLUSIVE) < 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "memory is in use\n");
                        return;
                }

                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                       "for crashkernel (System RAM: %ldMB)\n",
                       (unsigned long)(crash_size >> 20),
                       (unsigned long)(crash_base >> 20),
                       (unsigned long)(total_mem >> 20));
                crashk_res.start = crash_base;
                crashk_res.end   = crash_base + crash_size - 1;
                insert_resource(&iomem_resource, &crashk_res);
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
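
/*
 * Example (hypothetical command line): with "crashkernel=128M@16M",
 * parse_crashkernel() returns crash_size = 128 MiB and crash_base =
 * 16 MiB. Note that this version insists on an explicit base address;
 * a bare "crashkernel=128M" is rejected with the message above.
 */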

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
        machine_specific_memory_setup();
}

static void __init parse_setup_data(void)
{
        struct setup_data *data;
        unsigned long pa_data;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_ioremap(pa_data, PAGE_SIZE);
                switch (data->type) {
                default:
                        break;
                }
#ifndef CONFIG_DEBUG_BOOT_PARAMS
                free_early(pa_data, pa_data+sizeof(*data)+data->len);
#endif
                pa_data = data->next;
                early_iounmap(data, PAGE_SIZE);
        }
}
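
/*
 * setup_data nodes (boot protocol 2.09 and later, hence the version
 * check above) form a singly linked list in physical memory: each node
 * carries a 64-bit physical "next" pointer, a 32-bit type and a 32-bit
 * length, followed by the payload. The loop walks the chain through
 * early_ioremap() because the nodes live outside the kernel mapping
 * this early in boot.
 */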

#ifdef CONFIG_PCI_MMCONFIG
extern void __cpuinit fam10h_check_enable_mmcfg(void);
extern void __init check_enable_amd_mmconf_dmi(void);
#else
void __cpuinit fam10h_check_enable_mmcfg(void)
{
}
void __init check_enable_amd_mmconf_dmi(void)
{
}
#endif

/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4))
                efi_enabled = 1;
#endif

        ARCH_SETUP

        memory_setup();
        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_setup_data();

        parse_early_param();

#ifdef CONFIG_PCI
        if (pci_early_dump_regs)
                early_dump_pci_devices();
#endif

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif

        finish_e820_parsing();

        /* after parse_early_param(), so we can debug it */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        early_gart_iommu_check();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(end_pfn)) {
                e820_register_active_regions(0, 0, -1UL);
                end_pfn = e820_end_of_ram();
        }

        num_physpages = end_pfn;

        check_efer();

        max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
        if (efi_enabled)
                efi_init();

        vsmp_init();

        dmi_scan_machine();

        io_delay_init();

#ifdef CONFIG_KVM_CLOCK
        kvmclock_init();
#endif

#ifdef CONFIG_SMP
        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
        x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        dma32_reserve_bootmem();

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif

        if (efi_enabled)
                efi_reserve_bootmem();

        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        /*
                         * No need to reserve again: this range was already
                         * reserved early in x86_64_start_kernel, and
                         * early_res_to_bootmem converts that into a bootmem
                         * reservation.
                         */
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        free_bootmem(ramdisk_image, ramdisk_size);
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();

        reserve_ibft_region();

        paging_init();
        map_vsyscall();

        early_quirks();

#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        ioapic_init_mappings();

        kvm_guest_init();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);

        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        /* do this before identify_cpu for boot cpu */
        check_enable_amd_mmconf_dmi();
}

static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
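
/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * 48-character processor brand string in EAX/EBX/ECX/EDX, which is why
 * the three cpuid() calls above fill v[0..11] back to back before the
 * string is NUL-terminated at byte 48.
 */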

static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
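
/*
 * Decoding example for leaf 0x80000005 (hypothetical register value):
 * ECX = 0x40040140 would describe the L1 D-cache as 0x40 = 64 KB
 * (bits 31:24) with 0x40 = 64 bytes per line (bits 7:0); EDX encodes
 * the I-cache the same way. Leaf 0x80000006 reports the L2 size in
 * ECX bits 31:16, in KB.
 */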

#ifdef CONFIG_NUMA
static int __cpuinit nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes the number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        bits = c->x86_coreid_bits;

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 * - The CPU is missing memory and no node was created.
                 *   In that case try picking one from a nearby CPU.
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.
                 *   Assume they are all increased by a constant offset,
                 *   but in the same order as the HT nodeids.
                 *   If that doesn't result in a usable node fall back to
                 *   the path for the previous case.
                 */
                int ht_nodeid = c->initial_apicid;

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
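
/*
 * Worked example (hypothetical IDs): with x86_coreid_bits = 1 (dual
 * core), an initial APIC ID of 5 splits into cpu_core_id = 5 & 1 = 1
 * and phys_proc_id = 5 >> 1 = 2, i.e. core 1 of socket 2.
 */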

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;

#endif
}
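
/*
 * Leaf 0x80000008 ECX[7:0] is the core count minus one and ECX[15:12]
 * the APIC-ID core-ID width. E.g. ECX[7:0] = 3 means four cores; if
 * the width field is 0 the loop above recomputes it as the smallest
 * power-of-two shift, bits = 2 in that example.
 */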

#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
#define CPUID_XFAM_K8 0x00000000
#define CPUID_XFAM_10H 0x00100000
#define CPUID_XFAM_11H 0x00200000
#define CPUID_XMOD 0x000f0000
#define CPUID_XMOD_REV_F 0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
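
/*
 * Note the deliberate fall-through above: the K8 case only breaks out
 * of the switch for steppings before revision F; rev F and later share
 * the C1E MSR check with the family 0x10 and 0x11 parts.
 */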

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd_mc(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /*
         * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
         * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
         */
        clear_cpu_cap(c, 0*32+31);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
                             level >= 0x0f58))
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish models here, but this is only
                           a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_K8);

        /* MFENCE stops RDTSC speculation */
        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

        if (c->x86 == 0x10)
                fam10h_check_enable_mmcfg();

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;

        if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
                    (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
                        set_memory_4k((unsigned long)__va(tseg), 1);
        }
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }

#endif
}
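
/*
 * Worked example (hypothetical package): CPUID(1) EBX[23:16] = 4
 * logical CPUs with x86_max_cores = 2 gives smp_num_siblings = 4, so
 * phys_proc_id = apicid >> 2; dividing by the core count leaves 2
 * threads per core, so cpu_core_id = (apicid >> 1) & 1.
 */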

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}
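
/*
 * CPUID leaf 4, subleaf 0: EAX[4:0] is the cache type (0 means no
 * caches are described, hence the validity check) and EAX[31:26] is
 * the maximum core ID per package, so adding 1 yields the core count.
 */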

static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE || !node_online(node))
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (cpu_has_bts)
                ds_init_intel(c);

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
        if (c->x86 == 0x6 && c->x86_model >= 0xf)
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }

        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
                c->x86_cache_alignment = c->x86_clflush_size * 2;
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else if (!strcmp(v, "CentaurHauls"))
                c->x86_vendor = X86_VENDOR_CENTAUR;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

/*
 * Do some early cpuid on the boot CPU to get some parameters that are
 * needed before check_bugs. Everything advanced is in identify_cpu
 * below.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }
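
        /*
         * Signature decoding example: tfms = 0x00010676 (a Penryn-class
         * Core 2) gives base family 6, base model 7 and stepping 6;
         * since the family is >= 6 the extended model bits are merged
         * in, yielding x86_model = 0x17.
         */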

        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to avoid confusion. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                early_init_intel(c);
                break;
        case X86_VENDOR_CENTAUR:
                early_init_centaur(c);
                break;
        }

        validate_pat_support(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_CENTAUR:
                init_centaur(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;
        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
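
/*
 * The clearcpuid= argument is a raw bit index into x86_capability,
 * i.e. 32 * capability-word + bit. For example "clearcpuid=1" clears
 * bit 1 of word 0 (X86_FEATURE_VME).
 */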