diff options
Diffstat (limited to 'arch/x86/kernel/setup_64.c')
-rw-r--r-- | arch/x86/kernel/setup_64.c | 1194 |
1 files changed, 0 insertions, 1194 deletions
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c deleted file mode 100644 index 6dff1286ad8a..000000000000 --- a/arch/x86/kernel/setup_64.c +++ /dev/null | |||
@@ -1,1194 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995 Linus Torvalds | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | * This file handles the architecture-dependent parts of initialization | ||
7 | */ | ||
8 | |||
9 | #include <linux/errno.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/unistd.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/user.h> | ||
18 | #include <linux/screen_info.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/initrd.h> | ||
23 | #include <linux/highmem.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <linux/console.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | #include <linux/crash_dump.h> | ||
30 | #include <linux/root_dev.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <asm/pci-direct.h> | ||
33 | #include <linux/efi.h> | ||
34 | #include <linux/acpi.h> | ||
35 | #include <linux/kallsyms.h> | ||
36 | #include <linux/edd.h> | ||
37 | #include <linux/iscsi_ibft.h> | ||
38 | #include <linux/mmzone.h> | ||
39 | #include <linux/kexec.h> | ||
40 | #include <linux/cpufreq.h> | ||
41 | #include <linux/dmi.h> | ||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/ctype.h> | ||
44 | #include <linux/sort.h> | ||
45 | #include <linux/uaccess.h> | ||
46 | #include <linux/init_ohci1394_dma.h> | ||
47 | #include <linux/kvm_para.h> | ||
48 | |||
49 | #include <asm/mtrr.h> | ||
50 | #include <asm/uaccess.h> | ||
51 | #include <asm/system.h> | ||
52 | #include <asm/vsyscall.h> | ||
53 | #include <asm/io.h> | ||
54 | #include <asm/smp.h> | ||
55 | #include <asm/msr.h> | ||
56 | #include <asm/desc.h> | ||
57 | #include <video/edid.h> | ||
58 | #include <asm/e820.h> | ||
59 | #include <asm/dma.h> | ||
60 | #include <asm/gart.h> | ||
61 | #include <asm/mpspec.h> | ||
62 | #include <asm/mmu_context.h> | ||
63 | #include <asm/proto.h> | ||
64 | #include <asm/setup.h> | ||
65 | #include <asm/numa.h> | ||
66 | #include <asm/sections.h> | ||
67 | #include <asm/dmi.h> | ||
68 | #include <asm/cacheflush.h> | ||
69 | #include <asm/mce.h> | ||
70 | #include <asm/ds.h> | ||
71 | #include <asm/topology.h> | ||
72 | #include <asm/trampoline.h> | ||
73 | #include <asm/pat.h> | ||
74 | |||
75 | #include <mach_apic.h> | ||
76 | #ifdef CONFIG_PARAVIRT | ||
77 | #include <asm/paravirt.h> | ||
78 | #else | ||
79 | #define ARCH_SETUP | ||
80 | #endif | ||
81 | |||
82 | /* | ||
83 | * Machine setup.. | ||
84 | */ | ||
85 | |||
/* Per-CPU data for the boot processor; filled in by early_identify_cpu(). */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

/* Capability words force-cleared during CPU setup (writers not in this chunk). */
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/* Video mode at boot, snapshotted from boot_params.hdr.vid_mode in setup_arch(). */
unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];	/* trailing variable-length data */
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

/* Writable copy of the boot command line; handed back via *cmdline_p. */
char __initdata command_line[COMMAND_LINE_SIZE];

/* Legacy PC I/O port ranges claimed unconditionally in setup_arch(). */
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

/* Kernel image regions; start/end are filled in by setup_arch(). */
static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
168 | |||
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= gives the location of the ELF core header stored by the
 * crashed kernel; the kexec loader passes this option to the capture
 * kernel.  Stored in elfcorehdr_addr for /proc/vmcore.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *parsed;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &parsed);

	/* succeed only if memparse() consumed at least one character */
	return (parsed > arg) ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
184 | |||
#ifndef CONFIG_NUMA
/*
 * Flat (non-NUMA) bootmem setup for pfns [start_pfn, end_pfn): find an
 * e820 hole for the bootmem bitmap, register active RAM ranges, free
 * usable memory into bootmem, re-reserve early allocations, and finally
 * reserve the bitmap itself.  The call order here is load-bearing.
 */
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	/* one bit per page, rounded up to whole pages */
	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	/* convert early-allocator reservations into bootmem reservations */
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	/* the bitmap itself must never be handed out */
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif
203 | |||
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Snapshot the BIOS Enhanced Disk Drive data out of
 * boot_params into the global 'edd' so it survives early boot.
 */
static inline void copy_edd(void)
{
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
}
#else
/* EDD support compiled out: nothing to copy. */
static inline void copy_edd(void)
{
}
#endif
227 | |||
228 | #ifdef CONFIG_KEXEC | ||
229 | static void __init reserve_crashkernel(void) | ||
230 | { | ||
231 | unsigned long long total_mem; | ||
232 | unsigned long long crash_size, crash_base; | ||
233 | int ret; | ||
234 | |||
235 | total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; | ||
236 | |||
237 | ret = parse_crashkernel(boot_command_line, total_mem, | ||
238 | &crash_size, &crash_base); | ||
239 | if (ret == 0 && crash_size) { | ||
240 | if (crash_base <= 0) { | ||
241 | printk(KERN_INFO "crashkernel reservation failed - " | ||
242 | "you have to specify a base address\n"); | ||
243 | return; | ||
244 | } | ||
245 | |||
246 | if (reserve_bootmem(crash_base, crash_size, | ||
247 | BOOTMEM_EXCLUSIVE) < 0) { | ||
248 | printk(KERN_INFO "crashkernel reservation failed - " | ||
249 | "memory is in use\n"); | ||
250 | return; | ||
251 | } | ||
252 | |||
253 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | ||
254 | "for crashkernel (System RAM: %ldMB)\n", | ||
255 | (unsigned long)(crash_size >> 20), | ||
256 | (unsigned long)(crash_base >> 20), | ||
257 | (unsigned long)(total_mem >> 20)); | ||
258 | crashk_res.start = crash_base; | ||
259 | crashk_res.end = crash_base + crash_size - 1; | ||
260 | insert_resource(&iomem_resource, &crashk_res); | ||
261 | } | ||
262 | } | ||
263 | #else | ||
264 | static inline void __init reserve_crashkernel(void) | ||
265 | {} | ||
266 | #endif | ||
267 | |||
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
	/* default: take the firmware-provided (e820) memory map */
	machine_specific_memory_setup();
}
273 | |||
274 | static void __init parse_setup_data(void) | ||
275 | { | ||
276 | struct setup_data *data; | ||
277 | unsigned long pa_data; | ||
278 | |||
279 | if (boot_params.hdr.version < 0x0209) | ||
280 | return; | ||
281 | pa_data = boot_params.hdr.setup_data; | ||
282 | while (pa_data) { | ||
283 | data = early_ioremap(pa_data, PAGE_SIZE); | ||
284 | switch (data->type) { | ||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | #ifndef CONFIG_DEBUG_BOOT_PARAMS | ||
289 | free_early(pa_data, pa_data+sizeof(*data)+data->len); | ||
290 | #endif | ||
291 | pa_data = data->next; | ||
292 | early_iounmap(data, PAGE_SIZE); | ||
293 | } | ||
294 | } | ||
295 | |||
#ifdef CONFIG_PCI_MMCONFIG
/* Real implementations live in the PCI MMCONFIG code. */
extern void __cpuinit fam10h_check_enable_mmcfg(void);
extern void __init check_enable_amd_mmconf_dmi(void);
#else
/* No-op stubs when PCI MMCONFIG support is compiled out. */
void __cpuinit fam10h_check_enable_mmcfg(void)
{
}
void __init check_enable_amd_mmconf_dmi(void)
{
}
#endif
307 | |||
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 *
 * The sequencing below is deliberate: memory map discovery (e820/MTRR),
 * then direct mapping, then bootmem, then reservations, then ACPI/SMP
 * table parsing.  Do not reorder calls without checking dependencies.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	/* snapshot firmware-provided data out of the zero page */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* "EL64" signature means we were started by a 64-bit EFI loader */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_setup_data();

	parse_early_param();

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	finish_e820_parsing();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(end_pfn)) {
		/* e820 changed: rediscover RAM and the end of memory */
		e820_register_active_regions(0, 0, -1UL);
		end_pfn = e820_end_of_ram();
	}

	num_physpages = end_pfn;

	check_efer();

	/*
	 * NOTE(review): max_pfn_mapped is read here before this function
	 * assigns it — presumably set during early startup (head64);
	 * confirm against the early boot path.
	 */
	max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	vsmp_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_KVM_CLOCK
	kvmclock_init();
#endif

#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	dma32_reserve_bootmem();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled)
		efi_reserve_bootmem();

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			/*
			 * don't need to reserve again, already reserved early
			 * in x86_64_start_kernel, and early_res_to_bootmem
			 * convert that to reserved in bootmem
			 */
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			/* initrd lies above usable RAM: give it back and disable */
			free_bootmem(ramdisk_image, ramdisk_size);
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();

	reserve_ibft_region();

	paging_init();
	map_vsyscall();

	early_quirks();

#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	kvm_guest_init();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	/* skip VGA console if EFI says 0xa0000 is ordinary RAM */
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* do this before identify_cpu for boot cpu */
	check_enable_amd_mmconf_dmi();
}
539 | |||
540 | static int __cpuinit get_model_name(struct cpuinfo_x86 *c) | ||
541 | { | ||
542 | unsigned int *v; | ||
543 | |||
544 | if (c->extended_cpuid_level < 0x80000004) | ||
545 | return 0; | ||
546 | |||
547 | v = (unsigned int *) c->x86_model_id; | ||
548 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | ||
549 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | ||
550 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | ||
551 | c->x86_model_id[48] = 0; | ||
552 | return 1; | ||
553 | } | ||
554 | |||
555 | |||
556 | static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | ||
557 | { | ||
558 | unsigned int n, dummy, eax, ebx, ecx, edx; | ||
559 | |||
560 | n = c->extended_cpuid_level; | ||
561 | |||
562 | if (n >= 0x80000005) { | ||
563 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | ||
564 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), " | ||
565 | "D cache %dK (%d bytes/line)\n", | ||
566 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
567 | c->x86_cache_size = (ecx>>24) + (edx>>24); | ||
568 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
569 | c->x86_tlbsize = 0; | ||
570 | } | ||
571 | |||
572 | if (n >= 0x80000006) { | ||
573 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | ||
574 | ecx = cpuid_ecx(0x80000006); | ||
575 | c->x86_cache_size = ecx >> 16; | ||
576 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
577 | |||
578 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
579 | c->x86_cache_size, ecx & 0xFF); | ||
580 | } | ||
581 | if (n >= 0x80000008) { | ||
582 | cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); | ||
583 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
584 | c->x86_phys_bits = eax & 0xff; | ||
585 | } | ||
586 | } | ||
587 | |||
#ifdef CONFIG_NUMA
/*
 * Return the online node of the nearest (by APIC id) CPU that has one:
 * scan downwards first, then upwards, falling back to the first online
 * node if nothing was found.
 */
static int __cpuinit nearby_node(int apicid)
{
	int probe, nid;

	for (probe = apicid - 1; probe >= 0; probe--) {
		nid = apicid_to_node[probe];
		if (nid != NUMA_NO_NODE && node_online(nid))
			return nid;
	}
	for (probe = apicid + 1; probe < MAX_LOCAL_APIC; probe++) {
		nid = apicid_to_node[probe];
		if (nid != NUMA_NO_NODE && node_online(nid))
			return nid;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
606 | |||
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish
 * the cores.  Assumes number of cores is a power of two.  Derives
 * cpu_core_id / phys_proc_id and, on NUMA, binds the CPU to a node.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	/* number of core-id bits, computed earlier by early_init_amd_mc() */
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;

#ifdef CONFIG_NUMA
	/* default guess: socket id == node id; SRAT data overrides it */
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		   In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		   which the K8 northbridge parsing fills in.
		   Assume they are all increased by a constant offset,
		   but in the same order as the HT nodeids.
		   If that doesn't result in a usable node fall back to the
		   path for the previous case. */

		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
657 | |||
658 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | ||
659 | { | ||
660 | #ifdef CONFIG_SMP | ||
661 | unsigned bits, ecx; | ||
662 | |||
663 | /* Multi core CPU? */ | ||
664 | if (c->extended_cpuid_level < 0x80000008) | ||
665 | return; | ||
666 | |||
667 | ecx = cpuid_ecx(0x80000008); | ||
668 | |||
669 | c->x86_max_cores = (ecx & 0xff) + 1; | ||
670 | |||
671 | /* CPU telling us the core id bits shift? */ | ||
672 | bits = (ecx >> 12) & 0xF; | ||
673 | |||
674 | /* Otherwise recompute */ | ||
675 | if (bits == 0) { | ||
676 | while ((1 << bits) < c->x86_max_cores) | ||
677 | bits++; | ||
678 | } | ||
679 | |||
680 | c->x86_coreid_bits = bits; | ||
681 | |||
682 | #endif | ||
683 | } | ||
684 | |||
/* Constants for decoding the CPUID signature in amd_apic_timer_broken(). */
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		/* K8 revisions before F break out without the MSR check */
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through: rev F+ K8 is checked like 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
715 | |||
716 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | ||
717 | { | ||
718 | early_init_amd_mc(c); | ||
719 | |||
720 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ | ||
721 | if (c->x86_power & (1<<8)) | ||
722 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
723 | } | ||
724 | |||
/*
 * AMD-specific CPU setup: K8 errata workarounds, feature bits, cache
 * reporting, core topology detection and family-specific quirks.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_cpu_cap(c, 0*32+31);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	/* get_model_name() returns 0 when the brand-string leaves are absent */
	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	/* family 10h: check MMCONFIG enable (stubbed without CONFIG_PCI_MMCONFIG) */
	if (c->x86 == 0x10)
		fam10h_check_enable_mmcfg();

	/* C1E-affected parts: the local APIC timer is unreliable */
	if (amd_apic_timer_broken())
		disable_apic_timer = 1;

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
		    (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
}
809 | |||
/*
 * Detect Hyper-Threading topology from CPUID leaf 1 and derive
 * phys_proc_id / cpu_core_id.  Note: the global smp_num_siblings is
 * reduced to siblings-per-core as a side effect of this function.
 */
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	/* CMP_LEGACY parts report cores, not HT siblings: just print */
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	/* EBX[23:16] = logical processors per package */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		/* narrow to siblings per core (global side effect) */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}
859 | |||
860 | /* | ||
861 | * find out the number of processor cores on the die | ||
862 | */ | ||
863 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | ||
864 | { | ||
865 | unsigned int eax, t; | ||
866 | |||
867 | if (c->cpuid_level < 4) | ||
868 | return 1; | ||
869 | |||
870 | cpuid_count(4, 0, &eax, &t, &t, &t); | ||
871 | |||
872 | if (eax & 0x1f) | ||
873 | return ((eax >> 26) + 1); | ||
874 | else | ||
875 | return 1; | ||
876 | } | ||
877 | |||
878 | static void __cpuinit srat_detect_node(void) | ||
879 | { | ||
880 | #ifdef CONFIG_NUMA | ||
881 | unsigned node; | ||
882 | int cpu = smp_processor_id(); | ||
883 | int apicid = hard_smp_processor_id(); | ||
884 | |||
885 | /* Don't do the funky fallback heuristics the AMD version employs | ||
886 | for now. */ | ||
887 | node = apicid_to_node[apicid]; | ||
888 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
889 | node = first_node(node_online_map); | ||
890 | numa_set_node(cpu, node); | ||
891 | |||
892 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
893 | #endif | ||
894 | } | ||
895 | |||
896 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
897 | { | ||
898 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
899 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
900 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
901 | } | ||
902 | |||
/*
 * Intel-specific CPU setup: cache info, perfmon/BTS/PEBS detection,
 * address widths (with a 0F34 workaround) and basic feature bits.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bits 11/12 clear => BTS/PEBS available */
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}


	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
950 | |||
951 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | ||
952 | { | ||
953 | if (c->x86 == 0x6 && c->x86_model >= 0xf) | ||
954 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
955 | } | ||
956 | |||
957 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | ||
958 | { | ||
959 | /* Cache sizes */ | ||
960 | unsigned n; | ||
961 | |||
962 | n = c->extended_cpuid_level; | ||
963 | if (n >= 0x80000008) { | ||
964 | unsigned eax = cpuid_eax(0x80000008); | ||
965 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
966 | c->x86_phys_bits = eax & 0xff; | ||
967 | } | ||
968 | |||
969 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { | ||
970 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
971 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
972 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
973 | } | ||
974 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
975 | } | ||
976 | |||
977 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | ||
978 | { | ||
979 | char *v = c->x86_vendor_id; | ||
980 | |||
981 | if (!strcmp(v, "AuthenticAMD")) | ||
982 | c->x86_vendor = X86_VENDOR_AMD; | ||
983 | else if (!strcmp(v, "GenuineIntel")) | ||
984 | c->x86_vendor = X86_VENDOR_INTEL; | ||
985 | else if (!strcmp(v, "CentaurHauls")) | ||
986 | c->x86_vendor = X86_VENDOR_CENTAUR; | ||
987 | else | ||
988 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
989 | } | ||
990 | |||
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	/* Reset everything to a known baseline before probing. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	/* Leaf 0: EAX = max standard leaf; the 12-byte vendor string is
	   assembled from EBX, EDX, ECX in that order, hence the 0/8/4
	   offsets below. */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		/* Extended family/model fields apply for family 0xf
		   (family) and family >= 6 (model) respectively. */
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		/* EBX[15:8] holds the CLFLUSH line size in 8-byte units. */
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	/* Initial (pre-boot) APIC ID from CPUID leaf 1 EBX[31:24]. */
	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
	c->phys_proc_id = c->initial_apicid;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	/* Only trust extended leaves if EAX looks like 0x8000xxxx. */
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/* Re-read the extended level; pick up power management flags
	   (leaf 0x80000007) when present. */
	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	/* Vendor-specific early fixups. */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	case X86_VENDOR_CENTAUR:
		early_init_centaur(c);
		break;
	}

	validate_pat_support(c);
}
1083 | |||
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	/* Features spread over non-contiguous CPUID leaves. */
	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_CENTAUR:
		init_centaur(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overriden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

}
1154 | |||
1155 | void __cpuinit identify_boot_cpu(void) | ||
1156 | { | ||
1157 | identify_cpu(&boot_cpu_data); | ||
1158 | } | ||
1159 | |||
1160 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | ||
1161 | { | ||
1162 | BUG_ON(c == &boot_cpu_data); | ||
1163 | identify_cpu(c); | ||
1164 | mtrr_ap_init(); | ||
1165 | } | ||
1166 | |||
1167 | static __init int setup_noclflush(char *arg) | ||
1168 | { | ||
1169 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | ||
1170 | return 1; | ||
1171 | } | ||
1172 | __setup("noclflush", setup_noclflush); | ||
1173 | |||
1174 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | ||
1175 | { | ||
1176 | if (c->x86_model_id[0]) | ||
1177 | printk(KERN_CONT "%s", c->x86_model_id); | ||
1178 | |||
1179 | if (c->x86_mask || c->cpuid_level >= 0) | ||
1180 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | ||
1181 | else | ||
1182 | printk(KERN_CONT "\n"); | ||
1183 | } | ||
1184 | |||
1185 | static __init int setup_disablecpuid(char *arg) | ||
1186 | { | ||
1187 | int bit; | ||
1188 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) | ||
1189 | setup_clear_cpu_cap(bit); | ||
1190 | else | ||
1191 | return 0; | ||
1192 | return 1; | ||
1193 | } | ||
1194 | __setup("clearcpuid=", setup_disablecpuid); | ||