author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-06-21 06:24:19 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-08 06:48:34 -0400
commit | f580366f77cc4e035a68369105fbeae5bf436b4c (patch)
tree | addd3a9503c63defe45320040d85fac4fb6f943a /arch/x86/kernel/setup_64.c
parent | 04606618bb50c4ec754585a82732ea4facfe2bc9 (diff)
x86: separate funcs from setup_64 to cpu common_64.c
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/setup_64.c')
-rw-r--r-- | arch/x86/kernel/setup_64.c | 823
1 files changed, 0 insertions, 823 deletions
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
deleted file mode 100644
index 9b516eecada0..000000000000
--- a/arch/x86/kernel/setup_64.c
+++ /dev/null
@@ -1,823 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 1995 Linus Torvalds | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | * This file handles the architecture-dependent parts of initialization | ||
7 | */ | ||
8 | |||
9 | #include <linux/errno.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/unistd.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/user.h> | ||
18 | #include <linux/screen_info.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/initrd.h> | ||
23 | #include <linux/highmem.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <linux/console.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | #include <linux/crash_dump.h> | ||
30 | #include <linux/root_dev.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <asm/pci-direct.h> | ||
33 | #include <linux/efi.h> | ||
34 | #include <linux/acpi.h> | ||
35 | #include <linux/kallsyms.h> | ||
36 | #include <linux/edd.h> | ||
37 | #include <linux/iscsi_ibft.h> | ||
38 | #include <linux/mmzone.h> | ||
39 | #include <linux/kexec.h> | ||
40 | #include <linux/cpufreq.h> | ||
41 | #include <linux/dmi.h> | ||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/ctype.h> | ||
44 | #include <linux/sort.h> | ||
45 | #include <linux/uaccess.h> | ||
46 | #include <linux/init_ohci1394_dma.h> | ||
47 | #include <linux/kvm_para.h> | ||
48 | |||
49 | #include <asm/mtrr.h> | ||
50 | #include <asm/uaccess.h> | ||
51 | #include <asm/system.h> | ||
52 | #include <asm/vsyscall.h> | ||
53 | #include <asm/io.h> | ||
54 | #include <asm/smp.h> | ||
55 | #include <asm/msr.h> | ||
56 | #include <asm/desc.h> | ||
57 | #include <video/edid.h> | ||
58 | #include <asm/e820.h> | ||
59 | #include <asm/mpspec.h> | ||
60 | #include <asm/dma.h> | ||
61 | #include <asm/gart.h> | ||
62 | #include <asm/mpspec.h> | ||
63 | #include <asm/mmu_context.h> | ||
64 | #include <asm/proto.h> | ||
65 | #include <asm/setup.h> | ||
66 | #include <asm/numa.h> | ||
67 | #include <asm/sections.h> | ||
68 | #include <asm/dmi.h> | ||
69 | #include <asm/cacheflush.h> | ||
70 | #include <asm/mce.h> | ||
71 | #include <asm/ds.h> | ||
72 | #include <asm/topology.h> | ||
73 | #include <asm/trampoline.h> | ||
74 | #include <asm/pat.h> | ||
75 | |||
76 | #include <mach_apic.h> | ||
77 | #ifdef CONFIG_PARAVIRT | ||
78 | #include <asm/paravirt.h> | ||
79 | #else | ||
80 | #define ARCH_SETUP | ||
81 | #endif | ||
82 | |||
83 | /* | ||
84 | * Machine setup.. | ||
85 | */ | ||
86 | |||
87 | struct cpuinfo_x86 boot_cpu_data __read_mostly; | ||
88 | EXPORT_SYMBOL(boot_cpu_data); | ||
89 | |||
90 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
91 | |||
92 | unsigned long mmu_cr4_features; | ||
93 | |||
94 | /* Boot loader ID as an integer, for the benefit of proc_dointvec */ | ||
95 | int bootloader_type; | ||
96 | |||
97 | unsigned long saved_video_mode; | ||
98 | |||
99 | /* | ||
100 | * Early DMI memory | ||
101 | */ | ||
102 | int dmi_alloc_index; | ||
103 | char dmi_alloc_data[DMI_MAX_DATA]; | ||
104 | |||
105 | /* | ||
106 | * Setup options | ||
107 | */ | ||
108 | struct screen_info screen_info; | ||
109 | EXPORT_SYMBOL(screen_info); | ||
110 | struct sys_desc_table_struct { | ||
111 | unsigned short length; | ||
112 | unsigned char table[0]; | ||
113 | }; | ||
114 | |||
115 | struct edid_info edid_info; | ||
116 | EXPORT_SYMBOL_GPL(edid_info); | ||
117 | |||
118 | extern int root_mountflags; | ||
119 | |||
120 | static char __initdata command_line[COMMAND_LINE_SIZE]; | ||
121 | |||
122 | static struct resource standard_io_resources[] = { | ||
123 | { .name = "dma1", .start = 0x00, .end = 0x1f, | ||
124 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
125 | { .name = "pic1", .start = 0x20, .end = 0x21, | ||
126 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
127 | { .name = "timer0", .start = 0x40, .end = 0x43, | ||
128 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
129 | { .name = "timer1", .start = 0x50, .end = 0x53, | ||
130 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
131 | { .name = "keyboard", .start = 0x60, .end = 0x60, | ||
132 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
133 | { .name = "keyboard", .start = 0x64, .end = 0x64, | ||
134 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
135 | { .name = "dma page reg", .start = 0x80, .end = 0x8f, | ||
136 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
137 | { .name = "pic2", .start = 0xa0, .end = 0xa1, | ||
138 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
139 | { .name = "dma2", .start = 0xc0, .end = 0xdf, | ||
140 | .flags = IORESOURCE_BUSY | IORESOURCE_IO }, | ||
141 | { .name = "fpu", .start = 0xf0, .end = 0xff, | ||
142 | .flags = IORESOURCE_BUSY | IORESOURCE_IO } | ||
143 | }; | ||
144 | |||
145 | #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM) | ||
146 | |||
147 | static struct resource data_resource = { | ||
148 | .name = "Kernel data", | ||
149 | .start = 0, | ||
150 | .end = 0, | ||
151 | .flags = IORESOURCE_RAM, | ||
152 | }; | ||
153 | static struct resource code_resource = { | ||
154 | .name = "Kernel code", | ||
155 | .start = 0, | ||
156 | .end = 0, | ||
157 | .flags = IORESOURCE_RAM, | ||
158 | }; | ||
159 | static struct resource bss_resource = { | ||
160 | .name = "Kernel bss", | ||
161 | .start = 0, | ||
162 | .end = 0, | ||
163 | .flags = IORESOURCE_RAM, | ||
164 | }; | ||
165 | |||
166 | static void __init early_cpu_init(void); | ||
167 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | ||
168 | |||
169 | #ifdef CONFIG_PROC_VMCORE | ||
170 | /* elfcorehdr= specifies the location of elf core header | ||
171 | * stored by the crashed kernel. This option will be passed | ||
172 | * by kexec loader to the capture kernel. | ||
173 | */ | ||
174 | static int __init setup_elfcorehdr(char *arg) | ||
175 | { | ||
176 | char *end; | ||
177 | if (!arg) | ||
178 | return -EINVAL; | ||
179 | elfcorehdr_addr = memparse(arg, &end); | ||
180 | return end > arg ? 0 : -EINVAL; | ||
181 | } | ||
182 | early_param("elfcorehdr", setup_elfcorehdr); | ||
183 | #endif | ||
184 | |||
185 | #ifndef CONFIG_NUMA | ||
186 | static void __init | ||
187 | contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn) | ||
188 | { | ||
189 | unsigned long bootmap_size, bootmap; | ||
190 | |||
191 | bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT; | ||
192 | bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size, | ||
193 | PAGE_SIZE); | ||
194 | if (bootmap == -1L) | ||
195 | panic("Cannot find bootmem map of size %ld\n", bootmap_size); | ||
196 | bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn); | ||
197 | e820_register_active_regions(0, start_pfn, end_pfn); | ||
198 | free_bootmem_with_active_regions(0, end_pfn); | ||
199 | early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT); | ||
200 | reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT); | ||
201 | } | ||
202 | #endif | ||
203 | |||
204 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) | ||
205 | struct edd edd; | ||
206 | #ifdef CONFIG_EDD_MODULE | ||
207 | EXPORT_SYMBOL(edd); | ||
208 | #endif | ||
209 | /** | ||
210 | * copy_edd() - Copy the BIOS EDD information | ||
211 | * from boot_params into a safe place. | ||
212 | * | ||
213 | */ | ||
214 | static inline void copy_edd(void) | ||
215 | { | ||
216 | memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, | ||
217 | sizeof(edd.mbr_signature)); | ||
218 | memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); | ||
219 | edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; | ||
220 | edd.edd_info_nr = boot_params.eddbuf_entries; | ||
221 | } | ||
222 | #else | ||
223 | static inline void copy_edd(void) | ||
224 | { | ||
225 | } | ||
226 | #endif | ||
227 | |||
228 | /* Overridden in paravirt.c if CONFIG_PARAVIRT */ | ||
229 | void __attribute__((weak)) __init memory_setup(void) | ||
230 | { | ||
231 | machine_specific_memory_setup(); | ||
232 | } | ||
233 | |||
234 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
235 | * it's on the real one. */ | ||
236 | void switch_to_new_gdt(void) | ||
237 | { | ||
238 | struct desc_ptr gdt_descr; | ||
239 | |||
240 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
241 | gdt_descr.size = GDT_SIZE - 1; | ||
242 | load_gdt(&gdt_descr); | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * setup_arch - architecture-specific boot-time initializations | ||
247 | * | ||
248 | * Note: On x86_64, fixmaps are ready for use even before this is called. | ||
249 | */ | ||
250 | void __init setup_arch(char **cmdline_p) | ||
251 | { | ||
252 | unsigned i; | ||
253 | |||
254 | printk(KERN_INFO "Command line: %s\n", boot_command_line); | ||
255 | |||
256 | ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); | ||
257 | screen_info = boot_params.screen_info; | ||
258 | edid_info = boot_params.edid_info; | ||
259 | saved_video_mode = boot_params.hdr.vid_mode; | ||
260 | bootloader_type = boot_params.hdr.type_of_loader; | ||
261 | |||
262 | #ifdef CONFIG_BLK_DEV_RAM | ||
263 | rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; | ||
264 | rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); | ||
265 | rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); | ||
266 | #endif | ||
267 | #ifdef CONFIG_EFI | ||
268 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, | ||
269 | "EL64", 4)) { | ||
270 | efi_enabled = 1; | ||
271 | efi_reserve_early(); | ||
272 | } | ||
273 | #endif | ||
274 | |||
275 | ARCH_SETUP | ||
276 | |||
277 | setup_memory_map(); | ||
278 | copy_edd(); | ||
279 | |||
280 | if (!boot_params.hdr.root_flags) | ||
281 | root_mountflags &= ~MS_RDONLY; | ||
282 | init_mm.start_code = (unsigned long) &_text; | ||
283 | init_mm.end_code = (unsigned long) &_etext; | ||
284 | init_mm.end_data = (unsigned long) &_edata; | ||
285 | init_mm.brk = (unsigned long) &_end; | ||
286 | |||
287 | code_resource.start = virt_to_phys(&_text); | ||
288 | code_resource.end = virt_to_phys(&_etext)-1; | ||
289 | data_resource.start = virt_to_phys(&_etext); | ||
290 | data_resource.end = virt_to_phys(&_edata)-1; | ||
291 | bss_resource.start = virt_to_phys(&__bss_start); | ||
292 | bss_resource.end = virt_to_phys(&__bss_stop)-1; | ||
293 | |||
294 | early_cpu_init(); | ||
295 | early_identify_cpu(&boot_cpu_data); | ||
296 | |||
297 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | ||
298 | *cmdline_p = command_line; | ||
299 | |||
300 | parse_setup_data(); | ||
301 | |||
302 | parse_early_param(); | ||
303 | |||
304 | if (acpi_mps_check()) { | ||
305 | disable_apic = 1; | ||
306 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | ||
307 | } | ||
308 | |||
309 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
310 | if (init_ohci1394_dma_early) | ||
311 | init_ohci1394_dma_on_all_controllers(); | ||
312 | #endif | ||
313 | |||
314 | finish_e820_parsing(); | ||
315 | |||
316 | /* after parse_early_param, so could debug it */ | ||
317 | insert_resource(&iomem_resource, &code_resource); | ||
318 | insert_resource(&iomem_resource, &data_resource); | ||
319 | insert_resource(&iomem_resource, &bss_resource); | ||
320 | |||
321 | early_gart_iommu_check(); | ||
322 | |||
323 | e820_register_active_regions(0, 0, -1UL); | ||
324 | /* | ||
325 | * partially used pages are not usable - thus | ||
326 | * we are rounding upwards: | ||
327 | */ | ||
328 | end_pfn = e820_end_of_ram(); | ||
329 | |||
330 | /* pre allocte 4k for mptable mpc */ | ||
331 | early_reserve_e820_mpc_new(); | ||
332 | /* update e820 for memory not covered by WB MTRRs */ | ||
333 | mtrr_bp_init(); | ||
334 | if (mtrr_trim_uncached_memory(end_pfn)) { | ||
335 | remove_all_active_ranges(); | ||
336 | e820_register_active_regions(0, 0, -1UL); | ||
337 | end_pfn = e820_end_of_ram(); | ||
338 | } | ||
339 | |||
340 | num_physpages = end_pfn; | ||
341 | |||
342 | check_efer(); | ||
343 | |||
344 | max_pfn_mapped = init_memory_mapping(0, (end_pfn << PAGE_SHIFT)); | ||
345 | if (efi_enabled) | ||
346 | efi_init(); | ||
347 | |||
348 | vsmp_init(); | ||
349 | |||
350 | dmi_scan_machine(); | ||
351 | |||
352 | io_delay_init(); | ||
353 | |||
354 | #ifdef CONFIG_KVM_CLOCK | ||
355 | kvmclock_init(); | ||
356 | #endif | ||
357 | |||
358 | /* | ||
359 | * Initialize the ACPI boot-time table parser (gets the RSDP and SDT). | ||
360 | * Call this early for SRAT node setup. | ||
361 | */ | ||
362 | acpi_boot_table_init(); | ||
363 | |||
364 | /* How many end-of-memory variables you have, grandma! */ | ||
365 | max_low_pfn = end_pfn; | ||
366 | max_pfn = end_pfn; | ||
367 | high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1; | ||
368 | |||
369 | /* Remove active ranges so rediscovery with NUMA-awareness happens */ | ||
370 | remove_all_active_ranges(); | ||
371 | |||
372 | #ifdef CONFIG_ACPI_NUMA | ||
373 | /* | ||
374 | * Parse SRAT to discover nodes. | ||
375 | */ | ||
376 | acpi_numa_init(); | ||
377 | #endif | ||
378 | |||
379 | #ifdef CONFIG_NUMA | ||
380 | numa_initmem_init(0, end_pfn); | ||
381 | #else | ||
382 | contig_initmem_init(0, end_pfn); | ||
383 | #endif | ||
384 | |||
385 | dma32_reserve_bootmem(); | ||
386 | |||
387 | #ifdef CONFIG_ACPI_SLEEP | ||
388 | /* | ||
389 | * Reserve low memory region for sleep support. | ||
390 | */ | ||
391 | acpi_reserve_bootmem(); | ||
392 | #endif | ||
393 | |||
394 | #ifdef CONFIG_X86_MPPARSE | ||
395 | /* | ||
396 | * Find and reserve possible boot-time SMP configuration: | ||
397 | */ | ||
398 | find_smp_config(); | ||
399 | #endif | ||
400 | #ifdef CONFIG_BLK_DEV_INITRD | ||
401 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | ||
402 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | ||
403 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; | ||
404 | unsigned long ramdisk_end = ramdisk_image + ramdisk_size; | ||
405 | unsigned long end_of_mem = end_pfn << PAGE_SHIFT; | ||
406 | |||
407 | if (ramdisk_end <= end_of_mem) { | ||
408 | /* | ||
409 | * don't need to reserve again, already reserved early | ||
410 | * in x86_64_start_kernel, and early_res_to_bootmem | ||
411 | * convert that to reserved in bootmem | ||
412 | */ | ||
413 | initrd_start = ramdisk_image + PAGE_OFFSET; | ||
414 | initrd_end = initrd_start+ramdisk_size; | ||
415 | } else { | ||
416 | free_bootmem(ramdisk_image, ramdisk_size); | ||
417 | printk(KERN_ERR "initrd extends beyond end of memory " | ||
418 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | ||
419 | ramdisk_end, end_of_mem); | ||
420 | initrd_start = 0; | ||
421 | } | ||
422 | } | ||
423 | #endif | ||
424 | reserve_crashkernel(); | ||
425 | |||
426 | reserve_ibft_region(); | ||
427 | |||
428 | paging_init(); | ||
429 | map_vsyscall(); | ||
430 | |||
431 | early_quirks(); | ||
432 | |||
433 | /* | ||
434 | * Read APIC and some other early information from ACPI tables. | ||
435 | */ | ||
436 | acpi_boot_init(); | ||
437 | |||
438 | init_cpu_to_node(); | ||
439 | |||
440 | #ifdef CONFIG_X86_MPPARSE | ||
441 | /* | ||
442 | * get boot-time SMP configuration: | ||
443 | */ | ||
444 | if (smp_found_config) | ||
445 | get_smp_config(); | ||
446 | #endif | ||
447 | init_apic_mappings(); | ||
448 | ioapic_init_mappings(); | ||
449 | |||
450 | kvm_guest_init(); | ||
451 | |||
452 | /* | ||
453 | * We trust e820 completely. No explicit ROM probing in memory. | ||
454 | */ | ||
455 | e820_reserve_resources(); | ||
456 | e820_mark_nosave_regions(end_pfn); | ||
457 | |||
458 | /* request I/O space for devices used on all i[345]86 PCs */ | ||
459 | for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++) | ||
460 | request_resource(&ioport_resource, &standard_io_resources[i]); | ||
461 | |||
462 | e820_setup_gap(); | ||
463 | |||
464 | #ifdef CONFIG_VT | ||
465 | #if defined(CONFIG_VGA_CONSOLE) | ||
466 | if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) | ||
467 | conswitchp = &vga_con; | ||
468 | #elif defined(CONFIG_DUMMY_CONSOLE) | ||
469 | conswitchp = &dummy_con; | ||
470 | #endif | ||
471 | #endif | ||
472 | } | ||
473 | |||
474 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
475 | |||
476 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | ||
477 | { | ||
478 | display_cacheinfo(c); | ||
479 | } | ||
480 | |||
481 | static struct cpu_dev __cpuinitdata default_cpu = { | ||
482 | .c_init = default_init, | ||
483 | .c_vendor = "Unknown", | ||
484 | }; | ||
485 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
486 | |||
487 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | ||
488 | { | ||
489 | unsigned int *v; | ||
490 | |||
491 | if (c->extended_cpuid_level < 0x80000004) | ||
492 | return 0; | ||
493 | |||
494 | v = (unsigned int *) c->x86_model_id; | ||
495 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | ||
496 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | ||
497 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | ||
498 | c->x86_model_id[48] = 0; | ||
499 | return 1; | ||
500 | } | ||
501 | |||
502 | |||
503 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | ||
504 | { | ||
505 | unsigned int n, dummy, eax, ebx, ecx, edx; | ||
506 | |||
507 | n = c->extended_cpuid_level; | ||
508 | |||
509 | if (n >= 0x80000005) { | ||
510 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | ||
511 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), " | ||
512 | "D cache %dK (%d bytes/line)\n", | ||
513 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
514 | c->x86_cache_size = (ecx>>24) + (edx>>24); | ||
515 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
516 | c->x86_tlbsize = 0; | ||
517 | } | ||
518 | |||
519 | if (n >= 0x80000006) { | ||
520 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | ||
521 | ecx = cpuid_ecx(0x80000006); | ||
522 | c->x86_cache_size = ecx >> 16; | ||
523 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
524 | |||
525 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
526 | c->x86_cache_size, ecx & 0xFF); | ||
527 | } | ||
528 | if (n >= 0x80000008) { | ||
529 | cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); | ||
530 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
531 | c->x86_phys_bits = eax & 0xff; | ||
532 | } | ||
533 | } | ||
534 | |||
535 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | ||
536 | { | ||
537 | #ifdef CONFIG_SMP | ||
538 | u32 eax, ebx, ecx, edx; | ||
539 | int index_msb, core_bits; | ||
540 | |||
541 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
542 | |||
543 | |||
544 | if (!cpu_has(c, X86_FEATURE_HT)) | ||
545 | return; | ||
546 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
547 | goto out; | ||
548 | |||
549 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
550 | |||
551 | if (smp_num_siblings == 1) { | ||
552 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | ||
553 | } else if (smp_num_siblings > 1) { | ||
554 | |||
555 | if (smp_num_siblings > NR_CPUS) { | ||
556 | printk(KERN_WARNING "CPU: Unsupported number of " | ||
557 | "siblings %d", smp_num_siblings); | ||
558 | smp_num_siblings = 1; | ||
559 | return; | ||
560 | } | ||
561 | |||
562 | index_msb = get_count_order(smp_num_siblings); | ||
563 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
564 | |||
565 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
566 | |||
567 | index_msb = get_count_order(smp_num_siblings); | ||
568 | |||
569 | core_bits = get_count_order(c->x86_max_cores); | ||
570 | |||
571 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
572 | ((1 << core_bits) - 1); | ||
573 | } | ||
574 | out: | ||
575 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
576 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
577 | c->phys_proc_id); | ||
578 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
579 | c->cpu_core_id); | ||
580 | } | ||
581 | |||
582 | #endif | ||
583 | } | ||
584 | |||
585 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | ||
586 | { | ||
587 | char *v = c->x86_vendor_id; | ||
588 | int i; | ||
589 | static int printed; | ||
590 | |||
591 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
592 | if (cpu_devs[i]) { | ||
593 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | ||
594 | (cpu_devs[i]->c_ident[1] && | ||
595 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | ||
596 | c->x86_vendor = i; | ||
597 | this_cpu = cpu_devs[i]; | ||
598 | return; | ||
599 | } | ||
600 | } | ||
601 | } | ||
602 | if (!printed) { | ||
603 | printed++; | ||
604 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | ||
605 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | ||
606 | } | ||
607 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
608 | } | ||
609 | |||
610 | static void __init early_cpu_support_print(void) | ||
611 | { | ||
612 | int i,j; | ||
613 | struct cpu_dev *cpu_devx; | ||
614 | |||
615 | printk("KERNEL supported cpus:\n"); | ||
616 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
617 | cpu_devx = cpu_devs[i]; | ||
618 | if (!cpu_devx) | ||
619 | continue; | ||
620 | for (j = 0; j < 2; j++) { | ||
621 | if (!cpu_devx->c_ident[j]) | ||
622 | continue; | ||
623 | printk(" %s %s\n", cpu_devx->c_vendor, | ||
624 | cpu_devx->c_ident[j]); | ||
625 | } | ||
626 | } | ||
627 | } | ||
628 | |||
629 | static void __init early_cpu_init(void) | ||
630 | { | ||
631 | struct cpu_vendor_dev *cvdev; | ||
632 | |||
633 | for (cvdev = __x86cpuvendor_start ; | ||
634 | cvdev < __x86cpuvendor_end ; | ||
635 | cvdev++) | ||
636 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
637 | early_cpu_support_print(); | ||
638 | } | ||
639 | |||
640 | /* Do some early cpuid on the boot CPU to get some parameter that are | ||
641 | needed before check_bugs. Everything advanced is in identify_cpu | ||
642 | below. */ | ||
643 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | ||
644 | { | ||
645 | u32 tfms, xlvl; | ||
646 | |||
647 | c->loops_per_jiffy = loops_per_jiffy; | ||
648 | c->x86_cache_size = -1; | ||
649 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
650 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | ||
651 | c->x86_vendor_id[0] = '\0'; /* Unset */ | ||
652 | c->x86_model_id[0] = '\0'; /* Unset */ | ||
653 | c->x86_clflush_size = 64; | ||
654 | c->x86_cache_alignment = c->x86_clflush_size; | ||
655 | c->x86_max_cores = 1; | ||
656 | c->x86_coreid_bits = 0; | ||
657 | c->extended_cpuid_level = 0; | ||
658 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
659 | |||
660 | /* Get vendor name */ | ||
661 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | ||
662 | (unsigned int *)&c->x86_vendor_id[0], | ||
663 | (unsigned int *)&c->x86_vendor_id[8], | ||
664 | (unsigned int *)&c->x86_vendor_id[4]); | ||
665 | |||
666 | get_cpu_vendor(c); | ||
667 | |||
668 | /* Initialize the standard set of capabilities */ | ||
669 | /* Note that the vendor-specific code below might override */ | ||
670 | |||
671 | /* Intel-defined flags: level 0x00000001 */ | ||
672 | if (c->cpuid_level >= 0x00000001) { | ||
673 | __u32 misc; | ||
674 | cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4], | ||
675 | &c->x86_capability[0]); | ||
676 | c->x86 = (tfms >> 8) & 0xf; | ||
677 | c->x86_model = (tfms >> 4) & 0xf; | ||
678 | c->x86_mask = tfms & 0xf; | ||
679 | if (c->x86 == 0xf) | ||
680 | c->x86 += (tfms >> 20) & 0xff; | ||
681 | if (c->x86 >= 0x6) | ||
682 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
683 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
684 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | ||
685 | } else { | ||
686 | /* Have CPUID level 0 only - unheard of */ | ||
687 | c->x86 = 4; | ||
688 | } | ||
689 | |||
690 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
691 | #ifdef CONFIG_SMP | ||
692 | c->phys_proc_id = c->initial_apicid; | ||
693 | #endif | ||
694 | /* AMD-defined flags: level 0x80000001 */ | ||
695 | xlvl = cpuid_eax(0x80000000); | ||
696 | c->extended_cpuid_level = xlvl; | ||
697 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
698 | if (xlvl >= 0x80000001) { | ||
699 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
700 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
701 | } | ||
702 | if (xlvl >= 0x80000004) | ||
703 | get_model_name(c); /* Default name */ | ||
704 | } | ||
705 | |||
706 | /* Transmeta-defined flags: level 0x80860001 */ | ||
707 | xlvl = cpuid_eax(0x80860000); | ||
708 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
709 | /* Don't set x86_cpuid_level here for now to not confuse. */ | ||
710 | if (xlvl >= 0x80860001) | ||
711 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
712 | } | ||
713 | |||
714 | c->extended_cpuid_level = cpuid_eax(0x80000000); | ||
715 | if (c->extended_cpuid_level >= 0x80000007) | ||
716 | c->x86_power = cpuid_edx(0x80000007); | ||
717 | |||
718 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | ||
719 | cpu_devs[c->x86_vendor]->c_early_init) | ||
720 | cpu_devs[c->x86_vendor]->c_early_init(c); | ||
721 | |||
722 | validate_pat_support(c); | ||
723 | |||
724 | /* early_param could clear that, but recall get it set again */ | ||
725 | if (disable_apic) | ||
726 | clear_cpu_cap(c, X86_FEATURE_APIC); | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * This does the hard work of actually picking apart the CPU stuff... | ||
731 | */ | ||
732 | void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | ||
733 | { | ||
734 | int i; | ||
735 | |||
736 | early_identify_cpu(c); | ||
737 | |||
738 | init_scattered_cpuid_features(c); | ||
739 | |||
740 | c->apicid = phys_pkg_id(0); | ||
741 | |||
742 | /* | ||
743 | * Vendor-specific initialization. In this section we | ||
744 | * canonicalize the feature flags, meaning if there are | ||
745 | * features a certain CPU supports which CPUID doesn't | ||
746 | * tell us, CPUID claiming incorrect flags, or other bugs, | ||
747 | * we handle them here. | ||
748 | * | ||
749 | * At the end of this section, c->x86_capability better | ||
750 | * indicate the features this CPU genuinely supports! | ||
751 | */ | ||
752 | if (this_cpu->c_init) | ||
753 | this_cpu->c_init(c); | ||
754 | |||
755 | detect_ht(c); | ||
756 | |||
757 | /* | ||
758 | * On SMP, boot_cpu_data holds the common feature set between | ||
759 | * all CPUs; so make sure that we indicate which features are | ||
760 | * common between the CPUs. The first time this routine gets | ||
761 | * executed, c == &boot_cpu_data. | ||
762 | */ | ||
763 | if (c != &boot_cpu_data) { | ||
764 | /* AND the already accumulated flags with these */ | ||
765 | for (i = 0; i < NCAPINTS; i++) | ||
766 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | ||
767 | } | ||
768 | |||
769 | /* Clear all flags overriden by options */ | ||
770 | for (i = 0; i < NCAPINTS; i++) | ||
771 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | ||
772 | |||
773 | #ifdef CONFIG_X86_MCE | ||
774 | mcheck_init(c); | ||
775 | #endif | ||
776 | select_idle_routine(c); | ||
777 | |||
778 | #ifdef CONFIG_NUMA | ||
779 | numa_add_cpu(smp_processor_id()); | ||
780 | #endif | ||
781 | |||
782 | } | ||
783 | |||
784 | void __cpuinit identify_boot_cpu(void) | ||
785 | { | ||
786 | identify_cpu(&boot_cpu_data); | ||
787 | } | ||
788 | |||
789 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | ||
790 | { | ||
791 | BUG_ON(c == &boot_cpu_data); | ||
792 | identify_cpu(c); | ||
793 | mtrr_ap_init(); | ||
794 | } | ||
795 | |||
796 | static __init int setup_noclflush(char *arg) | ||
797 | { | ||
798 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | ||
799 | return 1; | ||
800 | } | ||
801 | __setup("noclflush", setup_noclflush); | ||
802 | |||
803 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | ||
804 | { | ||
805 | if (c->x86_model_id[0]) | ||
806 | printk(KERN_CONT "%s", c->x86_model_id); | ||
807 | |||
808 | if (c->x86_mask || c->cpuid_level >= 0) | ||
809 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | ||
810 | else | ||
811 | printk(KERN_CONT "\n"); | ||
812 | } | ||
813 | |||
814 | static __init int setup_disablecpuid(char *arg) | ||
815 | { | ||
816 | int bit; | ||
817 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) | ||
818 | setup_clear_cpu_cap(bit); | ||
819 | else | ||
820 | return 0; | ||
821 | return 1; | ||
822 | } | ||
823 | __setup("clearcpuid=", setup_disablecpuid); | ||