| author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-09-04 15:09:44 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-04 15:09:44 -0400 |
| commit | 9d31d35b5f9d619bb2482235cc889326de049e29 (patch) | |
| tree | b6f398afa52fb85a848cb0804b70c47ff94174e0 /arch/x86/kernel/cpu/common.c | |
| parent | 3da99c97763703b23cbf2bd6e96252256d4e4617 (diff) | |
x86: order functions in cpu/common.c and cpu/common_64.c v2
v2: make 64 bit get c->x86_cache_alignment = c->x86_clflush_size
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 190 |
1 file changed, 105 insertions(+), 85 deletions(-)
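The v2 note refers to deriving x86_cache_alignment from the CLFLUSH line size, which CPUID leaf 1 reports in EBX bits 15:8 in 8-byte units — exactly what the reworked cpu_detect() below does. As a rough illustration of that decode, here is a minimal user-space sketch using GCC's <cpuid.h> helper rather than the kernel's cpuid() (a sketch only, not kernel code):

```c
#include <cpuid.h>	/* GCC/Clang wrapper for the CPUID instruction */
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 1: EBX[15:8] holds the CLFLUSH line size in 8-byte units,
	 * valid only when the CLFSH feature bit (EDX bit 19) is set. */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & (1 << 19))) {
		unsigned int clflush_size = ((ebx >> 8) & 0xff) * 8;
		/* The v2 change: cache alignment simply follows the
		 * CLFLUSH line size, on 64-bit as well as 32-bit. */
		unsigned int cache_alignment = clflush_size;

		printf("clflush line: %u bytes, alignment: %u bytes\n",
		       clflush_size, cache_alignment);
	}
	return 0;
}
```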
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 96e1b8698d3a..10e89ae5a600 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -60,6 +60,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
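This hunk moves switch_to_new_gdt() up unchanged (its old copy is removed near the end of the diff): it points GDTR at this CPU's own table and reloads %fs so per-cpu references stop going through the boot-time "master" area. The size = GDT_SIZE - 1 follows from the LGDT operand being a limit — the offset of the last valid byte — rather than a byte count. For reference, the descriptor pointer it loads has this shape (as declared in the kernel's <asm/desc_defs.h>; shown here for illustration):

```c
/* Memory operand of LGDT/LIDT: a 16-bit limit followed directly by
 * the linear base address, with no padding in between. */
struct desc_ptr {
	unsigned short size;	/* limit: last valid byte, i.e. GDT_SIZE - 1 */
	unsigned long address;	/* linear base of this CPU's GDT */
} __attribute__((packed));
```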
@@ -123,15 +135,15 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
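display_cacheinfo() reads the AMD-defined leaf 0x80000005, where the top byte of ECX/EDX gives the L1 D/I cache size in KB and the low byte the line size in bytes; the only functional change above is capturing EBX in its own variable instead of discarding it via dummy. A worked decode with a made-up register value (not from real hardware):

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical CPUID 0x80000005 output. */
	unsigned int ecx = 0x40004140;	/* L1 D-cache descriptor */
	unsigned int edx = 0x40004140;	/* L1 I-cache descriptor */

	/* Same decode as the hunk above: size in the top byte (KB),
	 * line size in the bottom byte (bytes). */
	printf("L1 I: %uK (%u bytes/line), D: %uK (%u bytes/line)\n",
	       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);

	/* x86_cache_size sums both L1 halves: 64 + 64 = 128 (KB). */
	printf("x86_cache_size = %u\n", (ecx >> 24) + (edx >> 24));
	return 0;
}
```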
@@ -185,6 +197,51 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL;		/* Not found */
 }
 
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+{
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
+
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
+	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		return;
+
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+				smp_num_siblings);
+			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					     ((1 << core_bits) - 1);
+
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+	}
+}
+#endif
 
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
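detect_ht() moves up from the bottom of the file (the removal hunk appears further down), with its awkward warning string fixed in passing. The topology math shifts the initial APIC ID by power-of-two field widths. Assuming phys_pkg_id(apicid, n) reduces to apicid >> n, and a hypothetical package with 2 cores of 2 threads (so CPUID.1:EBX[23:16] reports 4 logical processors) and initial APIC ID 5, the arithmetic works out like this:

```c
#include <stdio.h>

/* Rounds up to the next power-of-two order, like the kernel's
 * get_count_order(): 1 -> 0, 2 -> 1, 3 -> 2, 4 -> 2, ... */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* Hypothetical topology: 2 cores x 2 threads, initial APIC ID 5. */
	unsigned int apicid = 5;
	unsigned int smp_num_siblings = 4;	/* CPUID.1:EBX[23:16] */
	unsigned int x86_max_cores = 2;

	int index_msb = count_order(smp_num_siblings);		/* 2 */
	unsigned int phys_proc_id = apicid >> index_msb;	/* package 1 */

	smp_num_siblings /= x86_max_cores;		/* threads per core = 2 */
	index_msb = count_order(smp_num_siblings);	/* 1 */
	int core_bits = count_order(x86_max_cores);	/* 1 */
	unsigned int cpu_core_id =
		(apicid >> index_msb) & ((1 << core_bits) - 1);	/* core 0 */

	printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
	return 0;
}
```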
@@ -258,7 +315,26 @@ static int __cpuinit have_cpuid_p(void)
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-void __init cpu_detect(struct cpuinfo_x86 *c)
+static void __init early_cpu_support_print(void)
+{
+	int i,j;
+	struct cpu_dev *cpu_devx;
+
+	printk("KERNEL supported cpus:\n");
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		cpu_devx = cpu_devs[i];
+		if (!cpu_devx)
+			continue;
+		for (j = 0; j < 2; j++) {
+			if (!cpu_devx->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpu_devx->c_vendor,
+				cpu_devx->c_ident[j]);
+		}
+	}
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -267,19 +343,20 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
-		c->x86_mask = tfms & 15;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
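Besides switching to consistent 0xf masks, this hunk moves the stepping assignment next to the family/model decode and, per the v2 note, derives x86_cache_alignment from x86_clflush_size instead of computing the same expression twice. A worked decode of a sample CPUID.1:EAX signature (0x1067a is a real Penryn-class value, used here only as an example):

```c
#include <stdio.h>

int main(void)
{
	/* Example signature: family 6, extended model 1, model 7,
	 * stepping 10 - e.g. a 45nm Core 2 part. */
	unsigned int tfms = 0x0001067a;

	unsigned int family   = (tfms >> 8) & 0xf;	/* 6 */
	unsigned int model    = (tfms >> 4) & 0xf;	/* 7 */
	unsigned int stepping = tfms & 0xf;		/* 10 */

	if (family == 0xf)			/* extended family field */
		family += (tfms >> 20) & 0xff;
	if (family >= 0x6)			/* extended model field */
		model += ((tfms >> 16) & 0xf) << 4;	/* 7 + 0x10 = 0x17 */

	printf("family %u, model 0x%x, stepping %u\n",
	       family, model, stepping);
	return 0;
}
```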
@@ -341,6 +418,17 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
+	early_cpu_support_print();
+	early_identify_cpu(&boot_cpu_data);
+}
+
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
  * family >= 6, unfortunately, that's not true in practice because
@@ -500,7 +588,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -529,52 +617,6 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
-
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
-
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
-
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					     ((1 << core_bits) - 1);
-
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
-}
-#endif
-
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -592,17 +634,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 }
 
 static __init int setup_disablecpuid(char *arg)
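These printks continue a console line begun earlier, so each piece now carries KERN_CONT; without it, every call is treated as the start of a new log record at the default level and the line can be split apart. An illustrative fragment of the pattern (kernel context assumed, not a standalone program):

```c
/* One logical console line assembled from several printk calls:
 * the first call sets the log level, continuations use KERN_CONT. */
printk(KERN_INFO "CPU: ");
printk(KERN_CONT "%s", c->x86_model_id);
printk(KERN_CONT " stepping %02x\n", c->x86_mask);
```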
@@ -618,16 +660,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_identify_cpu(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -636,18 +668,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT