author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-09-04 15:09:44 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-09-04 15:09:44 -0400
commit		9d31d35b5f9d619bb2482235cc889326de049e29
tree		b6f398afa52fb85a848cb0804b70c47ff94174e0
parent		3da99c97763703b23cbf2bd6e96252256d4e4617
x86: order functions in cpu/common.c and cpu/common_64.c v2
v2: make 64-bit set c->x86_cache_alignment = c->x86_clflush_size
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/kernel/cpu/common.c    | 190
 arch/x86/kernel/cpu/common_64.c | 106
 2 files changed, 156 insertions(+), 140 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 96e1b8698d3a..10e89ae5a600 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -60,6 +60,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
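switch_to_new_gdt(), moved up here so both common.c variants keep the same layout, loads a per-CPU GDT and then reloads %fs so per-cpu references stop going through the boot-time "master" area. The descriptor it hands to lgdt via load_gdt() is the packed limit/base pair sketched below; a minimal standalone illustration whose field names mirror the kernel's struct desc_ptr, not kernel code itself:

    #include <stdint.h>

    /* Operand of lgdt on 32-bit x86: a 16-bit limit (table size in bytes
     * minus one, which is why the code above uses GDT_SIZE - 1) followed
     * by the 32-bit linear base address of the descriptor table. */
    struct gdt_ptr {
        uint16_t size;
        uint32_t address;
    } __attribute__((packed));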
@@ -123,15 +135,15 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
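This hunk threads EBX through the cpuid() call so the 32-bit display_cacheinfo() matches the 64-bit one, which already read EBX. Leaf 0x80000005 is AMD's L1 descriptor leaf: ECX describes the D-cache and EDX the I-cache, with the size in KB in bits 31..24 and the line size in bits 7..0. A userspace sketch of the same decode, assuming GCC or Clang on x86 for <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if the leaf is above the CPU's maximum
         * extended level, mirroring the n >= 0x80000005 guard above. */
        if (__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
            printf("L1 I: %uK (%u bytes/line), L1 D: %uK (%u bytes/line)\n",
                   edx >> 24, edx & 0xff, ecx >> 24, ecx & 0xff);
        return 0;
    }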
@@ -185,6 +197,51 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL;		/* Not found */
 }
 
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+{
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
+
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
+	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		return;
+
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
+			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					       ((1 << core_bits) - 1);
+
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+	}
+}
+#endif
 
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
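The detect_ht() block added here derives package and core IDs by slicing bit fields out of the CPU's initial APIC ID: the low get_count_order(smp_num_siblings) bits address the logical CPUs inside a package, and within those, the core bits sit above the SMT bits. A standalone sketch of the same arithmetic; get_count_order() (effectively ceil(log2(n)) as used here) is reimplemented because it is kernel-internal, and the input values are made up for illustration:

    #include <stdio.h>

    static int count_order(unsigned int n)  /* ceil(log2(n)) */
    {
        int order = 0;
        while ((1u << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int apicid    = 0x5;  /* example initial APIC ID */
        unsigned int siblings  = 4;    /* logical CPUs per package */
        unsigned int max_cores = 2;    /* cores per package */

        /* phys_pkg_id(apicid, n) shifts the APIC ID right by n bits. */
        int index_msb = count_order(siblings);
        unsigned int phys_proc_id = apicid >> index_msb;

        unsigned int smt_per_core = siblings / max_cores;
        int smt_bits  = count_order(smt_per_core);
        int core_bits = count_order(max_cores);
        unsigned int cpu_core_id =
            (apicid >> smt_bits) & ((1 << core_bits) - 1);

        printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
        return 0;
    }

With APIC ID 0x5, four siblings and two cores per package, this prints "package 1, core 0".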
@@ -258,7 +315,26 @@ static int __cpuinit have_cpuid_p(void)
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-void __init cpu_detect(struct cpuinfo_x86 *c)
+static void __init early_cpu_support_print(void)
+{
+	int i,j;
+	struct cpu_dev *cpu_devx;
+
+	printk("KERNEL supported cpus:\n");
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		cpu_devx = cpu_devs[i];
+		if (!cpu_devx)
+			continue;
+		for (j = 0; j < 2; j++) {
+			if (!cpu_devx->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpu_devx->c_vendor,
+				cpu_devx->c_ident[j]);
+		}
+	}
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -267,19 +343,20 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		c->x86_mask = tfms & 15;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
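Besides switching the masks to a consistent lowercase 0xf, this hunk moves the x86_mask (stepping) assignment up and derives x86_cache_alignment from x86_clflush_size, the same change the common_64.c half of the patch makes below. The leaf-1 decode can be reproduced in userspace (again assuming GCC/Clang's <cpuid.h>; cap0 is EDX, whose bit 19 is the CLFLUSH feature flag, and misc is EBX, whose bits 15..8 give the CLFLUSH line size in 8-byte units):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int tfms, misc, junk, cap0;

        __cpuid(1, tfms, misc, junk, cap0);   /* EAX, EBX, ECX, EDX */
        (void)junk;

        unsigned int family   = (tfms >> 8) & 0xf;
        unsigned int model    = (tfms >> 4) & 0xf;
        unsigned int stepping = tfms & 0xf;

        if (family == 0xf)                    /* extended family */
            family += (tfms >> 20) & 0xff;
        if (family >= 0x6)                    /* extended model */
            model += ((tfms >> 16) & 0xf) << 4;

        printf("family 0x%x model 0x%x stepping 0x%x\n",
               family, model, stepping);

        if (cap0 & (1 << 19))                 /* EDX bit 19: CLFLUSH */
            printf("clflush size: %u bytes\n", ((misc >> 8) & 0xff) * 8);
        return 0;
    }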
@@ -341,6 +418,17 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
+	early_cpu_support_print();
+	early_identify_cpu(&boot_cpu_data);
+}
+
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
  * family >= 6, unfortunately, that's not true in practice because
@@ -500,7 +588,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -529,52 +617,6 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
-
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
-
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
-
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
-}
-#endif
-
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -592,17 +634,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -618,16 +660,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_identify_cpu(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -636,18 +668,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 28719fe07941..522a5f2e405d 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -103,9 +103,8 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-		       "D cache %dK (%d bytes/line)\n",
-		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
 		c->x86_cache_size = (ecx>>24) + (edx>>24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
@@ -143,8 +142,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of "
-			       "siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -182,7 +181,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (cpu_devs[i]) {
 			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			    !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 				c->x86_vendor = i;
 				this_cpu = cpu_devs[i];
 				return;
@@ -217,39 +216,6 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
-/*
- * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
- * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect. Hence, probe for it based on first
- * principles.
- *
- * Note: no 64-bit chip is known to lack these, but put the code here
- * for consistency with 32 bits, and to make it utterly trivial to
- * diagnose the problem should it ever surface.
- */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
-{
-	const u32 nopl_signature = 0x888c53b1; /* Random number */
-	u32 has_nopl = nopl_signature;
-
-	clear_cpu_cap(c, X86_FEATURE_NOPL);
-	if (c->x86 >= 6) {
-		asm volatile("\n"
-			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-			     "2:\n"
-			     "	.section .fixup,\"ax\"\n"
-			     "3: xor %0,%0\n"
-			     "	jmp 2b\n"
-			     "	.previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : "+a" (has_nopl));
-
-		if (has_nopl == nopl_signature)
-			set_cpu_cap(c, X86_FEATURE_NOPL);
-	}
-}
-
 void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
@@ -258,6 +224,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[8],
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
+	c->x86 = 4;
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
@@ -268,12 +235,11 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (cap0 & (1<<19))
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-	} else {
-		/* Have CPUID level 0 only - unheard of */
-		c->x86 = 4;
+			c->x86_cache_alignment = c->x86_clflush_size;
+		}
 	}
 }
 
279 | 245 | ||
@@ -283,9 +249,6 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | |||
283 | u32 tfms, xlvl; | 249 | u32 tfms, xlvl; |
284 | u32 ebx; | 250 | u32 ebx; |
285 | 251 | ||
286 | /* Initialize the standard set of capabilities */ | ||
287 | /* Note that the vendor-specific code below might override */ | ||
288 | |||
289 | /* Intel-defined flags: level 0x00000001 */ | 252 | /* Intel-defined flags: level 0x00000001 */ |
290 | if (c->cpuid_level >= 0x00000001) { | 253 | if (c->cpuid_level >= 0x00000001) { |
291 | u32 capability, excap; | 254 | u32 capability, excap; |
@@ -361,6 +324,39 @@ void __init early_cpu_init(void)
 	early_identify_cpu(&boot_cpu_data);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3: xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;
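The detect_nopl() probe, re-added here at its new position, executes the NOPL opcode bytes directly and relies on the kernel's exception-fixup table (_ASM_EXTABLE) to survive the invalid-opcode fault on CPUs that lack the instruction. A userspace analogue of the same idea — a hypothetical sketch, with a SIGILL handler standing in for the fixup section:

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf env;

    static void on_sigill(int sig)
    {
        (void)sig;
        siglongjmp(env, 1);  /* plays the role of the fixup entry */
    }

    int main(void)
    {
        signal(SIGILL, on_sigill);

        if (sigsetjmp(env, 1) == 0) {
            asm volatile(".byte 0x0f, 0x1f, 0xc0");  /* nopl %eax */
            printf("NOPL supported\n");
        } else {
            printf("NOPL not supported\n");
        }
        return 0;
    }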
@@ -448,7 +444,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 }
 
-void __cpuinit identify_boot_cpu(void)
+void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
 }
@@ -460,13 +456,6 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-static __init int setup_noclflush(char *arg)
-{
-	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
-	return 1;
-}
-__setup("noclflush", setup_noclflush);
-
 struct msr_range {
 	unsigned min;
 	unsigned max;
@@ -510,6 +499,13 @@ static __init int setup_show_msr(char *arg)
 }
 __setup("show_msr=", setup_show_msr);
 
+static __init int setup_noclflush(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+	return 1;
+}
+__setup("noclflush", setup_noclflush);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])