author		Ingo Molnar <mingo@elte.hu>	2009-03-14 03:46:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-14 05:37:34 -0400
commit		0f3fa48a7eaf5d1118cfda1650e8c759b2a116e4 (patch)
tree		a24e81ada634fcd2298d44f079208ebf18de5916 /arch/x86/kernel/cpu/common.c
parent		9766cdbcb260389669e9679b2aa87c11832f479f (diff)
x86: cpu/common.c more cleanups
Complete/fix the cleanups of cpu/common.c:
- fix ugly warning due to asm/topology.h -> linux/topology.h change
- standardize the style across the file
- simplify/refactor the code flow where possible
Cc: Jaswinder Singh Rajput <jaswinder@kernel.org>
LKML-Reference: <1237009789.4387.2.camel@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	243
1 files changed, 143 insertions, 100 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cad6878c88db..a9e3791ca098 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,4 +1,3 @@
-#include <linux/topology.h>
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -18,6 +17,7 @@
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
+#include <asm/topology.h>
 #include <asm/cpumask.h>
 #include <asm/pgtable.h>
 #include <asm/atomic.h>
@@ -82,45 +82,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS]	= { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00affb00 } } },
 #else
	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * They code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time. All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
	GDT_STACK_CANARY_INIT
 #endif
 } };
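The hunk above is style churn only (per the "standardize the style" goal of the commit); the descriptor values themselves are unchanged. Each entry packs base, limit and flags into two 32-bit words. A standalone sketch, not from the patch, that decodes them assuming the standard x86 segment-descriptor layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Decode the two 32-bit words of an x86 segment descriptor, as stored
 * in the gdt_page table above. Illustrative sketch, not kernel code.
 */
static void decode_gdt_entry(uint32_t a, uint32_t b)
{
	uint32_t base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
	uint32_t limit = (a & 0xffff) | (b & 0x000f0000);
	unsigned int type = (b >> 8)  & 0xf;	/* code/data type bits */
	unsigned int dpl  = (b >> 13) & 0x3;	/* descriptor privilege level */
	unsigned int g    = (b >> 23) & 0x1;	/* limit counted in 4K pages? */
	unsigned int l    = (b >> 21) & 0x1;	/* 64-bit code segment? */

	printf("base=%#010x limit=%#07x type=%#x dpl=%u g=%u l=%u\n",
	       base, limit, type, dpl, g, l);
}

int main(void)
{
	decode_gdt_entry(0x0000ffff, 0x00af9b00);	/* GDT_ENTRY_KERNEL_CS, 64-bit */
	decode_gdt_entry(0x0000ffff, 0x00cf9300);	/* GDT_ENTRY_KERNEL_DS */
	return 0;
}

0x00af9b00 decodes to a present, DPL-0, long-mode code segment; 0x00cf9300 to a flat 4 GB writable data segment.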
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "movl %0, %1 \n\t"
+		      "xorl %2, %0 \n\t"
+		      "pushl %0 \n\t"
+		      "popfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "popfl \n\t"
+
	      : "=&r" (f1), "=&r" (f2)
	      : "ir" (flag));
 
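For context, flag_is_changeable_p() is the classic pre-CPUID probe: flip a bit in EFLAGS through pushfl/popfl and read it back; if the change sticks, the flag is software-controllable. Bit 21 (the ID flag) being changeable means CPUID exists. A user-space sketch of the same probe, 32-bit x86 only, mirroring the kernel asm:

#include <stdio.h>

/* Returns nonzero if the given EFLAGS bit can be toggled. */
static int flag_is_changeable(unsigned int flag)
{
	unsigned int f1, f2;

	asm volatile ("pushfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "movl %0, %1 \n\t"
		      "xorl %2, %0 \n\t"
		      "pushl %0 \n\t"
		      "popfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "popfl \n\t"
		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return (f1 ^ f2) & flag;
}

int main(void)
{
	/* 0x00200000 is the ID flag, EFLAGS bit 21: */
	printf("CPUID available: %d\n", !!flag_is_changeable(0x00200000));
	return 0;
}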
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -245,6 +251,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
	const struct cpuid_dependent_feature *df;
 
	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_extended_level is set to 0 if unavailable
@@ -252,26 +261,26 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
		    (u32)df->level > (u32)c->extended_cpuid_level :
-		    (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		    (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
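The sign trick the comment refers to: extended CPUID leaves start at 0x80000000, which is negative as an s32, so a signed test cheaply routes each level to the right maximum (cpuid_level is -1 when CPUID is absent, extended_cpuid_level is 0). A small demo; the sample values assume a CPU with basic levels up to 0xb and no extended levels:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if the leaf needed by a feature is not implemented. */
static int level_missing(uint32_t level, int32_t cpuid_level,
			 uint32_t extended_cpuid_level)
{
	if ((int32_t)level < 0)			/* extended leaf: unsigned compare */
		return level > extended_cpuid_level;
	return (int32_t)level > cpuid_level;	/* basic leaf: signed compare */
}

int main(void)
{
	printf("%d\n", level_missing(0x00000005, 0xb, 0));	/* 0: MWAIT leaf present */
	printf("%d\n", level_missing(0x80000008, 0xb, 0));	/* 1: extended leaf missing */
	return 0;
}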
@@ -308,8 +317,10 @@ void load_percpu_segment(int cpu)
	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
	struct desc_ptr gdt_descr;
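switch_to_new_gdt() re-points GDTR at this CPU's own GDT copy with lgdt and then reloads the per-cpu segment register. lgdt is ring-0 only, but its inverse, sgdt, can illustrate the GDTR format from user space (assuming UMIP is not enforced); the struct matches the kernel's desc_ptr layout:

#include <stdio.h>

struct desc_ptr {
	unsigned short size;		/* limit: table size in bytes - 1 */
	unsigned long address;		/* linear base of the GDT */
} __attribute__((packed));

int main(void)
{
	struct desc_ptr gdt_descr;

	asm volatile("sgdt %0" : "=m" (gdt_descr));
	printf("GDT: base=%#lx size=%u bytes\n",
	       gdt_descr.address, gdt_descr.size + 1);
	return 0;
}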
@@ -355,14 +366,16 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
	if (c->extended_cpuid_level < 0x80000004)
		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
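The hunk cuts off just as the de-justification loop starts. A standalone sketch of how the rest of that loop plausibly proceeds (the sample model string is made up): shift the string left over its leading spaces, then zero-pad the tail.

#include <stdio.h>

/* Left-justify a 48-char CPUID model string in place. */
static void left_justify(char model_id[49])
{
	char *p, *q;

	p = q = &model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &model_id[48])
			*q++ = '\0';	/* zero-pad the rest */
	}
}

int main(void)
{
	char id[49] = "          Intel(R) Xeon(R) CPU           X9999";

	left_justify(id);
	printf("'%s'\n", id);
	return 0;
}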
@@ -439,28 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			pr_warning("CPU: Unsupported number of siblings %d",
-				   smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
 
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-						    index_msb);
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+	index_msb = get_count_order(smp_num_siblings);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
-		index_msb = get_count_order(smp_num_siblings);
+	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-		core_bits = get_count_order(c->x86_max_cores);
+	index_msb = get_count_order(smp_num_siblings);
 
-		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-	}
+	core_bits = get_count_order(c->x86_max_cores);
+
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
 
 out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
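A worked example of the sibling arithmetic above. On the default APIC driver, phys_pkg_id() reduces to shifting the initial APIC ID, so the sketch models it as a plain shift; the sibling/core/APIC-ID sample values are assumptions:

#include <stdio.h>

/* Simplified get_count_order(): ceil(log2(count)), like the kernel helper. */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int smp_num_siblings = 4;	/* assumed: threads per package */
	unsigned int x86_max_cores = 2;		/* assumed: cores per package */
	unsigned int initial_apicid = 0x5;	/* assumed sample APIC ID */
	int index_msb, core_bits;

	/* Package ID: drop the bits that enumerate threads within a package. */
	index_msb = get_count_order(smp_num_siblings);
	printf("phys_proc_id = %u\n", initial_apicid >> index_msb);

	smp_num_siblings /= x86_max_cores;	/* threads per core from here on */
	index_msb = get_count_order(smp_num_siblings);
	core_bits = get_count_order(x86_max_cores);

	/* Core ID: drop the thread bits, keep only the core-number bits. */
	printf("cpu_core_id  = %u\n",
	       (initial_apicid >> index_msb) & ((1 << core_bits) - 1));
	return 0;
}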
@@ -475,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
	char *v = c->x86_vendor_id;
-	int i;
	static int printed;
+	int i;
 
	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
@@ -485,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
@@ -493,8 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
	if (!printed) {
		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s'"
-		       "unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
 
@@ -514,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
+
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
+
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
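The decode above follows the CPUID leaf 1 EAX layout (stepping in bits 0-3, model in 4-7, family in 8-11), including the extended-family and extended-model fix-ups. A user-space equivalent using GCC's <cpuid.h> instead of the kernel cpuid() helper:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int tfms, misc, junk, cap0;
	unsigned int family, model, stepping;

	if (!__get_cpuid(1, &tfms, &misc, &junk, &cap0))
		return 1;

	family   = (tfms >> 8) & 0xf;
	model    = (tfms >> 4) & 0xf;
	stepping = tfms & 0xf;

	if (family == 0xf)			/* add the extended family field */
		family += (tfms >> 20) & 0xff;
	if (family >= 0x6)			/* splice in the extended model field */
		model += ((tfms >> 16) & 0xf) << 4;

	printf("family %#x model %#x stepping %#x\n", family, model, stepping);
	if (cap0 & (1 << 19))			/* CLFLUSH line size field is valid */
		printf("clflush size: %u bytes\n", ((misc >> 8) & 0xff) * 8);
	return 0;
}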
@@ -537,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
+
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
@@ -545,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
+
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
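Leaf 0x80000000 returns the highest supported extended leaf in EAX; a value without the 0x8000xxxx pattern means extended leaves are absent, which is what the (xlvl & 0xffff0000) == 0x80000000 test above checks. A quick user-space probe of the same convention:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int xlvl, ebx, ecx, edx;

	if (!__get_cpuid(0x80000000, &xlvl, &ebx, &ecx, &edx))
		return 1;

	if ((xlvl & 0xffff0000) == 0x80000000)
		printf("max extended leaf: %#x\n", xlvl);
	else
		printf("no extended CPUID leaves\n");
	return 0;
}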
@@ -762,8 +784,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	squash_the_stupid_serial_number(c);
 
	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
	 */
 
	/* Filter out anything that depends on CPUID levels we don't have */
@@ -846,8 +868,8 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
	unsigned	min;
	unsigned	max;
 };
 
 static struct msr_range msr_range_array[] __cpuinitdata = {
@@ -859,14 +881,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;
-	unsigned index_min, index_max;
 
	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
+
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
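print_cpu_msr() walks fixed MSR index ranges and skips unimplemented registers via rdmsrl_amd_safe(). From user space the msr driver exposes the same operation: pread() on /dev/cpu/N/msr at offset <index> reads that MSR and fails with EIO on unimplemented ones. A sketch; the scan range is a sample, the kernel's real bounds live in msr_range_array, which the hunk does not show (run as root with the msr module loaded):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	unsigned int index;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;

	for (index = 0; index < 0x100; index++) {	/* sample range */
		if (pread(fd, &val, sizeof(val), index) != sizeof(val))
			continue;	/* unimplemented MSR: skip, like rdmsrl_amd_safe() */
		printf(" MSR%08x: %016llx\n", index, (unsigned long long)val);
	}
	close(fd);
	return 0;
}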
@@ -876,6 +899,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
	int num;
@@ -899,10 +923,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
	char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);
@@ -929,10 +955,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
	int bit;
+
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
+
	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -942,6 +970,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -951,6 +980,17 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
	__aligned(PAGE_SIZE);
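The [first ... last] designator in exception_stack_sizes is a GCC range-initializer extension; later designators override earlier ones, which is how the DEBUG_STACK slot alone ends up with the bigger size. A compilable illustration (the stack count and slot number here are made up, not the kernel's real values):

#include <stdio.h>

#define N_STACKS	7
#define DEBUG_STACK	4	/* hypothetical IST slot number */
#define STKSZ		4096
#define DEBUG_STKSZ	8192

static const unsigned int stack_sizes[N_STACKS] = {
	[0 ... N_STACKS - 1]	= STKSZ,	/* default for every slot */
	[DEBUG_STACK - 1]	= DEBUG_STKSZ,	/* overrides the default */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < N_STACKS; i++)
		printf("stack %u: %u bytes\n", i, stack_sizes[i]);
	return 0;
}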
@@ -984,7 +1024,7 @@ unsigned long kernel_eflags;
 */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else /* x86_64 */
+#else /* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -996,9 +1036,10 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	regs->gs = __KERNEL_STACK_CANARY;
+
	return regs;
 }
-#endif /* x86_64 */
+#endif /* CONFIG_X86_64 */
 
 /*
  * Clear all 6 debug registers:
@@ -1024,15 +1065,20 @@ static void clear_all_debug_regs(void)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
	if (cpu != 0 && percpu_read(node_number) == 0 &&
	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1073,19 +1119,17 @@ void __cpuinit cpu_init(void)
	 * set up and load the per-CPU TSS
	 */
	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-		  [DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
		char *estacks = per_cpu(exception_stacks, cpu);
+
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}
 
	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1187,5 +1231,4 @@ void __cpuinit cpu_init(void)
 
	xsave_init();
 }
-
 #endif