Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	392
1 file changed, 222 insertions(+), 170 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d7dd3c294e2a..c4f667896c28 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,50 +1,50 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -58,7 +58,7 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -67,48 +67,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	 * IRET will check the segment types  kkeil 2000/10/28
 	 * Also sysret mandates a special GDT layout
 	 *
-	 * The TLS descriptors are currently at a different place compared to i386.
+	 * TLS descriptors are currently at a different place compared to i386.
 	 * Hopefully nobody expects them at a fixed place (Wine?)
 	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time.  All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 	GDT_STACK_CANARY_INIT
 #endif
 } };
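
An aside, not part of the commit: the { low, high } pairs above are raw x86 segment descriptors. A minimal user-space sketch (the helper name is ours) that unpacks base, limit, DPL and granularity from the two 32-bit words:

#include <stdint.h>
#include <stdio.h>

/* Unpack the { low, high } words used in the GDT initializers above.
 * Per the Intel SDM layout: limit[15:0] and base[15:0] live in the low
 * word; the high word holds base[23:16], the access byte, flags plus
 * limit[19:16], and base[31:24]. Illustrative user-space code only. */
static void decode_gdt_entry(uint32_t low, uint32_t high)
{
	uint32_t base   = (low >> 16) | ((high & 0xff) << 16) | (high & 0xff000000);
	uint32_t limit  = (low & 0xffff) | (high & 0x000f0000);
	uint8_t  access = (high >> 8) & 0xff;
	uint8_t  flags  = (high >> 20) & 0xf;

	printf("base=0x%x limit=0x%x access=0x%02x (DPL=%u) %s-bit, %s\n",
	       base, limit, access, (access >> 5) & 3,
	       (flags & 0x4) ? "32" : "16",
	       (flags & 0x8) ? "4K-granular" : "byte-granular");
}

int main(void)
{
	decode_gdt_entry(0x0000ffff, 0x00cf9a00);	/* GDT_ENTRY_KERNEL_CS (32-bit) */
	decode_gdt_entry(0x0000ffff, 0x00cffa00);	/* GDT_ENTRY_DEFAULT_USER_CS: DPL=3 */
	return 0;
}
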
@@ -152,16 +152,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "movl %0, %1 \n\t"
+		      "xorl %2, %0 \n\t"
+		      "pushl %0 \n\t"
+		      "popfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "popfl \n\t"
+
 		      : "=&r" (f1), "=&r" (f2)
 		      : "ir" (flag));
 
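
Context for the asm above: it flips one EFLAGS bit, writes the result back, and re-reads EFLAGS; if the flip stuck, the bit is "changeable". have_cpuid_p() feeds it bit 21 (EFLAGS.ID), which is toggleable exactly when the CPUID instruction exists. A hedged, 32-bit-only sketch of that pairing (compile with -m32; on x86-64 CPUID is always present):

#include <stdbool.h>

#define X86_EFLAGS_ID 0x00200000	/* EFLAGS bit 21: CPUID detection flag */

/* Sketch of the flag_is_changeable_p() logic shown in the hunk above. */
static bool eflags_bit_changeable(unsigned long flag)
{
	unsigned long f1, f2;

	asm volatile ("pushfl \n\t"		/* save original EFLAGS */
		      "pushfl \n\t"
		      "popl %0 \n\t"		/* f1 = EFLAGS */
		      "movl %0, %1 \n\t"	/* f2 = f1 */
		      "xorl %2, %0 \n\t"	/* flip the bit under test */
		      "pushl %0 \n\t"
		      "popfl \n\t"		/* try to write it back */
		      "pushfl \n\t"
		      "popl %0 \n\t"		/* re-read EFLAGS */
		      "popfl \n\t"		/* restore original EFLAGS */
		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return (f1 ^ f2) & flag;	/* did the bit actually change? */
}

/* A 486 or later lets software toggle EFLAGS.ID, so CPUID is available. */
static bool cpu_has_cpuid(void)
{
	return eflags_bit_changeable(X86_EFLAGS_ID);
}
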
@@ -176,18 +177,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
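
The rdmsr()/wrmsr() pair in the new body is a plain read-modify-write of a model-specific register (setting bit 21 of MSR_IA32_BBL_CR_CTL disables the Pentium III serial number). Outside the kernel, the msr driver exposes the same registers; a sketch assuming /dev/cpu/0/msr exists (CONFIG_X86_MSR or 'modprobe msr', run as root) and that 0x119 is the MSR index:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read one MSR on CPU 0 via the Linux msr driver: the file offset
 * selects the MSR index, and each 8-byte read returns its value. */
int main(void)
{
	const uint32_t MSR_IA32_BBL_CR_CTL = 0x119;	/* assumed index */
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_BBL_CR_CTL) != sizeof(val)) {
		perror("msr read");
		return 1;
	}
	printf("MSR 0x%x = 0x%016llx\n", MSR_IA32_BBL_CR_CTL,
	       (unsigned long long)val);
	close(fd);
	return 0;
}
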
@@ -220,6 +225,7 @@ struct cpuid_dependent_feature {
 	u32 feature;
 	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -231,7 +237,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
+
 	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
 		/*
 		 * Note: cpuid_level is set to -1 if unavailable, but
 		 * extended_extended_level is set to 0 if unavailable
@@ -239,32 +249,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		 * when signed; hence the weird messing around with
 		 * signs here...
 		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
 		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
 	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
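
The sign trickery in filter_cpuid_features() deserves a standalone illustration: extended CPUID levels (0x8000xxxx) are negative when viewed as s32, which selects the unsigned comparison against extended_cpuid_level, while basic levels compare signed so that cpuid_level == -1 ("no CPUID at all") filters everything. A self-contained sketch of the predicate, with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the level check in filter_cpuid_features(). */
static bool level_unsupported(uint32_t level, int32_t cpuid_level,
			      uint32_t extended_cpuid_level)
{
	return (int32_t)level < 0 ?
		level > extended_cpuid_level :	/* unsigned compare */
		(int32_t)level > cpuid_level;	/* signed compare */
}

int main(void)
{
	/* X86_FEATURE_MWAIT needs basic leaf 5; example values only. */
	printf("%d\n", level_unsupported(0x00000005, 0x0000000a, 0x80000008)); /* 0: kept */
	printf("%d\n", level_unsupported(0x00000005, 0x00000001, 0x80000008)); /* 1: filtered */
	printf("%d\n", level_unsupported(0x80000001, -1, 0));                  /* 1: filtered */
	return 0;
}
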
@@ -295,8 +305,10 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
@@ -309,7 +321,7 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -328,7 +340,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init = default_init,
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -342,22 +354,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 }
 
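
For reference, the same 48-byte brand string that get_model_name() assembles can be fetched in user space with the compiler's cpuid.h helpers; a sketch (leaves 0x80000002..4 return 16 bytes each in EAX..EDX):

#include <cpuid.h>	/* GCC/clang helper header; x86 only */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int v[12];
	char brand[49];

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000004) {
		puts("brand string not supported");
		return 1;
	}
	for (unsigned int leaf = 0; leaf < 3; leaf++)
		__get_cpuid(0x80000002 + leaf, &v[leaf * 4 + 0],
			    &v[leaf * 4 + 1], &v[leaf * 4 + 2], &v[leaf * 4 + 3]);

	memcpy(brand, v, 48);
	brand[48] = '\0';
	printf("%s\n", brand);	/* may carry the leading spaces the kernel strips */
	return 0;
}
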
@@ -426,27 +440,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
 
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+	index_msb = get_count_order(smp_num_siblings);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
-		index_msb = get_count_order(smp_num_siblings);
+	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-		core_bits = get_count_order(c->x86_max_cores);
+	index_msb = get_count_order(smp_num_siblings);
 
-		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-	}
+	core_bits = get_count_order(c->x86_max_cores);
+
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
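
The index_msb arithmetic in detect_ht() slices the initial APIC ID into package and core fields. A hedged user-space sketch of the same decomposition, with count_order() standing in for the kernel's get_count_order() and made-up example values:

#include <stdio.h>

/* Round-up log2, like the kernel's get_count_order(). */
static int count_order(unsigned int x)
{
	int order = 0;

	while ((1u << order) < x)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 0x1b;	/* example initial APIC ID */
	unsigned int siblings = 8;	/* logical CPUs per package */
	unsigned int max_cores = 4;	/* cores per package */

	/* High bits above the sibling field identify the package. */
	int index_msb = count_order(siblings);
	unsigned int phys_proc_id = apicid >> index_msb;

	/* Re-derive the shift after reducing siblings to threads/core. */
	unsigned int threads_per_core = siblings / max_cores;
	int core_shift = count_order(threads_per_core);
	int core_bits = count_order(max_cores);
	unsigned int cpu_core_id = (apicid >> core_shift) & ((1 << core_bits) - 1);

	printf("package %u, core %u\n", phys_proc_id, cpu_core_id);	/* package 3, core 1 */
	return 0;
}
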
@@ -461,8 +478,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -471,6 +488,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -479,7 +497,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -499,14 +519,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
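
cpu_detect()'s family/model/stepping decode of CPUID leaf 1 EAX can be reproduced verbatim in user space; a sketch using GCC's cpuid.h:

#include <cpuid.h>
#include <stdio.h>

/* Extended family/model fields only kick in for family 0xf
 * (and extended model for family >= 6), same as cpu_detect(). */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int family = (eax >> 8) & 0xf;
	unsigned int model  = (eax >> 4) & 0xf;
	unsigned int step   = eax & 0xf;

	if (family == 0xf)
		family += (eax >> 20) & 0xff;		/* extended family */
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;	/* extended model */

	printf("family 0x%x, model 0x%x, stepping 0x%x\n", family, model, step);
	return 0;
}
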
@@ -522,6 +545,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -530,6 +554,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -537,13 +562,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		}
 	}
 
-#ifdef CONFIG_X86_64
 	if (c->extended_cpuid_level >= 0x80000008) {
 		u32 eax = cpuid_eax(0x80000008);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000007)
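
With this change the 0x80000008 path runs on both 32-bit and 64-bit kernels, with a PAE/PSE36 fallback of 36 physical bits on 32-bit. The leaf itself is trivial to query; a sketch:

#include <cpuid.h>
#include <stdio.h>

/* Report physical/virtual address widths from CPUID leaf 0x80000008,
 * mirroring the x86_phys_bits/x86_virt_bits logic above. */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000008) {
		puts("leaf 0x80000008 not supported");
		return 1;
	}
	__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
	printf("phys bits: %u, virt bits: %u\n", eax & 0xff, (eax >> 8) & 0xff);
	return 0;
}
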
@@ -590,8 +617,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -622,12 +653,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
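
The new cdev declaration reads "pointer to const pointer to const struct cpu_dev": both the vendor descriptors and the table slots pointing at them are read-only. An illustration of what each const forbids (toy types, not the kernel's):

struct cpu_dev {
	const char *c_vendor;
};

static const struct cpu_dev intel_dev = { .c_vendor = "Intel" };
static const struct cpu_dev *const devs[] = { &intel_dev };

int main(void)
{
	const struct cpu_dev *const *cdev = devs;

	/* Allowed: walking the table. */
	const struct cpu_dev *d = *cdev;
	(void)d;

	/* Both of these would be compile errors:
	 *   (*cdev)->c_vendor = "other";   -- the pointed-to struct is const
	 *   *cdev = &intel_dev;            -- the pointer slot is const
	 */
	return 0;
}
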
@@ -638,7 +669,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -714,9 +745,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->cpuid_level = -1;	/* CPUID not detected */
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -747,8 +782,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
 	/* Filter out anything that depends on CPUID levels we don't have */
@@ -756,7 +791,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
756 791
757 /* If the model name is still unset, do table lookup. */ 792 /* If the model name is still unset, do table lookup. */
758 if (!c->x86_model_id[0]) { 793 if (!c->x86_model_id[0]) {
759 char *p; 794 const char *p;
760 p = table_lookup_model(c); 795 p = table_lookup_model(c);
761 if (p) 796 if (p)
762 strcpy(c->x86_model_id, p); 797 strcpy(c->x86_model_id, p);
@@ -832,11 +867,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
 	unsigned min;
 	unsigned max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -845,14 +880,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -862,6 +898,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -883,12 +920,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -915,10 +954,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -928,6 +969,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -937,12 +979,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1] = DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
 	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
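
exception_stack_sizes[] above uses GCC's designated range initializers: the range fills every slot, then the later [DEBUG_STACK - 1] designator overrides one of them. A compact demonstration (slot numbers here are illustrative, not the kernel's real values):

#include <stdio.h>

#define N_STACKS	7
#define STKSZ		4096
#define DEBUG_STKSZ	8192
#define DEBUG_STACK	4	/* illustrative slot; the real value is arch-defined */

/* GNU extension: [a ... b] = v fills a range; a later designator
 * overrides one entry -- same pattern as exception_stack_sizes[]. */
static const unsigned int sizes[N_STACKS] = {
	[0 ... N_STACKS - 1] = STKSZ,
	[DEBUG_STACK - 1] = DEBUG_STKSZ
};

int main(void)
{
	for (int i = 0; i < N_STACKS; i++)
		printf("stack %d: %u bytes\n", i, sizes[i]);
	return 0;
}
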
@@ -972,7 +1023,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else	/* x86_64 */
+#else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -984,9 +1035,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
 	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif	/* x86_64 */
+#endif	/* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -996,15 +1064,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1045,19 +1118,17 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-		  [DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
 		char *estacks = per_cpu(exception_stacks, cpu);
+
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1067,8 +1138,7 @@ void __cpuinit cpu_init(void)
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
-	if (me->mm)
-		BUG();
+	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
 	load_sp0(t, &current->thread);
@@ -1087,17 +1157,7 @@ void __cpuinit cpu_init(void)
 		arch_kgdb_ops.correct_hw_break();
 	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
 	fpu_init();
 
@@ -1118,7 +1178,8 @@ void __cpuinit cpu_init(void)
 
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
 	}
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1134,8 +1195,7 @@ void __cpuinit cpu_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	curr->active_mm = &init_mm;
-	if (curr->mm)
-		BUG();
+	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
@@ -1148,13 +1208,7 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
 	/*
 	 * Force FPU initialization:
@@ -1174,6 +1228,4 @@ void __cpuinit cpu_init(void)
 
 	xsave_init();
 }
-
-
 #endif