Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c  579
1 files changed, 326 insertions, 253 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b..e2962cc1e27 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,118 +1,129 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/bootmem.h> 1#include <linux/bootmem.h>
2#include <linux/linkage.h>
6#include <linux/bitops.h> 3#include <linux/bitops.h>
4#include <linux/kernel.h>
7#include <linux/module.h> 5#include <linux/module.h>
8#include <linux/kgdb.h> 6#include <linux/percpu.h>
9#include <linux/topology.h> 7#include <linux/string.h>
10#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/sched.h>
10#include <linux/init.h>
11#include <linux/kgdb.h>
11#include <linux/smp.h> 12#include <linux/smp.h>
12#include <linux/percpu.h> 13#include <linux/io.h>
13#include <asm/i387.h> 14
14#include <asm/msr.h> 15#include <asm/stackprotector.h>
15#include <asm/io.h>
16#include <asm/linkage.h>
17#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/hypervisor.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/topology.h>
21#include <asm/cpumask.h>
22#include <asm/pgtable.h>
23#include <asm/atomic.h>
24#include <asm/proto.h>
25#include <asm/setup.h>
26#include <asm/apic.h>
27#include <asm/desc.h>
28#include <asm/i387.h>
18#include <asm/mtrr.h> 29#include <asm/mtrr.h>
30#include <asm/numa.h>
31#include <asm/asm.h>
32#include <asm/cpu.h>
19#include <asm/mce.h> 33#include <asm/mce.h>
34#include <asm/msr.h>
20#include <asm/pat.h> 35#include <asm/pat.h>
21#include <asm/asm.h>
22#include <asm/numa.h>
23#include <asm/smp.h> 36#include <asm/smp.h>
37
24#ifdef CONFIG_X86_LOCAL_APIC 38#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 39#include <asm/uv/uv.h>
26#include <asm/apic.h>
27#include <mach_apic.h>
28#include <asm/genapic.h>
29#endif 40#endif
30 41
31#include <asm/pda.h>
32#include <asm/pgtable.h>
33#include <asm/processor.h>
34#include <asm/desc.h>
35#include <asm/atomic.h>
36#include <asm/proto.h>
37#include <asm/sections.h>
38#include <asm/setup.h>
39#include <asm/hypervisor.h>
40
41#include "cpu.h" 42#include "cpu.h"
42 43
43#ifdef CONFIG_X86_64 44#ifdef CONFIG_X86_64
44 45
45/* all of these masks are initialized in setup_cpu_local_masks() */ 46/* all of these masks are initialized in setup_cpu_local_masks() */
46cpumask_var_t cpu_callin_mask;
47cpumask_var_t cpu_callout_mask;
48cpumask_var_t cpu_initialized_mask; 47cpumask_var_t cpu_initialized_mask;
48cpumask_var_t cpu_callout_mask;
49cpumask_var_t cpu_callin_mask;
49 50
50/* representing cpus for which sibling maps can be computed */ 51/* representing cpus for which sibling maps can be computed */
51cpumask_var_t cpu_sibling_setup_mask; 52cpumask_var_t cpu_sibling_setup_mask;
52 53
54/* correctly size the local cpu masks */
55void __init setup_cpu_local_masks(void)
56{
57 alloc_bootmem_cpumask_var(&cpu_initialized_mask);
58 alloc_bootmem_cpumask_var(&cpu_callin_mask);
59 alloc_bootmem_cpumask_var(&cpu_callout_mask);
60 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
61}
62
53#else /* CONFIG_X86_32 */ 63#else /* CONFIG_X86_32 */
54 64
55cpumask_t cpu_callin_map; 65cpumask_t cpu_sibling_setup_map;
56cpumask_t cpu_callout_map; 66cpumask_t cpu_callout_map;
57cpumask_t cpu_initialized; 67cpumask_t cpu_initialized;
58cpumask_t cpu_sibling_setup_map; 68cpumask_t cpu_callin_map;
59 69
60#endif /* CONFIG_X86_32 */ 70#endif /* CONFIG_X86_32 */
61 71
62 72
63static struct cpu_dev *this_cpu __cpuinitdata; 73static const struct cpu_dev *this_cpu __cpuinitdata;
64 74
75DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
65#ifdef CONFIG_X86_64 76#ifdef CONFIG_X86_64
66/* We need valid kernel segments for data and code in long mode too 77 /*
67 * IRET will check the segment types kkeil 2000/10/28 78 * We need valid kernel segments for data and code in long mode too
68 * Also sysret mandates a special GDT layout 79 * IRET will check the segment types kkeil 2000/10/28
69 */ 80 * Also sysret mandates a special GDT layout
70/* The TLS descriptors are currently at a different place compared to i386. 81 *
71 Hopefully nobody expects them at a fixed place (Wine?) */ 82 * TLS descriptors are currently at a different place compared to i386.
72DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { 83 * Hopefully nobody expects them at a fixed place (Wine?)
73 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, 84 */
74 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, 85 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
75 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, 86 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
76 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, 87 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
77 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, 88 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
78 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, 89 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
79} }; 90 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
80#else 91#else
81DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 92 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
82 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, 93 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
83 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, 94 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
84 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, 95 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
85 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
86 /* 96 /*
87 * Segments used for calling PnP BIOS have byte granularity. 97 * Segments used for calling PnP BIOS have byte granularity.
88 * They code segments and data segments have fixed 64k limits, 98 * They code segments and data segments have fixed 64k limits,
89 * the transfer segment sizes are set at run time. 99 * the transfer segment sizes are set at run time.
90 */ 100 */
91 /* 32-bit code */ 101 /* 32-bit code */
92 [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, 102 [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
93 /* 16-bit code */ 103 /* 16-bit code */
94 [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, 104 [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
95 /* 16-bit data */ 105 /* 16-bit data */
96 [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, 106 [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
97 /* 16-bit data */ 107 /* 16-bit data */
98 [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, 108 [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
99 /* 16-bit data */ 109 /* 16-bit data */
100 [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, 110 [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
101 /* 111 /*
102 * The APM segments have byte granularity and their bases 112 * The APM segments have byte granularity and their bases
103 * are set at run time. All have 64k limits. 113 * are set at run time. All have 64k limits.
104 */ 114 */
105 /* 32-bit code */ 115 /* 32-bit code */
106 [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, 116 [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
107 /* 16-bit code */ 117 /* 16-bit code */
108 [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, 118 [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
109 /* data */ 119 /* data */
110 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, 120 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
111 121
112 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, 122 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
113 [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, 123 [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
114} }; 124 GDT_STACK_CANARY_INIT
115#endif 125#endif
126} };
116EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); 127EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
117 128
118#ifdef CONFIG_X86_32 129#ifdef CONFIG_X86_32
@@ -153,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
153 * the CPUID. Add "volatile" to not allow gcc to 164 * the CPUID. Add "volatile" to not allow gcc to
154 * optimize the subsequent calls to this function. 165 * optimize the subsequent calls to this function.
155 */ 166 */
156 asm volatile ("pushfl\n\t" 167 asm volatile ("pushfl \n\t"
157 "pushfl\n\t" 168 "pushfl \n\t"
158 "popl %0\n\t" 169 "popl %0 \n\t"
159 "movl %0,%1\n\t" 170 "movl %0, %1 \n\t"
160 "xorl %2,%0\n\t" 171 "xorl %2, %0 \n\t"
161 "pushl %0\n\t" 172 "pushl %0 \n\t"
162 "popfl\n\t" 173 "popfl \n\t"
163 "pushfl\n\t" 174 "pushfl \n\t"
164 "popl %0\n\t" 175 "popl %0 \n\t"
165 "popfl\n\t" 176 "popfl \n\t"
177
166 : "=&r" (f1), "=&r" (f2) 178 : "=&r" (f1), "=&r" (f2)
167 : "ir" (flag)); 179 : "ir" (flag));
168 180
@@ -177,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
177 189
178static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 190static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
179{ 191{
180 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { 192 unsigned long lo, hi;
181 /* Disable processor serial number */ 193
182 unsigned long lo, hi; 194 if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
183 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 195 return;
184 lo |= 0x200000; 196
185 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 197 /* Disable processor serial number: */
186 printk(KERN_NOTICE "CPU serial number disabled.\n"); 198
187 clear_cpu_cap(c, X86_FEATURE_PN); 199 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
188 200 lo |= 0x200000;
189 /* Disabling the serial number may affect the cpuid level */ 201 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
190 c->cpuid_level = cpuid_eax(0); 202
191 } 203 printk(KERN_NOTICE "CPU serial number disabled.\n");
204 clear_cpu_cap(c, X86_FEATURE_PN);
205
206 /* Disabling the serial number may affect the cpuid level */
207 c->cpuid_level = cpuid_eax(0);
192} 208}
193 209
194static int __init x86_serial_nr_setup(char *s) 210static int __init x86_serial_nr_setup(char *s)
@@ -213,16 +229,64 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
213#endif 229#endif
214 230
215/* 231/*
232 * Some CPU features depend on higher CPUID levels, which may not always
233 * be available due to CPUID level capping or broken virtualization
234 * software. Add those features to this table to auto-disable them.
235 */
236struct cpuid_dependent_feature {
237 u32 feature;
238 u32 level;
239};
240
241static const struct cpuid_dependent_feature __cpuinitconst
242cpuid_dependent_features[] = {
243 { X86_FEATURE_MWAIT, 0x00000005 },
244 { X86_FEATURE_DCA, 0x00000009 },
245 { X86_FEATURE_XSAVE, 0x0000000d },
246 { 0, 0 }
247};
248
249static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
250{
251 const struct cpuid_dependent_feature *df;
252
253 for (df = cpuid_dependent_features; df->feature; df++) {
254
255 if (!cpu_has(c, df->feature))
256 continue;
257 /*
258 * Note: cpuid_level is set to -1 if unavailable, but
259 * extended_extended_level is set to 0 if unavailable
260 * and the legitimate extended levels are all negative
261 * when signed; hence the weird messing around with
262 * signs here...
263 */
264 if (!((s32)df->level < 0 ?
265 (u32)df->level > (u32)c->extended_cpuid_level :
266 (s32)df->level > (s32)c->cpuid_level))
267 continue;
268
269 clear_cpu_cap(c, df->feature);
270 if (!warn)
271 continue;
272
273 printk(KERN_WARNING
274 "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
275 x86_cap_flags[df->feature], df->level);
276 }
277}
278
279/*
216 * Naming convention should be: <Name> [(<Codename>)] 280 * Naming convention should be: <Name> [(<Codename>)]
217 * This table only is used unless init_<vendor>() below doesn't set it; 281 * This table only is used unless init_<vendor>() below doesn't set it;
218 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used 282 * in particular, if CPUID levels 0x80000002..4 are supported, this
219 * 283 * isn't used
220 */ 284 */
221 285
222/* Look up CPU names by table lookup. */ 286/* Look up CPU names by table lookup. */
223static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) 287static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
224{ 288{
225 struct cpu_model_info *info; 289 const struct cpu_model_info *info;
226 290
227 if (c->x86_model >= 16) 291 if (c->x86_model >= 16)
228 return NULL; /* Range check */ 292 return NULL; /* Range check */
@@ -242,21 +306,34 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
242 306
243__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; 307__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
244 308
245/* Current gdt points %fs at the "master" per-cpu area: after this, 309void load_percpu_segment(int cpu)
246 * it's on the real one. */ 310{
247void switch_to_new_gdt(void) 311#ifdef CONFIG_X86_32
312 loadsegment(fs, __KERNEL_PERCPU);
313#else
314 loadsegment(gs, 0);
315 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
316#endif
317 load_stack_canary_segment();
318}
319
320/*
321 * Current gdt points %fs at the "master" per-cpu area: after this,
322 * it's on the real one.
323 */
324void switch_to_new_gdt(int cpu)
248{ 325{
249 struct desc_ptr gdt_descr; 326 struct desc_ptr gdt_descr;
250 327
251 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); 328 gdt_descr.address = (long)get_cpu_gdt_table(cpu);
252 gdt_descr.size = GDT_SIZE - 1; 329 gdt_descr.size = GDT_SIZE - 1;
253 load_gdt(&gdt_descr); 330 load_gdt(&gdt_descr);
254#ifdef CONFIG_X86_32 331 /* Reload the per-cpu base */
255 asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); 332
256#endif 333 load_percpu_segment(cpu);
257} 334}
258 335
259static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 336static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
260 337
261static void __cpuinit default_init(struct cpuinfo_x86 *c) 338static void __cpuinit default_init(struct cpuinfo_x86 *c)
262{ 339{
@@ -275,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
275#endif 352#endif
276} 353}
277 354
278static struct cpu_dev __cpuinitdata default_cpu = { 355static const struct cpu_dev __cpuinitconst default_cpu = {
279 .c_init = default_init, 356 .c_init = default_init,
280 .c_vendor = "Unknown", 357 .c_vendor = "Unknown",
281 .c_x86_vendor = X86_VENDOR_UNKNOWN, 358 .c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -289,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
289 if (c->extended_cpuid_level < 0x80000004) 366 if (c->extended_cpuid_level < 0x80000004)
290 return; 367 return;
291 368
292 v = (unsigned int *) c->x86_model_id; 369 v = (unsigned int *)c->x86_model_id;
293 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); 370 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
294 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); 371 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
295 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); 372 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
296 c->x86_model_id[48] = 0; 373 c->x86_model_id[48] = 0;
297 374
298 /* Intel chips right-justify this string for some dumb reason; 375 /*
299 undo that brain damage */ 376 * Intel chips right-justify this string for some dumb reason;
377 * undo that brain damage:
378 */
300 p = q = &c->x86_model_id[0]; 379 p = q = &c->x86_model_id[0];
301 while (*p == ' ') 380 while (*p == ' ')
302 p++; 381 p++;
303 if (p != q) { 382 if (p != q) {
304 while (*p) 383 while (*p)
305 *q++ = *p++; 384 *q++ = *p++;
306 while (q <= &c->x86_model_id[48]) 385 while (q <= &c->x86_model_id[48])
307 *q++ = '\0'; /* Zero-pad the rest */ 386 *q++ = '\0'; /* Zero-pad the rest */
308 } 387 }
309} 388}
310 389
@@ -373,36 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
373 452
374 if (smp_num_siblings == 1) { 453 if (smp_num_siblings == 1) {
375 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 454 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
376 } else if (smp_num_siblings > 1) { 455 goto out;
456 }
377 457
378 if (smp_num_siblings > nr_cpu_ids) { 458 if (smp_num_siblings <= 1)
379 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 459 goto out;
380 smp_num_siblings);
381 smp_num_siblings = 1;
382 return;
383 }
384 460
385 index_msb = get_count_order(smp_num_siblings); 461 if (smp_num_siblings > nr_cpu_ids) {
386#ifdef CONFIG_X86_64 462 pr_warning("CPU: Unsupported number of siblings %d",
387 c->phys_proc_id = phys_pkg_id(index_msb); 463 smp_num_siblings);
388#else 464 smp_num_siblings = 1;
389 c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); 465 return;
390#endif 466 }
391 467
392 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 468 index_msb = get_count_order(smp_num_siblings);
469 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
393 470
394 index_msb = get_count_order(smp_num_siblings); 471 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
395 472
396 core_bits = get_count_order(c->x86_max_cores); 473 index_msb = get_count_order(smp_num_siblings);
397 474
398#ifdef CONFIG_X86_64 475 core_bits = get_count_order(c->x86_max_cores);
399 c->cpu_core_id = phys_pkg_id(index_msb) & 476
400 ((1 << core_bits) - 1); 477 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
401#else 478 ((1 << core_bits) - 1);
402 c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
403 ((1 << core_bits) - 1);
404#endif
405 }
406 479
407out: 480out:
408 if ((c->x86_max_cores * smp_num_siblings) > 1) { 481 if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -417,8 +490,8 @@ out:
417static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 490static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
418{ 491{
419 char *v = c->x86_vendor_id; 492 char *v = c->x86_vendor_id;
420 int i;
421 static int printed; 493 static int printed;
494 int i;
422 495
423 for (i = 0; i < X86_VENDOR_NUM; i++) { 496 for (i = 0; i < X86_VENDOR_NUM; i++) {
424 if (!cpu_devs[i]) 497 if (!cpu_devs[i])
@@ -427,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
427 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 500 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
428 (cpu_devs[i]->c_ident[1] && 501 (cpu_devs[i]->c_ident[1] &&
429 !strcmp(v, cpu_devs[i]->c_ident[1]))) { 502 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
503
430 this_cpu = cpu_devs[i]; 504 this_cpu = cpu_devs[i];
431 c->x86_vendor = this_cpu->c_x86_vendor; 505 c->x86_vendor = this_cpu->c_x86_vendor;
432 return; 506 return;
@@ -435,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
435 509
436 if (!printed) { 510 if (!printed) {
437 printed++; 511 printed++;
438 printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v); 512 printk(KERN_ERR
513 "CPU: vendor_id '%s' unknown, using generic init.\n", v);
514
439 printk(KERN_ERR "CPU: Your system may be unstable.\n"); 515 printk(KERN_ERR "CPU: Your system may be unstable.\n");
440 } 516 }
441 517
@@ -455,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
455 /* Intel-defined flags: level 0x00000001 */ 531 /* Intel-defined flags: level 0x00000001 */
456 if (c->cpuid_level >= 0x00000001) { 532 if (c->cpuid_level >= 0x00000001) {
457 u32 junk, tfms, cap0, misc; 533 u32 junk, tfms, cap0, misc;
534
458 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 535 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
459 c->x86 = (tfms >> 8) & 0xf; 536 c->x86 = (tfms >> 8) & 0xf;
460 c->x86_model = (tfms >> 4) & 0xf; 537 c->x86_model = (tfms >> 4) & 0xf;
461 c->x86_mask = tfms & 0xf; 538 c->x86_mask = tfms & 0xf;
539
462 if (c->x86 == 0xf) 540 if (c->x86 == 0xf)
463 c->x86 += (tfms >> 20) & 0xff; 541 c->x86 += (tfms >> 20) & 0xff;
464 if (c->x86 >= 0x6) 542 if (c->x86 >= 0x6)
465 c->x86_model += ((tfms >> 16) & 0xf) << 4; 543 c->x86_model += ((tfms >> 16) & 0xf) << 4;
544
466 if (cap0 & (1<<19)) { 545 if (cap0 & (1<<19)) {
467 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; 546 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
468 c->x86_cache_alignment = c->x86_clflush_size; 547 c->x86_cache_alignment = c->x86_clflush_size;
@@ -478,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
478 /* Intel-defined flags: level 0x00000001 */ 557 /* Intel-defined flags: level 0x00000001 */
479 if (c->cpuid_level >= 0x00000001) { 558 if (c->cpuid_level >= 0x00000001) {
480 u32 capability, excap; 559 u32 capability, excap;
560
481 cpuid(0x00000001, &tfms, &ebx, &excap, &capability); 561 cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
482 c->x86_capability[0] = capability; 562 c->x86_capability[0] = capability;
483 c->x86_capability[4] = excap; 563 c->x86_capability[4] = excap;
@@ -486,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
486 /* AMD-defined flags: level 0x80000001 */ 566 /* AMD-defined flags: level 0x80000001 */
487 xlvl = cpuid_eax(0x80000000); 567 xlvl = cpuid_eax(0x80000000);
488 c->extended_cpuid_level = xlvl; 568 c->extended_cpuid_level = xlvl;
569
489 if ((xlvl & 0xffff0000) == 0x80000000) { 570 if ((xlvl & 0xffff0000) == 0x80000000) {
490 if (xlvl >= 0x80000001) { 571 if (xlvl >= 0x80000001) {
491 c->x86_capability[1] = cpuid_edx(0x80000001); 572 c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -493,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
493 } 574 }
494 } 575 }
495 576
496#ifdef CONFIG_X86_64
497 if (c->extended_cpuid_level >= 0x80000008) { 577 if (c->extended_cpuid_level >= 0x80000008) {
498 u32 eax = cpuid_eax(0x80000008); 578 u32 eax = cpuid_eax(0x80000008);
499 579
500 c->x86_virt_bits = (eax >> 8) & 0xff; 580 c->x86_virt_bits = (eax >> 8) & 0xff;
501 c->x86_phys_bits = eax & 0xff; 581 c->x86_phys_bits = eax & 0xff;
502 } 582 }
583#ifdef CONFIG_X86_32
584 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
585 c->x86_phys_bits = 36;
503#endif 586#endif
504 587
505 if (c->extended_cpuid_level >= 0x80000007) 588 if (c->extended_cpuid_level >= 0x80000007)
@@ -546,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
546{ 629{
547#ifdef CONFIG_X86_64 630#ifdef CONFIG_X86_64
548 c->x86_clflush_size = 64; 631 c->x86_clflush_size = 64;
632 c->x86_phys_bits = 36;
633 c->x86_virt_bits = 48;
549#else 634#else
550 c->x86_clflush_size = 32; 635 c->x86_clflush_size = 32;
636 c->x86_phys_bits = 32;
637 c->x86_virt_bits = 32;
551#endif 638#endif
552 c->x86_cache_alignment = c->x86_clflush_size; 639 c->x86_cache_alignment = c->x86_clflush_size;
553 640
@@ -570,21 +657,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
570 if (this_cpu->c_early_init) 657 if (this_cpu->c_early_init)
571 this_cpu->c_early_init(c); 658 this_cpu->c_early_init(c);
572 659
573 validate_pat_support(c);
574
575#ifdef CONFIG_SMP 660#ifdef CONFIG_SMP
576 c->cpu_index = boot_cpu_id; 661 c->cpu_index = boot_cpu_id;
577#endif 662#endif
663 filter_cpuid_features(c, false);
578} 664}
579 665
580void __init early_cpu_init(void) 666void __init early_cpu_init(void)
581{ 667{
582 struct cpu_dev **cdev; 668 const struct cpu_dev *const *cdev;
583 int count = 0; 669 int count = 0;
584 670
585 printk("KERNEL supported cpus:\n"); 671 printk(KERN_INFO "KERNEL supported cpus:\n");
586 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { 672 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
587 struct cpu_dev *cpudev = *cdev; 673 const struct cpu_dev *cpudev = *cdev;
588 unsigned int j; 674 unsigned int j;
589 675
590 if (count >= X86_VENDOR_NUM) 676 if (count >= X86_VENDOR_NUM)
@@ -595,7 +681,7 @@ void __init early_cpu_init(void)
595 for (j = 0; j < 2; j++) { 681 for (j = 0; j < 2; j++) {
596 if (!cpudev->c_ident[j]) 682 if (!cpudev->c_ident[j])
597 continue; 683 continue;
598 printk(" %s %s\n", cpudev->c_vendor, 684 printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
599 cpudev->c_ident[j]); 685 cpudev->c_ident[j]);
600 } 686 }
601 } 687 }
@@ -637,7 +723,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
637 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 723 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
638#ifdef CONFIG_X86_32 724#ifdef CONFIG_X86_32
639# ifdef CONFIG_X86_HT 725# ifdef CONFIG_X86_HT
640 c->apicid = phys_pkg_id(c->initial_apicid, 0); 726 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
641# else 727# else
642 c->apicid = c->initial_apicid; 728 c->apicid = c->initial_apicid;
643# endif 729# endif
@@ -671,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
671 c->x86_coreid_bits = 0; 757 c->x86_coreid_bits = 0;
672#ifdef CONFIG_X86_64 758#ifdef CONFIG_X86_64
673 c->x86_clflush_size = 64; 759 c->x86_clflush_size = 64;
760 c->x86_phys_bits = 36;
761 c->x86_virt_bits = 48;
674#else 762#else
675 c->cpuid_level = -1; /* CPUID not detected */ 763 c->cpuid_level = -1; /* CPUID not detected */
676 c->x86_clflush_size = 32; 764 c->x86_clflush_size = 32;
765 c->x86_phys_bits = 32;
766 c->x86_virt_bits = 32;
677#endif 767#endif
678 c->x86_cache_alignment = c->x86_clflush_size; 768 c->x86_cache_alignment = c->x86_clflush_size;
679 memset(&c->x86_capability, 0, sizeof c->x86_capability); 769 memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -684,7 +774,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
684 this_cpu->c_identify(c); 774 this_cpu->c_identify(c);
685 775
686#ifdef CONFIG_X86_64 776#ifdef CONFIG_X86_64
687 c->apicid = phys_pkg_id(0); 777 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
688#endif 778#endif
689 779
690 /* 780 /*
@@ -704,13 +794,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
704 squash_the_stupid_serial_number(c); 794 squash_the_stupid_serial_number(c);
705 795
706 /* 796 /*
707 * The vendor-specific functions might have changed features. Now 797 * The vendor-specific functions might have changed features.
708 * we do "generic changes." 798 * Now we do "generic changes."
709 */ 799 */
710 800
801 /* Filter out anything that depends on CPUID levels we don't have */
802 filter_cpuid_features(c, true);
803
711 /* If the model name is still unset, do table lookup. */ 804 /* If the model name is still unset, do table lookup. */
712 if (!c->x86_model_id[0]) { 805 if (!c->x86_model_id[0]) {
713 char *p; 806 const char *p;
714 p = table_lookup_model(c); 807 p = table_lookup_model(c);
715 if (p) 808 if (p)
716 strcpy(c->x86_model_id, p); 809 strcpy(c->x86_model_id, p);
@@ -785,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
785} 878}
786 879
787struct msr_range { 880struct msr_range {
788 unsigned min; 881 unsigned min;
789 unsigned max; 882 unsigned max;
790}; 883};
791 884
792static struct msr_range msr_range_array[] __cpuinitdata = { 885static const struct msr_range msr_range_array[] __cpuinitconst = {
793 { 0x00000000, 0x00000418}, 886 { 0x00000000, 0x00000418},
794 { 0xc0000000, 0xc000040b}, 887 { 0xc0000000, 0xc000040b},
795 { 0xc0010000, 0xc0010142}, 888 { 0xc0010000, 0xc0010142},
@@ -798,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
798 891
799static void __cpuinit print_cpu_msr(void) 892static void __cpuinit print_cpu_msr(void)
800{ 893{
894 unsigned index_min, index_max;
801 unsigned index; 895 unsigned index;
802 u64 val; 896 u64 val;
803 int i; 897 int i;
804 unsigned index_min, index_max;
805 898
806 for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { 899 for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
807 index_min = msr_range_array[i].min; 900 index_min = msr_range_array[i].min;
808 index_max = msr_range_array[i].max; 901 index_max = msr_range_array[i].max;
902
809 for (index = index_min; index < index_max; index++) { 903 for (index = index_min; index < index_max; index++) {
810 if (rdmsrl_amd_safe(index, &val)) 904 if (rdmsrl_amd_safe(index, &val))
811 continue; 905 continue;
@@ -815,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
815} 909}
816 910
817static int show_msr __cpuinitdata; 911static int show_msr __cpuinitdata;
912
818static __init int setup_show_msr(char *arg) 913static __init int setup_show_msr(char *arg)
819{ 914{
820 int num; 915 int num;
@@ -836,12 +931,14 @@ __setup("noclflush", setup_noclflush);
836 931
837void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 932void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
838{ 933{
839 char *vendor = NULL; 934 const char *vendor = NULL;
840 935
841 if (c->x86_vendor < X86_VENDOR_NUM) 936 if (c->x86_vendor < X86_VENDOR_NUM) {
842 vendor = this_cpu->c_vendor; 937 vendor = this_cpu->c_vendor;
843 else if (c->cpuid_level >= 0) 938 } else {
844 vendor = c->x86_vendor_id; 939 if (c->cpuid_level >= 0)
940 vendor = c->x86_vendor_id;
941 }
845 942
846 if (vendor && !strstr(c->x86_model_id, vendor)) 943 if (vendor && !strstr(c->x86_model_id, vendor))
847 printk(KERN_CONT "%s ", vendor); 944 printk(KERN_CONT "%s ", vendor);
@@ -868,65 +965,45 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
868static __init int setup_disablecpuid(char *arg) 965static __init int setup_disablecpuid(char *arg)
869{ 966{
870 int bit; 967 int bit;
968
871 if (get_option(&arg, &bit) && bit < NCAPINTS*32) 969 if (get_option(&arg, &bit) && bit < NCAPINTS*32)
872 setup_clear_cpu_cap(bit); 970 setup_clear_cpu_cap(bit);
873 else 971 else
874 return 0; 972 return 0;
973
875 return 1; 974 return 1;
876} 975}
877__setup("clearcpuid=", setup_disablecpuid); 976__setup("clearcpuid=", setup_disablecpuid);
878 977
879#ifdef CONFIG_X86_64 978#ifdef CONFIG_X86_64
880struct x8664_pda **_cpu_pda __read_mostly;
881EXPORT_SYMBOL(_cpu_pda);
882
883struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 979struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
884 980
885static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; 981DEFINE_PER_CPU_FIRST(union irq_stack_union,
982 irq_stack_union) __aligned(PAGE_SIZE);
886 983
887void __cpuinit pda_init(int cpu) 984DEFINE_PER_CPU(char *, irq_stack_ptr) =
888{ 985 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
889 struct x8664_pda *pda = cpu_pda(cpu);
890 986
891 /* Setup up data that may be needed in __get_free_pages early */ 987DEFINE_PER_CPU(unsigned long, kernel_stack) =
892 loadsegment(fs, 0); 988 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
893 loadsegment(gs, 0); 989EXPORT_PER_CPU_SYMBOL(kernel_stack);
894 /* Memory clobbers used to order PDA accessed */
895 mb();
896 wrmsrl(MSR_GS_BASE, pda);
897 mb();
898
899 pda->cpunumber = cpu;
900 pda->irqcount = -1;
901 pda->kernelstack = (unsigned long)stack_thread_info() -
902 PDA_STACKOFFSET + THREAD_SIZE;
903 pda->active_mm = &init_mm;
904 pda->mmu_state = 0;
905
906 if (cpu == 0) {
907 /* others are initialized in smpboot.c */
908 pda->pcurrent = &init_task;
909 pda->irqstackptr = boot_cpu_stack;
910 pda->irqstackptr += IRQSTACKSIZE - 64;
911 } else {
912 if (!pda->irqstackptr) {
913 pda->irqstackptr = (char *)
914 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
915 if (!pda->irqstackptr)
916 panic("cannot allocate irqstack for cpu %d",
917 cpu);
918 pda->irqstackptr += IRQSTACKSIZE - 64;
919 }
920 990
921 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) 991DEFINE_PER_CPU(unsigned int, irq_count) = -1;
922 pda->nodenumber = cpu_to_node(cpu);
923 }
924}
925 992
926static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + 993/*
927 DEBUG_STKSZ] __page_aligned_bss; 994 * Special IST stacks which the CPU switches to when it calls
995 * an IST-marked descriptor entry. Up to 7 stacks (hardware
996 * limit), all of them are 4K, except the debug stack which
997 * is 8K.
998 */
999static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1000 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
1001 [DEBUG_STACK - 1] = DEBUG_STKSZ
1002};
928 1003
929extern asmlinkage void ignore_sysret(void); 1004static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1005 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
1006 __aligned(PAGE_SIZE);
930 1007
931/* May not be marked __init: used by software suspend */ 1008/* May not be marked __init: used by software suspend */
932void syscall_init(void) 1009void syscall_init(void)
@@ -957,16 +1034,38 @@ unsigned long kernel_eflags;
957 */ 1034 */
958DEFINE_PER_CPU(struct orig_ist, orig_ist); 1035DEFINE_PER_CPU(struct orig_ist, orig_ist);
959 1036
960#else 1037#else /* CONFIG_X86_64 */
1038
1039#ifdef CONFIG_CC_STACKPROTECTOR
1040DEFINE_PER_CPU(unsigned long, stack_canary);
1041#endif
961 1042
962/* Make sure %fs is initialized properly in idle threads */ 1043/* Make sure %fs and %gs are initialized properly in idle threads */
963struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) 1044struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
964{ 1045{
965 memset(regs, 0, sizeof(struct pt_regs)); 1046 memset(regs, 0, sizeof(struct pt_regs));
966 regs->fs = __KERNEL_PERCPU; 1047 regs->fs = __KERNEL_PERCPU;
1048 regs->gs = __KERNEL_STACK_CANARY;
1049
967 return regs; 1050 return regs;
968} 1051}
969#endif 1052#endif /* CONFIG_X86_64 */
1053
1054/*
1055 * Clear all 6 debug registers:
1056 */
1057static void clear_all_debug_regs(void)
1058{
1059 int i;
1060
1061 for (i = 0; i < 8; i++) {
1062 /* Ignore db4, db5 */
1063 if ((i == 4) || (i == 5))
1064 continue;
1065
1066 set_debugreg(0, i);
1067 }
1068}
970 1069
971/* 1070/*
972 * cpu_init() initializes state that is per-CPU. Some data is already 1071 * cpu_init() initializes state that is per-CPU. Some data is already
@@ -976,21 +1075,25 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
976 * A lot of state is already set up in PDA init for 64 bit 1075 * A lot of state is already set up in PDA init for 64 bit
977 */ 1076 */
978#ifdef CONFIG_X86_64 1077#ifdef CONFIG_X86_64
1078
979void __cpuinit cpu_init(void) 1079void __cpuinit cpu_init(void)
980{ 1080{
981 int cpu = stack_smp_processor_id(); 1081 struct orig_ist *orig_ist;
982 struct tss_struct *t = &per_cpu(init_tss, cpu);
983 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
984 unsigned long v;
985 char *estacks = NULL;
986 struct task_struct *me; 1082 struct task_struct *me;
1083 struct tss_struct *t;
1084 unsigned long v;
1085 int cpu;
987 int i; 1086 int i;
988 1087
989 /* CPU 0 is initialised in head64.c */ 1088 cpu = stack_smp_processor_id();
990 if (cpu != 0) 1089 t = &per_cpu(init_tss, cpu);
991 pda_init(cpu); 1090 orig_ist = &per_cpu(orig_ist, cpu);
992 else 1091
993 estacks = boot_exception_stacks; 1092#ifdef CONFIG_NUMA
1093 if (cpu != 0 && percpu_read(node_number) == 0 &&
1094 cpu_to_node(cpu) != NUMA_NO_NODE)
1095 percpu_write(node_number, cpu_to_node(cpu));
1096#endif
994 1097
995 me = current; 1098 me = current;
996 1099
@@ -1006,7 +1109,9 @@ void __cpuinit cpu_init(void)
1006 * and set up the GDT descriptor: 1109 * and set up the GDT descriptor:
1007 */ 1110 */
1008 1111
1009 switch_to_new_gdt(); 1112 switch_to_new_gdt(cpu);
1113 loadsegment(fs, 0);
1114
1010 load_idt((const struct desc_ptr *)&idt_descr); 1115 load_idt((const struct desc_ptr *)&idt_descr);
1011 1116
1012 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 1117 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,31 +1122,24 @@ void __cpuinit cpu_init(void)
1017 barrier(); 1122 barrier();
1018 1123
1019 check_efer(); 1124 check_efer();
1020 if (cpu != 0 && x2apic) 1125 if (cpu != 0)
1021 enable_x2apic(); 1126 enable_x2apic();
1022 1127
1023 /* 1128 /*
1024 * set up and load the per-CPU TSS 1129 * set up and load the per-CPU TSS
1025 */ 1130 */
1026 if (!orig_ist->ist[0]) { 1131 if (!orig_ist->ist[0]) {
1027 static const unsigned int order[N_EXCEPTION_STACKS] = { 1132 char *estacks = per_cpu(exception_stacks, cpu);
1028 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, 1133
1029 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
1030 };
1031 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1134 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1032 if (cpu) { 1135 estacks += exception_stack_sizes[v];
1033 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
1034 if (!estacks)
1035 panic("Cannot allocate exception "
1036 "stack %ld %d\n", v, cpu);
1037 }
1038 estacks += PAGE_SIZE << order[v];
1039 orig_ist->ist[v] = t->x86_tss.ist[v] = 1136 orig_ist->ist[v] = t->x86_tss.ist[v] =
1040 (unsigned long)estacks; 1137 (unsigned long)estacks;
1041 } 1138 }
1042 } 1139 }
1043 1140
1044 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1141 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1142
1045 /* 1143 /*
1046 * <= is required because the CPU will access up to 1144 * <= is required because the CPU will access up to
1047 * 8 bits beyond the end of the IO permission bitmap. 1145 * 8 bits beyond the end of the IO permission bitmap.
@@ -1051,8 +1149,7 @@ void __cpuinit cpu_init(void)
1051 1149
1052 atomic_inc(&init_mm.mm_count); 1150 atomic_inc(&init_mm.mm_count);
1053 me->active_mm = &init_mm; 1151 me->active_mm = &init_mm;
1054 if (me->mm) 1152 BUG_ON(me->mm);
1055 BUG();
1056 enter_lazy_tlb(&init_mm, me); 1153 enter_lazy_tlb(&init_mm, me);
1057 1154
1058 load_sp0(t, &current->thread); 1155 load_sp0(t, &current->thread);
@@ -1069,22 +1166,9 @@ void __cpuinit cpu_init(void)
1069 */ 1166 */
1070 if (kgdb_connected && arch_kgdb_ops.correct_hw_break) 1167 if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
1071 arch_kgdb_ops.correct_hw_break(); 1168 arch_kgdb_ops.correct_hw_break();
1072 else { 1169 else
1073#endif
1074 /*
1075 * Clear all 6 debug registers:
1076 */
1077
1078 set_debugreg(0UL, 0);
1079 set_debugreg(0UL, 1);
1080 set_debugreg(0UL, 2);
1081 set_debugreg(0UL, 3);
1082 set_debugreg(0UL, 6);
1083 set_debugreg(0UL, 7);
1084#ifdef CONFIG_KGDB
1085 /* If the kgdb is connected no debug regs should be altered. */
1086 }
1087#endif 1170#endif
1171 clear_all_debug_regs();
1088 1172
1089 fpu_init(); 1173 fpu_init();
1090 1174
@@ -1105,7 +1189,8 @@ void __cpuinit cpu_init(void)
1105 1189
1106 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { 1190 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
1107 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); 1191 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
1108 for (;;) local_irq_enable(); 1192 for (;;)
1193 local_irq_enable();
1109 } 1194 }
1110 1195
1111 printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1196 printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1114,15 +1199,14 @@ void __cpuinit cpu_init(void)
1114 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1199 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1115 1200
1116 load_idt(&idt_descr); 1201 load_idt(&idt_descr);
1117 switch_to_new_gdt(); 1202 switch_to_new_gdt(cpu);
1118 1203
1119 /* 1204 /*
1120 * Set up and load the per-CPU TSS and LDT 1205 * Set up and load the per-CPU TSS and LDT
1121 */ 1206 */
1122 atomic_inc(&init_mm.mm_count); 1207 atomic_inc(&init_mm.mm_count);
1123 curr->active_mm = &init_mm; 1208 curr->active_mm = &init_mm;
1124 if (curr->mm) 1209 BUG_ON(curr->mm);
1125 BUG();
1126 enter_lazy_tlb(&init_mm, curr); 1210 enter_lazy_tlb(&init_mm, curr);
1127 1211
1128 load_sp0(t, thread); 1212 load_sp0(t, thread);
@@ -1135,16 +1219,7 @@ void __cpuinit cpu_init(void)
1135 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1219 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1136#endif 1220#endif
1137 1221
1138 /* Clear %gs. */ 1222 clear_all_debug_regs();
1139 asm volatile ("mov %0, %%gs" : : "r" (0));
1140
1141 /* Clear all 6 debug registers: */
1142 set_debugreg(0, 0);
1143 set_debugreg(0, 1);
1144 set_debugreg(0, 2);
1145 set_debugreg(0, 3);
1146 set_debugreg(0, 6);
1147 set_debugreg(0, 7);
1148 1223
1149 /* 1224 /*
1150 * Force FPU initialization: 1225 * Force FPU initialization:
@@ -1164,6 +1239,4 @@ void __cpuinit cpu_init(void)
1164 1239
1165 xsave_init(); 1240 xsave_init();
1166} 1241}
1167
1168
1169#endif 1242#endif