author		Ingo Molnar <mingo@elte.hu>	2009-04-06 03:02:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-06 03:02:57 -0400
commit		f541ae326fa120fa5c57433e4d9a133df212ce41
tree		bdbd94ec72cfc601118051cb35e8617d55510177 /arch/x86/kernel/cpu/common.c
parent		e255357764f92afcafafbd4879b222b8c752065a
parent		0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c
Merge branch 'linus' into perfcounters/core-v2
Merge reason: we have gathered quite a few conflicts, need to merge upstream
Conflicts:
	arch/powerpc/kernel/Makefile
	arch/x86/ia32/ia32entry.S
	arch/x86/include/asm/hardirq.h
	arch/x86/include/asm/unistd_32.h
	arch/x86/include/asm/unistd_64.h
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/irq.c
	arch/x86/kernel/syscall_table_32.S
	arch/x86/mm/iomap_32.c
	include/linux/sched.h
	kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
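
For reference, a merge commit of this shape is produced with the standard git workflow; the sketch below is illustrative only (branch names are taken from the subject line above) and is not a capture of the original session:

    git checkout perfcounters/core-v2   # topic branch being updated
    git merge linus                     # pull in upstream; the files above conflict
    # resolve the conflict markers in each file listed under Conflicts:, then:
    git add <resolved files>
    git commit                          # record the merge with both parents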
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	407
1 files changed, 224 insertions, 183 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b66af09a6c7d..a86769efe0df 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,53 +1,51 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
+#include <asm/perf_counter.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
-#include <asm/perf_counter.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -61,17 +59,7 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
-
-#endif /* CONFIG_X86_32 */
-
-
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -80,48 +68,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	 * IRET will check the segment types kkeil 2000/10/28
 	 * Also sysret mandates a special GDT layout
 	 *
-	 * The TLS descriptors are currently at a different place compared to i386.
+	 * TLS descriptors are currently at a different place compared to i386.
 	 * Hopefully nobody expects them at a fixed place (Wine?)
 	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time. All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 	GDT_STACK_CANARY_INIT
 #endif
 } };
@@ -165,16 +153,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "movl %0, %1 \n\t"
+		      "xorl %2, %0 \n\t"
+		      "pushl %0 \n\t"
+		      "popfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "popfl \n\t"
+
 	      : "=&r" (f1), "=&r" (f2)
 	      : "ir" (flag));
 
@@ -189,18 +178,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -233,6 +226,7 @@ struct cpuid_dependent_feature {
 	u32 feature;
 	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -244,7 +238,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
+
 	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
 		/*
 		 * Note: cpuid_level is set to -1 if unavailable, but
 		 * extended_extended_level is set to 0 if unavailable
@@ -252,32 +250,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		 * when signed; hence the weird messing around with
 		 * signs here...
 		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
 		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
 	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL; /* Range check */
@@ -308,8 +306,10 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
@@ -322,7 +322,7 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -341,7 +341,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init = default_init,
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -355,22 +355,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0'; /* Zero-pad the rest */
 	}
 }
 
@@ -439,27 +441,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
 
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+	index_msb = get_count_order(smp_num_siblings);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
-		index_msb = get_count_order(smp_num_siblings);
+	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-		core_bits = get_count_order(c->x86_max_cores);
+	index_msb = get_count_order(smp_num_siblings);
 
-		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
-				 ((1 << core_bits) - 1);
-	}
+	core_bits = get_count_order(c->x86_max_cores);
+
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -474,8 +479,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -484,6 +489,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -492,7 +498,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -512,14 +520,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
@@ -535,6 +546,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -543,6 +555,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -550,13 +563,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		}
 	}
 
-#ifdef CONFIG_X86_64
 	if (c->extended_cpuid_level >= 0x80000008) {
 		u32 eax = cpuid_eax(0x80000008);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000007)
@@ -603,8 +618,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -635,12 +654,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
@@ -651,7 +670,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk(" %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -727,9 +746,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->cpuid_level = -1; /* CPUID not detected */
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -760,8 +783,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
 	/* Filter out anything that depends on CPUID levels we don't have */
@@ -769,7 +792,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
 		p = table_lookup_model(c);
 		if (p)
 			strcpy(c->x86_model_id, p);
@@ -825,6 +848,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
@@ -845,11 +869,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
 	unsigned min;
 	unsigned max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -858,14 +882,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -875,6 +900,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -896,12 +922,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -928,10 +956,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -941,6 +971,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -950,12 +981,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+	[DEBUG_STACK - 1] = DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
 	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -985,7 +1025,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else /* x86_64 */
+#else /* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -997,9 +1037,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
 	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif /* x86_64 */
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1009,15 +1066,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1058,19 +1120,17 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-			[DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
 		char *estacks = per_cpu(exception_stacks, cpu);
+
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1080,8 +1140,7 @@ void __cpuinit cpu_init(void)
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
-	if (me->mm)
-		BUG();
+	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
 	load_sp0(t, &current->thread);
@@ -1100,17 +1159,7 @@ void __cpuinit cpu_init(void)
 		arch_kgdb_ops.correct_hw_break();
 	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
 	fpu_init();
 
@@ -1131,7 +1180,8 @@ void __cpuinit cpu_init(void)
 
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
 	}
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1147,8 +1197,7 @@ void __cpuinit cpu_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	curr->active_mm = &init_mm;
-	if (curr->mm)
-		BUG();
+	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
@@ -1161,13 +1210,7 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
 	/*
 	 * Force FPU initialization:
@@ -1187,6 +1230,4 @@ void __cpuinit cpu_init(void)
 
 	xsave_init();
 }
-
-
 #endif