author     Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:16:58 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:16:58 -0400
commit     f7627e2513987bb5d4e8cb13c4e0a478352141ac (patch)
tree       46ef70a107285c1dfe8161a57f433d30252d285a /arch/x86/kernel/cpu/common.c
parent     4ac24f63fd203bc12a841a88a2034dccd358d0d1 (diff)
i386: move kernel/cpu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c | 733
1 file changed, 733 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
new file mode 100644
index 000000000000..d506201d397c
--- /dev/null
+++ b/arch/x86/kernel/cpu/common.c
@@ -0,0 +1,733 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
        [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
        [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
        [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits,
         * the transfer segment sizes are set at run time.
         */
        [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },  /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },  /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 },    /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },   /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },   /* 16-bit data */
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },  /* 32-bit code */
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
        [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 },        /* data */

        [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
        [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
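
/*
 * Decoding the { a, b } descriptor pairs above: 'a' packs limit[15:0]
 * and base[15:0]; 'b' packs base[23:16], the access byte, limit[19:16],
 * the flag nibble (G/D) and base[31:24].  0x00cf9a00 is therefore a
 * present, ring-0, 32-bit code segment with base 0 and a 4GB
 * page-granular limit, and 0x00cf9200 the matching data segment.
 */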

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int disable_x86_sep __cpuinitdata;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}
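
/*
 * For reference: CPUID leaves 0x80000002..0x80000004 each return 16
 * ASCII bytes of the brand string in EAX/EBX/ECX/EDX, 48 bytes in all,
 * which is why twelve u32 slots are filled above and the string is
 * terminated at byte 48.
 */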


void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}
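
/*
 * For reference, the extended leaves consumed above: 0x80000005
 * describes the L1 caches (ECX = D-cache, EDX = I-cache; bits 31:24
 * give the size in KB, bits 7:0 the line size in bytes) and
 * 0x80000006 the L2 (ECX bits 31:16 size in KB, bits 7:0 line size).
 */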

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, it isn't used. */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}


static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
        char *v = c->x86_vendor_id;
        int i;
        static int printed;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
                                return;
                        }
                }
        }
        if (!printed) {
                printed++;
                printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}


static int __init x86_fxsr_setup(char *s)
{
        /* Tell all the other CPUs not to use it... */
        disable_x86_fxsr = 1;

        /*
         * ... and clear the bits early in the boot_cpu_data
         * so that the bootup process doesn't try to do this
         * either.
         */
        clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
        clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);


static int __init x86_sep_setup(char *s)
{
        disable_x86_sep = 1;
        return 1;
}
__setup("nosep", x86_sep_setup);


/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        asm("pushfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "movl %0,%1\n\t"
            "xorl %2,%0\n\t"
            "pushl %0\n\t"
            "popfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "popfl\n\t"
            : "=&r" (f1), "=&r" (f2)
            : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}
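
/*
 * The asm above saves EFLAGS twice (the outer copy is restored at the
 * end), toggles the candidate bit in the inner copy, writes it back
 * with popfl and reads EFLAGS again; the bit only sticks if the CPU
 * lets software change it.  Toggling X86_EFLAGS_ID this way is the
 * architectural test for CPUID support on 486-class processors.
 */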


/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

void __init cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                c->x86_mask = tfms & 15;
                if (cap0 & (1<<19))
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
        }
}
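
/*
 * Worked example of the decoding above: tfms == 0x000006fb (a Core 2)
 * yields family (tfms >> 8) & 15 == 6, base model (tfms >> 4) & 15 ==
 * 0xf with extended model nibble 0, and stepping 0xb -- i.e. family 6,
 * model 15, stepping 11.  Only family 0xf parts add the extended
 * family from bits 27:20.
 */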

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask,
   cache alignment.  The others are not touched to avoid unwanted
   side effects.

   WARNING: this function is only called on the BP.  Don't add code
   here that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        c->x86_cache_alignment = 32;

        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c, 1);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        int ebx;

        if (have_cpuid_p()) {
                /* Get vendor name */
                cpuid(0x00000000, &c->cpuid_level,
                      (int *)&c->x86_vendor_id[0],
                      (int *)&c->x86_vendor_id[8],
                      (int *)&c->x86_vendor_id[4]);

                get_cpu_vendor(c, 0);
                /* Initialize the standard set of capabilities */
                /* Note that the vendor-specific code below might override */

                /* Intel-defined flags: level 0x00000001 */
                if (c->cpuid_level >= 0x00000001) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf)
                                c->x86 += (tfms >> 20) & 0xff;
                        if (c->x86 >= 0x6)
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        c->x86_mask = tfms & 15;
#ifdef CONFIG_X86_HT
                        c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
                        c->apicid = (ebx >> 24) & 0xFF;
#endif
                        if (c->x86_capability[0] & (1<<19))
                                c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ((xlvl & 0xffff0000) == 0x80000000) {
                        if (xlvl >= 0x80000001) {
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                                c->x86_capability[6] = cpuid_ecx(0x80000001);
                        }
                        if (xlvl >= 0x80000004)
                                get_model_name(c);      /* Default name */
                }

                init_scattered_cpuid_features(c);
        }

        early_intel_workaround(c);

#ifdef CONFIG_X86_HT
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
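
/*
 * The x86_capability[] words filled in above follow the kernel's
 * cpufeature layout: word 0 is CPUID 1 EDX, word 4 CPUID 1 ECX,
 * word 1 CPUID 0x80000001 EDX and word 6 CPUID 0x80000001 ECX;
 * init_scattered_cpuid_features() picks up flags that live in
 * other leaves.
 */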

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
                /* Disable processor serial number */
                unsigned long lo, hi;
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_bit(X86_FEATURE_PN, c->x86_capability);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}
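
/*
 * Bit 21 (0x200000) of MSR_IA32_BBL_CR_CTL is the Pentium III control
 * that disables the processor serial number; once set, CPUID leaf 3
 * vanishes, which is why cpuid_level is re-read afterwards.
 */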

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);



/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_max_cores = 1;
        c->x86_clflush_size = 32;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /* First of all, decide if this is a 486 or higher */
                /* It's a 486 if we can modify the AC flag */
                if (flag_is_changeable_p(X86_EFLAGS_AC))
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }
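
        /*
         * (The AC test mirrors flag_is_changeable_p(X86_EFLAGS_ID) in
         * have_cpuid_p(): the alignment-check flag first appeared on
         * the 486, so a CPU that cannot toggle it must be a 386.)
         */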

        generic_identify(c);

        printk(KERN_DEBUG "CPU: After generic identify, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08lx", c->x86_capability[i]);
        printk("\n");

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);

                printk(KERN_DEBUG "CPU: After vendor identify, caps:");
                for (i = 0; i < NCAPINTS; i++)
                        printk(" %08lx", c->x86_capability[i]);
                printk("\n");
        }

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* TSC disabled? */
        if (tsc_disable)
                clear_bit(X86_FEATURE_TSC, c->x86_capability);

        /* FXSR disabled? */
        if (disable_x86_fxsr) {
                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
                clear_bit(X86_FEATURE_XMM, c->x86_capability);
        }

        /* SEP disabled? */
        if (disable_x86_sep)
                clear_bit(X86_FEATURE_SEP, c->x86_capability);

        if (disable_pse)
                clear_bit(X86_FEATURE_PSE, c->x86_capability);

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */
        printk(KERN_DEBUG "CPU: After all inits, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08lx", c->x86_capability[i]);
        printk("\n");

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_init(c);
}

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        sysenter_setup();
        enable_sep_cpu();
        mtrr_bp_init();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        enable_sep_cpu();
        mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
                                 ((1 << core_bits) - 1);

                if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               c->cpu_core_id);
        }
}
#endif
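
/*
 * detect_ht() carves the initial APIC ID (CPUID 1, EBX[31:24]) into
 * fields: the bits above the rounded-up log2 of the sibling count
 * select the physical package, and the core_bits slice directly below
 * them selects the core within that package.
 */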

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
        intel_cpu_init();
        cyrix_init_cpu();
        nsc_init_cpu();
        amd_init_cpu();
        centaur_init_cpu();
        transmeta_init_cpu();
        nexgen_init_cpu();
        umc_init_cpu();
        early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
        /* pse is not compatible with on-the-fly unmapping,
         * disable it even if the cpus claim to support it.
         */
        clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
        disable_pse = 1;
#endif
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->xfs = __KERNEL_PERCPU;
        return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct Xgt_desc_struct gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
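
/*
 * On i386, %fs is the per-cpu segment register: reloading it with
 * __KERNEL_PERCPU after load_gdt() makes it reference this CPU's own
 * GDT_ENTRY_PERCPU entry, so per-cpu accesses stop going through the
 * boot CPU's "master" area.
 */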

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
        if (tsc_disable && cpu_has_tsc) {
                printk(KERN_NOTICE "Disabling TSC...\n");
                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
                set_in_cr4(X86_CR4_TSD);
        }

        load_idt(&idt_descr);
        switch_to_new_gdt();

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        if (curr->mm)
                BUG();
        enter_lazy_tlb(&init_mm, curr);

        load_esp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        /* Clear %gs. */
        asm volatile ("mov %0, %%gs" : : "r" (0));

        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
        set_debugreg(0, 1);
        set_debugreg(0, 2);
        set_debugreg(0, 3);
        set_debugreg(0, 6);
        set_debugreg(0, 7);

        /*
         * Force FPU initialization:
         */
        current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
        int cpu = raw_smp_processor_id();
        cpu_clear(cpu, cpu_initialized);

        /* lazy TLB state */
        per_cpu(cpu_tlbstate, cpu).state = 0;
        per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif