author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/cpu/common.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/i386/kernel/cpu/common.c')
-rw-r--r--  arch/i386/kernel/cpu/common.c  634
1 file changed, 634 insertions, 0 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
new file mode 100644
index 000000000000..ebd5d8247faa
--- /dev/null
+++ b/arch/i386/kernel/cpu/common.c
@@ -0,0 +1,634 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);

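/*
 * Per-CPU stack used when returning to a 16-bit stack segment via
 * iret (the "espfix" case); cpu_init() below points the
 * GDT_ENTRY_ESPFIX_SS descriptor at this buffer.
 */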
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);

extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init	= default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

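/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * processor brand string in EAX/EBX/ECX/EDX, 48 bytes in all, which
 * is why x86_model_id is filled in below as twelve 32-bit words.
 */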
int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

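/*
 * Extended cache descriptors: leaf 0x80000005 reports the L1 caches
 * (size in KB in bits 31..24, line size in bits 7..0, ECX for the
 * D-cache, EDX for the I-cache); leaf 0x80000006 reports the L2 size
 * in KB in ECX bits 31..16 and its line size in bits 7..0.
 */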
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the
   model name; in particular, it is not used when CPUID levels
   0x80000002..4 (the brand string) are supported. */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}


void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				break;
			}
		}
	}
}


static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

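/*
 * A CPU supports the CPUID instruction exactly when software can
 * toggle the ID bit (bit 21) of EFLAGS; on a 386 not even the AC bit
 * (bit 18) can be toggled, which is how identify_cpu() below tells a
 * 386 from a 486 when CPUID is absent.
 */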
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

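/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX (in that
 * order), hence the 0/8/4 byte offsets into x86_vendor_id below.
 * Leaf 1 returns the signature in EAX: stepping in bits 3..0, model
 * in 7..4, family in 11..8, extended model in 19..16 and extended
 * family in 27..20; the extended fields only apply when the base
 * family is 0xf.  EDX bit 19 is CLFLUSH; when set, EBX bits 15..8
 * give the cache-line size in 8-byte units, which is what becomes
 * x86_cache_alignment.
 */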
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.  The others are not touched to avoid unwanted
 * side effects.
 */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf) {
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}

	early_intel_workaround(c);
}

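/*
 * x86_capability word 0 holds the leaf 1 EDX flags and word 4 the
 * leaf 1 ECX flags; words 1 and 6 hold the AMD-defined EDX/ECX flags
 * from extended leaf 0x80000001.
 */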
void __init generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}
}

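/*
 * Setting bit 21 (0x200000) of MSR_IA32_BBL_CR_CTL disables the
 * Pentium III processor serial number until the next reset.  Hiding
 * the feature can also lower the maximum reported CPUID level, hence
 * the re-read of leaf 0 below.
 */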
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);



/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_num_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
}

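/*
 * Hyper-Threading detection: CPUID leaf 1 puts the number of logical
 * processors per package in EBX bits 23..16 and the initial APIC ID
 * in bits 31..24.  The bit-scanning loops below compute
 * ceil(log2(smp_num_siblings)), i.e. how many low APIC-ID bits
 * number the siblings, so that phys_pkg_id() can extract the
 * physical package ID from the remaining high bits.
 */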
#ifdef CONFIG_X86_HT
void __init detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_lsb, index_msb, tmp;
	int cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		index_lsb = 0;
		index_msb = 31;

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}
		tmp = smp_num_siblings;
		while ((tmp & 1) == 0) {
			tmp >>= 1;
			index_lsb++;
		}
		tmp = smp_num_siblings;
		while ((tmp & 0x80000000) == 0) {
			tmp <<= 1;
			index_msb--;
		}
		if (index_lsb != index_msb)
			index_msb++;
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);
	}
}
#endif

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
	       GDT_SIZE);

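	/*
	 * A GDT entry packs limit 15..0 into descriptor bits 15..0,
	 * base 23..0 into bits 39..16 and base 31..24 into bits
	 * 63..56.  The two shift-and-mask lines below drop the 16-bit
	 * stack's address into those base fields, while
	 * CPU_16BIT_STACK_SIZE - 1 becomes the segment limit.
	 */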
	/* Set up GDT entry for 16bit stack */
	*(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);

	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address =
	    (unsigned long)&per_cpu(cpu_gdt_table, cpu);

	/*
	 * Set up the per-thread TLS descriptor cache:
	 */
	memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
	       GDT_ENTRY_TLS_ENTRIES * 8);

	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));

	/*
	 * Clear the NT (nested task) flag, EFLAGS bit 14; the
	 * 0xffffbfff constant masks it off.
	 */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}