Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/Kconfig.cpu                |  13
-rw-r--r--   arch/x86/boot/cpucheck.c            |   8
-rw-r--r--   arch/x86/kernel/alternative.c       |  36
-rw-r--r--   arch/x86/kernel/cpu/amd.c           |   9
-rw-r--r--   arch/x86/kernel/cpu/centaur.c       |  11
-rw-r--r--   arch/x86/kernel/cpu/common.c        |  34
-rw-r--r--   arch/x86/kernel/cpu/common_64.c     |  74
-rw-r--r--   arch/x86/kernel/cpu/cyrix.c         |  32
-rw-r--r--   arch/x86/kernel/cpu/feature_names.c |   3
-rw-r--r--   arch/x86/kernel/e820.c              |   2
-rw-r--r--   arch/x86/kernel/hpet.c              |  19
-rw-r--r--   arch/x86/kernel/io_delay.c          |   8
-rw-r--r--   arch/x86/kernel/tsc.c               | 135
-rw-r--r--   arch/x86/xen/enlighten.c            |   2
14 files changed, 256 insertions(+), 130 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2c518fbc52ec..b225219c448c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -382,14 +382,17 @@ config X86_OOSTORE
 # P6_NOPs are a relatively minor optimization that require a family >=
 # 6 processor, except that it is broken on certain VIA chips.
 # Furthermore, AMD chips prefer a totally different sequence of NOPs
-# (which work on all CPUs).  As a result, disallow these if we're
-# compiling X86_GENERIC but not X86_64 (these NOPs do work on all
-# x86-64 capable chips); the list of processors in the right-hand clause
-# are the cores that benefit from this optimization.
+# (which work on all CPUs).  In addition, it looks like Virtual PC
+# does not understand them.
+#
+# As a result, disallow these if we're not compiling for X86_64 (these
+# NOPs do work on all x86-64 capable chips); the list of processors in
+# the right-hand clause are the cores that benefit from this optimization.
 #
 config X86_P6_NOP
 	def_bool y
-	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC)
+	depends on X86_64
+	depends on (MCORE2 || MPENTIUM4 || MPSC)
 
 config X86_TSC
 	def_bool y
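Note: the P6_NOPs gated above are the 0f 1f family of "long NOP" encodings. A minimal sketch of the shortest entries, with byte values following the asm/nops.h convention (shown for illustration only; the kernel's NOP tables are authoritative):

    /* Illustrative P6 ("long") NOP encodings, one per length. */
    static const unsigned char p6_nop1[] = { 0x90 };                   /* nop */
    static const unsigned char p6_nop2[] = { 0x66, 0x90 };             /* osp nop */
    static const unsigned char p6_nop3[] = { 0x0f, 0x1f, 0x00 };       /* nopl (%eax) */
    static const unsigned char p6_nop4[] = { 0x0f, 0x1f, 0x40, 0x00 }; /* nopl 0(%eax) */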
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4b9ae7c56748..4d3ff037201f 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -38,12 +38,12 @@ static const u32 req_flags[NCAPINTS] =
 {
 	REQUIRED_MASK0,
 	REQUIRED_MASK1,
-	REQUIRED_MASK2,
-	REQUIRED_MASK3,
+	0, /* REQUIRED_MASK2 not implemented in this file */
+	0, /* REQUIRED_MASK3 not implemented in this file */
 	REQUIRED_MASK4,
-	REQUIRED_MASK5,
+	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
-	REQUIRED_MASK7,
+	0, /* REQUIRED_MASK7 not implemented in this file */
 };
 
 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2763cb37b553..65a0c1b48696 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 extern char __vsyscall_0;
 const unsigned char *const *find_nop_table(void)
 {
-	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	       boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return k8_nops;
 }
 
 #else /* CONFIG_X86_64 */
 
-static const struct nop {
-	int cpuid;
-	const unsigned char *const *noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ X86_FEATURE_P4, p6_nops },
-	{ X86_FEATURE_P3, p6_nops },
-	{ -1, NULL }
-};
-
 const unsigned char *const *find_nop_table(void)
 {
-	const unsigned char *const *noptable = intel_nops;
-	int i;
-
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	return noptable;
+	if (boot_cpu_has(X86_FEATURE_K8))
+		return k8_nops;
+	else if (boot_cpu_has(X86_FEATURE_K7))
+		return k7_nops;
+	else if (boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return intel_nops;
 }
 
 #endif /* CONFIG_X86_64 */
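Note: the k8_nops fallback above is safe on any part because K8-style NOPs are just 0x90 padded with 0x66 operand-size prefixes, which every x86 CPU decodes. A sketch of the convention (byte values for illustration, per asm/nops.h):

    static const unsigned char k8_nop2[] = { 0x66, 0x90 };
    static const unsigned char k8_nop3[] = { 0x66, 0x66, 0x90 };
    static const unsigned char k8_nop4[] = { 0x66, 0x66, 0x66, 0x90 };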
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..18514ed26104 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	   (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		   (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..a0534c04d38a 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
 };
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..8aab8517642e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -334,11 +335,40 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
+	early_get_cap(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+}
 
-	early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		}
 
 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
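Note: the probe works because nopl leaves %eax untouched: if the instruction executes, the random signature survives; if it traps, the fixup at 3: zeroes it. Outside the kernel the same first-principles check can be approximated with a SIGILL handler in place of the exception table (hypothetical user-space harness, not part of the patch):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf nopl_jmp;

    static void sigill_handler(int sig)
    {
            (void)sig;
            siglongjmp(nopl_jmp, 1);
    }

    int main(void)
    {
            signal(SIGILL, sigill_handler);
            if (sigsetjmp(nopl_jmp, 1) == 0) {
                    /* the same 0f 1f c0 (nopl %eax) bytes the kernel probes */
                    asm volatile(".byte 0x0f, 0x1f, 0xc0");
                    puts("nopl supported");
            } else {
                    puts("nopl not supported");
            }
            return 0;
    }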
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index dd6e3f15017e..a11f5d4477cd 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}
 
+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -493,17 +529,20 @@ void pda_init(int cpu)
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
 
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE-64;
 }
 
 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
@@ -601,19 +640,22 @@ void __cpuinit cpu_init(void)
 	/*
	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
 			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-				      v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+				(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
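Note: pda->irqstackptr has to point at the top of the allocation because x86 stacks grow down, and the "- 64" leaves a small scratch area above the usable range; after this change the adjustment is applied once per allocation rather than on every pda_init() call. A user-space model of the arithmetic (stack size assumed for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    #define IRQSTACKSIZE (16 * 1024)  /* assumed size, for illustration */

    int main(void)
    {
            char *base = malloc(IRQSTACKSIZE);      /* kernel: __get_free_pages() */
            char *top  = base + IRQSTACKSIZE - 64;  /* stack grows down from here */

            printf("usable stack: [%p .. %p)\n", (void *)base, (void *)top);
            free(base);
            return 0;
    }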
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index e710a21bb6e8..898a5a2002ed 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }
 
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
 
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
 };
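Note: DIR0's most significant nibble encodes the Cyrix family, which is why case 3 (6x86/6x86L) and case 5 (6x86MX/M II) both select the ARR-based MTRR emulation. The decoding reduced to a sketch (DEVID value hypothetical):

    unsigned char dir0 = 0x31;          /* hypothetical DEVID byte */
    unsigned char dir0_msn = dir0 >> 4; /* 0x3 -> 6x86/6x86L family */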
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index e43ad4ad4cba..c9017799497c 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 	/* Intel-defined (#2) */
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 9af89078f7bb..66e48aa2dd1b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1203,7 +1203,7 @@ static int __init parse_memmap_opt(char *p)
 	if (!p)
 		return -EINVAL;
 
-	if (!strcmp(p, "exactmap")) {
+	if (!strncmp(p, "exactmap", 8)) {
 #ifdef CONFIG_CRASH_DUMP
 		/*
 		 * If we are doing a crash dump, we still need to know
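Note: the strncmp() form still matches when text follows the keyword in the option string, which the exact strcmp() rejected. A quick model (illustrative input):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *p = "exactmapX";    /* keyword with trailing text */

            printf("strcmp:  %d\n", strcmp(p, "exactmap") == 0);     /* 0 */
            printf("strncmp: %d\n", strncmp(p, "exactmap", 8) == 0); /* 1 */
            return 0;
    }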
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 59fd3b6b1303..73deaffadd03 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -210,8 +210,8 @@ static void hpet_legacy_clockevent_register(void)
 	/* Calculate the min / max delta */
 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
 							   &hpet_clockevent);
-	hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
-							   &hpet_clockevent);
+	/* 5 usec minimum reprogramming delta. */
+	hpet_clockevent.min_delta_ns = 5000;
 
 	/*
 	 * Start hpet with the boot cpu mask and make it
@@ -270,15 +270,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
 }
 
 static int hpet_legacy_next_event(unsigned long delta,
 				  struct clock_event_device *evt)
 {
-	unsigned long cnt;
+	u32 cnt;
 
 	cnt = hpet_readl(HPET_COUNTER);
-	cnt += delta;
+	cnt += (u32) delta;
 	hpet_writel(cnt, HPET_T0_CMP);
 
-	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
+	/*
+	 * We need to read back the CMP register to make sure that
+	 * what we wrote hit the chip before we compare it to the
+	 * counter.
+	 */
+	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+
+	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
 
 /*
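Note: the new return expression relies on modular 32-bit arithmetic: (s32)((u32)counter - cnt) >= 0 stays correct even when the free-running counter wraps, where a plain ordered compare would not. The idiom in isolation (illustrative values):

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero if a free-running 32-bit counter has passed the
     * comparator, correct across wraparound. */
    static int counter_passed(uint32_t counter, uint32_t cmp)
    {
            return (int32_t)(counter - cmp) >= 0;
    }

    int main(void)
    {
            /* comparator just below wrap, counter just past wrap */
            printf("%d\n", counter_passed(0x00000005, 0xfffffff0)); /* 1 */
            printf("%d\n", counter_passed(0xffffffef, 0xfffffff0)); /* 0 */
            return 0;
    }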
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index 1c3a66a67f83..720d2607aacb 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "30BF")
 		}
 	},
+	{
+		.callback	= dmi_io_delay_0xed_port,
+		.ident		= "Presario F700",
+		.matches	= {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
+			DMI_MATCH(DMI_BOARD_NAME, "30D3")
+		}
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ac79bd143da8..8f98e9de1b82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -122,15 +122,75 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
 	return ULLONG_MAX;
 }
 
+/*
+ * Try to calibrate the TSC against the Programmable
+ * Interrupt Timer and return the frequency of the TSC
+ * in kHz.
+ *
+ * Return ULONG_MAX on failure to calibrate.
+ */
+static unsigned long pit_calibrate_tsc(void)
+{
+	u64 tsc, t1, t2, delta;
+	unsigned long tscmin, tscmax;
+	int pitcnt;
+
+	/* Set the Gate high, disable speaker */
+	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+	/*
+	 * Setup CTC channel 2* for mode 0, (interrupt on terminal
+	 * count mode), binary count. Set the latch register to 50ms
+	 * (LSB then MSB) to begin countdown.
+	 */
+	outb(0xb0, 0x43);
+	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
+	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
+
+	tsc = t1 = t2 = get_cycles();
+
+	pitcnt = 0;
+	tscmax = 0;
+	tscmin = ULONG_MAX;
+	while ((inb(0x61) & 0x20) == 0) {
+		t2 = get_cycles();
+		delta = t2 - tsc;
+		tsc = t2;
+		if ((unsigned long) delta < tscmin)
+			tscmin = (unsigned int) delta;
+		if ((unsigned long) delta > tscmax)
+			tscmax = (unsigned int) delta;
+		pitcnt++;
+	}
+
+	/*
+	 * Sanity checks:
+	 *
+	 * If we were not able to read the PIT more than 5000
+	 * times, then we have been hit by a massive SMI
+	 *
+	 * If the maximum is 10 times larger than the minimum,
+	 * then we got hit by an SMI as well.
+	 */
+	if (pitcnt < 5000 || tscmax > 10 * tscmin)
+		return ULONG_MAX;
+
+	/* Calculate the PIT value */
+	delta = t2 - t1;
+	do_div(delta, 50);
+	return delta;
+}
+
+
 /**
  * native_calibrate_tsc - calibrate the tsc on boot
  */
 unsigned long native_calibrate_tsc(void)
 {
-	u64 tsc1, tsc2, tr1, tr2, tsc, delta, pm1, pm2, hpet1, hpet2;
+	u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
 	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-	unsigned long flags, tscmin, tscmax;
-	int hpet = is_hpet_enabled(), pitcnt, i;
+	unsigned long flags;
+	int hpet = is_hpet_enabled(), i;
 
 	/*
 	 * Run 5 calibration loops to get the lowest frequency value
@@ -157,72 +217,22 @@ unsigned long native_calibrate_tsc(void)
 	 * amount of time anyway.
 	 */
 	for (i = 0; i < 5; i++) {
-
-		tscmin = ULONG_MAX;
-		tscmax = 0;
-		pitcnt = 0;
-
-		local_irq_save(flags);
+		unsigned long tsc_pit_khz;
 
 		/*
 		 * Read the start value and the reference count of
-		 * hpet/pmtimer when available:
+		 * hpet/pmtimer when available. Then do the PIT
+		 * calibration, which will take at least 50ms, and
+		 * read the end value.
 		 */
+		local_irq_save(flags);
 		tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
-
-		/* Set the Gate high, disable speaker */
-		outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-
-		/*
-		 * Setup CTC channel 2* for mode 0, (interrupt on terminal
-		 * count mode), binary count. Set the latch register to 50ms
-		 * (LSB then MSB) to begin countdown.
-		 *
-		 * Some devices need a delay here.
-		 */
-		outb(0xb0, 0x43);
-		outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
-		outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
-
-		tsc = tr1 = tr2 = get_cycles();
-
-		while ((inb(0x61) & 0x20) == 0) {
-			tr2 = get_cycles();
-			delta = tr2 - tsc;
-			tsc = tr2;
-			if ((unsigned int) delta < tscmin)
-				tscmin = (unsigned int) delta;
-			if ((unsigned int) delta > tscmax)
-				tscmax = (unsigned int) delta;
-			pitcnt++;
-		}
-
-		/*
-		 * We waited at least 50ms above. Now read
-		 * pmtimer/hpet reference again
-		 */
+		tsc_pit_khz = pit_calibrate_tsc();
 		tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
-
 		local_irq_restore(flags);
 
-		/*
-		 * Sanity checks:
-		 *
-		 * If we were not able to read the PIT more than 5000
-		 * times, then we have been hit by a massive SMI
-		 *
-		 * If the maximum is 10 times larger than the minimum,
-		 * then we got hit by an SMI as well.
-		 */
-		if (pitcnt > 5000 && tscmax < 10 * tscmin) {
-
-			/* Calculate the PIT value */
-			delta = tr2 - tr1;
-			do_div(delta, 50);
-
-			/* We take the smallest value into account */
-			tsc_pit_min = min(tsc_pit_min, (unsigned long) delta);
-		}
+		/* Pick the lowest PIT TSC calibration so far */
+		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
 
 		/* hpet or pmtimer available ? */
 		if (!hpet && !pm1 && !pm2)
@@ -257,8 +267,7 @@ unsigned long native_calibrate_tsc(void)
 	 */
 	if (tsc_pit_min == ULONG_MAX) {
 		/* PIT gave no useful value */
-		printk(KERN_WARNING "TSC: PIT calibration failed due to "
-		       "SMI disturbance.\n");
+		printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
 
 		/* We don't have an alternative source, disable TSC */
 		if (!hpet && !pm1 && !pm2) {
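Note: pit_calibrate_tsc() times a 50 ms PIT countdown, so dividing the measured TSC delta by 50 yields cycles per millisecond, which is the TSC frequency in kHz. A worked model (numbers assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
            /* a 2.4 GHz TSC advances ~120,000,000 cycles in 50 ms */
            unsigned long long delta = 120000000ULL;  /* measured cycles */
            unsigned long tsc_khz = delta / 50;       /* cycles/ms == kHz */

            printf("TSC ~ %lu kHz (%lu MHz)\n", tsc_khz, tsc_khz / 1000);
            return 0;
    }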
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9ff6e3cbf08f..a4e201b47f64 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
 	.pte_val = xen_pte_val,
-	.pte_flags = native_pte_val,
+	.pte_flags = native_pte_flags,
 	.pgd_val = xen_pgd_val,
 
 	.make_pte = xen_make_pte,