author      Linus Torvalds <torvalds@linux-foundation.org>   2010-08-06 13:02:36 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2010-08-06 13:02:36 -0400
commit      0f477dd0851bdcee82923da66a7fc4a44cb1bc3d (patch)
tree        cf8937ba8c7800c6143aa3c9758c6e184c1289ec /arch/x86/kernel
parent      c4efd6b569b2646e1346a08a4c40286f8bcb5f11 (diff)
parent      e8c534ec068af1a0845aceda373a9bfd2de62030 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: Fix keeping track of AMD C1E
x86, cpu: Package Level Thermal Control, Power Limit Notification definitions
x86, cpu: Export AMD errata definitions
x86, cpu: Use AMD errata checking framework for erratum 383
x86, cpu: Clean up AMD erratum 400 workaround
x86, cpu: AMD errata checking framework
x86, cpu: Split addon_cpuid_features.c
x86, cpu: Clean up formatting in cpufeature.h, remove override
x86, cpu: Enumerate xsaveopt
x86, cpu: Add xsaveopt cpufeature
x86, cpu: Make init_scattered_cpuid_features() consider cpuid subleaves
x86, cpu: Support the features flags in new CPUID leaf 7
x86, cpu: Add CPU flags for F16C and RDRND
x86: Look for IA32_ENERGY_PERF_BIAS support
x86, AMD: Extend support to future families
x86, cacheinfo: Carve out L3 cache slot accessors
x86, xsave: Cleanup return codes in check_for_xstate()
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/cpu/Makefile                                                                   2
-rw-r--r--   arch/x86/kernel/cpu/amd.c                                                                     77
-rw-r--r--   arch/x86/kernel/cpu/common.c                                                                  10
-rw-r--r--   arch/x86/kernel/cpu/intel_cacheinfo.c                                                        108
-rw-r--r--   arch/x86/kernel/cpu/scattered.c                                                               63
-rw-r--r--   arch/x86/kernel/cpu/topology.c (renamed from arch/x86/kernel/cpu/addon_cpuid_features.c)      58
-rw-r--r--   arch/x86/kernel/process.c                                                                     45
-rw-r--r--   arch/x86/kernel/xsave.c                                                                       13
8 files changed, 248 insertions, 128 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 3a785da34b6f..5e3a3512ba05 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -12,7 +12,7 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_common.o := $(nostackp)
 
-obj-y := intel_cacheinfo.o addon_cpuid_features.o
+obj-y := intel_cacheinfo.o scattered.o topology.o
 obj-y += proc.o capflags.o powerflags.o common.o
 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e485825130d2..60a57b13082d 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -466,7 +466,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
         }
 
         }
-        if (c->x86 == 0x10 || c->x86 == 0x11)
+        if (c->x86 >= 0x10)
                 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
         /* get apicid instead of initial apic id from cpuid */
@@ -529,7 +529,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                 num_cache_leaves = 3;
         }
 
-        if (c->x86 >= 0xf && c->x86 <= 0x11)
+        if (c->x86 >= 0xf)
                 set_cpu_cap(c, X86_FEATURE_K8);
 
         if (cpu_has_xmm2) {
@@ -546,7 +546,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                         fam10h_check_enable_mmcfg();
         }
 
-        if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+        if (c == &boot_cpu_data && c->x86 >= 0xf) {
                 unsigned long long tseg;
 
                 /*
@@ -609,3 +609,74 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 };
 
 cpu_dev_register(amd_cpu_dev);
+
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
+ * int[] in arch/x86/include/asm/processor.h.
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ *         AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ *                            AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ *                            AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+const int amd_erratum_400[] =
+        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_400);
+
+const int amd_erratum_383[] =
+        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_383);
+
+bool cpu_has_amd_erratum(const int *erratum)
+{
+        struct cpuinfo_x86 *cpu = &current_cpu_data;
+        int osvw_id = *erratum++;
+        u32 range;
+        u32 ms;
+
+        /*
+         * If called early enough that current_cpu_data hasn't been initialized
+         * yet, fall back to boot_cpu_data.
+         */
+        if (cpu->x86 == 0)
+                cpu = &boot_cpu_data;
+
+        if (cpu->x86_vendor != X86_VENDOR_AMD)
+                return false;
+
+        if (osvw_id >= 0 && osvw_id < 65536 &&
+            cpu_has(cpu, X86_FEATURE_OSVW)) {
+                u64 osvw_len;
+
+                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+                if (osvw_id < osvw_len) {
+                        u64 osvw_bits;
+
+                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+                               osvw_bits);
+                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
+                }
+        }
+
+        /* OSVW unavailable or ID unknown, match family-model-stepping range */
+        ms = (cpu->x86_model << 4) | cpu->x86_mask;
+        while ((range = *erratum++))
+                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+                    (ms >= AMD_MODEL_RANGE_START(range)) &&
+                    (ms <= AMD_MODEL_RANGE_END(range)))
+                        return true;
+
+        return false;
+}
+
+EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
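The errata framework above is what the process.c hunk later in this merge switches to for the erratum 400 (C1E) check. A minimal caller sketch, using only the symbols added in this diff; the wrapper function name is illustrative:

/* Sketch of a caller; cpu_has_amd_erratum() and amd_erratum_400 are the
 * exports added above, the wrapper name is purely illustrative. */
extern const int amd_erratum_400[];

static void example_check_erratum_400(void)
{
        if (cpu_has_amd_erratum(amd_erratum_400))
                pr_info("CPU affected by AMD erratum 400, using C1E-aware idle\n");
}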
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 68e4a6f2211e..c7358303d8cd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -551,6 +551,16 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
                 c->x86_capability[4] = excap;
         }
 
+        /* Additional Intel-defined flags: level 0x00000007 */
+        if (c->cpuid_level >= 0x00000007) {
+                u32 eax, ebx, ecx, edx;
+
+                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+
+                if (eax > 0)
+                        c->x86_capability[9] = ebx;
+        }
+
         /* AMD-defined flags: level 0x80000001 */
         xlvl = cpuid_eax(0x80000000);
         c->extended_cpuid_level = xlvl;
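With word 9 of x86_capability now carrying CPUID leaf 7 EBX, leaf-7 features become testable through the ordinary cpu_has() machinery. A sketch, assuming the companion cpufeature.h patch in this series defines X86_FEATURE_FSGSBASE as (9*32 + 0), i.e. leaf 7, EBX bit 0; the helper name is illustrative:

/* Illustrative only: assumes an X86_FEATURE_FSGSBASE define at 9*32 + 0. */
static void __cpuinit example_report_leaf7(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_FSGSBASE))
                pr_info("CPU supports FSGSBASE (CPUID leaf 7, EBX bit 0)\n");
}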
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 33eae2062cf5..898c2f4eab88 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -347,8 +347,8 @@ static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
         return l3;
 }
 
-static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
+                                           int index)
 {
         int node;
 
@@ -396,20 +396,39 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
         this_leaf->l3 = l3_caches[node];
 }
 
+/*
+ * check whether a slot used for disabling an L3 index is occupied.
+ * @l3: L3 cache descriptor
+ * @slot: slot number (0..1)
+ *
+ * @returns: the disabled index if used or negative value if slot free.
+ */
+int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+{
+        unsigned int reg = 0;
+
+        pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+
+        /* check whether this slot is activated already */
+        if (reg & (3UL << 30))
+                return reg & 0xfff;
+
+        return -1;
+}
+
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
                                   unsigned int slot)
 {
-        struct pci_dev *dev = this_leaf->l3->dev;
-        unsigned int reg = 0;
+        int index;
 
         if (!this_leaf->l3 || !this_leaf->l3->can_disable)
                 return -EINVAL;
 
-        if (!dev)
-                return -EINVAL;
+        index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+        if (index >= 0)
+                return sprintf(buf, "%d\n", index);
 
-        pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
-        return sprintf(buf, "0x%08x\n", reg);
+        return sprintf(buf, "FREE\n");
 }
 
 #define SHOW_CACHE_DISABLE(slot) \
@@ -451,37 +470,74 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
         }
 }
 
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-                                   const char *buf, size_t count,
-                                   unsigned int slot)
+/*
+ * disable a L3 cache index by using a disable-slot
+ *
+ * @l3: L3 cache descriptor
+ * @cpu: A CPU on the node containing the L3 cache
+ * @slot: slot number (0..1)
+ * @index: index to disable
+ *
+ * @return: 0 on success, error status on failure
+ */
+int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+                            unsigned long index)
 {
-        struct pci_dev *dev = this_leaf->l3->dev;
-        int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-        unsigned long val = 0;
+        int ret = 0;
 
 #define SUBCACHE_MASK   (3UL << 20)
 #define SUBCACHE_INDEX  0xfff
 
-        if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+        /*
+         * check whether this slot is already used or
+         * the index is already disabled
+         */
+        ret = amd_get_l3_disable_slot(l3, slot);
+        if (ret >= 0)
                 return -EINVAL;
 
+        /*
+         * check whether the other slot has disabled the
+         * same index already
+         */
+        if (index == amd_get_l3_disable_slot(l3, !slot))
+                return -EINVAL;
+
+        /* do not allow writes outside of allowed bits */
+        if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+            ((index & SUBCACHE_INDEX) > l3->indices))
+                return -EINVAL;
+
+        amd_l3_disable_index(l3, cpu, slot, index);
+
+        return 0;
+}
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+                                   const char *buf, size_t count,
+                                   unsigned int slot)
+{
+        unsigned long val = 0;
+        int cpu, err = 0;
+
         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;
 
-        if (!dev)
+        if (!this_leaf->l3 || !this_leaf->l3->can_disable)
                 return -EINVAL;
 
-        if (strict_strtoul(buf, 10, &val) < 0)
-                return -EINVAL;
+        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
 
-        /* do not allow writes outside of allowed bits */
-        if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-            ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
+        if (strict_strtoul(buf, 10, &val) < 0)
                 return -EINVAL;
 
-        amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
-
+        err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+        if (err) {
+                if (err == -EEXIST)
+                        printk(KERN_WARNING "L3 disable slot %d in use!\n",
+                               slot);
+                return err;
+        }
         return count;
 }
 
@@ -502,7 +558,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 
 #else   /* CONFIG_CPU_SUP_AMD */
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
 #endif /* CONFIG_CPU_SUP_AMD */
@@ -518,7 +574,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 
         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                 amd_cpuid4(index, &eax, &ebx, &ecx);
-                amd_check_l3_disable(index, this_leaf);
+                amd_check_l3_disable(this_leaf, index);
         } else {
                 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
         }
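The carve-out replaces the inline sysfs logic with amd_get_l3_disable_slot()/amd_set_l3_disable_slot(), so other kernel code can disable an L3 cache index without going through sysfs. A sketch of such a caller, relying only on the two accessors shown above; the helper name and the -EBUSY fallback are illustrative:

/* Illustrative caller: find a free disable slot and use it for bad_index. */
static int example_disable_l3_index(struct amd_l3_cache *l3, int cpu,
                                    unsigned long bad_index)
{
        unsigned int slot;

        for (slot = 0; slot < 2; slot++) {
                if (amd_get_l3_disable_slot(l3, slot) < 0)
                        return amd_set_l3_disable_slot(l3, cpu, slot,
                                                       bad_index);
        }

        return -EBUSY;  /* both disable slots already occupied */
}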
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
new file mode 100644
index 000000000000..34b4dad6f0b8
--- /dev/null
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -0,0 +1,63 @@
+/*
+ * Routines to indentify additional cpu features that are scattered in
+ * cpuid space.
+ */
+#include <linux/cpu.h>
+
+#include <asm/pat.h>
+#include <asm/processor.h>
+
+#include <asm/apic.h>
+
+struct cpuid_bit {
+        u16 feature;
+        u8 reg;
+        u8 bit;
+        u32 level;
+        u32 sub_leaf;
+};
+
+enum cpuid_regs {
+        CR_EAX = 0,
+        CR_ECX,
+        CR_EDX,
+        CR_EBX
+};
+
+void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+{
+        u32 max_level;
+        u32 regs[4];
+        const struct cpuid_bit *cb;
+
+        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
+                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
+                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
+                { X86_FEATURE_PTS,              CR_EAX, 6, 0x00000006, 0 },
+                { X86_FEATURE_APERFMPERF,       CR_ECX, 0, 0x00000006, 0 },
+                { X86_FEATURE_EPB,              CR_ECX, 3, 0x00000006, 0 },
+                { X86_FEATURE_XSAVEOPT,         CR_EAX, 0, 0x0000000d, 1 },
+                { X86_FEATURE_CPB,              CR_EDX, 9, 0x80000007, 0 },
+                { X86_FEATURE_NPT,              CR_EDX, 0, 0x8000000a, 0 },
+                { X86_FEATURE_LBRV,             CR_EDX, 1, 0x8000000a, 0 },
+                { X86_FEATURE_SVML,             CR_EDX, 2, 0x8000000a, 0 },
+                { X86_FEATURE_NRIPS,            CR_EDX, 3, 0x8000000a, 0 },
+                { 0, 0, 0, 0, 0 }
+        };
+
+        for (cb = cpuid_bits; cb->feature; cb++) {
+
+                /* Verify that the level is valid */
+                max_level = cpuid_eax(cb->level & 0xffff0000);
+                if (max_level < cb->level ||
+                    max_level > (cb->level | 0xffff))
+                        continue;
+
+                cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
+                            &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+
+                if (regs[cb->reg] & (1 << cb->bit))
+                        set_cpu_cap(c, cb->feature);
+        }
+}
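The new sub_leaf field and the switch from cpuid() to cpuid_count() are what allow the XSAVEOPT entry to query CPUID leaf 0xd with ECX = 1. A standalone sketch of that particular lookup, outside the kernel helpers (illustrative, not part of the patch):

/* Check CPUID(0xd, 1).EAX bit 0, the XSAVEOPT bit the table above tests. */
static int example_has_xsaveopt(void)
{
        unsigned int eax, ebx, ecx, edx;

        asm volatile("cpuid"
                     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "a" (0xd), "c" (1));

        return eax & 1;
}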
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/topology.c
index 10fa5684a662..4397e987a1cf 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -1,62 +1,14 @@
 /*
- * Routines to indentify additional cpu features that are scattered in
- * cpuid space.
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
  */
-#include <linux/cpu.h>
 
+#include <linux/cpu.h>
+#include <asm/apic.h>
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <asm/apic.h>
-
-struct cpuid_bit {
-        u16 feature;
-        u8 reg;
-        u8 bit;
-        u32 level;
-};
-
-enum cpuid_regs {
-        CR_EAX = 0,
-        CR_ECX,
-        CR_EDX,
-        CR_EBX
-};
-
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
-{
-        u32 max_level;
-        u32 regs[4];
-        const struct cpuid_bit *cb;
-
-        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006 },
-                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006 },
-                { X86_FEATURE_APERFMPERF,       CR_ECX, 0, 0x00000006 },
-                { X86_FEATURE_CPB,              CR_EDX, 9, 0x80000007 },
-                { X86_FEATURE_NPT,              CR_EDX, 0, 0x8000000a },
-                { X86_FEATURE_LBRV,             CR_EDX, 1, 0x8000000a },
-                { X86_FEATURE_SVML,             CR_EDX, 2, 0x8000000a },
-                { X86_FEATURE_NRIPS,            CR_EDX, 3, 0x8000000a },
-                { 0, 0, 0, 0 }
-        };
-
-        for (cb = cpuid_bits; cb->feature; cb++) {
-
-                /* Verify that the level is valid */
-                max_level = cpuid_eax(cb->level & 0xffff0000);
-                if (max_level < cb->level ||
-                    max_level > (cb->level | 0xffff))
-                        continue;
-
-                cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
-                      &regs[CR_ECX], &regs[CR_EDX]);
-
-                if (regs[cb->reg] & (1 << cb->bit))
-                        set_cpu_cap(c, cb->feature);
-        }
-}
-
 /* leaf 0xb SMT level */
 #define SMT_LEVEL 0
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cbcf013a0ec6..d401f1d2d06e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -526,44 +526,10 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
         return (edx & MWAIT_EDX_C1);
 }
 
-/*
- * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
- * For more information see
- * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
- * - Erratum #365 for family 0x11 (not affected because C1e not in use)
- */
-static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
-{
-        u64 val;
-        if (c->x86_vendor != X86_VENDOR_AMD)
-                goto no_c1e_idle;
-
-        /* Family 0x0f models < rev F do not have C1E */
-        if (c->x86 == 0x0F && c->x86_model >= 0x40)
-                return 1;
-
-        if (c->x86 == 0x10) {
-                /*
-                 * check OSVW bit for CPUs that are not affected
-                 * by erratum #400
-                 */
-                if (cpu_has(c, X86_FEATURE_OSVW)) {
-                        rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
-                        if (val >= 2) {
-                                rdmsrl(MSR_AMD64_OSVW_STATUS, val);
-                                if (!(val & BIT(1)))
-                                        goto no_c1e_idle;
-                        }
-                }
-                return 1;
-        }
-
-no_c1e_idle:
-        return 0;
-}
+bool c1e_detected;
+EXPORT_SYMBOL(c1e_detected);
 
 static cpumask_var_t c1e_mask;
-static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
@@ -585,12 +551,12 @@ static void c1e_idle(void)
                 u32 lo, hi;
 
                 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+
                 if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-                        c1e_detected = 1;
+                        c1e_detected = true;
                         if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                 mark_tsc_unstable("TSC halt in AMD C1E");
                         printk(KERN_INFO "System has AMD C1E enabled\n");
-                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                 }
         }
 
@@ -639,7 +605,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
                  */
                 printk(KERN_INFO "using mwait in idle threads.\n");
                 pm_idle = mwait_idle;
-        } else if (check_c1e_idle(c)) {
+        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
+                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                 printk(KERN_INFO "using C1E aware idle routine\n");
                 pm_idle = c1e_idle;
         } else
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 37e68fc5e24a..980149867a19 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -36,15 +36,14 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
 
         err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0],
                                sizeof(struct _fpx_sw_bytes));
-
         if (err)
-                return err;
+                return -EFAULT;
 
         /*
          * First Magic check failed.
          */
         if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1)
-                return -1;
+                return -EINVAL;
 
         /*
          * Check for error scenarios.
@@ -52,19 +51,21 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
         if (fx_sw_user->xstate_size < min_xstate_size ||
             fx_sw_user->xstate_size > xstate_size ||
             fx_sw_user->xstate_size > fx_sw_user->extended_size)
-                return -1;
+                return -EINVAL;
 
         err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
                                             fx_sw_user->extended_size -
                                             FP_XSTATE_MAGIC2_SIZE));
+        if (err)
+                return err;
         /*
          * Check for the presence of second magic word at the end of memory
          * layout. This detects the case where the user just copied the legacy
          * fpstate layout with out copying the extended state information
          * in the memory layout.
          */
-        if (err || magic2 != FP_XSTATE_MAGIC2)
-                return -1;
+        if (magic2 != FP_XSTATE_MAGIC2)
+                return -EFAULT;
 
         return 0;
 }