about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-21 21:03:39 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-21 21:03:39 -0500
commitcb715a836642e0ec69350670d1c2f800f3e2d2e4 (patch)
treec4055162dee46d32a4318f18f1c7c70bc3f3f970 /arch
parent27ea6dfdc23e5e96e324d6cb3488528c14e4a7f7 (diff)
parent2e32b7190641a184b8510d3e342400473ff1ab60 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Peter Anvin: "This is a corrected attempt at the x86/cpu branch, this time with the fixes in that makes it not break on KVM (current or past), or any other virtualizer which traps on this configuration. Again, the biggest change here is enabling the WC+ memory type on AMD processors, if the BIOS doesn't." * 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86, kvm: Add MSR_AMD64_BU_CFG2 to the list of ignored MSRs x86, cpu, amd: Fix WC+ workaround for older virtual hosts x86, AMD: Enable WC+ memory type on family 10 processors x86, AMD: Clean up init_amd() x86/process: Change %8s to %s for pr_warn() in release_thread() x86/cpu/hotplug: Remove CONFIG_EXPERIMENTAL dependency
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/kernel/cpu/amd.c52
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kvm/x86.c16
5 files changed, 43 insertions, 32 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 63c0431daa3a..b44c0b50e569 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1722,7 +1722,7 @@ config HOTPLUG_CPU
1722config BOOTPARAM_HOTPLUG_CPU0 1722config BOOTPARAM_HOTPLUG_CPU0
1723 bool "Set default setting of cpu0_hotpluggable" 1723 bool "Set default setting of cpu0_hotpluggable"
1724 default n 1724 default n
1725 depends on HOTPLUG_CPU && EXPERIMENTAL 1725 depends on HOTPLUG_CPU
1726 ---help--- 1726 ---help---
1727 Set whether default state of cpu0_hotpluggable is on or off. 1727 Set whether default state of cpu0_hotpluggable is on or off.
1728 1728
@@ -1751,7 +1751,7 @@ config BOOTPARAM_HOTPLUG_CPU0
1751config DEBUG_HOTPLUG_CPU0 1751config DEBUG_HOTPLUG_CPU0
1752 def_bool n 1752 def_bool n
1753 prompt "Debug CPU0 hotplug" 1753 prompt "Debug CPU0 hotplug"
1754 depends on HOTPLUG_CPU && EXPERIMENTAL 1754 depends on HOTPLUG_CPU
1755 ---help--- 1755 ---help---
1756 Enabling this option offlines CPU0 (if CPU0 can be offlined) as 1756 Enabling this option offlines CPU0 (if CPU0 can be offlined) as
1757 soon as possible and boots up userspace with CPU0 offlined. User 1757 soon as possible and boots up userspace with CPU0 offlined. User
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index f26d2771846f..892ce40a7470 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -175,6 +175,7 @@
175#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 175#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
176#define MSR_AMD64_OSVW_STATUS 0xc0010141 176#define MSR_AMD64_OSVW_STATUS 0xc0010141
177#define MSR_AMD64_DC_CFG 0xc0011022 177#define MSR_AMD64_DC_CFG 0xc0011022
178#define MSR_AMD64_BU_CFG2 0xc001102a
178#define MSR_AMD64_IBSFETCHCTL 0xc0011030 179#define MSR_AMD64_IBSFETCHCTL 0xc0011030
179#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 180#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
180#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 181#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 782c456eaa01..84bee67141ad 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -518,10 +518,9 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
518static void __cpuinit init_amd(struct cpuinfo_x86 *c) 518static void __cpuinit init_amd(struct cpuinfo_x86 *c)
519{ 519{
520 u32 dummy; 520 u32 dummy;
521
522#ifdef CONFIG_SMP
523 unsigned long long value; 521 unsigned long long value;
524 522
523#ifdef CONFIG_SMP
525 /* 524 /*
526 * Disable TLB flush filter by setting HWCR.FFDIS on K8 525 * Disable TLB flush filter by setting HWCR.FFDIS on K8
527 * bit 6 of msr C001_0015 526 * bit 6 of msr C001_0015
@@ -559,12 +558,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
559 * (AMD Erratum #110, docId: 25759). 558 * (AMD Erratum #110, docId: 25759).
560 */ 559 */
561 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { 560 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
562 u64 val;
563
564 clear_cpu_cap(c, X86_FEATURE_LAHF_LM); 561 clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
565 if (!rdmsrl_amd_safe(0xc001100d, &val)) { 562 if (!rdmsrl_amd_safe(0xc001100d, &value)) {
566 val &= ~(1ULL << 32); 563 value &= ~(1ULL << 32);
567 wrmsrl_amd_safe(0xc001100d, val); 564 wrmsrl_amd_safe(0xc001100d, value);
568 } 565 }
569 } 566 }
570 567
@@ -617,13 +614,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
617 if ((c->x86 == 0x15) && 614 if ((c->x86 == 0x15) &&
618 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 615 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
619 !cpu_has(c, X86_FEATURE_TOPOEXT)) { 616 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
620 u64 val;
621 617
622 if (!rdmsrl_safe(0xc0011005, &val)) { 618 if (!rdmsrl_safe(0xc0011005, &value)) {
623 val |= 1ULL << 54; 619 value |= 1ULL << 54;
624 wrmsrl_safe(0xc0011005, val); 620 wrmsrl_safe(0xc0011005, value);
625 rdmsrl(0xc0011005, val); 621 rdmsrl(0xc0011005, value);
626 if (val & (1ULL << 54)) { 622 if (value & (1ULL << 54)) {
627 set_cpu_cap(c, X86_FEATURE_TOPOEXT); 623 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
628 printk(KERN_INFO FW_INFO "CPU: Re-enabling " 624 printk(KERN_INFO FW_INFO "CPU: Re-enabling "
629 "disabled Topology Extensions Support\n"); 625 "disabled Topology Extensions Support\n");
@@ -637,11 +633,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
637 */ 633 */
638 if ((c->x86 == 0x15) && 634 if ((c->x86 == 0x15) &&
639 (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { 635 (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
640 u64 val;
641 636
642 if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { 637 if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
643 val |= 0x1E; 638 value |= 0x1E;
644 wrmsrl_safe(0xc0011021, val); 639 wrmsrl_safe(0xc0011021, value);
645 } 640 }
646 } 641 }
647 642
@@ -703,13 +698,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
703 if (c->x86 > 0x11) 698 if (c->x86 > 0x11)
704 set_cpu_cap(c, X86_FEATURE_ARAT); 699 set_cpu_cap(c, X86_FEATURE_ARAT);
705 700
706 /*
707 * Disable GART TLB Walk Errors on Fam10h. We do this here
708 * because this is always needed when GART is enabled, even in a
709 * kernel which has no MCE support built in.
710 */
711 if (c->x86 == 0x10) { 701 if (c->x86 == 0x10) {
712 /* 702 /*
703 * Disable GART TLB Walk Errors on Fam10h. We do this here
704 * because this is always needed when GART is enabled, even in a
705 * kernel which has no MCE support built in.
713 * BIOS should disable GartTlbWlk Errors themself. If 706 * BIOS should disable GartTlbWlk Errors themself. If
714 * it doesn't do it here as suggested by the BKDG. 707 * it doesn't do it here as suggested by the BKDG.
715 * 708 *
@@ -723,6 +716,21 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
723 mask |= (1 << 10); 716 mask |= (1 << 10);
724 wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask); 717 wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
725 } 718 }
719
720 /*
721 * On family 10h BIOS may not have properly enabled WC+ support,
722 * causing it to be converted to CD memtype. This may result in
723 * performance degradation for certain nested-paging guests.
724 * Prevent this conversion by clearing bit 24 in
725 * MSR_AMD64_BU_CFG2.
726 *
727 * NOTE: we want to use the _safe accessors so as not to #GP kvm
728 * guests on older kvm hosts.
729 */
730
731 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
732 value &= ~(1ULL << 24);
733 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
726 } 734 }
727 735
728 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 736 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6e68a6194965..0f49677da51e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -117,7 +117,7 @@ void release_thread(struct task_struct *dead_task)
117{ 117{
118 if (dead_task->mm) { 118 if (dead_task->mm) {
119 if (dead_task->mm->context.size) { 119 if (dead_task->mm->context.size) {
120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n", 120 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
121 dead_task->comm, 121 dead_task->comm,
122 dead_task->mm->context.ldt, 122 dead_task->mm->context.ldt,
123 dead_task->mm->context.size); 123 dead_task->mm->context.size);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c243b81e3c74..37040079cd6b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1881,6 +1881,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1881 u64 data = msr_info->data; 1881 u64 data = msr_info->data;
1882 1882
1883 switch (msr) { 1883 switch (msr) {
1884 case MSR_AMD64_NB_CFG:
1885 case MSR_IA32_UCODE_REV:
1886 case MSR_IA32_UCODE_WRITE:
1887 case MSR_VM_HSAVE_PA:
1888 case MSR_AMD64_PATCH_LOADER:
1889 case MSR_AMD64_BU_CFG2:
1890 break;
1891
1884 case MSR_EFER: 1892 case MSR_EFER:
1885 return set_efer(vcpu, data); 1893 return set_efer(vcpu, data);
1886 case MSR_K7_HWCR: 1894 case MSR_K7_HWCR:
@@ -1900,8 +1908,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1900 return 1; 1908 return 1;
1901 } 1909 }
1902 break; 1910 break;
1903 case MSR_AMD64_NB_CFG:
1904 break;
1905 case MSR_IA32_DEBUGCTLMSR: 1911 case MSR_IA32_DEBUGCTLMSR:
1906 if (!data) { 1912 if (!data) {
1907 /* We support the non-activated case already */ 1913 /* We support the non-activated case already */
@@ -1914,11 +1920,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1914 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 1920 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1915 __func__, data); 1921 __func__, data);
1916 break; 1922 break;
1917 case MSR_IA32_UCODE_REV:
1918 case MSR_IA32_UCODE_WRITE:
1919 case MSR_VM_HSAVE_PA:
1920 case MSR_AMD64_PATCH_LOADER:
1921 break;
1922 case 0x200 ... 0x2ff: 1923 case 0x200 ... 0x2ff:
1923 return set_msr_mtrr(vcpu, msr, data); 1924 return set_msr_mtrr(vcpu, msr, data);
1924 case MSR_IA32_APICBASE: 1925 case MSR_IA32_APICBASE:
@@ -2253,6 +2254,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2253 case MSR_K8_INT_PENDING_MSG: 2254 case MSR_K8_INT_PENDING_MSG:
2254 case MSR_AMD64_NB_CFG: 2255 case MSR_AMD64_NB_CFG:
2255 case MSR_FAM10H_MMIO_CONF_BASE: 2256 case MSR_FAM10H_MMIO_CONF_BASE:
2257 case MSR_AMD64_BU_CFG2:
2256 data = 0; 2258 data = 0;
2257 break; 2259 break;
2258 case MSR_P6_PERFCTR0: 2260 case MSR_P6_PERFCTR0: