author		Boris Ostrovsky <boris.ostrovsky@amd.com>	2012-01-09 14:00:35 -0500
committer	Avi Kivity <avi@redhat.com>			2012-03-05 07:52:21 -0500
commit		2b036c6b861dc5da295c6fe19a3edcff7093fdeb (patch)
tree		6c329a86190f3d071946864643fee0d851387985
parent		4a58ae614a28b1ae3bea1c74a307cdfb7c77dab8 (diff)
KVM: SVM: Add support for AMD's OSVW feature in guests
In some cases guests should not provide workarounds for errata even when
the physical processor is affected. For example, because of erratum 400
on family 10h processors a Linux guest will read an MSR (resulting in
VMEXIT) before going to idle in order to avoid getting stuck in a non-C0
state. This is not necessary: HLT and IO instructions are intercepted
and therefore there is no reason for the erratum 400 workaround in the
guest.

This patch allows us to present a guest with certain errata as fixed,
regardless of the state of the actual hardware.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
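For context, the guest-side logic this patch short-circuits looks roughly
like the sketch below. This is a simplified model, not the kernel's actual
erratum-detection helper, and rdmsr_u64() is an assumed MSR-read primitive.
OSVW ID 1 corresponds to erratum 400, so once the hypervisor reports that
status bit as clear the guest never installs the idle-path MSR read (and
its resulting VMEXIT) in the first place:

#include <linux/types.h>

#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
#define MSR_AMD64_OSVW_STATUS		0xc0010141

extern u64 rdmsr_u64(u32 msr);	/* assumed MSR-read primitive */

static bool guest_needs_e400_workaround(void)
{
	/* Status bit 1 is only valid if the reported length covers it. */
	if (rdmsr_u64(MSR_AMD64_OSVW_ID_LENGTH) <= 1)
		return true;	/* no OSVW data: assume affected */
	return rdmsr_u64(MSR_AMD64_OSVW_STATUS) & (1ULL << 1);
}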
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  6 ++++++
-rw-r--r--  arch/x86/kvm/cpuid.c            |  2 +-
-rw-r--r--  arch/x86/kvm/cpuid.h            |  8 ++++++++
-rw-r--r--  arch/x86/kvm/svm.c              | 59 ++++++++++
-rw-r--r--  arch/x86/kvm/x86.c              | 20 ++++++++++
5 files changed, 94 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52d6640a5ca1..bd69c93da8fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -478,6 +478,12 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 	} apf;
+
+	/* OSVW MSRs (AMD only) */
+	struct {
+		u64 length;
+		u64 status;
+	} osvw;
 };
 
 struct kvm_arch {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 89b02bfaaca5..9fed5bedaad6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -236,7 +236,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	const u32 kvm_supported_word6_x86_features =
 		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
 		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
 		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
 	/* cpuid 0xC0000001.edx */
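Note that adding F(OSVW) here only makes the bit visible through
KVM_GET_SUPPORTED_CPUID; the guest sees it (and the MSR accesses added
below succeed) only after userspace copies it into the vCPU's CPUID via
KVM_SET_CPUID2. A minimal sketch of testing for the bit in a kvm_cpuid2
buffer — OSVW is ECX bit 9 of leaf 0x80000001, and the helper name is
made up for illustration:

#include <linux/kvm.h>

/* Hypothetical helper: does this CPUID table advertise OSVW? */
static int cpuid2_has_osvw(const struct kvm_cpuid2 *cpuid)
{
	__u32 i;

	for (i = 0; i < cpuid->nent; i++)
		if (cpuid->entries[i].function == 0x80000001)
			return (cpuid->entries[i].ecx >> 9) & 1;
	return 0;
}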
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 5b97e1797a6d..26d1fb437eb5 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -43,4 +43,12 @@ static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
 }
 
+static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+	return best && (best->ecx & bit(X86_FEATURE_OSVW));
+}
+
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..fce3ba0f2079 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -110,6 +110,12 @@ struct nested_state {
 #define MSRPM_OFFSETS 16
 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 
+/*
+ * Set osvw_len to higher value when updated Revision Guides
+ * are published and we know what the new status bits are
+ */
+static uint64_t osvw_len = 4, osvw_status;
+
 struct vcpu_svm {
 	struct kvm_vcpu vcpu;
 	struct vmcb *vmcb;
@@ -556,6 +562,27 @@ static void svm_init_erratum_383(void)
 	erratum_383_found = true;
 }
 
+static void svm_init_osvw(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Guests should see errata 400 and 415 as fixed (assuming that
+	 * HLT and IO instructions are intercepted).
+	 */
+	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
+	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
+
+	/*
+	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
+	 * all osvw.status bits inside that length, including bit 0 (which is
+	 * reserved for erratum 298), are valid. However, if host processor's
+	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
+	 * be conservative here and therefore we tell the guest that erratum 298
+	 * is present (because we really don't know).
+	 */
+	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
+		vcpu->arch.osvw.status |= 1;
+}
+
 static int has_svm(void)
 {
 	const char *msg;
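To make the masking in svm_init_osvw() concrete: OSVW status bit 0 is
erratum 298, bit 1 is erratum 400 and bit 2 is erratum 415, so ~(6ULL)
clears exactly the two errata whose triggers (HLT and IO) are
intercepted. A worked example, assuming a host that reports all three
low bits set:

u64 host_status = 0x7;			/* errata 298, 400, 415 present */
u64 guest_status = host_status & ~(6ULL);	/* == 0x1: only 298 remains */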
@@ -620,6 +647,36 @@ static int svm_hardware_enable(void *garbage)
 		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
 	}
 
+
+	/*
+	 * Get OSVW bits.
+	 *
+	 * Note that it is possible to have a system with mixed processor
+	 * revisions and therefore different OSVW bits. If bits are not the same
+	 * on different processors then choose the worst case (i.e. if erratum
+	 * is present on one processor and not on another then assume that the
+	 * erratum is present everywhere).
+	 */
+	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
+		uint64_t len, status = 0;
+		int err;
+
+		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+		if (!err)
+			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
+						      &err);
+
+		if (err)
+			osvw_status = osvw_len = 0;
+		else {
+			if (len < osvw_len)
+				osvw_len = len;
+			osvw_status |= status;
+			osvw_status &= (1ULL << osvw_len) - 1;
+		}
+	} else
+		osvw_status = osvw_len = 0;
+
 	svm_init_erratum_383();
 
 	return 0;
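svm_hardware_enable() runs on each CPU as it is brought online, so on a
mixed-revision system the globals converge on the worst case across all
processors. A worked example with two hypothetical CPUs:

/*
 * Start:	osvw_len = 4, osvw_status = 0
 * CPU0 reports len = 4, status = 0x2:
 *		osvw_len = 4, osvw_status = 0x2 & 0xf         = 0x2
 * CPU1 reports len = 3, status = 0x4:
 *		osvw_len = 3, osvw_status = (0x2 | 0x4) & 0x7 = 0x6
 *
 * Every erratum seen on any CPU is claimed for all of them, and only
 * status bits covered by the shortest reported length are kept.
 */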
@@ -1186,6 +1243,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
+	svm_init_osvw(&svm->vcpu);
+
 	return &svm->vcpu;
 
 free_page4:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a3ce196d21fe..2bd77a3a41ed 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1675,6 +1675,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		 */
 		pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
 		break;
+	case MSR_AMD64_OSVW_ID_LENGTH:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		vcpu->arch.osvw.length = data;
+		break;
+	case MSR_AMD64_OSVW_STATUS:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		vcpu->arch.osvw.status = data;
+		break;
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
@@ -1959,6 +1969,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		 */
 		data = 0xbe702111;
 		break;
+	case MSR_AMD64_OSVW_ID_LENGTH:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		data = vcpu->arch.osvw.length;
+		break;
+	case MSR_AMD64_OSVW_STATUS:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		data = vcpu->arch.osvw.status;
+		break;
 	default:
 		if (kvm_pmu_msr(vcpu, msr))
 			return kvm_pmu_get_msr(vcpu, msr, pdata);
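Because the new cases sit in kvm_set_msr_common()/kvm_get_msr_common(),
the emulated MSRs are also reachable from userspace. A hedged sketch of
reading them back through KVM_GET_MSRS, assuming vcpu_fd is a vCPU whose
CPUID (set earlier via KVM_SET_CPUID2) advertises OSVW; if it does not,
guest_cpuid_has_osvw() fails, the read returns 1, and the ioctl reports
fewer than two MSRs processed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
#define MSR_AMD64_OSVW_STATUS		0xc0010141

static int read_osvw(int vcpu_fd, __u64 *len, __u64 *status)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[2];
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 2;
	msrs.entries[0].index = MSR_AMD64_OSVW_ID_LENGTH;
	msrs.entries[1].index = MSR_AMD64_OSVW_STATUS;

	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 2)
		return -1;	/* at least one read was refused */

	*len = msrs.entries[0].data;
	*status = msrs.entries[1].data;
	return 0;
}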