author    Joerg Roedel <joerg.roedel@amd.com>       2010-12-07 11:15:06 -0500
committer Avi Kivity <avi@redhat.com>               2011-01-12 04:30:49 -0500
commit    81dd35d42c9aef3c1f7ae6ce4cf6a0d382661db5 (patch)
tree      ac263e921a56954c8e72668e1bcf5afb82a24bb7 /arch/x86/kvm
parent    d4dbf470096c51cb4785167ea59fdbdea87ccbe4 (diff)
KVM: SVM: Add xsetbv intercept
This patch adds the xsetbv intercept to the AMD part of KVM. This makes AVX usable in a safe way for the guest on AVX-capable AMD hardware.

The patch was tested by using AVX in the guest and host in parallel and checking for data corruption. I also ran the KVM xsave unit tests and they all pass.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
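For context, here is a minimal guest-side sketch (not part of this patch) of the operation the new intercept handles: the guest programs XCR0 through XSETBV to turn on AVX state, with ECX selecting the extended control register and EDX:EAX carrying the 64-bit value. The bit layout is architectural; the helper names below are illustrative only.

	#include <stdint.h>

	#define XCR0_X87 (1ULL << 0)	/* x87 state, must always be set */
	#define XCR0_SSE (1ULL << 1)	/* SSE (XMM) state */
	#define XCR0_AVX (1ULL << 2)	/* AVX (YMM) state, requires SSE */

	static inline void xsetbv(uint32_t index, uint64_t value)
	{
		uint32_t eax = (uint32_t)value;
		uint32_t edx = (uint32_t)(value >> 32);

		/* ECX selects the XCR, EDX:EAX holds the 64-bit value */
		asm volatile("xsetbv" : : "c" (index), "a" (eax), "d" (edx));
	}

	void enable_avx_state(void)
	{
		/* Assumes CR4.OSXSAVE is set and CPUID reports XSAVE and AVX */
		xsetbv(0, XCR0_X87 | XCR0_SSE | XCR0_AVX);
	}

With this patch applied, that XSETBV causes a #VMEXIT with exit code SVM_EXIT_XSETBV instead of executing unintercepted, so KVM can validate the value before it reaches hardware.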
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4dbc37204de1..73461b1cfb05 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -935,6 +935,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	set_intercept(svm, INTERCEPT_WBINVD);
 	set_intercept(svm, INTERCEPT_MONITOR);
 	set_intercept(svm, INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_XSETBV);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -2546,6 +2547,19 @@ static int skinit_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+		skip_emulated_instruction(&svm->vcpu);
+	}
+
+	return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
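Note that the handler only advances next_rip by 3 (the length of the XSETBV opcode, 0f 01 d1) and skips the instruction when kvm_set_xcr() accepts the value; on failure the guest RIP is left pointing at the XSETBV, so any injected fault hits the right instruction. As a rough illustration of the kind of validation such a write needs, here is a simplified architecture-level sketch. It is not the kernel's kvm_set_xcr() (x86.c remains authoritative), and the host_xcr0 parameter is an assumed stand-in for the state bits the host can actually save.

	#include <stdint.h>

	#define XCR0_X87 (1ULL << 0)
	#define XCR0_SSE (1ULL << 1)
	#define XCR0_AVX (1ULL << 2)

	/* Return 0 if the write may be applied, nonzero if it must fault (#GP). */
	int xcr0_write_is_invalid(uint32_t index, uint64_t new_xcr0,
				  uint64_t host_xcr0)
	{
		if (index != 0)				/* only XCR0 is defined */
			return 1;
		if (!(new_xcr0 & XCR0_X87))		/* bit 0 must always be set */
			return 1;
		if ((new_xcr0 & XCR0_AVX) && !(new_xcr0 & XCR0_SSE))
			return 1;			/* AVX state requires SSE state */
		if (new_xcr0 & ~host_xcr0)		/* no bits the host cannot save */
			return 1;
		return 0;
	}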
@@ -2971,6 +2985,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_WBINVD]			= emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
 	[SVM_EXIT_MWAIT]			= invalid_op_interception,
+	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
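For orientation, a sketch (assumed, not copied from svm.c) of how a table like svm_exit_handlers[] is consumed: the #VMEXIT code taken from the VMCB indexes directly into the array, so registering xsetbv_interception at SVM_EXIT_XSETBV is all that is needed to route the new exit.

	static int dispatch_exit(struct vcpu_svm *svm, u32 exit_code)
	{
		/* Unknown or unhandled exit codes fall through to an error path */
		if (exit_code >= ARRAY_SIZE(svm_exit_handlers) ||
		    !svm_exit_handlers[exit_code])
			return 0;	/* report the unexpected exit to userspace */

		/* SVM_EXIT_XSETBV now resolves to xsetbv_interception() */
		return svm_exit_handlers[exit_code](svm);
	}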
@@ -3624,10 +3639,6 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
-	case 0x00000001:
-		/* Mask out xsave bit as long as it is not supported by SVM */
-		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
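Dropping the leaf-1 masking above is what lets an SVM guest see the XSAVE feature in CPUID at all, now that the intercept makes it safe to expose. A guest would typically gate its use of XSETBV and AVX on those CPUID bits; a minimal sketch using GCC's <cpuid.h> follows (the bit positions are architectural, the helper itself is illustrative).

	#include <cpuid.h>
	#include <stdbool.h>

	bool guest_can_use_avx(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return false;

		/* CPUID.1:ECX - bit 26 XSAVE, bit 27 OSXSAVE, bit 28 AVX */
		return (ecx & (1u << 26)) && (ecx & (1u << 27)) &&
		       (ecx & (1u << 28));
	}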
@@ -3701,6 +3712,7 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ SVM_EXIT_WBINVD,			"wbinvd" },
 	{ SVM_EXIT_MONITOR,			"monitor" },
 	{ SVM_EXIT_MWAIT,			"mwait" },
+	{ SVM_EXIT_XSETBV,			"xsetbv" },
 	{ SVM_EXIT_NPF,				"npf" },
 	{ -1, NULL }
 };