aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-02-24 12:59:18 -0500
committerAvi Kivity <avi@redhat.com>2010-04-25 06:53:23 -0400
commit7f5d8b5600b5294137886b46bf00ef811d0fdf32 (patch)
tree8eb3d48b169c0c642014eeff3255f785cfa300ab
parentb44ea385d8cb187e04ec8d901d4c320c8b07c40b (diff)
KVM: SVM: Handle nested selective_cr0 intercept correctly
If we have the following situation with nested svm: 1. Host KVM intercepts cr0 writes 2. Guest hypervisor intercepts only selective cr0 writes Then we get a cr0 write intercept which is handled on the host. But that intercept may actually be a selective cr0 intercept for the guest. This patch checks for this condition and injects a selective cr0 intercept if needed. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/x86/kvm/svm.c21
1 file changed, 21 insertions, 0 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b4aac5c7ad87..631d2e544491 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1043,6 +1043,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1043{ 1043{
1044 struct vcpu_svm *svm = to_svm(vcpu); 1044 struct vcpu_svm *svm = to_svm(vcpu);
1045 1045
1046 if (is_nested(svm)) {
1047 /*
1048 * We are here because we run in nested mode, the host kvm
1049 * intercepts cr0 writes but the l1 hypervisor does not.
1050 * But the L1 hypervisor may intercept selective cr0 writes.
1051 * This needs to be checked here.
1052 */
1053 unsigned long old, new;
1054
1055 /* Remove bits that would trigger a real cr0 write intercept */
1056 old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
1057 new = cr0 & SVM_CR0_SELECTIVE_MASK;
1058
1059 if (old == new) {
1060 /* cr0 write with ts and mp unchanged */
1061 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
1062 if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
1063 return;
1064 }
1065 }
1066
1046#ifdef CONFIG_X86_64 1067#ifdef CONFIG_X86_64
1047 if (vcpu->arch.efer & EFER_LME) { 1068 if (vcpu->arch.efer & EFER_LME) {
1048 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 1069 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {