diff options
-rw-r--r-- | arch/x86/kvm/svm.c | 21 |
1 files changed, 21 insertions, 0 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b4aac5c7ad87..631d2e544491 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1043,6 +1043,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1043 | { | 1043 | { |
1044 | struct vcpu_svm *svm = to_svm(vcpu); | 1044 | struct vcpu_svm *svm = to_svm(vcpu); |
1045 | 1045 | ||
1046 | if (is_nested(svm)) { | ||
1047 | /* | ||
1048 | * We are here because we run in nested mode, the host kvm | ||
1049 | * intercepts cr0 writes but the l1 hypervisor does not. | ||
1050 | * But the L1 hypervisor may intercept selective cr0 writes. | ||
1051 | * This needs to be checked here. | ||
1052 | */ | ||
1053 | unsigned long old, new; | ||
1054 | |||
1055 | /* Remove bits that would trigger a real cr0 write intercept */ | ||
1056 | old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK; | ||
1057 | new = cr0 & SVM_CR0_SELECTIVE_MASK; | ||
1058 | |||
1059 | if (old == new) { | ||
1060 | /* cr0 write with ts and mp unchanged */ | ||
1061 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; | ||
1062 | if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) | ||
1063 | return; | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1046 | #ifdef CONFIG_X86_64 | 1067 | #ifdef CONFIG_X86_64 |
1047 | if (vcpu->arch.efer & EFER_LME) { | 1068 | if (vcpu->arch.efer & EFER_LME) { |
1048 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { | 1069 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |