diff options
author | Vitaly Kuznetsov <vkuznets@redhat.com> | 2019-02-22 11:45:01 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-02-22 13:24:48 -0500 |
commit | ad7dc69aeb23138cc23c406cac25003b97e8ee17 (patch) | |
tree | d4f2fbec9ea5c23b6525488595705c588250a3bd | |
parent | 73de65f5b9de517d8d9251a755e91296dd521ca6 (diff) |
x86/kvm/mmu: fix switch between root and guest MMUs
Commit 14c07ad89f4d ("x86/kvm/mmu: introduce guest_mmu") brought one subtle
change: previously, when switching back from L2 to L1, we were resetting
MMU hooks (like mmu->get_cr3()) in kvm_init_mmu() called from
nested_vmx_load_cr3() and now we do that in nested_ept_uninit_mmu_context()
when we re-target vcpu->arch.mmu pointer.
The change itself looks logical: if nested_ept_init_mmu_context() changes
something, then nested_ept_uninit_mmu_context() restores it. There is,
however, one thing: the following call chain:
nested_vmx_load_cr3()
kvm_mmu_new_cr3()
__kvm_mmu_new_cr3()
fast_cr3_switch()
cached_root_available()
now happens with MMU hooks pointing to the new MMU (root MMU in our case)
while previously it was happening with the old one. cached_root_available()
tries to stash the current root, but it is incorrect to read the current CR3
with mmu->get_cr3(); we need to use old_mmu->get_cr3(), which, when we are
switching from L2 to L1, is guest_mmu. (BTW, in the shadow page tables case
this is a non-issue because we don't switch MMUs.)
While we could have tried to guess that we're switching between MMUs and
call the right ->get_cr3() from cached_root_available(), that seems overly
complicated. Instead, just stash the corresponding CR3 when setting
root_hpa and make cached_root_available() use the stashed value.
Fixes: 14c07ad89f4d ("x86/kvm/mmu: introduce guest_mmu")
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 1 | ||||
-rw-r--r-- | arch/x86/kvm/mmu.c | 17 |
2 files changed, 14 insertions, 4 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..593e17b7797e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -397,6 +397,7 @@ struct kvm_mmu { | |||
397 | void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | 397 | void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
398 | u64 *spte, const void *pte); | 398 | u64 *spte, const void *pte); |
399 | hpa_t root_hpa; | 399 | hpa_t root_hpa; |
400 | gpa_t root_cr3; | ||
400 | union kvm_mmu_role mmu_role; | 401 | union kvm_mmu_role mmu_role; |
401 | u8 root_level; | 402 | u8 root_level; |
402 | u8 shadow_root_level; | 403 | u8 shadow_root_level; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..6e62ed3852ac 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |||
3555 | &invalid_list); | 3555 | &invalid_list); |
3556 | mmu->root_hpa = INVALID_PAGE; | 3556 | mmu->root_hpa = INVALID_PAGE; |
3557 | } | 3557 | } |
3558 | mmu->root_cr3 = 0; | ||
3558 | } | 3559 | } |
3559 | 3560 | ||
3560 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); | 3561 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) | |||
3610 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); | 3611 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
3611 | } else | 3612 | } else |
3612 | BUG(); | 3613 | BUG(); |
3614 | vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); | ||
3613 | 3615 | ||
3614 | return 0; | 3616 | return 0; |
3615 | } | 3617 | } |
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3618 | { | 3620 | { |
3619 | struct kvm_mmu_page *sp; | 3621 | struct kvm_mmu_page *sp; |
3620 | u64 pdptr, pm_mask; | 3622 | u64 pdptr, pm_mask; |
3621 | gfn_t root_gfn; | 3623 | gfn_t root_gfn, root_cr3; |
3622 | int i; | 3624 | int i; |
3623 | 3625 | ||
3624 | root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; | 3626 | root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); |
3627 | root_gfn = root_cr3 >> PAGE_SHIFT; | ||
3625 | 3628 | ||
3626 | if (mmu_check_root(vcpu, root_gfn)) | 3629 | if (mmu_check_root(vcpu, root_gfn)) |
3627 | return 1; | 3630 | return 1; |
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3646 | ++sp->root_count; | 3649 | ++sp->root_count; |
3647 | spin_unlock(&vcpu->kvm->mmu_lock); | 3650 | spin_unlock(&vcpu->kvm->mmu_lock); |
3648 | vcpu->arch.mmu->root_hpa = root; | 3651 | vcpu->arch.mmu->root_hpa = root; |
3649 | return 0; | 3652 | goto set_root_cr3; |
3650 | } | 3653 | } |
3651 | 3654 | ||
3652 | /* | 3655 | /* |
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3712 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); | 3715 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); |
3713 | } | 3716 | } |
3714 | 3717 | ||
3718 | set_root_cr3: | ||
3719 | vcpu->arch.mmu->root_cr3 = root_cr3; | ||
3720 | |||
3715 | return 0; | 3721 | return 0; |
3716 | } | 3722 | } |
3717 | 3723 | ||
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, | |||
4163 | struct kvm_mmu_root_info root; | 4169 | struct kvm_mmu_root_info root; |
4164 | struct kvm_mmu *mmu = vcpu->arch.mmu; | 4170 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
4165 | 4171 | ||
4166 | root.cr3 = mmu->get_cr3(vcpu); | 4172 | root.cr3 = mmu->root_cr3; |
4167 | root.hpa = mmu->root_hpa; | 4173 | root.hpa = mmu->root_hpa; |
4168 | 4174 | ||
4169 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { | 4175 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, | |||
4176 | } | 4182 | } |
4177 | 4183 | ||
4178 | mmu->root_hpa = root.hpa; | 4184 | mmu->root_hpa = root.hpa; |
4185 | mmu->root_cr3 = root.cr3; | ||
4179 | 4186 | ||
4180 | return i < KVM_MMU_NUM_PREV_ROOTS; | 4187 | return i < KVM_MMU_NUM_PREV_ROOTS; |
4181 | } | 4188 | } |
@@ -5516,11 +5523,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) | |||
5516 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; | 5523 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; |
5517 | 5524 | ||
5518 | vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; | 5525 | vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; |
5526 | vcpu->arch.root_mmu.root_cr3 = 0; | ||
5519 | vcpu->arch.root_mmu.translate_gpa = translate_gpa; | 5527 | vcpu->arch.root_mmu.translate_gpa = translate_gpa; |
5520 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) | 5528 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
5521 | vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; | 5529 | vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
5522 | 5530 | ||
5523 | vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; | 5531 | vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; |
5532 | vcpu->arch.guest_mmu.root_cr3 = 0; | ||
5524 | vcpu->arch.guest_mmu.translate_gpa = translate_gpa; | 5533 | vcpu->arch.guest_mmu.translate_gpa = translate_gpa; |
5525 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) | 5534 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
5526 | vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; | 5535 | vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |