author     Avi Kivity <avi@redhat.com>  2010-12-12 12:30:55 -0500
committer  Avi Kivity <avi@redhat.com>  2011-01-12 04:30:51 -0500
commit     d3c422bd33388e6fe6777bde0e9bd20152133083 (patch)
tree       a71812ddeb38943b7feb8a6f72c5540176d2c923 /arch/x86/kvm
parent     5c663a1534d27d817e17eed06a83d08f497f9f4f (diff)
KVM: MMU: Fix incorrect direct page write protection due to ro host page
If KVM sees a read-only host page, it will map it as read-only to prevent
breaking a COW. However, if the page was part of a large guest page, KVM
incorrectly extends the write protection to the entire large page frame
instead of limiting it to the normal host page.
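In other words, the write-protect decision belongs at the granularity of the
individual host page, not the large page frame. A minimal sketch of the
intended behaviour, in plain C with illustrative names (only ACC_WRITE_MASK
mirrors a real KVM access bit; the helper is hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define ACC_WRITE_MASK (1u << 1)  /* stands in for KVM's write access bit */

    /*
     * Toy helper, not KVM code: access bits for one 4K spte inside a large
     * guest page. Only the spte backed by the read-only host page should
     * lose ACC_WRITE_MASK; the other 4K pages in the same large frame keep
     * their write access.
     */
    static unsigned spte_access(unsigned guest_access, bool host_page_writable)
    {
            unsigned access = guest_access;

            if (!host_page_writable)
                    access &= ~ACC_WRITE_MASK;  /* protect this page only */
            return access;
    }

    int main(void)
    {
            /* A COW/KSM-shared page loses write access; its neighbour does not. */
            printf("shared page: %#x\n", spte_access(ACC_WRITE_MASK, false));
            printf("normal page: %#x\n", spte_access(ACC_WRITE_MASK, true));
            return 0;
    }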
Extending the protection this way results in the instantiation of a new
shadow page with read-only access.
If this happens for a MOVS instruction that moves memory between two
normal pages that lie within a single large page frame and are mapped in
the guest as a large page, and if, in addition, the source operand is not
writable in the host (perhaps due to KSM), then KVM will instantiate a
read-only direct shadow page, instantiate an spte for the source operand,
then instantiate a new read/write direct shadow page and instantiate an
spte for the destination operand. Since these two sptes are in different
shadow pages, MOVS will never see both at the same time and the guest
will not make progress.
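The livelock can be modelled with a toy simulation (illustrative only; the
enum and the two flags stand in for the linked direct shadow page and the
sptes it contains, not for any real KVM structures):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Toy model of the bug, not KVM code: a single PDE slot links either a
     * read-only or a read/write direct shadow page. Relinking the slot to
     * one shadow page drops the sptes held by the other, so the two MOVS
     * operands are never mapped at the same time.
     */
    enum sp_kind { SP_NONE, SP_RO, SP_RW };

    int main(void)
    {
            enum sp_kind linked = SP_NONE;
            bool src_mapped = false, dst_mapped = false;

            for (int fault = 0; fault < 4; fault++) {
                    if (!src_mapped) {
                            /* Read fault on the KSM-shared source page
                             * instantiates the read-only shadow page. */
                            linked = SP_RO;
                            src_mapped = true;
                            dst_mapped = false;  /* dst spte lived in SP_RW */
                    } else {
                            /* Write fault on the destination page
                             * instantiates the read/write shadow page. */
                            linked = SP_RW;
                            dst_mapped = true;
                            src_mapped = false;  /* src spte lived in SP_RO */
                    }
                    printf("fault %d: sp=%s src=%d dst=%d\n", fault,
                           linked == SP_RO ? "ro" : "rw", src_mapped, dst_mapped);
            }
            /* MOVS needs the source readable and the destination writable
             * at the same time, so the guest makes no forward progress. */
            return 0;
    }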
Fix by mapping the direct shadow page read/write and write-protecting
only the spte that covers the read-only host page.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 146b681e6ab0..5ca9426389b5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -511,6 +511,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		link_shadow_page(it.sptep, sp);
 	}
 
+	if (!map_writable)
+		access &= ~ACC_WRITE_MASK;
+
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
 		     gw->gfn, pfn, prefault, map_writable);
@@ -593,9 +596,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
 
-	if (!map_writable)
-		walker.pte_access &= ~ACC_WRITE_MASK;
-
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;