about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKai Huang <kai.huang@linux.intel.com>2015-01-27 21:54:25 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2015-01-29 09:31:33 -0500
commit9b51a63024bd759f97a12f50907b8af23b065b36 (patch)
tree08cc77c6722f2be119b4b73538ddd7b23ebe9672
parentf4b4b1808690c37c7c703d43789c1988c5e7fdeb (diff)
KVM: MMU: Explicitly set D-bit for writable spte.
This patch avoids unnecessary dirty GPA logging to PML buffer in EPT violation path by setting D-bit manually prior to the occurrence of the write from guest. We only set D-bit manually in set_spte, and leave fast_page_fault path unchanged, as fast_page_fault is very unlikely to happen in case of PML. For the hva <-> pa change case, the spte is updated to either read-only (host pte is read-only) or be dropped (host pte is writable), and both cases will be handled by above changes, therefore no change is necessary. Signed-off-by: Kai Huang <kai.huang@linux.intel.com> Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/mmu.c16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c438224cca34..b7192236dcba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2597,8 +2597,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2597 } 2597 }
2598 } 2598 }
2599 2599
2600 if (pte_access & ACC_WRITE_MASK) 2600 if (pte_access & ACC_WRITE_MASK) {
2601 mark_page_dirty(vcpu->kvm, gfn); 2601 mark_page_dirty(vcpu->kvm, gfn);
2602 spte |= shadow_dirty_mask;
2603 }
2602 2604
2603set_pte: 2605set_pte:
2604 if (mmu_spte_update(sptep, spte)) 2606 if (mmu_spte_update(sptep, spte))
@@ -2914,6 +2916,18 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2914 */ 2916 */
2915 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); 2917 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
2916 2918
2919 /*
2920 * Theoretically we could also set dirty bit (and flush TLB) here in
2921 * order to eliminate unnecessary PML logging. See comments in
2922 * set_spte. But fast_page_fault is very unlikely to happen with PML
2923 * enabled, so we do not do this. This might result in the same GPA
2924 * to be logged in PML buffer again when the write really happens, and
2925 * eventually to be called by mark_page_dirty twice. But it's also no
2926 * harm. This also avoids the TLB flush needed after setting dirty bit
2927 * so non-PML cases won't be impacted.
2928 *
2929 * Compare with set_spte where instead shadow_dirty_mask is set.
2930 */
2917 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) 2931 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
2918 mark_page_dirty(vcpu->kvm, gfn); 2932 mark_page_dirty(vcpu->kvm, gfn);
2919 2933