 arch/x86/kvm/mmu.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c438224cca34..b7192236dcba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2597,8 +2597,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 
-	if (pte_access & ACC_WRITE_MASK)
+	if (pte_access & ACC_WRITE_MASK) {
 		mark_page_dirty(vcpu->kvm, gfn);
+		spte |= shadow_dirty_mask;
+	}
 
 set_pte:
 	if (mmu_spte_update(sptep, spte))
@@ -2914,6 +2916,18 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 */
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
 
+	/*
+	 * Theoretically we could also set dirty bit (and flush TLB) here in
+	 * order to eliminate unnecessary PML logging. See comments in
+	 * set_spte. But fast_page_fault is very unlikely to happen with PML
+	 * enabled, so we do not do this. This might result in the same GPA
+	 * to be logged in PML buffer again when the write really happens, and
+	 * eventually to be called by mark_page_dirty twice. But it's also no
+	 * harm. This also avoids the TLB flush needed after setting dirty bit
+	 * so non-PML cases won't be impacted.
+	 *
+	 * Compare with set_spte where instead shadow_dirty_mask is set.
+	 */
 	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
 		mark_page_dirty(vcpu->kvm, gfn);
 
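
On the code surrounding the second hunk: the fast page fault path repairs a write-protected spte without taking mmu_lock, so it may only call mark_page_dirty() if it is the one that actually flipped the spte. The cmpxchg64() against the previously read value guarantees that; if the spte changed concurrently (e.g. it was zapped), the fast path backs off. Below is a minimal sketch of that compare-and-swap discipline using C11 atomics in place of the kernel's cmpxchg64(); fix_spte() and the spte values are hypothetical, though PT_WRITABLE_MASK matches the kernel's bit 1.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ull << 1)

/* Model of fast_pf_fix_direct_spte()'s lockless commit: set the
 * writable bit only if the spte still holds the value we inspected,
 * so a concurrent zap or update makes us back off instead of
 * dirtying a page we no longer map. */
static bool fix_spte(_Atomic uint64_t *sptep, uint64_t expected)
{
	uint64_t want = expected | PT_WRITABLE_MASK;

	if (atomic_compare_exchange_strong(sptep, &expected, want)) {
		printf("spte committed; mark_page_dirty() would run here\n");
		return true;
	}
	/* spte changed under us; fall back to the slow path */
	return false;
}

int main(void)
{
	_Atomic uint64_t spte = 0x1000;   /* pretend read-only spte */
	fix_spte(&spte, 0x1000);          /* succeeds, page dirtied  */
	fix_spte(&spte, 0x1000);          /* stale snapshot, backs off */
	return 0;
}

Note how this makes the comment's trade-off concrete: the fast path deliberately sets only PT_WRITABLE_MASK, not the dirty bit, so it never needs the TLB flush that a dirty-bit update would require.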