 arch/x86/kvm/mmu.c | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fa3486d64078..dd20b199a7c0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1593,6 +1593,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 		spte |= PT_WRITABLE_MASK;
 
+		/*
+		 * Optimization: for pte sync, if spte was writable the hash
+		 * lookup is unnecessary (and expensive). Write protection
+		 * is responsibility of mmu_get_page / kvm_sync_page.
+		 * Same reasoning can be applied to dirty page accounting.
+		 */
+		if (!can_unsync && is_writeble_pte(*shadow_pte))
+			goto set_pte;
+
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
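
The comment in the hunk carries the reasoning: once a shadow pte has been made writable, mmu_get_page / kvm_sync_page already guarantee the gfn is not shadowed by a read-only page, so repeating the shadow-page hash lookup on every pte sync is wasted work. The user-space sketch below illustrates the same early-out pattern in isolation; it is an assumption-laden simplification, not the kernel code. mmu_need_write_protect() is reduced to a hypothetical stub standing in for the expensive hash walk, the set_spte() signature is trimmed, and is_writeble_pte() keeps the kernel helper's historical spelling.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)

/* Hypothetical stub standing in for KVM's shadow-page hash walk. */
static bool mmu_need_write_protect(uint64_t gfn, bool can_unsync)
{
	(void)can_unsync;
	printf("hash lookup for gfn 0x%llx\n", (unsigned long long)gfn);
	return false;
}

/* Mirrors the kernel helper (including its historical spelling). */
static bool is_writeble_pte(uint64_t pte)
{
	return pte & PT_WRITABLE_MASK;
}

/* Heavily simplified set_spte(): installs a writable spte for gfn. */
static void set_spte(uint64_t *shadow_pte, uint64_t gfn, bool can_unsync)
{
	uint64_t spte = PT_WRITABLE_MASK;

	/*
	 * Fast path, as in the patch: if the existing spte is already
	 * writable and the caller cannot unsync anyway, the gfn was
	 * checked when it first became writable, so skip the hash walk.
	 */
	if (!can_unsync && is_writeble_pte(*shadow_pte))
		goto set_pte;

	if (mmu_need_write_protect(gfn, can_unsync))
		spte &= ~PT_WRITABLE_MASK;

set_pte:
	*shadow_pte = spte;
}

int main(void)
{
	uint64_t pte = 0;

	set_spte(&pte, 0x1234, false);	/* slow path: does the lookup */
	set_spte(&pte, 0x1234, false);	/* fast path: lookup skipped  */
	return 0;
}

Running the sketch prints the "hash lookup" line once: the second sync of the same, already-writable pte takes the goto and never reaches the lookup, which is exactly the cost the patch avoids on the pte sync path.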