aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2008-11-25 09:58:07 -0500
committerAvi Kivity <avi@redhat.com>2008-12-31 09:55:02 -0500
commitecc5589f19a52e7e6501fe449047b19087ae11bb (patch)
tree2b5a0273e2ce67953d8c32d5a60475aa91907815 /arch/x86
parent5319c662522db8995ff9276ba9d80549c64b294a (diff)
KVM: MMU: optimize set_spte for page sync
The write protect verification in set_spte is unnecessary for page sync. It's guaranteed that, if the unsync spte was writable, the target page does not have a write-protected shadow (if it had, the spte would have been write protected under mmu_lock by rmap_write_protect before). The same reasoning applies to mark_page_dirty: the gfn has been marked as dirty via the pagefault path. The cost of hash table and memslot lookups is quite significant if the workload is pagetable-write intensive, resulting in increased mmu_lock contention. Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/mmu.c9
1 files changed, 9 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fa3486d64078..dd20b199a7c0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1593,6 +1593,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1593 1593
1594 spte |= PT_WRITABLE_MASK; 1594 spte |= PT_WRITABLE_MASK;
1595 1595
1596 /*
1597 * Optimization: for pte sync, if spte was writable the hash
1598 * lookup is unnecessary (and expensive). Write protection
1599 * is responsibility of mmu_get_page / kvm_sync_page.
1600 * Same reasoning can be applied to dirty page accounting.
1601 */
1602 if (!can_unsync && is_writeble_pte(*shadow_pte))
1603 goto set_pte;
1604
1596 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 1605 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1597 pgprintk("%s: found shadow page for %lx, marking ro\n", 1606 pgprintk("%s: found shadow page for %lx, marking ro\n",
1598 __func__, gfn); 1607 __func__, gfn);