author    Avi Kivity <avi@qumranet.com>  2007-05-01 09:53:31 -0400
committer Avi Kivity <avi@qumranet.com>  2007-07-16 05:05:39 -0400
commit    0028425f647b6b78a0de8810d6b782fc3ce6c272
tree      8afece215ec531f993c28cc3dee17b0c2c1dd3b1 /drivers/kvm
parent    fce0657ff9f14f6b1f147b5fcd6db2f54c06424e
KVM: Update shadow pte on write to guest pte
A typical demand page/copy on write pattern is:

 - page fault on vaddr
 - kvm propagates fault to guest
 - guest handles fault, updates pte
 - kvm traps write, clears shadow pte, resumes guest
 - guest returns to userspace, re-faults on same vaddr
 - kvm installs shadow pte, resumes guest
 - guest continues

So, three vmexits for a single guest page fault.  But if instead of clearing
the page table entry, we update it to correspond to the value that the guest
has just written, we eliminate the third vmexit.

This patch does exactly that, reducing kbuild time by about 10%.

Signed-off-by: Avi Kivity <avi@qumranet.com>
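For orientation only, here is a minimal standalone C model of the last two steps
above: the trapped guest pte write and the guest's next access to the same
vaddr.  It is not part of the patch, and the helper names trap_guest_pte_write
and guest_access are hypothetical.  With a zap-only policy the next access
misses in the shadow page table and takes the third vmexit; if the write trap
also pre-installs the value the guest wrote, that vmexit disappears.

/*
 * Standalone model (hypothetical, not kernel code): counts the vmexits
 * taken after the guest writes a pte, with and without updating the
 * shadow pte in the write trap.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)   /* x86 pte present bit */
#define PT_ACCESSED_MASK (1ULL << 5)   /* x86 pte accessed bit */

static uint64_t guest_pte;   /* the value the guest wrote */
static uint64_t shadow_pte;  /* the mapping the hardware actually walks */
static int vmexits;

/* kvm traps the guest's write to its page table (one vmexit). */
static void trap_guest_pte_write(uint64_t new_pte, bool update_in_place)
{
        ++vmexits;
        guest_pte = new_pte;
        shadow_pte = 0;                  /* zap: the old translation is stale */
        if (update_in_place &&
            (new_pte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) ==
                       (PT_PRESENT_MASK | PT_ACCESSED_MASK))
                shadow_pte = new_pte;    /* pre-install the new mapping */
}

/* The guest touches the vaddr; a missing shadow pte costs another vmexit. */
static void guest_access(void)
{
        if (!(shadow_pte & PT_PRESENT_MASK)) {
                ++vmexits;               /* shadow page fault */
                shadow_pte = guest_pte;  /* kvm installs the mapping */
        }
}

int main(void)
{
        uint64_t pte = 0x1000 | PT_PRESENT_MASK | PT_ACCESSED_MASK;

        vmexits = 0;
        trap_guest_pte_write(pte, false);
        guest_access();
        printf("zap only:     %d vmexits after the guest pte write\n", vmexits);

        vmexits = 0;
        trap_guest_pte_write(pte, true);
        guest_access();
        printf("zap + update: %d vmexits after the guest pte write\n", vmexits);
        return 0;
}

The present+accessed guard in the model mirrors the check in FNAME(update_pte)
below: a guest pte that is not yet marked present and accessed has to be
faulted on anyway, so zapping alone is enough in that case.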
Diffstat (limited to 'drivers/kvm')
 -rw-r--r--  drivers/kvm/mmu.c           15
 -rw-r--r--  drivers/kvm/paging_tmpl.h   15
 2 files changed, 30 insertions, 0 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 23dc4612026b..9ec3df90dbb8 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1137,6 +1137,20 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
         *spte = 0;
 }
 
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+                                  struct kvm_mmu_page *page,
+                                  u64 *spte,
+                                  const void *new, int bytes)
+{
+        if (page->role.level != PT_PAGE_TABLE_LEVEL)
+                return;
+
+        if (page->role.glevels == PT32_ROOT_LEVEL)
+                paging32_update_pte(vcpu, page, spte, new, bytes);
+        else
+                paging64_update_pte(vcpu, page, spte, new, bytes);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const u8 *old, const u8 *new, int bytes)
 {
@@ -1212,6 +1226,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 spte += page_offset / sizeof(*spte);
                 while (npte--) {
                         mmu_pte_write_zap_pte(vcpu, page, spte);
+                        mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
                         ++spte;
                 }
         }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index bc64cceec039..10ba0a80ce59 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -202,6 +202,21 @@ static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
                          guest_pte & PT_DIRTY_MASK, access_bits, gfn);
 }
 
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+                              u64 *spte, const void *pte, int bytes)
+{
+        pt_element_t gpte;
+
+        if (bytes < sizeof(pt_element_t))
+                return;
+        gpte = *(const pt_element_t *)pte;
+        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+                return;
+        pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+        FNAME(set_pte)(vcpu, gpte, spte, 6,
+                       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+}
+
 static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
                            u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {