aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorDong, Eddie <eddie.dong@intel.com>2008-01-07 04:14:20 -0500
committerAvi Kivity <avi@qumranet.com>2008-04-27 04:53:13 -0400
commit489f1d6526ab68ca1842398fa3ae95c597fe3d32 (patch)
treece86e117d8bc74c410ad8168928c036497a93eb8 /arch/x86/kvm/mmu.c
parentc3bf9bc243092c53946fd6d8ebd6dc2f4e572d48 (diff)
KVM: MMU: Update shadow ptes on partial guest pte writes
A partial guest pte write will leave shadow_trap_nonpresent_pte in the spte, which generates a vmexit at the next guest access through that pte. This patch improves on this by reading the full guest pte in advance, and thus being able to update the spte and eliminate the vmexit.

This helps pae guests, which use two 32-bit writes to set a single 64-bit pte.

[truncation fix by Eric]

Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
Signed-off-by: Feng (Eric) Liu <eric.e.liu@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 23 +++++++++++++++--------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e55af12e11b7..28f9a44060cc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1329,8 +1329,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *sp,
 				  u64 *spte,
-				  const void *new, int bytes,
-				  int offset_in_pte)
+				  const void *new)
 {
 	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
@@ -1339,9 +1338,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (sp->role.glevels == PT32_ROOT_LEVEL)
-		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+		paging32_update_pte(vcpu, sp, spte, new);
 	else
-		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+		paging64_update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -1423,7 +1422,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
-	u64 entry;
+	u64 entry, gentry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
@@ -1433,6 +1432,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int level;
 	int flooded = 0;
 	int npte;
+	int r;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
@@ -1496,11 +1496,20 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			continue;
 		}
 		spte = &sp->spt[page_offset / sizeof(*spte)];
+		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+			gentry = 0;
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  gpa & ~(u64)(pte_size - 1),
+						  &gentry, pte_size);
+			new = (const void *)&gentry;
+			if (r < 0)
+				new = NULL;
+		}
 		while (npte--) {
 			entry = *spte;
 			mmu_pte_write_zap_pte(vcpu, sp, spte);
-			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
-					      page_offset & (pte_size - 1));
+			if (new)
+				mmu_pte_write_new_pte(vcpu, sp, spte, new);
 			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 			++spte;
 		}