author		Avi Kivity <avi@qumranet.com>	2007-11-20 19:06:21 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:53:10 -0500
commit		79539cec0c3c38d35a1e3e5310d2c562ae6e82b8 (patch)
tree		7bcb93c2a3a26a1541a77f108915e43479dc3ebd /drivers/kvm/mmu.c
parent		0f74a24c59b814c1c8085251cbea48d339f0c7c6 (diff)
KVM: MMU: Avoid unnecessary remote tlb flushes when guest updates a pte
If all we're doing is increasing permissions on a pte (typical for demand paging), then there's no need to flush remote tlbs. Worst case they'll get a spurious page fault.

Signed-off-by: Avi Kivity <avi@qumranet.com>
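For illustration only (not part of the patch): a minimal stand-alone sketch of the permission-delta check that the new need_remote_flush() helper below implements. The mask values are the conventional x86 PTE bit positions, spelled out here as assumptions so the sketch compiles outside the kernel, and a plain present-bit test stands in for is_shadow_present_pte(). Only a write that revokes a permission or retargets the page frame forces a remote flush; a pure permission upgrade does not.

/*
 * Illustrative sketch, not kernel code: mirrors the need_remote_flush()
 * logic added by this patch, with the masks defined locally.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK		(1ULL << 0)
#define PT_WRITABLE_MASK	(1ULL << 1)
#define PT_USER_MASK		(1ULL << 2)
#define PT64_NX_MASK		(1ULL << 63)
#define PT64_BASE_ADDR_MASK	(((1ULL << 52) - 1) & ~0xfffULL)
#define PT64_PERM_MASK		(PT_PRESENT_MASK | PT_WRITABLE_MASK | \
				 PT_USER_MASK | PT64_NX_MASK)

/*
 * A remote flush is needed only when the update can revoke access:
 * the old pte was present and the new one drops a permission bit or
 * points at a different frame.  Pure upgrades fall through to "false"
 * and at worst cause a spurious, self-correcting page fault.
 */
static bool need_remote_flush(uint64_t old, uint64_t new)
{
	if (!(old & PT_PRESENT_MASK))
		return false;			/* nothing could be cached */
	if (!(new & PT_PRESENT_MASK))
		return true;			/* mapping torn down */
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;			/* different page frame */
	/* Invert NX so that a set bit uniformly means "access allowed". */
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;	/* any right removed? */
}

int main(void)
{
	uint64_t ro = 0x1000 | PT_PRESENT_MASK | PT_USER_MASK;
	uint64_t rw = ro | PT_WRITABLE_MASK;

	/* Upgrading read-only -> read-write: no remote flush required. */
	printf("ro -> rw: %d\n", need_remote_flush(ro, rw));	/* 0 */
	/* Downgrading read-write -> read-only: remote flush required. */
	printf("rw -> ro: %d\n", need_remote_flush(rw, ro));	/* 1 */
	return 0;
}

Compiled with a stock C compiler, the two checks print 0 for the read-only to read-write upgrade and 1 for the downgrade, which is exactly the distinction the patch relies on.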
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 101cd5377a89..281dd5f9310c 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -134,6 +134,8 @@ static int dbg = 1;
 #define PT32_DIR_BASE_ADDR_MASK \
 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
 
+#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
+		| PT64_NX_MASK)
 
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
@@ -1227,7 +1229,6 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 		}
 	}
 	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
-	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
@@ -1250,6 +1251,27 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				    offset_in_pte);
 }
 
+static bool need_remote_flush(u64 old, u64 new)
+{
+	if (!is_shadow_present_pte(old))
+		return false;
+	if (!is_shadow_present_pte(new))
+		return true;
+	if ((old ^ new) & PT64_BASE_ADDR_MASK)
+		return true;
+	old ^= PT64_NX_MASK;
+	new ^= PT64_NX_MASK;
+	return (old & ~new & PT64_PERM_MASK) != 0;
+}
+
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+{
+	if (need_remote_flush(old, new))
+		kvm_flush_remote_tlbs(vcpu->kvm);
+	else
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->last_pte_updated;
@@ -1265,6 +1287,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
+	u64 entry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
@@ -1335,9 +1358,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 		spte = &page->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
+			entry = *spte;
 			mmu_pte_write_zap_pte(vcpu, page, spte);
 			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
 					      page_offset & (pte_size - 1));
+			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 			++spte;
 		}
 	}
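A note on the caller change in the last hunk: the old value has to be captured before mmu_pte_write_zap_pte() runs, because zapping writes the nonpresent trap value, so comparing *spte afterwards would make every update look like a teardown and always force a remote flush. A rough sketch of that ordering, with the kvm calls replaced by hypothetical extern stubs (zap_pte, write_new_pte, flush_tlb are placeholders, not real kernel APIs) so it compiles stand-alone:

#include <stdint.h>

extern void zap_pte(uint64_t *spte);			/* stands in for mmu_pte_write_zap_pte() */
extern void write_new_pte(uint64_t *spte);		/* stands in for mmu_pte_write_new_pte() */
extern void flush_tlb(uint64_t old, uint64_t new);	/* stands in for mmu_pte_write_flush_tlb() */

void update_sptes(uint64_t *spte, int npte)
{
	uint64_t entry;

	while (npte--) {
		/* Snapshot the pte before it is zapped ... */
		entry = *spte;
		zap_pte(spte);
		write_new_pte(spte);
		/* ... so old and new values can be compared meaningfully. */
		flush_tlb(entry, *spte);
		++spte;
	}
}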