aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/mmu.c
diff options
context:
space:
mode:
authorIzik Eidus <izike@qumranet.com>2007-10-16 08:43:46 -0400
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:52:54 -0500
commit9647c14c98687d0abf5197e74b9d1448ab6ebb95 (patch)
tree6bd61ace880d9428c38ebe45858793d689ca4f4b /drivers/kvm/mmu.c
parent98348e9507ace5fda95432ff8ca23f13e7f66176 (diff)
KVM: MMU: Keep a reverse mapping of non-writable translations
The current kvm mmu only reverse maps writable translations. This is used to write-protect a page in case it becomes a pagetable. But with swapping support, we need a reverse mapping of read-only pages as well: when we evict a page, we need to remove any mapping to it, whether writable or not. Signed-off-by: Izik Eidus <izike@qumranet.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--drivers/kvm/mmu.c23
1 files changed, 11 insertions, 12 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 14e54e331f50..bbf5eb427dc6 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -211,8 +211,8 @@ static int is_io_pte(unsigned long pte)
211 211
212static int is_rmap_pte(u64 pte) 212static int is_rmap_pte(u64 pte)
213{ 213{
214 return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK)) 214 return pte != shadow_trap_nonpresent_pte
215 == (PT_WRITABLE_MASK | PT_PRESENT_MASK); 215 && pte != shadow_notrap_nonpresent_pte;
216} 216}
217 217
218static void set_shadow_pte(u64 *sptep, u64 spte) 218static void set_shadow_pte(u64 *sptep, u64 spte)
@@ -488,7 +488,6 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
488{ 488{
489 unsigned long *rmapp; 489 unsigned long *rmapp;
490 u64 *spte; 490 u64 *spte;
491 u64 *prev_spte;
492 491
493 gfn = unalias_gfn(kvm, gfn); 492 gfn = unalias_gfn(kvm, gfn);
494 rmapp = gfn_to_rmap(kvm, gfn); 493 rmapp = gfn_to_rmap(kvm, gfn);
@@ -497,13 +496,11 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
497 while (spte) { 496 while (spte) {
498 BUG_ON(!spte); 497 BUG_ON(!spte);
499 BUG_ON(!(*spte & PT_PRESENT_MASK)); 498 BUG_ON(!(*spte & PT_PRESENT_MASK));
500 BUG_ON(!(*spte & PT_WRITABLE_MASK));
501 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 499 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
502 prev_spte = spte; 500 if (is_writeble_pte(*spte))
503 spte = rmap_next(kvm, rmapp, spte); 501 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
504 rmap_remove(kvm, prev_spte);
505 set_shadow_pte(prev_spte, *prev_spte & ~PT_WRITABLE_MASK);
506 kvm_flush_remote_tlbs(kvm); 502 kvm_flush_remote_tlbs(kvm);
503 spte = rmap_next(kvm, rmapp, spte);
507 } 504 }
508} 505}
509 506
@@ -908,14 +905,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
908 table = __va(table_addr); 905 table = __va(table_addr);
909 906
910 if (level == 1) { 907 if (level == 1) {
908 int was_rmapped;
909
911 pte = table[index]; 910 pte = table[index];
911 was_rmapped = is_rmap_pte(pte);
912 if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) 912 if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
913 return 0; 913 return 0;
914 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT); 914 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
915 page_header_update_slot(vcpu->kvm, table, v); 915 page_header_update_slot(vcpu->kvm, table, v);
916 table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK | 916 table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
917 PT_USER_MASK; 917 PT_USER_MASK;
918 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT); 918 if (!was_rmapped)
919 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
919 return 0; 920 return 0;
920 } 921 }
921 922
@@ -1424,10 +1425,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1424 pt = page->spt; 1425 pt = page->spt;
1425 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 1426 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1426 /* avoid RMW */ 1427 /* avoid RMW */
1427 if (pt[i] & PT_WRITABLE_MASK) { 1428 if (pt[i] & PT_WRITABLE_MASK)
1428 rmap_remove(kvm, &pt[i]);
1429 pt[i] &= ~PT_WRITABLE_MASK; 1429 pt[i] &= ~PT_WRITABLE_MASK;
1430 }
1431 } 1430 }
1432} 1431}
1433 1432