author      Izik Eidus <izike@localhost.localdomain>    2007-11-20 04:49:33 -0500
committer   Avi Kivity <avi@qumranet.com>               2008-01-30 10:53:09 -0500
commit      b4231d61807cac8d9d257eb6979c1685fa9a171f (patch)
tree        36b2064c0df4a2b2e11c5d9a2221c097bb0c7be9 /drivers/kvm/mmu.c
parent      2065b3727ecdb64450597d70f7e13af00b85dbd8 (diff)
KVM: MMU: Selectively set PageDirty when releasing guest memory
Improve dirty bit setting for pages that kvm releases: until now, every page we released was marked dirty; from now on, only pages that could potentially have been dirtied are marked dirty.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
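The policy the patch applies at each release site reduces to the sketch below; release_spte_page() is a hypothetical helper name used here for illustration only (the patch open-codes the test in rmap_remove()), while is_writeble_pte(), kvm_release_page_dirty() and kvm_release_page_clean() are the helpers the patch actually uses:

    /* Illustrative sketch, not part of the patch: a writable spte is
     * the only way the guest could have dirtied the page, so release
     * it dirty; any other mapping can be released clean. */
    static void release_spte_page(u64 spte)
    {
            struct page *page = pfn_to_page((spte & PT64_BASE_ADDR_MASK)
                                            >> PAGE_SHIFT);

            if (is_writeble_pte(spte))
                    kvm_release_page_dirty(page);
            else
                    kvm_release_page_clean(page);
    }

Releasing read-only pages clean avoids needlessly marking pages dirty in the core mm when the guest could not have modified them.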
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--   drivers/kvm/mmu.c   23
1 file changed, 15 insertions, 8 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8add4d5c6840..4624f3789b9a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         struct kvm_rmap_desc *desc;
         struct kvm_rmap_desc *prev_desc;
         struct kvm_mmu_page *page;
+        struct page *release_page;
         unsigned long *rmapp;
         int i;
 
         if (!is_rmap_pte(*spte))
                 return;
         page = page_header(__pa(spte));
-        kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-                         PAGE_SHIFT));
+        release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+        if (is_writeble_pte(*spte))
+                kvm_release_page_dirty(release_page);
+        else
+                kvm_release_page_clean(release_page);
         rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
         if (!*rmapp) {
                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
         int level = PT32E_ROOT_LEVEL;
         hpa_t table_addr = vcpu->mmu.root_hpa;
+        struct page *page;
 
+        page = pfn_to_page(p >> PAGE_SHIFT);
         for (; ; level--) {
                 u32 index = PT64_INDEX(v, level);
                 u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                 pte = table[index];
                 was_rmapped = is_rmap_pte(pte);
                 if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-                        kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                        kvm_release_page_clean(page);
                         return 0;
                 }
                 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                         if (!was_rmapped)
                                 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                         else
-                                kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                                kvm_release_page_clean(page);
+
                         return 0;
                 }
 
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                                      1, 3, &table[index]);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
-                                kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                                kvm_release_page_clean(page);
                                 return -ENOMEM;
                         }
 
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
         paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
         if (is_error_hpa(paddr)) {
-                kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                 >> PAGE_SHIFT));
+                kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                       >> PAGE_SHIFT));
                 return 1;
         }
 
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1580 " valid guest gva %lx\n", audit_msg, va); 1587 " valid guest gva %lx\n", audit_msg, va);
1581 page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK) 1588 page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
1582 >> PAGE_SHIFT); 1589 >> PAGE_SHIFT);
1583 kvm_release_page(page); 1590 kvm_release_page_clean(page);
1584 1591
1585 } 1592 }
1586 } 1593 }