author      Izik Eidus <izike@localhost.localdomain>  2007-11-20 04:49:33 -0500
committer   Avi Kivity <avi@qumranet.com>  2008-01-30 10:53:09 -0500
commit      b4231d61807cac8d9d257eb6979c1685fa9a171f (patch)
tree        36b2064c0df4a2b2e11c5d9a2221c097bb0c7be9 /drivers
parent      2065b3727ecdb64450597d70f7e13af00b85dbd8 (diff)
KVM: MMU: Selectively set PageDirty when releasing guest memory
Improve dirty-bit setting for pages that KVM releases: until now, every released page was marked dirty; from now on, only pages that could potentially have become dirty are marked dirty.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
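The patch splits the old kvm_release_page() into kvm_release_page_clean() and kvm_release_page_dirty(); each call site picks one based on whether the mapping could have let the guest write the page. A minimal sketch of that call-site pattern, modeled on the rmap_remove() change below (the helper name release_pte_page() is illustrative and not part of the patch):

/*
 * Illustrative sketch only, not part of the patch: release the guest page
 * backing a shadow PTE, marking it dirty only if the PTE was writable and
 * the guest could therefore have written to it.
 */
static void release_pte_page(u64 spte)
{
	struct page *page;

	page = pfn_to_page((spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (is_writeble_pte(spte))
		kvm_release_page_dirty(page);	/* may carry guest writes */
	else
		kvm_release_page_clean(page);	/* read-only: just put_page() */
}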
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/kvm/kvm.h           3
-rw-r--r--   drivers/kvm/kvm_main.c     12
-rw-r--r--   drivers/kvm/mmu.c          23
-rw-r--r--   drivers/kvm/paging_tmpl.h  12
-rw-r--r--   drivers/kvm/x86.c           2
5 files changed, 33 insertions, 19 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 52e80183e050..c2acd74389fa 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -393,7 +393,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			      int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4e1bd9488470..729573b844e5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -543,13 +543,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_page(struct page *page)
+void kvm_release_page_clean(struct page *page)
+{
+	put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
 {
 	if (!PageReserved(page))
 		SetPageDirty(page);
 	put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1055,7 +1061,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 	/* current->mm->mmap_sem is already held so call lockless version */
 	page = __gfn_to_page(kvm, pgoff);
 	if (is_error_page(page)) {
-		kvm_release_page(page);
+		kvm_release_page_clean(page);
 		return NOPAGE_SIGBUS;
 	}
 	if (type != NULL)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8add4d5c6840..4624f3789b9a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *page;
+	struct page *release_page;
 	unsigned long *rmapp;
 	int i;
 
 	if (!is_rmap_pte(*spte))
 		return;
 	page = page_header(__pa(spte));
-	kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-		     PAGE_SHIFT));
+	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+	if (is_writeble_pte(*spte))
+		kvm_release_page_dirty(release_page);
+	else
+		kvm_release_page_clean(release_page);
 	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
 	int level = PT32E_ROOT_LEVEL;
 	hpa_t table_addr = vcpu->mmu.root_hpa;
+	struct page *page;
 
+	page = pfn_to_page(p >> PAGE_SHIFT);
 	for (; ; level--) {
 		u32 index = PT64_INDEX(v, level);
 		u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 		pte = table[index];
 		was_rmapped = is_rmap_pte(pte);
 		if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-			kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+			kvm_release_page_clean(page);
 			return 0;
 		}
 		mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 			if (!was_rmapped)
 				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
 			else
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
+
 			return 0;
 		}
 
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 						     1, 3, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
 				return -ENOMEM;
 			}
 
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
 	if (is_error_hpa(paddr)) {
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 		return 1;
 	}
 
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1580 " valid guest gva %lx\n", audit_msg, va); 1587 " valid guest gva %lx\n", audit_msg, va);
1581 page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK) 1588 page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
1582 >> PAGE_SHIFT); 1589 >> PAGE_SHIFT);
1583 kvm_release_page(page); 1590 kvm_release_page_clean(page);
1584 1591
1585 } 1592 }
1586 } 1593 }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 77a2b22492bf..bf15d127a48f 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -212,8 +212,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 	if (is_error_hpa(paddr)) {
 		set_shadow_pte(shadow_pte,
 			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 		return;
 	}
 
@@ -259,12 +259,12 @@ unshadowed:
 
 			page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
 					   >> PAGE_SHIFT);
-			kvm_release_page(page);
+			kvm_release_page_clean(page);
 		}
 	}
 	else
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 	if (!ptwrite || !*ptwrite)
 		vcpu->last_pte_updated = shadow_pte;
 }
@@ -503,7 +503,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		else
 			sp->spt[i] = shadow_notrap_nonpresent_pte;
 	kunmap_atomic(gpt, KM_USER0);
-	kvm_release_page(page);
+	kvm_release_page_clean(page);
 }
 
 #undef pt_element_t
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 5a1b72fbaeaa..6212984a2e6c 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1472,7 +1472,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
 		if (vcpu->pio.guest_pages[i]) {
-			kvm_release_page(vcpu->pio.guest_pages[i]);
+			kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
 			vcpu->pio.guest_pages[i] = NULL;
 		}
 }