about summary refs log tree commit diff stats
path: root/drivers/kvm/mmu.c
diff options
context:
space:
mode:
author	Izik Eidus <avi@qumranet.com>	2007-10-18 05:09:33 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:52:54 -0500
commit	8a7ae055f3533b520401c170ac55e30628b34df5 (patch)
tree	f9654746dc92fa18ef66e49e12537dc6cb1d32e6 /drivers/kvm/mmu.c
parent	cea7bb21280e3a825e64b54740edc5d3e6e4193c (diff)
KVM: MMU: Partial swapping of guest memory
This allows guest memory to be swapped. Pages which are currently mapped via shadow page tables are pinned into memory, but all other pages can be freely swapped. The patch makes gfn_to_page() elevate the page's reference count, and introduces kvm_release_page() that pairs with it. Signed-off-by: Izik Eidus <izike@qumranet.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 2ad14fbdcfa0..5d7af4bde595 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -425,6 +425,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	if (!is_rmap_pte(*spte))
 		return;
 	page = page_header(__pa(spte));
+	kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
+			 PAGE_SHIFT));
 	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -911,6 +913,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 				 PT_USER_MASK;
 			if (!was_rmapped)
 				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+			else
+				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
 			return 0;
 		}
 
@@ -925,6 +929,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 						     1, 3, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
 				return -ENOMEM;
 			}
 
@@ -1039,8 +1044,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
 	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
-	if (is_error_hpa(paddr))
+	if (is_error_hpa(paddr)) {
+		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				 >> PAGE_SHIFT));
 		return 1;
+	}
 
 	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 }
@@ -1507,6 +1515,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 		} else {
 			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
 			hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+			struct page *page;
 
 			if (is_shadow_present_pte(ent)
 			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1519,6 +1528,9 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			    && !is_error_hpa(hpa))
 				printk(KERN_ERR "audit: (%s) notrap shadow,"
 				       " valid guest gva %lx\n", audit_msg, va);
+			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
+					   >> PAGE_SHIFT);
+			kvm_release_page(page);
 
 		}
 	}