author    Izik Eidus <avi@qumranet.com>  2007-10-18 05:09:33 -0400
committer Avi Kivity <avi@qumranet.com>  2008-01-30 10:52:54 -0500
commit    8a7ae055f3533b520401c170ac55e30628b34df5 (patch)
tree      f9654746dc92fa18ef66e49e12537dc6cb1d32e6 /drivers/kvm/paging_tmpl.h
parent    cea7bb21280e3a825e64b54740edc5d3e6e4193c (diff)
KVM: MMU: Partial swapping of guest memory
This allows guest memory to be swapped. Pages which are currently mapped
via shadow page tables are pinned into memory, but all other pages can be
freely swapped.

The patch makes gfn_to_page() elevate the page's reference count, and
introduces kvm_release_page() that pairs with it.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
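The pairing the message describes is the usual get/use/put refcounting
discipline. As a minimal sketch of a caller under the new rules (the helper
copy_guest_frame() is hypothetical and not part of the patch; gfn_to_page(),
kvm_release_page(), kmap_atomic() and kunmap_atomic() are the interfaces
actually used in the diff below):

	/* Hypothetical helper: copy one guest frame while it is pinned. */
	static void copy_guest_frame(struct kvm *kvm, gfn_t gfn, void *dst)
	{
		struct page *page;
		void *va;

		page = gfn_to_page(kvm, gfn);	/* takes a reference: the frame
						 * is pinned, cannot be swapped */
		va = kmap_atomic(page, KM_USER0);
		memcpy(dst, va, PAGE_SIZE);
		kunmap_atomic(va, KM_USER0);
		kvm_release_page(page);		/* drops the reference: the frame
						 * is swappable again */
	}

Every gfn_to_page() call in the diff below is rebalanced this way; only pages
that end up referenced by a shadow pte keep their extra reference and stay
pinned.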
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
-rw-r--r--  drivers/kvm/paging_tmpl.h  26
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 572e5b6d9a7a..0f0266af3f68 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -72,7 +72,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 			    struct kvm_vcpu *vcpu, gva_t addr,
 			    int write_fault, int user_fault, int fetch_fault)
 {
-	struct page *page;
+	struct page *page = NULL;
 	pt_element_t *table;
 	pt_element_t pte;
 	gfn_t table_gfn;
@@ -149,6 +149,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
 		walker->inherited_ar &= pte;
 		--walker->level;
+		kvm_release_page(page);
 	}
 
 	if (write_fault && !is_dirty_pte(pte)) {
@@ -162,6 +163,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
 	}
 
+	kvm_release_page(page);
 	walker->pte = pte;
 	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
 	return 1;
@@ -180,6 +182,8 @@ err:
 		walker->error_code |= PFERR_USER_MASK;
 	if (fetch_fault)
 		walker->error_code |= PFERR_FETCH_MASK;
+	if (page)
+		kvm_release_page(page);
 	return 0;
 }
 
@@ -223,6 +227,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 	if (is_error_hpa(paddr)) {
 		set_shadow_pte(shadow_pte,
 			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+					     >> PAGE_SHIFT));
 		return;
 	}
 
@@ -260,9 +266,20 @@ unshadowed:
 	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
 	set_shadow_pte(shadow_pte, spte);
 	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
-	if (!was_rmapped)
+	if (!was_rmapped) {
 		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
 			 >> PAGE_SHIFT);
+		if (!is_rmap_pte(*shadow_pte)) {
+			struct page *page;
+
+			page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+					   >> PAGE_SHIFT);
+			kvm_release_page(page);
+		}
+	}
+	else
+		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				 >> PAGE_SHIFT));
 	if (!ptwrite || !*ptwrite)
 		vcpu->last_pte_updated = shadow_pte;
 }
@@ -486,19 +503,22 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 {
 	int i;
 	pt_element_t *gpt;
+	struct page *page;
 
 	if (sp->role.metaphysical || PTTYPE == 32) {
 		nonpaging_prefetch_page(vcpu, sp);
 		return;
 	}
 
-	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
+	page = gfn_to_page(vcpu->kvm, sp->gfn);
+	gpt = kmap_atomic(page, KM_USER0);
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 		if (is_present_pte(gpt[i]))
 			sp->spt[i] = shadow_trap_nonpresent_pte;
 		else
 			sp->spt[i] = shadow_notrap_nonpresent_pte;
 	kunmap_atomic(gpt, KM_USER0);
+	kvm_release_page(page);
 }
 
 #undef pt_element_t
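One detail worth noting in the walk_addr hunks above: the walker can fail
before any frame has been pinned, so the patch initializes page to NULL and
has the err: path release it only conditionally. A simplified sketch of that
shape (frame_is_valid() and frame_is_usable() are made-up stand-ins for the
walker's real permission and presence checks):

	static int pin_and_check(struct kvm *kvm, gfn_t gfn)
	{
		struct page *page = NULL;	/* NULL until a frame is pinned */

		if (!frame_is_valid(gfn))	/* hypothetical early-exit check */
			goto err;
		page = gfn_to_page(kvm, gfn);	/* reference taken; must be
						 * released on every later path */
		if (!frame_is_usable(page))	/* hypothetical later check */
			goto err;
		kvm_release_page(page);		/* success path: unpin */
		return 1;
	err:
		if (page)			/* failure may precede the pin */
			kvm_release_page(page);
		return 0;
	}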