author    Anthony Liguori <aliguori@us.ibm.com>  2008-04-02 15:46:56 -0400
committer Avi Kivity <avi@qumranet.com>  2008-04-27 05:01:15 -0400
commit    35149e2129fe34fc8cb5917e1ecf5156b0fa3415 (patch)
tree      b67cb16fa6054769ee476fce99a32601b126af10 /arch/x86/kvm/paging_tmpl.h
parent    fdae862f91728aec6dd8fd62cd2398868c906b6b (diff)
KVM: MMU: Don't assume struct page for x86
This patch introduces a gfn_to_pfn() function and corresponding functions like
kvm_release_pfn_dirty().  Using these new functions, we can modify the x86
MMU to no longer assume that it can always get a struct page for any given gfn.

We don't want to eliminate gfn_to_page() entirely because a number of places
assume they can do gfn_to_page() and then kmap() the results.  When we support
IO memory, gfn_to_page() will fail for IO pages although gfn_to_pfn() will
succeed.

This does not implement support for avoiding reference counting for reserved
RAM or for IO memory.  However, it should make those things pretty
straightforward.

Since we're only introducing new common symbols, I don't think it will break
the non-x86 architectures, but I haven't tested those.  I've tested Intel,
AMD, NPT, and hugetlbfs with Windows and Linux guests.

[avi: fix overflow when shifting left pfns by adding casts]

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
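For illustration only (this sketch is not part of the commit): the caller
pattern the new API implies, built from the helpers this patch introduces or
uses (gfn_to_pfn(), is_error_pfn(), kvm_get_pfn(), kvm_release_pfn_clean(),
kvm_release_pfn_dirty()).  The wrapper name example_map_gfn() is hypothetical.

	/*
	 * Hypothetical wrapper, sketching the pfn-based caller pattern.
	 */
	static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		pfn_t pfn;

		/* Resolve the guest frame number directly to a host pfn. */
		pfn = gfn_to_pfn(vcpu->kvm, gfn);

		/* An error pfn replaces the old is_error_page()/NULL checks. */
		if (is_error_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			return 1;	/* e.g. treat as mmio, as page_fault() does */
		}

		/* ... install the translation, e.g. mmu_set_spte(..., pfn, ...) ... */

		/*
		 * Drop the reference through the pfn helpers rather than
		 * put_page(); use kvm_release_pfn_dirty() if the page was written.
		 */
		kvm_release_pfn_clean(pfn);
		return 0;
	}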
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 57d872aec663..156fe10288ae 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -247,7 +247,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 {
 	pt_element_t gpte;
 	unsigned pte_access;
-	struct page *npage;
+	pfn_t pfn;
 	int largepage = vcpu->arch.update_pte.largepage;
 
 	gpte = *(const pt_element_t *)pte;
@@ -260,13 +260,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
 		return;
-	npage = vcpu->arch.update_pte.page;
-	if (!npage)
+	pfn = vcpu->arch.update_pte.pfn;
+	if (is_error_pfn(pfn))
 		return;
-	get_page(npage);
+	kvm_get_pfn(pfn);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
-		     npage, true);
+		     pfn, true);
 }
 
 /*
@@ -275,7 +275,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *walker,
 			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, struct page *page)
+			 int *ptwrite, pfn_t pfn)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -336,7 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 						  walker->pte_gpa[level - 2],
 						  &curr_pte, sizeof(curr_pte));
 			if (r || curr_pte != walker->ptes[level - 2]) {
-				kvm_release_page_clean(page);
+				kvm_release_pfn_clean(pfn);
 				return NULL;
 			}
 		}
@@ -349,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, largepage, walker->gfn, page, false);
+		     ptwrite, largepage, walker->gfn, pfn, false);
 
 	return shadow_ent;
 }
@@ -378,7 +378,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	u64 *shadow_pte;
 	int write_pt = 0;
 	int r;
-	struct page *page;
+	pfn_t pfn;
 	int largepage = 0;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -413,20 +413,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 			largepage = 1;
 		}
 	}
-	page = gfn_to_page(vcpu->kvm, walker.gfn);
+	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
 	up_read(&current->mm->mmap_sem);
 
 	/* mmio */
-	if (is_error_page(page)) {
+	if (is_error_pfn(pfn)) {
 		pgprintk("gfn %x is mmio\n", walker.gfn);
-		kvm_release_page_clean(page);
+		kvm_release_pfn_clean(pfn);
 		return 1;
 	}
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  largepage, &write_pt, page);
+				  largepage, &write_pt, pfn);
 
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 shadow_pte, *shadow_pte, write_pt);