author	Anthony Liguori <aliguori@us.ibm.com>	2007-10-10 21:08:41 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:52:51 -0500
commit	4a4c99248713e878e1e2880015d01049aec805f3
tree	a847d398faafef4ccbd44de13cfe5d1040fd1df1 /drivers/kvm/mmu.c
parent	f67a46f4aa1212b38696ac6b6a82b4323cea61aa
KVM: MMU: More struct kvm_vcpu -> struct kvm cleanups
This time, the biggest change is gpa_to_hpa. Unlike GVA-to-GPA translation, translating a GPA to an HPA does not depend on VCPU state, so there is no need to pass in the kvm_vcpu.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
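[Editorial note, not part of the commit: to make the split concrete, a minimal sketch contrasting the two translation steps. It mirrors gva_to_hpa as it appears in the diff below, and assumes the gva_to_gpa callback hangs off vcpu->mmu, as elsewhere in this tree.]

static hpa_t sketch_gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
        /* GVA -> GPA walks this VCPU's current paging mode ... */
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        /* ... but GPA -> HPA is a per-VM memslot lookup, so after this
         * patch it takes the struct kvm directly. */
        return gpa_to_hpa(vcpu->kvm, gpa);
}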
Diffstat (limited to 'drivers/kvm/mmu.c')
 drivers/kvm/mmu.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index a5ca9457e929..d046ba807763 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -451,14 +451,14 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         }
 }
 
-static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
         struct kvm_rmap_desc *desc;
         unsigned long *rmapp;
         u64 *spte;
 
-        gfn = unalias_gfn(vcpu->kvm, gfn);
-        rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+        gfn = unalias_gfn(kvm, gfn);
+        rmapp = gfn_to_rmap(kvm, gfn);
 
         while (*rmapp) {
                 if (!(*rmapp & 1))
@@ -471,9 +471,9 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
                 BUG_ON(!(*spte & PT_PRESENT_MASK));
                 BUG_ON(!(*spte & PT_WRITABLE_MASK));
                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-                rmap_remove(vcpu->kvm, spte);
+                rmap_remove(kvm, spte);
                 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
-                kvm_flush_remote_tlbs(vcpu->kvm);
+                kvm_flush_remote_tlbs(kvm);
         }
 }
 
@@ -670,7 +670,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         hlist_add_head(&page->hash_link, bucket);
         vcpu->mmu.prefetch_page(vcpu, page);
         if (!metaphysical)
-                rmap_write_protect(vcpu, gfn);
+                rmap_write_protect(vcpu->kvm, gfn);
         return page;
 }
 
@@ -823,19 +823,19 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
         __set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
-        hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+        hpa_t hpa = gpa_to_hpa(kvm, gpa);
 
         return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
 }
 
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
         struct page *page;
 
         ASSERT((gpa & HPA_ERR_MASK) == 0);
-        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+        page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
         if (!page)
                 return gpa | HPA_ERR_MASK;
         return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
@@ -848,7 +848,7 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
 
         if (gpa == UNMAPPED_GVA)
                 return UNMAPPED_GVA;
-        return gpa_to_hpa(vcpu, gpa);
+        return gpa_to_hpa(vcpu->kvm, gpa);
 }
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
@@ -857,7 +857,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 
         if (gpa == UNMAPPED_GVA)
                 return NULL;
-        return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+        return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1012,7 +1012,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
         ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
 
-        paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+        paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
         if (is_error_hpa(paddr))
                 return 1;
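[Editorial note, not part of the commit: spelled out, the new gpa_to_hpa is a pure per-VM computation with no VCPU state anywhere. A hedged sketch; the final OR of the in-page offset continues past the hunk shown above, so that line is reconstructed here as an assumption.]

static hpa_t sketch_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
        /* per-VM memslot lookup of the guest frame number */
        struct page *page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);

        if (!page)
                return gpa | HPA_ERR_MASK;  /* error flagged in the high bits */
        /* host frame base, plus the offset within the page (assumed
         * continuation of the return statement truncated by the hunk) */
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE - 1));
}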