aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/mmu.c
diff options
context:
space:
mode:
author	Anthony Liguori <aliguori@us.ibm.com>	2007-10-10 20:25:50 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:52:51 -0500
commitf67a46f4aa1212b38696ac6b6a82b4323cea61aa (patch)
tree7d9b86dda5a4b4196f0cbd11ef395d7e1c78fbb4 /drivers/kvm/mmu.c
parent043405e10001fe7aae60c46a57189515f13a6468 (diff)
KVM: MMU: Clean up MMU functions to take struct kvm when appropriate
Some of the MMU functions take a struct kvm_vcpu even though they affect all VCPUs. This patch cleans up some of them to instead take a struct kvm. This makes things a bit more clear. The main thing that was confusing me was whether certain functions need to be called on all VCPUs.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ece0aa4e4c9f..a5ca9457e929 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -606,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 		BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
 						gfn_t gfn)
 {
 	unsigned index;
@@ -616,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -782,7 +782,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
 	struct hlist_head *bucket;
@@ -793,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, page);
+			kvm_mmu_zap_page(kvm, page);
 			r = 1;
 		}
 	return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *page;
 
-	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(kvm, page);
 	}
 }
 
@@ -1299,7 +1299,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 
-	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)