author     Anthony Liguori <aliguori@us.ibm.com>   2007-10-10 20:25:50 -0400
committer  Avi Kivity <avi@qumranet.com>           2008-01-30 10:52:51 -0500
commit     f67a46f4aa1212b38696ac6b6a82b4323cea61aa (patch)
tree       7d9b86dda5a4b4196f0cbd11ef395d7e1c78fbb4 /drivers
parent     043405e10001fe7aae60c46a57189515f13a6468 (diff)
KVM: MMU: Clean up MMU functions to take struct kvm when appropriate
Some of the MMU functions take a struct kvm_vcpu even though they affect all
VCPUs. This patch cleans up some of them to take a struct kvm instead, which
makes the code a bit clearer.

The main source of confusion was whether certain functions needed to be
called on every VCPU or only once per VM.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
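
The convention the patch establishes is easy to show in miniature: a helper
that touches only VM-wide state takes struct kvm directly, and per-VCPU entry
points narrow to the VM by passing vcpu->kvm. The standalone C sketch below
illustrates that calling convention; the struct layouts and function names
are simplified stand-ins for this illustration, not the real KVM definitions.

/*
 * Minimal standalone sketch of the calling convention; these structs
 * are simplified stand-ins for illustration, not the real KVM types.
 */
#include <stdio.h>

struct kvm {
        int mmu_page_hash[16];          /* stand-in for the VM-wide page hash */
};

struct kvm_vcpu {
        struct kvm *kvm;                /* each vcpu points back to its VM */
};

/* Operates on VM-wide state, so it takes struct kvm, not struct kvm_vcpu. */
static int lookup_page(struct kvm *kvm, unsigned gfn)
{
        return kvm->mmu_page_hash[gfn % 16];
}

/* A genuinely per-vcpu entry point narrows to the VM before calling down. */
static int unprotect_page_virt(struct kvm_vcpu *vcpu, unsigned gfn)
{
        return lookup_page(vcpu->kvm, gfn);
}

int main(void)
{
        struct kvm vm = { .mmu_page_hash = { [3] = 42 } };
        struct kvm_vcpu vcpu = { .kvm = &vm };

        printf("%d\n", unprotect_page_virt(&vcpu, 3)); /* prints 42 */
        return 0;
}

Reading a prototype then answers the question the commit message raises: a
struct kvm parameter means the function acts once per VM, while a struct
kvm_vcpu parameter means it is genuinely per-VCPU.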
Diffstat (limited to 'drivers')
 drivers/kvm/mmu.c         | 18 +++++++++---------
 drivers/kvm/paging_tmpl.h |  4 ++--
 2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ece0aa4e4c9f..a5ca9457e929 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -606,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
                                                gfn_t gfn)
 {
        unsigned index;
@@ -616,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
@@ -782,7 +782,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
        kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
        struct hlist_head *bucket;
@@ -793,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
-                       kvm_mmu_zap_page(vcpu->kvm, page);
+                       kvm_mmu_zap_page(kvm, page);
                        r = 1;
                }
        return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_mmu_page *page;
 
-       while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+       while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
                pgprintk("%s: zap %lx %x\n",
                         __FUNCTION__, gfn, page->role.word);
-               kvm_mmu_zap_page(vcpu->kvm, page);
+               kvm_mmu_zap_page(kvm, page);
        }
 }
 
@@ -1299,7 +1299,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 
-       return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+       return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 447d2c31f0cb..4f6edf85d13f 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -268,11 +268,11 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 
        spte |= PT_WRITABLE_MASK;
        if (user_fault) {
-               mmu_unshadow(vcpu, gfn);
+               mmu_unshadow(vcpu->kvm, gfn);
                goto unshadowed;
        }
 
-       shadow = kvm_mmu_lookup_page(vcpu, gfn);
+       shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
        if (shadow) {
                pgprintk("%s: found shadow page for %lx, marking ro\n",
                         __FUNCTION__, gfn);