author		Anthony Liguori <aliguori@us.ibm.com>	2007-10-10 21:08:41 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:52:51 -0500
commit		4a4c99248713e878e1e2880015d01049aec805f3 (patch)
tree		a847d398faafef4ccbd44de13cfe5d1040fd1df1
parent		f67a46f4aa1212b38696ac6b6a82b4323cea61aa (diff)
KVM: MMU: More struct kvm_vcpu -> struct kvm cleanups
This time, the biggest change is to gpa_to_hpa. Unlike GVA-to-GPA translation,
the translation of a GPA to an HPA does not depend on VCPU state, so there is
no need to pass in the kvm_vcpu.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
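
For context, the asymmetry the message describes is visible in the two translation entry points this patch touches: GVA-to-GPA translation must walk the guest's page tables, which live in per-VCPU paging state, while GPA-to-HPA translation only consults the VM-wide memory-slot layout. A minimal sketch of the resulting signatures (taken from the hunks below; not a standalone compilation unit):

	/* GVA -> GPA depends on per-VCPU paging state (CR3, paging mode),
	 * so this path keeps taking a struct kvm_vcpu: */
	hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);

	/* GPA -> HPA only needs the VM-wide memslot mapping, so after
	 * this patch it takes the struct kvm directly: */
	hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);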
-rw-r--r--	drivers/kvm/kvm.h		2
-rw-r--r--	drivers/kvm/mmu.c		26
-rw-r--r--	drivers/kvm/paging_tmpl.h	6
3 files changed, 17 insertions, 17 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 9c9c1d7f5b3..d56962d49aa 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -554,7 +554,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
+hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
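
Call sites that only have a vcpu in scope now dereference vcpu->kvm when calling the converted functions, as the mmu.c and paging_tmpl.h hunks below show; the typical before/after shape is:

	/* before */ paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);
	/* after  */ paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);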
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index a5ca9457e92..d046ba80776 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -451,14 +451,14 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
 	u64 *spte;
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn);
+	rmapp = gfn_to_rmap(kvm, gfn);
 
 	while (*rmapp) {
 		if (!(*rmapp & 1))
@@ -471,9 +471,9 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(vcpu->kvm, spte);
+		rmap_remove(kvm, spte);
 		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs(kvm);
 	}
 }
 
@@ -670,7 +670,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&page->hash_link, bucket);
 	vcpu->mmu.prefetch_page(vcpu, page);
 	if (!metaphysical)
-		rmap_write_protect(vcpu, gfn);
+		rmap_write_protect(vcpu->kvm, gfn);
 	return page;
 }
 
@@ -823,19 +823,19 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 	__set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
-	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+	hpa_t hpa = gpa_to_hpa(kvm, gpa);
 
 	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
 }
 
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
 	struct page *page;
 
 	ASSERT((gpa & HPA_ERR_MASK) == 0);
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
 	if (!page)
 		return gpa | HPA_ERR_MASK;
 	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
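
gpa_to_hpa() reports failure in-band by setting the most significant bit of the returned hpa_t, which is what the HPA_MSB / HPA_ERR_MASK / is_error_hpa() definitions in the kvm.h hunk above implement. A self-contained user-space sketch of that encoding (hpa_t stood in by uint64_t purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t hpa_t;				/* stand-in for the kernel typedef */

	#define HPA_MSB      ((sizeof(hpa_t) * 8) - 1)
	#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)

	static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

	int main(void)
	{
		hpa_t ok  = 0x1000;			/* a successfully translated address */
		hpa_t bad = 0x1000 | HPA_ERR_MASK;	/* what gpa_to_hpa() returns on failure */

		printf("ok:  %d\n", is_error_hpa(ok));	/* prints 0 */
		printf("bad: %d\n", is_error_hpa(bad));	/* prints 1 */
		return 0;
	}

Note that safe_gpa_to_hpa() builds on the same check: on error it substitutes bad_page_address plus the page offset instead of propagating the error bit.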
@@ -848,7 +848,7 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
 
 	if (gpa == UNMAPPED_GVA)
 		return UNMAPPED_GVA;
-	return gpa_to_hpa(vcpu, gpa);
+	return gpa_to_hpa(vcpu->kvm, gpa);
 }
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
@@ -857,7 +857,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
-	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1012,7 +1012,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
 
-	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
 	if (is_error_hpa(paddr))
 		return 1;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 4f6edf85d13..8e1e4ca6ea4 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -103,7 +103,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
 		 walker->level - 1, table_gfn);
 	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
-	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
+	hpa = safe_gpa_to_hpa(vcpu->kvm, root & PT64_BASE_ADDR_MASK);
 	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
 	walker->table = kmap_atomic(walker->page, KM_USER0);
 
@@ -159,7 +159,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 		walker->inherited_ar &= walker->table[index];
 		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
 		kunmap_atomic(walker->table, KM_USER0);
-		paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
+		paddr = safe_gpa_to_hpa(vcpu->kvm, table_gfn << PAGE_SHIFT);
 		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
 		walker->table = kmap_atomic(walker->page, KM_USER0);
 		--walker->level;
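
Each iteration of the walker descends one guest page-table level by remapping the next table through the VM-wide translation; annotated from the hunk above (loop context assumed for illustration):

	/* drop the mapping of the current level ... */
	kunmap_atomic(walker->table, KM_USER0);
	/* ... translate the next table's GPA via the struct kvm ... */
	paddr = safe_gpa_to_hpa(vcpu->kvm, table_gfn << PAGE_SHIFT);
	/* ... and map the next level before descending */
	walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
	walker->table = kmap_atomic(walker->page, KM_USER0);
	--walker->level;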
@@ -248,7 +248,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 	if (!dirty)
 		access_bits &= ~PT_WRITABLE_MASK;
 
-	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);
 
 	spte |= PT_PRESENT_MASK;
 	if (access_bits & PT_USER_MASK)