author	Zhang Xiantao <xiantao.zhang@intel.com>	2007-12-13 10:50:52 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:58:09 -0500
commit	ad312c7c79f781c822e37effe41307503a2bb85b
tree	d979bfb70e76ada58b79b456c61a0507a8f0847d /drivers/kvm/mmu.c
parent	682c59a3f3f211ed555b17144f2d82eb8286a1db
KVM: Portability: Introduce kvm_vcpu_arch
Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
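
The change is mechanical: every x86-specific field formerly reached as vcpu->field is now reached as vcpu->arch.field. As a reading aid, here is a minimal sketch of the resulting layout, reconstructed only from the fields this diff touches; the real struct in the kernel headers carries many more members, and the exact types shown are assumptions:

	/* Sketch only, not the full kernel definition. */
	struct kvm_vcpu_arch {
		unsigned long cr0;
		unsigned long cr3;
		u64 shadow_efer;
		u64 pdptrs[4];		/* PAE page-directory pointers */
		struct kvm_mmu mmu;
		struct kvm_mmu_memory_cache mmu_pte_chain_cache;
		struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
		struct kvm_mmu_memory_cache mmu_page_cache;
		struct kvm_mmu_memory_cache mmu_page_header_cache;
		gfn_t last_pt_write_gfn;
		int last_pt_write_count;
		u64 *last_pte_updated;
	};

	struct kvm_vcpu {
		struct kvm *kvm;	/* architecture-neutral part */
		/* ... */
		struct kvm_vcpu_arch arch;	/* everything x86-specific */
	};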
Diffstat (limited to 'drivers/kvm/mmu.c')
 -rw-r--r--  drivers/kvm/mmu.c  142
 1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 92ac0d1106b4..da1dedb497b8 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & X86_CR0_WP;
+	return vcpu->arch.cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -190,7 +190,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-	return vcpu->shadow_efer & EFER_NX;
+	return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -292,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	kvm_mmu_free_some_pages(vcpu);
-	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
 				   pte_chain_cache, 4);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
 				   rmap_desc_cache, 1);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				   mmu_page_header_cache, 4);
 out:
 	return r;
@@ -311,10 +311,10 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -330,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
 				      sizeof(struct kvm_pte_chain));
 }
 
@@ -341,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
 				      sizeof(struct kvm_rmap_desc));
 }
 
@@ -568,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;
 
-	sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
 	ASSERT(is_empty_shadow_page(sp->spt));
@@ -692,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct hlist_node *node;
 
 	role.word = 0;
-	role.glevels = vcpu->mmu.root_level;
+	role.glevels = vcpu->arch.mmu.root_level;
 	role.level = level;
 	role.metaphysical = metaphysical;
 	role.access = access;
-	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
@@ -718,7 +718,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
-	vcpu->mmu.prefetch_page(vcpu, sp);
+	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
 	if (new_page)
@@ -768,7 +768,7 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 
 	for (i = 0; i < KVM_MAX_VCPUS; ++i)
 		if (kvm->vcpus[i])
-			kvm->vcpus[i]->last_pte_updated = NULL;
+			kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -875,7 +875,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
@@ -962,7 +962,7 @@ unshadowed:
 	else
 		kvm_release_page_clean(page);
 	if (!ptwrite || !*ptwrite)
-		vcpu->last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_updated = shadow_pte;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -972,7 +972,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int level = PT32E_ROOT_LEVEL;
-	hpa_t table_addr = vcpu->mmu.root_hpa;
+	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
 	int pt_write = 0;
 
 	for (; ; level--) {
@@ -1024,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (!VALID_PAGE(vcpu->mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 #ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		sp = page_header(root);
 		--sp->root_count;
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		return;
 	}
 #endif
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
 		if (root) {
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			--sp->root_count;
 		}
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
-	vcpu->mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -1055,41 +1055,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
 
-	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
-		vcpu->mmu.root_hpa = root;
+		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
 #endif
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
 		ASSERT(!VALID_PAGE(root));
-		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->pdptrs[i])) {
-				vcpu->mmu.pae_root[i] = 0;
+		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
-			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-		} else if (vcpu->mmu.root_level == 0)
+			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
 				      ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
-		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
-	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
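In the non-long-mode branch above, each of the four pae_root entries shadows one 1 GB quadrant of the guest address space, which is why the loop hands i << 30 to kvm_mmu_get_page() as the guest address. A standalone illustration (not kernel code) of the ranges each root covers:

	#include <stdio.h>

	int main(void)
	{
		/* PAE root i covers guest addresses [i << 30, ((i + 1) << 30) - 1]. */
		for (int i = 0; i < 4; i++)
			printf("pae_root[%d]: 0x%08lx-0x%08lx\n", i,
			       (unsigned long)i << 30,
			       (((unsigned long)i + 1) << 30) - 1);
		return 0;
	}

Running this prints 0x00000000-0x3fffffff for root 0, 0x40000000-0x7fffffff for root 1, and so on up to 0xffffffff.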
@@ -1109,7 +1109,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 		return r;
 
 	ASSERT(vcpu);
-	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	gfn = gva >> PAGE_SHIFT;
 
@@ -1124,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = nonpaging_page_fault;
@@ -1171,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -1192,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
@@ -1213,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
 		return nonpaging_init_context(vcpu);
@@ -1228,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-		vcpu->mmu.free(vcpu);
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		vcpu->arch.mmu.free(vcpu);
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	}
 }
 
@@ -1250,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	mmu_alloc_roots(vcpu);
-	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
 	mutex_unlock(&vcpu->kvm->lock);
@@ -1323,7 +1323,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
 
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
-	u64 *spte = vcpu->last_pte_updated;
+	u64 *spte = vcpu->arch.last_pte_updated;
 
 	return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
@@ -1350,15 +1350,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
-	if (gfn == vcpu->last_pt_write_gfn
+	if (gfn == vcpu->arch.last_pt_write_gfn
 	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->last_pt_write_count;
-		if (vcpu->last_pt_write_count >= 3)
+		++vcpu->arch.last_pt_write_count;
+		if (vcpu->arch.last_pt_write_count >= 3)
 			flooded = 1;
 	} else {
-		vcpu->last_pt_write_gfn = gfn;
-		vcpu->last_pt_write_count = 1;
-		vcpu->last_pte_updated = NULL;
+		vcpu->arch.last_pt_write_gfn = gfn;
+		vcpu->arch.last_pt_write_count = 1;
+		vcpu->arch.last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1420,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
@@ -1443,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	enum emulation_result er;
 
 	mutex_lock(&vcpu->kvm->lock);
-	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 	if (r < 0)
 		goto out;
 
@@ -1486,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
-	free_page((unsigned long)vcpu->mmu.pae_root);
+	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1508,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
 	if (!page)
 		goto error_1;
-	vcpu->mmu.pae_root = page_address(page);
+	vcpu->arch.mmu.pae_root = page_address(page);
 	for (i = 0; i < 4; ++i)
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 
 	return 0;
 
@@ -1522,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	return alloc_mmu_pages(vcpu);
 }
@@ -1530,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	return init_kvm_mmu(vcpu);
 }
@@ -1659,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			printk(KERN_ERR "audit: (%s) nontrapping pte"
 			       " in nonleaf level: levels %d gva %lx"
 			       " level %d pte %llx\n", audit_msg,
-			       vcpu->mmu.root_level, va, level, ent);
+			       vcpu->arch.mmu.root_level, va, level, ent);
 
 			audit_mappings_page(vcpu, ent, va, level - 1);
 		} else {
-			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
 			struct page *page = gpa_to_page(vcpu, gpa);
 			hpa_t hpa = page_to_phys(page);
 
@@ -1671,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 		    && (ent & PT64_BASE_ADDR_MASK) != hpa)
 			printk(KERN_ERR "xx audit error: (%s) levels %d"
 			       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-			       audit_msg, vcpu->mmu.root_level,
+			       audit_msg, vcpu->arch.mmu.root_level,
 			       va, gpa, hpa, ent,
 			       is_shadow_present_pte(ent));
 		else if (ent == shadow_notrap_nonpresent_pte
@@ -1688,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
 	unsigned i;
 
-	if (vcpu->mmu.root_level == 4)
-		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+	if (vcpu->arch.mmu.root_level == 4)
+		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
 	else
 		for (i = 0; i < 4; ++i)
-			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
 				audit_mappings_page(vcpu,
-					vcpu->mmu.pae_root[i],
+					vcpu->arch.mmu.pae_root[i],
 					i << 30,
 					2);
 }