author    Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2011-11-28 07:42:16 -0500
committer Avi Kivity <avi@redhat.com>                        2011-12-27 04:22:09 -0500
commit    e459e3228dc57f7160e564ce0f09edb5bee656d3 (patch)
tree      aa3b61d28e69a094f0c7906de40c5fcdca38b3d6 /arch/x86/include/asm/kvm_host.h
parent    9edb17d55f3ea4943f9654f2aad7a99b4c55840a (diff)
KVM: MMU: move the relevant mmu code to mmu.c
Move the mmu code in kvm_arch_vcpu_init() to kvm_mmu_create()

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
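Note: the header hunk shown below only adds declarations; the part of the patch that actually moves the per-vcpu MMU initialization from kvm_arch_vcpu_init() in x86.c into kvm_mmu_create() in mmu.c is outside this file's diffstat. The fragment below is only a rough sketch of that kind of relocation, based on the hooks declared in this header; it is not the literal mmu.c hunk, and the exact field assignments and the alloc_mmu_pages() call are assumptions.

    /* Illustrative sketch only -- the real change lives in arch/x86/kvm/mmu.c. */
    int kvm_mmu_create(struct kvm_vcpu *vcpu)
    {
    	/* Setup previously done in kvm_arch_vcpu_init(): wire up the default
    	 * (identity) and nested GPA translation hooks declared in kvm_host.h.
    	 */
    	vcpu->arch.mmu.translate_gpa = translate_gpa;
    	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

    	return alloc_mmu_pages(vcpu);
    }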
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1769f3dde611..020413afb285 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -752,6 +752,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -773,6 +774,11 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+	return gpa;
+}
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
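The new static inline translate_gpa() is the trivial, non-nested case of the GPA translation hook: when the guest is not running under nested paging, a guest physical address needs no further translation, so the hook returns its argument unchanged, while translate_nested_gpa() (declared in the first hunk) handles the L2-to-L1 walk. The fragment below is only an illustration of how such a per-MMU hook is typically consumed; the helper name and call site are assumptions for illustration and are not part of this patch.

    /* Illustrative only: dispatching through the per-MMU translation hook.
     * For ordinary guests the hook is the identity translate_gpa() above;
     * for a nested guest it points at a nested variant instead.
     */
    static gpa_t example_gpa_lookup(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
    {
    	return vcpu->arch.mmu.translate_gpa(vcpu, gpa, access);
    }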