aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-09-10 11:30:40 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:52:29 -0400
commitf43addd46168110d572dcf69100cb215a4e9fd08 (patch)
treeb17a5d13f1d765cc6ec7f53ffbf29a22033f45e3 /arch
parentc5a78f2b649ae75ae788e7622ca5a586af2cb35a (diff)
KVM: MMU: Make set_cr3 a function pointer in kvm_mmu
This is necessary to implement Nested Nested Paging. As a side effect this
allows some cleanups in the SVM nested paging code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kvm/mmu.c6
2 files changed, 5 insertions, 2 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 80ef28bddcc3..53cedede88fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -236,6 +236,7 @@ struct kvm_pio_request {
  */
 struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
+	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c28e979d730..c8acb9609ca4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2714,6 +2714,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
+	context->set_cr3 = kvm_x86_ops->set_cr3;
 
 	if (!is_paging(vcpu)) {
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2752,7 +2753,8 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 		r = paging32_init_context(vcpu);
 
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3;
 
 	return r;
 }
@@ -2796,7 +2798,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	/* set_cr3() should ensure TLB has been flushed */
-	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 out:
 	return r;
 }