author	Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:30:42 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:31 -0400
commit	5777ed340d89cdc6c76a5c552337a3861b40a806 (patch)
tree	7f3c3a8da975dadef7e00fc92c39bd3ab5862a60
parent	1c97f0a04c74196880f22a563134c8f6d0b9d752 (diff)
KVM: MMU: Introduce get_cr3 function pointer
This function pointer in the MMU context is required to implement
Nested Nested Paging.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1 +
-rw-r--r--  arch/x86/kvm/mmu.c              |  9 ++++++++-
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      |  4 ++--
3 files changed, 11 insertions(+), 3 deletions(-)
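The change is mechanical, but the indirection it buys is easiest to see in isolation. Below is a minimal, self-contained C sketch (not KVM code; struct vcpu, struct mmu_ctx, host_get_cr3, nested_get_cr3 and root_table are invented names for illustration) of the pattern the patch introduces: the page-table walker fetches CR3 through the MMU context's callback, so a nested-paging context can later install a different provider without touching the walker.

/*
 * Illustrative sketch only: models the get_cr3 callback indirection,
 * not the real KVM data structures.
 */
#include <stdio.h>

struct vcpu;

struct mmu_ctx {
	/* mirrors the new get_cr3 member; everything else is omitted */
	unsigned long (*get_cr3)(struct vcpu *vcpu);
};

struct vcpu {
	unsigned long cr3;         /* CR3 as seen by the host MMU        */
	unsigned long nested_cr3;  /* hypothetical L2 CR3 for nested NPT */
	struct mmu_ctx mmu;
};

/* counterpart of the trivial get_cr3() helper added in mmu.c */
static unsigned long host_get_cr3(struct vcpu *vcpu)
{
	return vcpu->cr3;
}

/* hypothetical provider a nested-NPT context could install instead */
static unsigned long nested_get_cr3(struct vcpu *vcpu)
{
	return vcpu->nested_cr3;
}

/* walker code only ever goes through the callback, as in paging_tmpl.h */
static unsigned long root_table(struct vcpu *vcpu)
{
	return vcpu->mmu.get_cr3(vcpu);
}

int main(void)
{
	struct vcpu v = { .cr3 = 0x1000, .nested_cr3 = 0x2000 };

	v.mmu.get_cr3 = host_get_cr3;
	printf("host walk root:   %#lx\n", root_table(&v));

	v.mmu.get_cr3 = nested_get_cr3;  /* what a nested setup might do */
	printf("nested walk root: %#lx\n", root_table(&v));
	return 0;
}

With only this patch applied, both init_kvm_tdp_mmu() and init_kvm_softmmu() install the trivial get_cr3() that returns vcpu->arch.cr3, so behaviour is unchanged; the indirection only pays off once a nested context supplies its own callback.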
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 81a51473f745..6c97b8debfa8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -237,6 +237,7 @@ struct kvm_pio_request {
 struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
+	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a55f8d5a7985..e4a7de4c8c77 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2365,7 +2365,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int direct = 0;
 	u64 pdptr;
 
-	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2562,6 +2562,11 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
+static unsigned long get_cr3(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr3;
+}
+
 static void inject_page_fault(struct kvm_vcpu *vcpu,
 			      u64 addr,
 			      u32 err_code)
@@ -2715,6 +2720,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
+	context->get_cr3 = get_cr3;
 
 	if (!is_paging(vcpu)) {
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2755,6 +2761,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
 	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3;
+	vcpu->arch.mmu.get_cr3 = get_cr3;
 
 	return r;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e4ad3dc84df3..13d0c06b1bc8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -130,7 +130,7 @@ walk:
 	present = true;
 	eperm = rsvd_fault = false;
 	walker->level = vcpu->arch.mmu.root_level;
-	pte = vcpu->arch.cr3;
+	pte = vcpu->arch.mmu.get_cr3(vcpu);
 #if PTTYPE == 64
 	if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
@@ -143,7 +143,7 @@ walk:
 	}
 #endif
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+	       (vcpu->arch.mmu.get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	pt_access = ACC_ALL;
 