author     Joerg Roedel <joerg.roedel@amd.com>      2010-09-10 11:30:54 -0400
committer  Avi Kivity <avi@redhat.com>              2010-10-24 04:52:39 -0400
commit     02f59dc9f1f51d2148d87d48f84adb455a4fd697 (patch)
tree       f1947e95af0dd4e2211c1b152e48f9784a749ebb /arch/x86/kvm/mmu.c
parent     3d06b8bfd44ec421c386241f7c5af66c8200cbf4 (diff)
KVM: MMU: Introduce init_kvm_nested_mmu()
This patch introduces the init_kvm_nested_mmu() function,
which is used to re-initialize the nested MMU when the L2
guest changes its paging mode.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
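For illustration only, the standalone sketch below models the re-initialization this commit describes: a context holding a root_level and a gva_to_gpa callback is rebuilt whenever the guest's paging mode changes, mirroring the dispatch in init_kvm_nested_mmu() below. It is plain userspace C, not KVM code; every name in it (mmu_ctx, reinit_nested_ctx, the *_walk handlers) is invented for the sketch, and only the selection pattern reflects the patch.

```c
/* Minimal userspace model of the paging-mode dispatch in this patch.
 * Nothing here is KVM code; all names are invented for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

struct mmu_ctx {
	int   root_level;                /* depth of the page-table walk   */
	gpa_t (*gva_to_gpa)(gva_t gva);  /* mode-specific translation hook */
};

/* Stand-ins for the nonpaging/paging32/paging64 walkers. */
static gpa_t nonpaging_walk(gva_t gva) { return (gpa_t)gva; }
static gpa_t paging32_walk(gva_t gva)  { return (gpa_t)(uint32_t)gva; }
static gpa_t paging64_walk(gva_t gva)  { return (gpa_t)gva & ~0xfffULL; }

/* Re-initialize the context when the guest's paging mode changes, the
 * way init_kvm_nested_mmu() re-selects root_level and gva_to_gpa. */
static void reinit_nested_ctx(struct mmu_ctx *ctx,
			      bool paging, bool long_mode, bool pae)
{
	if (!paging) {
		ctx->root_level = 0;
		ctx->gva_to_gpa = nonpaging_walk;
	} else if (long_mode) {
		ctx->root_level = 4;            /* PT64_ROOT_LEVEL analogue  */
		ctx->gva_to_gpa = paging64_walk;
	} else if (pae) {
		ctx->root_level = 3;            /* PT32E_ROOT_LEVEL analogue */
		ctx->gva_to_gpa = paging64_walk;
	} else {
		ctx->root_level = 2;            /* PT32_ROOT_LEVEL analogue  */
		ctx->gva_to_gpa = paging32_walk;
	}
}

int main(void)
{
	struct mmu_ctx ctx;

	reinit_nested_ctx(&ctx, true, true, false);  /* L2 enables long mode */
	printf("root_level=%d gpa=0x%llx\n", ctx.root_level,
	       (unsigned long long)ctx.gva_to_gpa(0x1234));
	return 0;
}
```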
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1e215e8b9377..a26f13bd34e0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2784,11 +2784,46 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	return r;
 }
 
+static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+
+	g_context->get_cr3           = get_cr3;
+	g_context->inject_page_fault = kvm_inject_page_fault;
+
+	/*
+	 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
+	 * translation of l2_gpa to l1_gpa addresses is done using the
+	 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
+	 * functions between mmu and nested_mmu are swapped.
+	 */
+	if (!is_paging(vcpu)) {
+		g_context->root_level = 0;
+		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
+	} else if (is_long_mode(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
+		g_context->root_level = PT64_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+	} else if (is_pae(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
+		g_context->root_level = PT32E_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+	} else {
+		reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
+		g_context->root_level = PT32_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
+	}
+
+	return 0;
+}
+
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.update_pte.pfn = bad_pfn;
 
-	if (tdp_enabled)
+	if (mmu_is_nested(vcpu))
+		return init_kvm_nested_mmu(vcpu);
+	else if (tdp_enabled)
 		return init_kvm_tdp_mmu(vcpu);
 	else
 		return init_kvm_softmmu(vcpu);
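The comment in the new function describes how the two callbacks divide the work: arch.mmu.gva_to_gpa takes an L2 virtual address all the way to an L1 physical address, while arch.nested_mmu.gva_to_gpa only translates L2-physical to L1-physical. The standalone sketch below shows that composition. It is plain C outside KVM; the names (l2_walk, nested_walk, gva_to_gpa_nested, the fixed offsets) are invented stand-ins, and only the two-stage structure mirrors the real *_gva_to_gpa_nested helpers.

```c
/* Userspace sketch of the two translation stages described in the
 * comment in init_kvm_nested_mmu(); all names and offsets are
 * invented, only the composition pattern is the point. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

/* Stage 1: walk the L2 guest's own page tables (l2_gva -> l2_gpa).
 * A real walker reads guest memory; here it is a fixed offset. */
static gpa_t l2_walk(gva_t l2_gva)
{
	return (gpa_t)l2_gva + 0x100000;
}

/* Stage 2: the nested_mmu.gva_to_gpa role (l2_gpa -> l1_gpa), i.e.
 * resolving the guest-physical address through L1's page tables. */
static gpa_t nested_walk(gpa_t l2_gpa)
{
	return l2_gpa + 0x40000000;
}

/* Composition: the arch.mmu.gva_to_gpa role for a nested guest
 * (l2_gva -> l1_gpa), chaining the two stages above. */
static gpa_t gva_to_gpa_nested(gva_t l2_gva)
{
	gpa_t l2_gpa = l2_walk(l2_gva);

	return nested_walk(l2_gpa);
}

int main(void)
{
	gva_t l2_gva = 0x1000;

	printf("l2_gva=0x%llx -> l1_gpa=0x%llx\n",
	       (unsigned long long)l2_gva,
	       (unsigned long long)gva_to_gpa_nested(l2_gva));
	return 0;
}
```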