author		Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:30:44 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:32 -0400
commit		52fde8df7dd13d90f5f8dc43157418bff968d90a (patch)
tree		b02791abecb83a1b9772b95c333581087ab622f5 /arch/x86
parent		cb659db8a7d1ed558898f533a957dfc342f9499d (diff)
KVM: MMU: Introduce kvm_init_shadow_mmu helper function
Some logic of the init_kvm_softmmu function is required to build the
Nested Nested Paging context. So factor the required logic into a
separate function and export it.

Also make the whole init path suitable for more than one mmu context.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
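The export exists so that code outside mmu.c can build a shadow-paging
context and then override individual callbacks. A minimal sketch of such
a caller, mirroring what init_kvm_softmmu does after this patch but
swapping in NPT-aware callbacks; the nested_svm_* names are illustrative
assumptions, not part of this patch:

/*
 * Hypothetical caller (not in this patch): nested-NPT setup in svm.c
 * could use the newly exported helper like this.  The nested_svm_*
 * callback names are assumptions for illustration.
 */
static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	/* Let the generic code build the shadow-paging context... */
	int r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

	/*
	 * ...then redirect the CR3 accessors and fault injection so that
	 * walks follow the L1 nested page table instead of host state.
	 */
	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;

	return r;
}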
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	60
-rw-r--r--	arch/x86/kvm/mmu.h	1
2 files changed, 36 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a751dfc8526d..9e48a774fceb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2532,10 +2532,9 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
-static int nonpaging_init_context(struct kvm_vcpu *vcpu)
+static int nonpaging_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
-
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2595,9 +2594,10 @@ static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context,
+				  int level)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 	u64 exb_bit_rsvd = 0;
 
@@ -2656,9 +2656,11 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
 	}
 }
 
-static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
+static int paging64_init_context_common(struct kvm_vcpu *vcpu,
+					struct kvm_mmu *context,
+					int level)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	reset_rsvds_bits_mask(vcpu, context, level);
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -2675,17 +2677,17 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 	return 0;
 }
 
-static int paging64_init_context(struct kvm_vcpu *vcpu)
+static int paging64_init_context(struct kvm_vcpu *vcpu,
+				 struct kvm_mmu *context)
 {
-	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
-	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
+	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
 }
 
-static int paging32_init_context(struct kvm_vcpu *vcpu)
+static int paging32_init_context(struct kvm_vcpu *vcpu,
+				 struct kvm_mmu *context)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 
-	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2700,10 +2702,10 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int paging32E_init_context(struct kvm_vcpu *vcpu)
+static int paging32E_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
-	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
-	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
+	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
@@ -2727,15 +2729,15 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
-		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
+		reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT64_ROOT_LEVEL;
 	} else if (is_pae(vcpu)) {
-		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
+		reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT32E_ROOT_LEVEL;
 	} else {
-		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
+		reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 		context->gva_to_gpa = paging32_gva_to_gpa;
 		context->root_level = PT32_ROOT_LEVEL;
 	}
@@ -2743,24 +2745,32 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
 	int r;
-
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
-		r = nonpaging_init_context(vcpu);
+		r = nonpaging_init_context(vcpu, context);
 	else if (is_long_mode(vcpu))
-		r = paging64_init_context(vcpu);
+		r = paging64_init_context(vcpu, context);
 	else if (is_pae(vcpu))
-		r = paging32E_init_context(vcpu);
+		r = paging32E_init_context(vcpu, context);
 	else
-		r = paging32_init_context(vcpu);
+		r = paging32_init_context(vcpu, context);
 
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+{
+	int r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+
 	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3;
 	vcpu->arch.mmu.get_cr3 = get_cr3;
 	vcpu->arch.mmu.inject_page_fault = kvm_inject_page_fault;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index f05a03dfba4e..7086ca85d3e7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,6 +49,7 @@
 #define PFERR_FETCH_MASK (1U << 4)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
+int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {