author     Avi Kivity <avi@qumranet.com>  2008-08-22 12:24:38 -0400
committer  Avi Kivity <avi@qumranet.com>  2008-10-15 04:15:23 -0400
commit     3d000db5688c8beff6319fb9ff4b98dcac32f798
tree       75e8464d300c232efa1793477c555e1a5b7e0a80 /arch
parent     6c41f428b72afe5a581b967590c12538db31d399
KVM: MMU: Add generic shadow walker
We currently walk the shadow page tables in two places: direct map (for
real mode and two-dimensional paging) and paging mode shadow. Since we
anticipate requiring a third walk (for invlpg), it makes sense to have
a generic facility for shadow walks.

This patch adds such a shadow walker: it walks the page tables and calls a
method for every spte encountered. The method can examine the spte,
modify it, or even instantiate it. The walk can be aborted by returning
nonzero from the method.
Signed-off-by: Avi Kivity <avi@qumranet.com>
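
As an illustration of the interface (not part of this patch), a caller embeds
struct kvm_shadow_walk in its own context structure, points ->entry at a
callback, and passes the embedded walker to walk_shadow(). The sketch below
counts the present sptes along the translation path of an address and stops at
the first non-present entry; count_walker, count_walker_entry and
count_present_sptes are hypothetical names used only for this example, and
is_shadow_present_pte() is assumed to be the existing helper in mmu.c.

/*
 * Hypothetical walker, for illustration only: counts the present sptes
 * along the translation path of @addr and aborts at the first hole.
 */
struct count_walker {
	struct kvm_shadow_walk walker;	/* embedded so entry() can recover it */
	int present;
};

static int count_walker_entry(struct kvm_shadow_walk *walk,
			      struct kvm_vcpu *vcpu,
			      gva_t addr, u64 *spte, int level)
{
	struct count_walker *cw = container_of(walk, struct count_walker, walker);

	if (!is_shadow_present_pte(*spte))
		return 1;	/* nonzero aborts the walk */
	++cw->present;
	return 0;		/* zero continues to the next level */
}

static int count_present_sptes(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct count_walker cw = {
		.walker = { .entry = count_walker_entry },
		.present = 0,
	};

	walk_shadow(&cw.walker, vcpu, addr);
	return cw.present;
}

Returning nonzero from ->entry both stops the walk and propagates the value
back through walk_shadow(), which is what would let the anticipated invlpg
walker stop as soon as it reaches the spte it needs to touch.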
Diffstat (limited to 'arch')
 arch/x86/kvm/mmu.c | 34
 1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 72f739aa8623..8b95cf748b53 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -142,6 +142,11 @@ struct kvm_rmap_desc {
 	struct kvm_rmap_desc *more;
 };
 
+struct kvm_shadow_walk {
+	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
+		     gva_t addr, u64 *spte, int level);
+};
+
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
@@ -935,6 +940,35 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
+static int walk_shadow(struct kvm_shadow_walk *walker,
+		       struct kvm_vcpu *vcpu, gva_t addr)
+{
+	hpa_t shadow_addr;
+	int level;
+	int r;
+	u64 *sptep;
+	unsigned index;
+
+	shadow_addr = vcpu->arch.mmu.root_hpa;
+	level = vcpu->arch.mmu.shadow_root_level;
+	if (level == PT32E_ROOT_LEVEL) {
+		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+		shadow_addr &= PT64_BASE_ADDR_MASK;
+		--level;
+	}
+
+	while (level >= PT_PAGE_TABLE_LEVEL) {
+		index = SHADOW_PT_INDEX(addr, level);
+		sptep = ((u64 *)__va(shadow_addr)) + index;
+		r = walker->entry(walker, vcpu, addr, sptep, level);
+		if (r)
+			return r;
+		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
+		--level;
+	}
+	return 0;
+}
+
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 					 struct kvm_mmu_page *sp)
 {