author     Joerg Roedel <joerg.roedel@amd.com>       2010-09-10 11:30:47 -0400
committer  Avi Kivity <avi@redhat.com>               2010-10-24 04:52:33 -0400
commit     1e301feb079e8ee6091bb75283e960fc33059a68
tree       458abbfb4f1b3c86a3c137f711c5fce93c5d7002
parent     8df25a328a6ca3bd0f048278f4d5ae0a1f6fadc1
KVM: MMU: Introduce generic walk_addr function
This is the first patch in a series working toward a generic
walk_addr implementation that will eventually be able to walk
two-dimensional (nested) page tables. In this first step the
walk_addr function is renamed to walk_addr_generic, which takes
an mmu context as an additional parameter.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
 arch/x86/kvm/paging_tmpl.h | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d07f48a06f09..a704a8130e44 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -114,9 +114,10 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 /*
  * Fetch a guest pte for a guest virtual address
  */
-static int FNAME(walk_addr)(struct guest_walker *walker,
-			    struct kvm_vcpu *vcpu, gva_t addr,
-			    int write_fault, int user_fault, int fetch_fault)
+static int FNAME(walk_addr_generic)(struct guest_walker *walker,
+				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				    gva_t addr, int write_fault,
+				    int user_fault, int fetch_fault)
 {
 	pt_element_t pte;
 	gfn_t table_gfn;
@@ -129,10 +130,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 walk:
 	present = true;
 	eperm = rsvd_fault = false;
-	walker->level = vcpu->arch.mmu.root_level;
-	pte = vcpu->arch.mmu.get_cr3(vcpu);
+	walker->level = mmu->root_level;
+	pte = mmu->get_cr3(vcpu);
+
 #if PTTYPE == 64
-	if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+	if (walker->level == PT32E_ROOT_LEVEL) {
 		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
 		if (!is_present_gpte(pte)) {
@@ -143,7 +145,7 @@ walk:
 	}
 #endif
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->arch.mmu.get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
+	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	pt_access = ACC_ALL;
 
@@ -205,7 +207,7 @@ walk:
 		    (PTTYPE == 64 || is_pse(vcpu))) ||
 		    ((walker->level == PT_PDPE_LEVEL) &&
 			is_large_pte(pte) &&
-			vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL)) {
+			mmu->root_level == PT64_ROOT_LEVEL)) {
 			int lvl = walker->level;
 
 			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
@@ -266,6 +268,14 @@ error:
 	return 0;
 }
 
+static int FNAME(walk_addr)(struct guest_walker *walker,
+			    struct kvm_vcpu *vcpu, gva_t addr,
+			    int write_fault, int user_fault, int fetch_fault)
+{
+	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
+					write_fault, user_fault, fetch_fault);
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
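
Where the series is headed: with the mmu context passed in explicitly, a
second-dimension walker can reuse the same walker body simply by handing
walk_addr_generic a different struct kvm_mmu. The sketch below is
illustrative only and is not part of this patch; the wrapper name
FNAME(walk_addr_nested) and the vcpu->arch.nested_mmu context are
assumptions here, which later patches in the series would have to provide.

/*
 * Illustrative sketch, not part of this patch: a nested walker could
 * instantiate the generic walk with a second mmu context.  The names
 * walk_addr_nested and vcpu->arch.nested_mmu are assumptions and would
 * need to be introduced by later patches in the series.
 */
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   int write_fault, int user_fault,
				   int fetch_fault)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, write_fault, user_fault,
					fetch_fault);
}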