author    Sheng Yang <sheng.yang@intel.com>    2008-09-01 07:41:20 -0400
committer Avi Kivity <avi@qumranet.com>        2008-10-15 04:15:25 -0400
commit    d40a1ee4859c673677c9811ae84475c4051baca5 (patch)
tree      deedab33a187c41b23db799bd610af0508820cd4 /arch/x86/kvm
parent    8c4b537da7eceab1246695df21beea10f180d460 (diff)
KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr
EPT is 4-level by default, even under 32-bit PAE (48-bit addresses), but the addr parameter of kvm_shadow_walk->entry() only accepts an unsigned long virtual address, which is 32 bits wide on 32-bit PAE. This makes SHADOW_PT_INDEX() overflow when fetching the level-4 index. Fix it by extending kvm_shadow_walk->entry() to accept a u64 addr parameter.

Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
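To make the overflow concrete, here is a minimal standalone sketch (not part of the patch): the PAGE_SHIFT/PT64_LEVEL_BITS macros below mirror the PT64_INDEX() arithmetic that SHADOW_PT_INDEX() expands to in this tree, assuming those definitions are unchanged. The level-4 index lives in address bits 39..47, so an address squeezed through a 32-bit type loses it entirely:

/* Standalone illustration -- not kernel code. The macros mirror the
 * PT64_INDEX() arithmetic that SHADOW_PT_INDEX() expands to. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9
#define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
#define PT64_INDEX(addr, level) \
        (((addr) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

int main(void)
{
        /* A 48-bit address whose level-4 index (bits 39..47) is 0x12. */
        uint64_t addr64 = ((uint64_t)0x12 << 39) | 0x00abc000;
        /* What survives a trip through a 32-bit unsigned long. */
        uint64_t addr32 = (uint32_t)addr64;

        printf("level-4 index, u64 addr:       %u\n",
               (unsigned)PT64_INDEX(addr64, 4));   /* prints 18 (0x12) */
        printf("level-4 index, truncated addr: %u\n",
               (unsigned)PT64_INDEX(addr32, 4));   /* prints 0: bits 39..47 gone */
        return 0;
}

This also explains the (gva_t) casts the diff adds at the kvm_mmu_get_page() call sites: that function's address parameter remains a gva_t, so the widened u64 walker address is narrowed explicitly only where it crosses back into that interface.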
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c          10
-rw-r--r--  arch/x86/kvm/paging_tmpl.h   4
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 866d7133cad8..bce3e25ec79b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -144,7 +144,7 @@ struct kvm_rmap_desc {
 
 struct kvm_shadow_walk {
         int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
-                     gva_t addr, u64 *spte, int level);
+                     u64 addr, u64 *spte, int level);
 };
 
 static struct kmem_cache *pte_chain_cache;
@@ -941,7 +941,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 }
 
 static int walk_shadow(struct kvm_shadow_walk *walker,
-                       struct kvm_vcpu *vcpu, gva_t addr)
+                       struct kvm_vcpu *vcpu, u64 addr)
 {
         hpa_t shadow_addr;
         int level;
@@ -1270,7 +1270,7 @@ struct direct_shadow_walk {
 
 static int direct_map_entry(struct kvm_shadow_walk *_walk,
                             struct kvm_vcpu *vcpu,
-                            gva_t addr, u64 *sptep, int level)
+                            u64 addr, u64 *sptep, int level)
 {
         struct direct_shadow_walk *walk =
                 container_of(_walk, struct direct_shadow_walk, walker);
@@ -1289,7 +1289,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
 
         if (*sptep == shadow_trap_nonpresent_pte) {
                 pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-                sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
+                sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
                                       1, ACC_ALL, sptep);
                 if (!sp) {
                         pgprintk("nonpaging_map: ENOMEM\n");
@@ -1317,7 +1317,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                 .pt_write = 0,
         };
 
-        r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
+        r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
         if (r < 0)
                 return r;
         return walker.pt_write;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b7064e1e1e17..b671f61be41e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-                                    struct kvm_vcpu *vcpu, gva_t addr,
+                                    struct kvm_vcpu *vcpu, u64 addr,
                                     u64 *sptep, int level)
 {
         struct shadow_walker *sw =
@@ -326,7 +326,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
                 metaphysical = 0;
                 table_gfn = gw->table_gfn[level - 2];
         }
-        shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+        shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
                                        metaphysical, access, sptep);
         if (!metaphysical) {
                 r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],