author    Izik Eidus <ieidus@redhat.com>    2008-10-03 10:40:32 -0400
committer Avi Kivity <avi@redhat.com>       2008-12-31 09:51:50 -0500
commit    2843099fee32a6020e1caa95c6026f28b5d43bff (patch)
tree      774ddfeec4091adddf9bd9ce938648dad14c378e /virt
parent    6eb55818c043b097c83828da8430fcb9a02fdb89 (diff)
KVM: MMU: Fix aliased gfns treated as unaliased
Some areas of the kvm x86 mmu use a gfn's offset inside a memslot without unaliasing the gfn first. This patch makes sure the gfn is unaliased, and adds gfn_to_memslot_unaliased() so that callers which already hold an unaliased gfn can skip resolving the alias again.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1838052f3c9e..a65baa9039d5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -923,7 +923,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 
@@ -936,11 +936,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	gfn = unalias_gfn(kvm, gfn);
-	return __gfn_to_memslot(kvm, gfn);
+	return gfn_to_memslot_unaliased(kvm, gfn);
 }
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -964,7 +965,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *slot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	slot = __gfn_to_memslot(kvm, gfn);
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (!slot)
 		return bad_hva();
 	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
@@ -1215,7 +1216,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	memslot = __gfn_to_memslot(kvm, gfn);
+	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
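
As a companion to the diff, here is a minimal, stand-alone sketch of the lookup paths involved. It is plain user-space C with simplified stand-in types (the struct layouts and field names such as target_gfn and naliases are assumptions for illustration, not the kernel's real definitions), showing why feeding an aliased gfn to the already-unaliased lookup misses the memslot, while gfn_to_memslot() still resolves the alias itself.

/* gfn_alias_model.c -- illustrative model only, not kernel code. */
#include <stdio.h>

typedef unsigned long long gfn_t;

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
};

/* Hypothetical alias entry: maps [base_gfn, base_gfn + npages) onto target_gfn. */
struct kvm_mem_alias {
        gfn_t base_gfn;
        unsigned long npages;
        gfn_t target_gfn;
};

struct kvm {
        struct kvm_memory_slot memslots[4];
        int nmemslots;
        struct kvm_mem_alias aliases[4];
        int naliases;
};

/* Model of unalias_gfn(): resolve an aliased gfn to the gfn of its backing memory. */
static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->naliases; ++i) {
                struct kvm_mem_alias *alias = &kvm->aliases[i];

                if (gfn >= alias->base_gfn &&
                    gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + (gfn - alias->base_gfn);
        }
        return gfn;
}

/* Model of gfn_to_memslot_unaliased(): the caller must pass an unaliased gfn. */
static struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *slot = &kvm->memslots[i];

                if (gfn >= slot->base_gfn &&
                    gfn < slot->base_gfn + slot->npages)
                        return slot;
        }
        return NULL;
}

/* Model of gfn_to_memslot(): unalias first, then look up the slot. */
static struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int main(void)
{
        /* One slot covering gfns [0x100, 0x200), one alias mapping 0x800+ onto it. */
        struct kvm kvm = {
                .memslots  = { { .base_gfn = 0x100, .npages = 0x100 } },
                .nmemslots = 1,
                .aliases   = { { .base_gfn = 0x800, .npages = 0x10,
                                 .target_gfn = 0x100 } },
                .naliases  = 1,
        };
        gfn_t aliased_gfn = 0x805;

        /* Buggy pattern fixed by the patch: aliased gfn fed to the unaliased lookup. */
        printf("unaliased lookup on aliased gfn: %s\n",
               gfn_to_memslot_unaliased(&kvm, aliased_gfn) ? "hit" : "miss");

        /* Correct pattern: unalias first (gfn_to_memslot() does it for you). */
        printf("gfn_to_memslot on aliased gfn:   %s\n",
               gfn_to_memslot(&kvm, aliased_gfn) ? "hit" : "miss");
        return 0;
}

The intended usage mirrors the commit message: code that has already called unalias_gfn() (the x86 mmu side of this patch, not shown in the virt/ diff) can call the exported gfn_to_memslot_unaliased() directly and avoid resolving the alias twice; everything else keeps going through gfn_to_memslot().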