author    Izik Eidus <ieidus@redhat.com>    2008-10-03 10:40:32 -0400
committer Avi Kivity <avi@redhat.com>       2008-12-31 09:51:50 -0500
commit    2843099fee32a6020e1caa95c6026f28b5d43bff (patch)
tree      774ddfeec4091adddf9bd9ce938648dad14c378e
parent    6eb55818c043b097c83828da8430fcb9a02fdb89 (diff)
KVM: MMU: Fix aliased gfns treated as unaliased
Some areas of the kvm x86 mmu use a gfn's offset inside a memory slot without unaliasing the gfn first. This patch makes sure the gfn is unaliased at those sites, and adds gfn_to_memslot_unaliased() so callers that already hold an unaliased gfn do not have to recompute the translation.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
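For context: an x86 gfn alias redirects one range of guest frame numbers onto another. The translation that gfn_to_memslot() applies internally looks roughly like the unalias_gfn() of this era; the kvm_mem_alias field names below are recalled from period source and should be read as a sketch, not verbatim code.

/*
 * Sketch of the alias translation (field names assumed): walk the
 * registered alias ranges and, if gfn falls inside one, redirect it
 * into the target range; otherwise return it unchanged.
 */
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn &&
		    gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}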
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/mmu.c              | 14
-rw-r--r--  virt/kvm/kvm_main.c             |  9
3 files changed, 17 insertions(+), 8 deletions(-)
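The mmu.c hunks below all hand the gfn to slot_largepage_idx(), which subtracts slot->base_gfn, so the gfn and the slot must be in the same (unaliased) space or the computed index is wrong. The old code paired a still-aliased gfn with a slot from gfn_to_memslot(), which unaliases internally. A sketch of the helper as it looked around this commit (the lpage_info layout is an assumption from memory):

/*
 * Sketch, not verbatim source: converts a gfn into a large-page
 * index relative to the slot's base gfn. Passing an aliased gfn
 * together with an unaliased slot misindexes lpage_info[].
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}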
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09e6c56572cb..99e3cc149d21 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -617,6 +617,8 @@ void kvm_disable_tdp(void);
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8687758b5295..8904e8ada978 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -386,7 +386,9 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	int *write_count;
 
-	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	gfn = unalias_gfn(kvm, gfn);
+	write_count = slot_largepage_idx(gfn,
+					 gfn_to_memslot_unaliased(kvm, gfn));
 	*write_count += 1;
 }
 
@@ -394,16 +396,20 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	int *write_count;
 
-	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	gfn = unalias_gfn(kvm, gfn);
+	write_count = slot_largepage_idx(gfn,
+					 gfn_to_memslot_unaliased(kvm, gfn));
 	*write_count -= 1;
 	WARN_ON(*write_count < 0);
 }
 
 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
 {
-	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
+	gfn = unalias_gfn(kvm, gfn);
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot);
 		return *largepage_idx;
@@ -2973,8 +2979,8 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (sp->role.metaphysical)
 			continue;
 
-		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
 		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
+		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1838052f3c9e..a65baa9039d5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -923,7 +923,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 
@@ -936,11 +936,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	gfn = unalias_gfn(kvm, gfn);
-	return __gfn_to_memslot(kvm, gfn);
+	return gfn_to_memslot_unaliased(kvm, gfn);
 }
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -964,7 +965,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *slot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	slot = __gfn_to_memslot(kvm, gfn);
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (!slot)
 		return bad_hva();
 	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
@@ -1215,7 +1216,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	memslot = __gfn_to_memslot(kvm, gfn);
+	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
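After the patch, the convention is: unalias once, then reuse the unaliased gfn for both the slot lookup and any in-slot arithmetic. A hypothetical caller, with invented names, purely for illustration:

/*
 * Hypothetical example of the new calling convention: a single
 * unalias_gfn() serves both the slot lookup and the offset math.
 */
static int gfn_is_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	unsigned long rel_gfn;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || !slot->dirty_bitmap)
		return 0;

	/* offset is valid because gfn and slot share one gfn space */
	rel_gfn = gfn - slot->base_gfn;
	return test_bit(rel_gfn, slot->dirty_bitmap);
}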