author	Gleb Natapov <gleb@redhat.com>	2010-10-18 09:22:23 -0400
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:23:08 -0500
commit	49c7754ce57063b819b01eb8a4290841ad0886c4 (patch)
tree	67561bbc829d93efa5316c246ec84fe2f2493236 /virt/kvm/kvm_main.c
parent	56028d0861e48f7cc9c573d79f2d8a0a933a2bba (diff)
KVM: Add memory slot versioning and use it to provide fast guest write interface
Keep track of memslot changes by keeping a generation number in the
memslots structure. Provide a kvm_write_guest_cached() function that
skips the gfn_to_hva() translation if the memslots have not changed
since the previous invocation.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
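The intended call pattern is easiest to see from a caller's side. The
sketch below is illustrative only and not part of the commit: my_dev,
my_dev_setup() and my_dev_publish() are hypothetical names, while
struct gfn_to_hva_cache, kvm_gfn_to_hva_cache_init() and
kvm_write_guest_cached() are the interface added by this patch.

/*
 * Hypothetical usage sketch (not from this commit).  A device model
 * repeatedly publishes a 64-bit status word at a fixed guest physical
 * address.  The cache is initialized once; every later write is a
 * plain copy_to_user() as long as the memslot layout (tracked by
 * slots->generation) is unchanged, and the cache re-initializes
 * itself transparently when it is not.
 */
struct my_dev {
	struct gfn_to_hva_cache ghc;	/* cache introduced by this patch */
	gpa_t status_gpa;		/* guest address, chosen elsewhere */
};

static int my_dev_setup(struct kvm *kvm, struct my_dev *dev, gpa_t gpa)
{
	dev->status_gpa = gpa;
	/* One-time translation; records hva, memslot and generation. */
	return kvm_gfn_to_hva_cache_init(kvm, &dev->ghc, gpa);
}

static int my_dev_publish(struct kvm *kvm, struct my_dev *dev, u64 status)
{
	/* Fast path: no memslot walk unless the generation changed. */
	return kvm_write_guest_cached(kvm, &dev->ghc, &status,
				      sizeof(status));
}

By contrast, a plain kvm_write_guest() repeats the gfn_to_hva()
memslot walk on every single write, which is what this interface is
designed to avoid for hot per-vcpu guest addresses.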
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	75
1 file changed, 63 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 75fd590c0214..228f00f87966 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -687,6 +687,7 @@ skip_lpage:
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		if (mem->slot >= slots->nmemslots)
 			slots->nmemslots = mem->slot + 1;
+		slots->generation++;
 		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
 
 		old_memslots = kvm->memslots;
@@ -721,6 +722,7 @@ skip_lpage:
 	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 	if (mem->slot >= slots->nmemslots)
 		slots->nmemslots = mem->slot + 1;
+	slots->generation++;
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
@@ -851,10 +853,10 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+						gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -865,6 +867,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -927,12 +934,9 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 				     gfn_t *nr_pages)
 {
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 
@@ -944,7 +948,7 @@ static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
 
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
-	return gfn_to_hva_many(kvm, gfn, NULL);
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
@@ -1054,7 +1058,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
@@ -1238,6 +1242,47 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 	return 0;
 }
 
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int offset = offset_in_page(gpa);
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+
+	ghc->gpa = gpa;
+	ghc->generation = slots->generation;
+	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+	if (!kvm_is_error_hva(ghc->hva))
+		ghc->hva += offset;
+	else
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = copy_to_user((void __user *)ghc->hva, data, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
 	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
@@ -1263,11 +1308,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1275,6 +1318,14 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	}
 }
 
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */