path: root/include/linux/kvm_host.h
author     Igor Mammedov <imammedo@redhat.com>    2014-12-01 12:29:27 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-12-04 09:29:11 -0500
commit     9c1a5d38780e652275aa55362dbee0d7e827e069 (patch)
tree       0fc252e3948a30b78974e3ba8979d5aa231de22f /include/linux/kvm_host.h
parent     0e60b0799fedc495a5c57dbd669de3c10d72edd2 (diff)
kvm: optimize GFN to memslot lookup with large slots amount
The current linear search doesn't scale well when a large number of memslots is in use and the looked-up slot is not near the beginning of the memslots array. Taking into account that memslots don't overlap, it's possible to switch the sorting order of the memslots array from 'npages' to 'base_gfn' and use binary search for memslot lookup by GFN. As a result of switching to binary search, lookup times are reduced with a large number of memslots.

The following is a table of search_memslots() cycles during WS2008R2 guest boot:

                                boot,                boot + ~10 min
                                mostly same          of using it,
                                slot lookup          randomized lookup
                 max            average              average
                 cycles         cycles               cycles
 13 slots      : 1450            28                    30
 13 slots      : 1400            30                    40   binary search
117 slots      : 13000           30                   460
117 slots      : 2000            35                   180   binary search

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
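For illustration, here is a minimal userspace sketch of the lookup scheme the patch describes. The names (struct memslot, lookup_gfn, used_slots) are simplified stand-ins rather than the kernel's definitions; the sketch assumes the slot array is kept sorted by base_gfn in descending order and binary-searches for the first slot whose base_gfn does not exceed the requested GFN.

/*
 * Minimal userspace sketch of the lookup (illustrative names, not the
 * kernel's): slots[] is sorted by base_gfn in descending order and
 * used_slots counts the populated entries.
 */
#include <stddef.h>

typedef unsigned long long gfn_t;

struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
};

static struct memslot *lookup_gfn(struct memslot *slots, int used_slots,
				  gfn_t gfn)
{
	int start = 0, end = used_slots;

	/* Find the first (lowest-index) slot whose base_gfn is <= gfn. */
	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;		/* answer is at mid or before it */
		else
			start = mid + 1;	/* gfn lies below this slot */
	}

	/* The candidate must actually cover gfn; holes between slots miss. */
	if (start < used_slots &&
	    gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];

	return NULL;
}

The kernel version in the diff below additionally caches the index of the last hit in lru_slot and checks it before searching, so repeated accesses to the same slot skip the binary search entirely.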
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--  include/linux/kvm_host.h  34
1 file changed, 22 insertions, 12 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1a371447fd45..193bca68372d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -354,6 +354,7 @@ struct kvm_memslots {
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
 	atomic_t lru_slot;
+	int used_slots;
 };
 
 struct kvm {
@@ -791,19 +792,28 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
+	int start = 0, end = slots->used_slots;
 	int slot = atomic_read(&slots->lru_slot);
-	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+	struct kvm_memory_slot *memslots = slots->memslots;
 
-	if (gfn >= memslot->base_gfn &&
-	    gfn < memslot->base_gfn + memslot->npages)
-		return memslot;
+	if (gfn >= memslots[slot].base_gfn &&
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+		return &memslots[slot];
 
-	kvm_for_each_memslot(memslot, slots)
-		if (gfn >= memslot->base_gfn &&
-		      gfn < memslot->base_gfn + memslot->npages) {
-			atomic_set(&slots->lru_slot, memslot - slots->memslots);
-			return memslot;
-		}
+	while (start < end) {
+		slot = start + (end - start) / 2;
+
+		if (gfn >= memslots[slot].base_gfn)
+			end = slot;
+		else
+			start = slot + 1;
+	}
+
+	if (gfn >= memslots[start].base_gfn &&
+	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+		atomic_set(&slots->lru_slot, start);
+		return &memslots[start];
+	}
 
 	return NULL;
 }
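As a quick sanity check of the earlier sketch (again purely illustrative, reusing the hypothetical lookup_gfn helper and struct memslot defined above), two non-overlapping slots sorted by descending base_gfn resolve as expected, and a GFN that falls in the gap between them misses:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Two non-overlapping slots, highest base_gfn first. */
	struct memslot slots[] = {
		{ .base_gfn = 0x100000, .npages = 0x800 },
		{ .base_gfn = 0x0,      .npages = 0x100 },
	};

	assert(lookup_gfn(slots, 2, 0x100010) == &slots[0]);
	assert(lookup_gfn(slots, 2, 0x50) == &slots[1]);
	assert(lookup_gfn(slots, 2, 0x9000) == NULL);	/* falls in the gap */

	printf("GFN lookups resolved as expected\n");
	return 0;
}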