about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorIgor Mammedov <imammedo@redhat.com>2014-12-01 12:29:25 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-12-04 09:29:10 -0500
commitd4ae84a02bc65cec29608bc417a969fc2ec75449 (patch)
treefdf2e5f189783ca5a47407c5a64dbea7f243e86a /include/linux
parent7f379cff11fb9e00e0ff9eff8fbc39ddfd4b1bec (diff)
kvm: search_memslots: add simple LRU memslot caching
In typical guest boot workload only 2-3 memslots are used extensively, and at that it's mostly the same memslot lookup operation. Adding LRU cache improves average lookup time from 46 to 28 cycles (~40%) for this workload. Signed-off-by: Igor Mammedov <imammedo@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/kvm_host.h12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 231dd9472226..1a371447fd45 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -353,6 +353,7 @@ struct kvm_memslots {
 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
+	atomic_t lru_slot;
 };
 
 struct kvm {
@@ -790,12 +791,19 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+
+	if (gfn >= memslot->base_gfn &&
+	    gfn < memslot->base_gfn + memslot->npages)
+		return memslot;
 
 	kvm_for_each_memslot(memslot, slots)
 		if (gfn >= memslot->base_gfn &&
-		    gfn < memslot->base_gfn + memslot->npages)
+		    gfn < memslot->base_gfn + memslot->npages) {
+			atomic_set(&slots->lru_slot, memslot - slots->memslots);
 			return memslot;
+		}
 
 	return NULL;
 }