aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2011-11-24 04:40:57 -0500
committerAvi Kivity <avi@redhat.com>2011-12-27 04:17:40 -0500
commitbf3e05bc1e2781d5d8d3ddb2d8bf2d6ec207e5cb (patch)
tree7312367143f00ec1c051831f3a4e6dcc7f8acd92
parent28a37544fb0223eb9805d2567b88f7360edec52a (diff)
KVM: sort memslots by its size and use line search
Sort memslots based on their size and use linear search to find them, so that the larger memslots get a better fit. The idea is from Avi. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--virt/kvm/kvm_main.c79
2 files changed, 72 insertions, 25 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 123925cd3ae8..9efdf5c703a5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -231,8 +231,12 @@ struct kvm_irq_routing_table {};
231#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) 231#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
232#endif 232#endif
233 233
234/*
235 * Note:
236 * memslots are not sorted by id anymore, please use id_to_memslot()
237 * to get the memslot by its id.
238 */
234struct kvm_memslots { 239struct kvm_memslots {
235 int nmemslots;
236 u64 generation; 240 u64 generation;
237 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; 241 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
238}; 242};
@@ -310,7 +314,8 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
310 314
311#define kvm_for_each_memslot(memslot, slots) \ 315#define kvm_for_each_memslot(memslot, slots) \
312 for (memslot = &slots->memslots[0]; \ 316 for (memslot = &slots->memslots[0]; \
313 memslot < slots->memslots + (slots)->nmemslots; memslot++) 317 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
318 memslot++)
314 319
315int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); 320int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
316void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); 321void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -336,7 +341,14 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
336static inline struct kvm_memory_slot * 341static inline struct kvm_memory_slot *
337id_to_memslot(struct kvm_memslots *slots, int id) 342id_to_memslot(struct kvm_memslots *slots, int id)
338{ 343{
339 return &slots->memslots[id]; 344 int i;
345
346 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
347 if (slots->memslots[i].id == id)
348 return &slots->memslots[i];
349
350 WARN_ON(1);
351 return NULL;
340} 352}
341 353
342#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) 354#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7b6084999424..6e8eb15dd30b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
440 440
441#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 441#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
442 442
443static void kvm_init_memslots_id(struct kvm *kvm)
444{
445 int i;
446 struct kvm_memslots *slots = kvm->memslots;
447
448 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
449 slots->memslots[i].id = i;
450}
451
443static struct kvm *kvm_create_vm(void) 452static struct kvm *kvm_create_vm(void)
444{ 453{
445 int r, i; 454 int r, i;
@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
465 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 474 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
466 if (!kvm->memslots) 475 if (!kvm->memslots)
467 goto out_err_nosrcu; 476 goto out_err_nosrcu;
477 kvm_init_memslots_id(kvm);
468 if (init_srcu_struct(&kvm->srcu)) 478 if (init_srcu_struct(&kvm->srcu))
469 goto out_err_nosrcu; 479 goto out_err_nosrcu;
470 for (i = 0; i < KVM_NR_BUSES; i++) { 480 for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -630,15 +640,54 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
630} 640}
631#endif /* !CONFIG_S390 */ 641#endif /* !CONFIG_S390 */
632 642
643static struct kvm_memory_slot *
644search_memslots(struct kvm_memslots *slots, gfn_t gfn)
645{
646 struct kvm_memory_slot *memslot;
647
648 kvm_for_each_memslot(memslot, slots)
649 if (gfn >= memslot->base_gfn &&
650 gfn < memslot->base_gfn + memslot->npages)
651 return memslot;
652
653 return NULL;
654}
655
656static int cmp_memslot(const void *slot1, const void *slot2)
657{
658 struct kvm_memory_slot *s1, *s2;
659
660 s1 = (struct kvm_memory_slot *)slot1;
661 s2 = (struct kvm_memory_slot *)slot2;
662
663 if (s1->npages < s2->npages)
664 return 1;
665 if (s1->npages > s2->npages)
666 return -1;
667
668 return 0;
669}
670
671/*
672 * Sort the memslots base on its size, so the larger slots
673 * will get better fit.
674 */
675static void sort_memslots(struct kvm_memslots *slots)
676{
677 sort(slots->memslots, KVM_MEM_SLOTS_NUM,
678 sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
679}
680
633void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) 681void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
634{ 682{
635 if (new) { 683 if (new) {
636 int id = new->id; 684 int id = new->id;
637 struct kvm_memory_slot *old = id_to_memslot(slots, id); 685 struct kvm_memory_slot *old = id_to_memslot(slots, id);
686 unsigned long npages = old->npages;
638 687
639 *old = *new; 688 *old = *new;
640 if (id >= slots->nmemslots) 689 if (new->npages != npages)
641 slots->nmemslots = id + 1; 690 sort_memslots(slots);
642 } 691 }
643 692
644 slots->generation++; 693 slots->generation++;
@@ -980,14 +1029,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
980static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots, 1029static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
981 gfn_t gfn) 1030 gfn_t gfn)
982{ 1031{
983 struct kvm_memory_slot *memslot; 1032 return search_memslots(slots, gfn);
984
985 kvm_for_each_memslot(memslot, slots)
986 if (gfn >= memslot->base_gfn
987 && gfn < memslot->base_gfn + memslot->npages)
988 return memslot;
989
990 return NULL;
991} 1033}
992 1034
993struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1035struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
@@ -998,20 +1040,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
998 1040
999int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1041int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1000{ 1042{
1001 int i; 1043 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1002 struct kvm_memslots *slots = kvm_memslots(kvm);
1003
1004 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1005 struct kvm_memory_slot *memslot = &slots->memslots[i];
1006 1044
1007 if (memslot->flags & KVM_MEMSLOT_INVALID) 1045 if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
1008 continue; 1046 memslot->flags & KVM_MEMSLOT_INVALID)
1047 return 0;
1009 1048
1010 if (gfn >= memslot->base_gfn 1049 return 1;
1011 && gfn < memslot->base_gfn + memslot->npages)
1012 return 1;
1013 }
1014 return 0;
1015} 1050}
1016EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1051EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1017 1052