author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2011-11-24 04:40:57 -0500
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:17:40 -0500
commit	bf3e05bc1e2781d5d8d3ddb2d8bf2d6ec207e5cb (patch)
tree	7312367143f00ec1c051831f3a4e6dcc7f8acd92 /virt/kvm/kvm_main.c
parent	28a37544fb0223eb9805d2567b88f7360edec52a (diff)
KVM: sort memslots by size and use linear search
Sort the memslots by size and use a linear search to find a slot, so
that the larger memslots get a better fit.

The idea is from Avi.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
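
The idea in one screen: keep the memslot array sorted by size in descending order and resolve a gfn with a plain linear scan, so the largest slots, which cover most of guest memory and therefore serve most lookups, are tested first. Here is a minimal user-space sketch of that approach under a toy slot layout; the names mirror the patch, but none of this is kernel code.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long gfn_t;

struct slot {
	int id;			/* stable identity, survives sorting */
	gfn_t base_gfn;		/* first guest frame covered */
	unsigned long npages;	/* size of the slot; 0 means unused */
	unsigned int flags;
};

/* Descending by npages, like cmp_memslot() in the patch. */
static int cmp_slot(const void *a, const void *b)
{
	const struct slot *s1 = a, *s2 = b;

	if (s1->npages < s2->npages)
		return 1;
	if (s1->npages > s2->npages)
		return -1;
	return 0;
}

/* Linear scan; after sorting, the big slots sit at the front. */
static struct slot *search(struct slot *slots, int n, gfn_t gfn)
{
	int i;

	for (i = 0; i < n; i++)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return &slots[i];
	return NULL;
}

int main(void)
{
	struct slot slots[] = {
		{ .id = 0, .base_gfn = 0xf0000, .npages = 16 },
		{ .id = 1, .base_gfn = 0x00000, .npages = 0x80000 },	/* main RAM */
		{ .id = 2, .base_gfn = 0xc0000, .npages = 64 },
	};
	int n = sizeof(slots) / sizeof(slots[0]);
	struct slot *s;

	qsort(slots, n, sizeof(slots[0]), cmp_slot);
	s = search(slots, n, 0x1234);	/* found on the first iteration */
	if (s)
		printf("gfn 0x1234 -> slot id %d\n", s->id);
	return 0;
}

Without the sort, the same lookup might walk past every small slot before reaching the one slot that actually backs most of guest memory.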
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	79
1 file changed, 57 insertions(+), 22 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7b6084999424..6e8eb15dd30b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
+static void kvm_init_memslots_id(struct kvm *kvm)
+{
+	int i;
+	struct kvm_memslots *slots = kvm->memslots;
+
+	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+		slots->memslots[i].id = i;
+}
+
 static struct kvm *kvm_create_vm(void)
 {
 	int r, i;
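
Why the new kvm_init_memslots_id() above is needed: once the array can be re-ordered, a slot's array index no longer identifies it, so each slot must carry its own id, assigned once at VM creation. In the kernel, the id-based lookup is id_to_memslot(), introduced by the parent commit and defined outside this file; in the user-space sketch, a hypothetical find_by_id() plays that role.

/* Assign stable ids up front, mirroring kvm_init_memslots_id(). */
static void init_slot_ids(struct slot *slots, int n)
{
	int i;

	for (i = 0; i < n; i++)
		slots[i].id = i;
}

/* Find a slot by id no matter where sorting has moved it.
 * Illustrative stand-in, not a function this patch adds. */
static struct slot *find_by_id(struct slot *slots, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (slots[i].id == id)
			return &slots[i];
	return NULL;
}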
@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
 		goto out_err_nosrcu;
+	kvm_init_memslots_id(kvm);
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_nosrcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -630,15 +640,54 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 }
 #endif /* !CONFIG_S390 */
 
+static struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	kvm_for_each_memslot(memslot, slots)
+		if (gfn >= memslot->base_gfn &&
+		      gfn < memslot->base_gfn + memslot->npages)
+			return memslot;
+
+	return NULL;
+}
+
+static int cmp_memslot(const void *slot1, const void *slot2)
+{
+	struct kvm_memory_slot *s1, *s2;
+
+	s1 = (struct kvm_memory_slot *)slot1;
+	s2 = (struct kvm_memory_slot *)slot2;
+
+	if (s1->npages < s2->npages)
+		return 1;
+	if (s1->npages > s2->npages)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Sort the memslots base on its size, so the larger slots
+ * will get better fit.
+ */
+static void sort_memslots(struct kvm_memslots *slots)
+{
+	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
+	      sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+}
+
 void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 {
 	if (new) {
 		int id = new->id;
 		struct kvm_memory_slot *old = id_to_memslot(slots, id);
+		unsigned long npages = old->npages;
 
 		*old = *new;
-		if (id >= slots->nmemslots)
-			slots->nmemslots = id + 1;
+		if (new->npages != npages)
+			sort_memslots(slots);
 	}
 
 	slots->generation++;
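
Note how the reworked update_memslots() re-sorts only when npages changed: a flags-only update cannot disturb an ordering keyed on size. And since the sort covers all KVM_MEM_SLOTS_NUM entries, empty slots (npages == 0) sink to the tail, so a deletion, where npages drops to zero, re-sorts like any other size change. Continuing the user-space sketch:

/* Copy the updated slot over the old one (found by its stable id),
 * then re-sort only if the sort key changed; assumes the id exists,
 * as update_memslots() does. */
static void update_slot(struct slot *slots, int n, const struct slot *new)
{
	struct slot *old = find_by_id(slots, n, new->id);
	unsigned long npages = old->npages;

	*old = *new;
	if (new->npages != npages)
		qsort(slots, n, sizeof(*slots), cmp_slot);
}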
@@ -980,14 +1029,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
 						gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	kvm_for_each_memslot(memslot, slots)
-		if (gfn >= memslot->base_gfn
-		      && gfn < memslot->base_gfn + memslot->npages)
-			return memslot;
-
-	return NULL;
+	return search_memslots(slots, gfn);
 }
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
@@ -998,20 +1040,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
-	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
-
-	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *memslot = &slots->memslots[i];
+	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 
-		if (memslot->flags & KVM_MEMSLOT_INVALID)
-			continue;
+	if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+	      memslot->flags & KVM_MEMSLOT_INVALID)
+		return 0;
 
-		if (gfn >= memslot->base_gfn
-		    && gfn < memslot->base_gfn + memslot->npages)
-			return 1;
-	}
-	return 0;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
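
Finally, kvm_is_visible_gfn() now reuses the common lookup instead of open-coding a loop, and it leans on the stored id to tell user slots from internal ones: ids below KVM_MEMORY_SLOTS are user-visible, while the ids from there up to KVM_MEM_SLOTS_NUM are reserved for KVM's private slots. Rounding out the sketch, with USER_SLOTS and INVALID_FLAG as illustrative stand-ins for KVM_MEMORY_SLOTS and KVM_MEMSLOT_INVALID:

#define USER_SLOTS	32		/* stand-in for KVM_MEMORY_SLOTS */
#define INVALID_FLAG	(1u << 0)	/* stand-in for KVM_MEMSLOT_INVALID */

/* A gfn is visible only if it falls inside a valid, user-defined
 * slot; mirrors the reworked kvm_is_visible_gfn(). */
static int is_visible(struct slot *slots, int n, gfn_t gfn)
{
	struct slot *s = search(slots, n, gfn);

	if (!s || s->id >= USER_SLOTS || (s->flags & INVALID_FLAG))
		return 0;
	return 1;
}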