author | Igor Mammedov <imammedo@redhat.com> | 2014-11-13 18:00:13 -0500
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-11-14 04:49:04 -0500
commit | 063584d44377ebde5ebc6e99cedc1bc6561939d7 (patch) |
tree | c7ce69f00f8b1edfd89ca621d51a590709b5f14f /virt |
parent | 1d4e7e3c0bca747d0fc54069a6ab8393349431c0 (diff) |
kvm: memslots: replace heap sort with an insertion sort pass
memslots is a sorted array. When a slot is changed, heapsort (lib/sort.c)
would take O(n log n) time to update it; an optimized insertion sort will
only cost O(n) on an array with just one item out of order.
Replace sort() with a custom sort that takes advantage of the memslots
usage pattern and the known position of the changed slot.
Performance change of 128 memslot insertions with gradually increasing
size (the worst case):

 | heap sort | custom sort
---|---|---
max: | 249747 cycles | 2500 cycles

With the custom sort, update time drops by ~98% compared to the original
heap sort.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
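As an aside, the single-pass insertion described in the message can be sketched outside of KVM. The sketch below is purely illustrative: reinsert_descending(), the plain integer array, and all other names are made up for this example and are not part of the patch.

```c
#include <stddef.h>

/*
 * Illustrative sketch, not kernel code: "sizes" is kept sorted in
 * descending order, like the memslots array.  The entry at "pos" has
 * just been changed to "new_size", so at most that one entry is out
 * of order.  Shift its neighbours over it until it lands in its
 * sorted position; no full re-sort is needed.
 */
void reinsert_descending(unsigned long *sizes, size_t n,
                         size_t pos, unsigned long new_size)
{
        while (1) {
                if (pos + 1 < n && new_size < sizes[pos + 1]) {
                        sizes[pos] = sizes[pos + 1];    /* entry shrank: move it right */
                        pos++;
                } else if (pos > 0 && new_size > sizes[pos - 1]) {
                        sizes[pos] = sizes[pos - 1];    /* entry grew: move it left */
                        pos--;
                } else {
                        sizes[pos] = new_size;          /* neighbours consistent: done */
                        break;
                }
        }
}
```

Because the array starts sorted and only the changed entry can be out of place, the loop walks in one direction and shifts each element at most once, which is where the O(n) bound cited in the message comes from.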
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 56
1 file changed, 28 insertions, 28 deletions
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3a31ec6e396b..c0c2202e6c4f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -668,31 +668,37 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
         return 0;
 }
 
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
-        struct kvm_memory_slot *s1, *s2;
-
-        s1 = (struct kvm_memory_slot *)slot1;
-        s2 = (struct kvm_memory_slot *)slot2;
-
-        if (s1->npages < s2->npages)
-                return 1;
-        if (s1->npages > s2->npages)
-                return -1;
-
-        return 0;
-}
-
 /*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert memslot and re-sort memslots based on their size,
+ * so the larger slots will get better fit. Sorting algorithm
+ * takes advantage of having initially sorted array and
+ * known changed memslot position.
  */
-static void sort_memslots(struct kvm_memslots *slots)
+static void insert_memslot(struct kvm_memslots *slots,
+                           struct kvm_memory_slot *new)
 {
-        int i;
+        int i = slots->id_to_index[new->id];
+        struct kvm_memory_slot *old = id_to_memslot(slots, new->id);
+        struct kvm_memory_slot *mslots = slots->memslots;
 
-        sort(slots->memslots, KVM_MEM_SLOTS_NUM,
-              sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+        if (new->npages == old->npages) {
+                *old = *new;
+                return;
+        }
+
+        while (1) {
+                if (i < (KVM_MEM_SLOTS_NUM - 1) &&
+                    new->npages < mslots[i + 1].npages) {
+                        mslots[i] = mslots[i + 1];
+                        i++;
+                } else if (i > 0 && new->npages > mslots[i - 1].npages) {
+                        mslots[i] = mslots[i - 1];
+                        i--;
+                } else {
+                        mslots[i] = *new;
+                        break;
+                }
+        }
 
         for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
                 slots->id_to_index[slots->memslots[i].id] = i;
@@ -702,13 +708,7 @@ static void update_memslots(struct kvm_memslots *slots,
                             struct kvm_memory_slot *new)
 {
         if (new) {
-                int id = new->id;
-                struct kvm_memory_slot *old = id_to_memslot(slots, id);
-                unsigned long npages = old->npages;
-
-                *old = *new;
-                if (new->npages != npages)
-                        sort_memslots(slots);
+                insert_memslot(slots, new);
         }
 }
 
```
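For completeness, here is a tiny driver for the illustrative reinsert_descending() sketch shown after the commit message. It is again a hypothetical example, not part of the patch; it shows a grown entry bubbling to the front of a descending array in a single pass.

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* The illustrative helper sketched after the commit message above. */
void reinsert_descending(unsigned long *sizes, size_t n,
                         size_t pos, unsigned long new_size);

int main(void)
{
        /* Descending by size, as the memslots array is kept. */
        unsigned long sizes[] = { 512, 256, 128, 64 };

        /* Grow the third entry from 128 to 1024 "pages". */
        reinsert_descending(sizes, 4, 2, 1024);

        assert(sizes[0] == 1024 && sizes[1] == 512 &&
               sizes[2] == 256 && sizes[3] == 64);
        printf("%lu %lu %lu %lu\n", sizes[0], sizes[1], sizes[2], sizes[3]);
        return 0;
}
```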