author    | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2010-04-12 06:35:35 -0400
committer | Avi Kivity <avi@redhat.com>                       | 2010-04-20 06:06:55 -0400
commit    | 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d
tree      | ae8ce63cecab98c036c0d76422de42cf78e042f4 /virt/kvm/kvm_main.c
parent    | 77662e0028c7c63e34257fda03ff9625c59d939d
KVM: fix the handling of dirty bitmaps to avoid overflows
An int is not wide enough to store the size of a dirty bitmap.
This patch fixes the problem by introducing a wrapper
function that calculates the size of a dirty bitmap.
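
The wrapper itself is introduced outside this diffstat (which is limited
to virt/kvm/kvm_main.c). Judging from the call sites below and the inline
expression it replaces, it presumably reads roughly like this sketch:

/* Assumed shape of the new helper; the actual definition lives in a
 * header not shown here.  It performs the same computation the old
 * code did inline, but returns unsigned long so the byte count of a
 * large slot's bitmap cannot overflow an int. */
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}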
Note: in mark_page_dirty(), we also have to account for the fact that
__set_bit() takes its bit offset as an int, not a long.
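
A standalone illustration (not from the patch) of why the split matters:
a relative gfn in a hypothetical huge slot can exceed INT_MAX, so passing
it straight into an int parameter would truncate, while the remainder
after dividing by BITS_PER_LONG always fits. Assumes a 64-bit unsigned long:

/* Illustrative only: shows the truncation the patch avoids. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long rel_gfn = 0x100000000UL;		/* > INT_MAX */
	unsigned long word = rel_gfn / BITS_PER_LONG;	/* word index: no overflow */
	int offset = rel_gfn % BITS_PER_LONG;		/* always < BITS_PER_LONG */

	printf("rel_gfn as int: %d (truncated)\n", (int)rel_gfn);
	printf("word %lu, offset %d\n", word, offset);
	return 0;
}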
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- | virt/kvm/kvm_main.c | 13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..364daacafb58 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -648,7 +648,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +780,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 
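
For context, a hedged sketch of the userspace side of kvm_get_dirty_log():
a VMM fetching the bitmap through the KVM_GET_DIRTY_LOG ioctl has to size
its buffer with the same rounded-up calculation, since the kernel copies
the full kvm_dirty_bitmap_bytes() worth of data. The helper name and error
handling here are illustrative, not from KVM's documentation:

/* Hypothetical helper: fetch the dirty bitmap for one memory slot.
 * The buffer is sized exactly as the kernel sizes the slot's bitmap:
 * npages rounded up to a multiple of BITS_PER_LONG, divided by 8. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

unsigned long *get_dirty_bitmap(int vm_fd, __u32 slot, unsigned long npages)
{
	unsigned long bits = 8 * sizeof(unsigned long);
	unsigned long bytes = (npages + bits - 1) / bits * (bits / 8);
	unsigned long *bitmap = calloc(1, bytes);
	struct kvm_dirty_log log = { .slot = slot };

	if (!bitmap)
		return NULL;
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;
}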