author		Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2010-04-12 06:35:35 -0400
committer	Avi Kivity <avi@redhat.com>	2010-04-20 06:06:55 -0400
commit		87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d
tree		ae8ce63cecab98c036c0d76422de42cf78e042f4
parent		77662e0028c7c63e34257fda03ff9625c59d939d
KVM: fix the handling of dirty bitmaps to avoid overflows
Int is not long enough to store the size of a dirty bitmap.

This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.

Note: in mark_page_dirty(), we have to consider the fact that
__set_bit() takes the offset as int, not long.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
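For reference, below is a standalone userspace sketch (not part of the patch, not kernel code) of the two points above: the byte-size arithmetic that the patch centralizes in kvm_dirty_bitmap_bytes(), and the word/offset split that mark_page_dirty() now performs so the bit helpers only ever see a small int offset. The ALIGN() stand-in, the helper name dirty_bitmap_bytes(), and the slot size are illustrative assumptions; the example assumes a 64-bit unsigned long.

/*
 * Standalone userspace sketch, not kernel code: shows how an 'int'
 * byte count can truncate for a very large slot, and how splitting
 * rel_gfn keeps the bit offset small.  ALIGN() is a minimal stand-in
 * for the kernel macro; the slot size is artificial.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* One dirty bit per guest page, padded to whole longs (as in the patch). */
static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	return ALIGN(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
	/* Artificially huge slot, chosen only to make truncation visible. */
	unsigned long npages = 1UL << 35;

	unsigned long bytes = dirty_bitmap_bytes(npages);
	/* What an 'int n' would hold (out-of-range: implementation-defined). */
	int as_int = (int)bytes;

	printf("bitmap: %lu bytes; truncated to int: %d\n", bytes, as_int);

	/*
	 * mark_page_dirty() now indexes by word plus a small int offset,
	 * so the bit helpers never see a value larger than BITS_PER_LONG
	 * even when rel_gfn itself does not fit in an int.
	 */
	unsigned long rel_gfn = npages - 1;
	unsigned long word = rel_gfn / BITS_PER_LONG;
	int offset = (int)(rel_gfn % BITS_PER_LONG);

	printf("rel_gfn %lu -> word %lu, bit %d\n", rel_gfn, word, offset);
	return 0;
}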
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	9
-rw-r--r--	arch/powerpc/kvm/book3s.c	5
-rw-r--r--	arch/x86/kvm/x86.c	5
-rw-r--r--	include/linux/kvm_host.h	5
-rw-r--r--	virt/kvm/kvm_main.c	13
5 files changed, 24 insertions, 13 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 73c5c2b05f64..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 25da07fd9f77..604af29b71ed 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
 	int is_dirty = 0;
-	int r, n;
+	int r;
+	unsigned long n;
 
 	mutex_lock(&kvm->slots_lock);
 
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		kvm_for_each_vcpu(n, vcpu, kvm)
 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9ad3d064c781..45aa90f8cc57 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2612,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r, n, i;
+	int r, i;
 	struct kvm_memory_slot *memslot;
+	unsigned long n;
 	unsigned long is_dirty = 0;
 	unsigned long *dirty_bitmap = NULL;
 
@@ -2628,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	r = -ENOMEM;
 	dirty_bitmap = vmalloc(n);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a3fd0f91d943..9ad825e1c79b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -119,6 +119,11 @@ struct kvm_memory_slot {
 	int user_alloc;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..364daacafb58 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -648,7 +648,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +780,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 