about | summary | refs | log | tree | commit | diff | stats
diff options: context, space, mode
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>2010-04-12 06:35:35 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2010-04-26 10:48:05 -0400
commitff5bfd4f4896c3f27dafcc4ae533b0d803968499 (patch)
tree06ff00917d803d3112ec379d3b75ad2e778a8afc
parentf1f52e1094fc4e39bfd3071e33803fbbf0543d49 (diff)
KVM: fix the handling of dirty bitmaps to avoid overflows
(Cherry-picked from commit 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d)

Int is not long enough to store the size of a dirty bitmap.

This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.

Note: in mark_page_dirty(), we have to consider the fact that
__set_bit() takes the offset as int, not long.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c   |  9
-rw-r--r--  arch/powerpc/kvm/book3s.c  |  5
-rw-r--r--  arch/x86/kvm/x86.c         |  4
-rw-r--r--  include/linux/kvm_host.h   |  5
-rw-r--r--  virt/kvm/kvm_main.c        | 13
5 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..d76279aaaea1 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..e6dc59558fc1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
 	int is_dirty = 0;
-	int r, n;
+	int r;
+	unsigned long n;
 
 	down_write(&kvm->slots_lock);
 
@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		kvm_for_each_vcpu(n, vcpu, kvm)
 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd3dc54e2fc4..e0f010e53f63 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2343,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -2359,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..ddb7d5840de8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -116,6 +116,11 @@ struct kvm_memory_slot {
 	int user_alloc;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a944be392d6e..9dd98cb25573 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -636,7 +636,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -719,7 +719,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -731,7 +731,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1073,10 +1073,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 