author		Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2010-10-27 05:23:54 -0400
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:28:46 -0500
commit		515a01279a187415322a80736800a7d6325876ab (patch)
tree		8690a1b26013cb385b9d143c83301bdab758dd48 /arch/x86/kvm/x86.c
parent		a36a57b1a19bce17b67f5c6f43460baf664ae5fa (diff)
KVM: pre-allocate one more dirty bitmap to avoid vmalloc()
Currently, x86's kvm_vm_ioctl_get_dirty_log() needs to allocate a bitmap with
vmalloc() that will be used in the next round of logging, and this has been
hurting VGA updates and live migration: vmalloc() burns extra system time,
triggers TLB flushes, etc.

This patch resolves the issue by pre-allocating one more bitmap and switching
between the two bitmaps during dirty logging.

Performance improvement: I measured the VGA-update case with trace-cmd; the
result was 1.5 times faster than the original code. For live migration, the
improvement ratio depends on the workload and the guest memory size; in
general, the larger the memory, the bigger the benefit.

Note: this does not change other architectures' logic, but the allocation
size doubles. This increases actual memory consumption only when the new size
changes the number of pages allocated by vmalloc().

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
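[Editor's sketch] To make the two-bitmap switch concrete, here is a minimal
standalone userspace sketch of the technique, not the kernel code itself: one
buffer holds two bitmaps back to back, dirty_bitmap_head marks its start, and
each GET_DIRTY_LOG-style call flips the active half. Only dirty_bitmap,
dirty_bitmap_head, and the n / sizeof(long) arithmetic come from the patch;
the helper names and the plain calloc()/free() are illustrative.

#include <stdlib.h>
#include <string.h>

struct memslot {
	unsigned long *dirty_bitmap;      /* half the VM currently writes to */
	unsigned long *dirty_bitmap_head; /* start of the double-sized buffer */
	size_t n;                         /* bytes in one bitmap */
};

/* One allocation at slot creation, sized for two bitmaps, replacing the
 * per-ioctl vmalloc() the patch removes. */
static int create_dirty_bitmap(struct memslot *slot, size_t n)
{
	slot->dirty_bitmap_head = calloc(2, n);
	if (!slot->dirty_bitmap_head)
		return -1;
	slot->dirty_bitmap = slot->dirty_bitmap_head;
	slot->n = n;
	return 0;
}

/* Zero the inactive half and make it current; hand the old half back to
 * the caller for copying out. Mirrors the first hunk below: if the active
 * bitmap sits at the head, the other one is n bytes further on, i.e.
 * n / sizeof(long) unsigned longs. */
static unsigned long *swap_dirty_bitmap(struct memslot *slot)
{
	unsigned long *next = slot->dirty_bitmap_head;
	unsigned long *old = slot->dirty_bitmap;

	if (old == next)
		next += slot->n / sizeof(long);
	memset(next, 0, slot->n);
	slot->dirty_bitmap = next;
	return old;
}

int main(void)
{
	struct memslot slot;

	if (create_dirty_bitmap(&slot, 64))
		return 1;
	slot.dirty_bitmap[0] = 0x1;            /* guest dirties a page */
	unsigned long *full = swap_dirty_bitmap(&slot);
	int ok = (full[0] == 0x1 && slot.dirty_bitmap[0] == 0);
	free(slot.dirty_bitmap_head);
	return ok ? 0 : 1;                     /* 0: flip worked as expected */
}

In the real patch, n is the byte size of one bitmap for the slot, so the
dirty_bitmap += n / sizeof(long) step in the first hunk lands exactly on the
second pre-allocated copy.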
Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2a785472431..35f82f2c66f6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3208,18 +3208,15 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		r = -ENOMEM;
-		dirty_bitmap = vmalloc(n);
-		if (!dirty_bitmap)
-			goto out;
+		dirty_bitmap = memslot->dirty_bitmap_head;
+		if (memslot->dirty_bitmap == dirty_bitmap)
+			dirty_bitmap += n / sizeof(long);
 		memset(dirty_bitmap, 0, n);
 
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots) {
-			vfree(dirty_bitmap);
+		if (!slots)
 			goto out;
-		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
 		slots->generation++;
@@ -3235,11 +3232,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-			vfree(dirty_bitmap);
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
 			goto out;
-		}
-		vfree(dirty_bitmap);
 	} else {
 		r = -EFAULT;
 		if (clear_user(log->dirty_bitmap, n))
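
[Editor's sketch] The allocation side is outside this diffstat; the commit
message only says that every architecture's bitmap allocation doubles, which
happens in the generic memslot setup code rather than in x86.c. A sketch of
what that implies, assuming a helper of roughly this shape; the function name
and exact placement are assumptions, while dirty_bitmap, dirty_bitmap_head,
and kvm_dirty_bitmap_bytes() are names from this era of the code:

/* Assumed shape of the memslot-side allocation: vmalloc() twice the
 * single-bitmap size once, point dirty_bitmap_head at it, and let the
 * ioctl above flip between the two halves. */
static int create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = vmalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	memset(memslot->dirty_bitmap, 0, dirty_bytes);
	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
	return 0;
}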