 arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43da65feed49..e7488350ca16 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2415,27 +2415,60 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r;
-	int n;
+	int r, n, i;
 	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
+	unsigned long is_dirty = 0;
+	unsigned long *dirty_bitmap = NULL;
 
 	down_write(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = &kvm->memslots->memslots[log->slot];
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+	r = -ENOMEM;
+	dirty_bitmap = vmalloc(n);
+	if (!dirty_bitmap)
 		goto out;
+	memset(dirty_bitmap, 0, n);
+
+	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+		is_dirty = memslot->dirty_bitmap[i];
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		struct kvm_memslots *slots, *old_slots;
+
 		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
-		memslot = &kvm->memslots->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-		memset(memslot->dirty_bitmap, 0, n);
+
+		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+		if (!slots)
+			goto out_free;
+
+		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+		old_slots = kvm->memslots;
+		rcu_assign_pointer(kvm->memslots, slots);
+		synchronize_srcu_expedited(&kvm->srcu);
+		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+		kfree(old_slots);
 	}
+
 	r = 0;
+	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+		r = -EFAULT;
+out_free:
+	vfree(dirty_bitmap);
 out:
 	up_write(&kvm->slots_lock);
 	return r;
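
Note on the hunk above: the new code never clears the live dirty bitmap in place. It vmallocs a zeroed bitmap, copies the current memslots into a fresh kvm_memslots, points the copied slot at the zeroed bitmap, publishes the copy with rcu_assign_pointer(), waits out readers with synchronize_srcu_expedited(), and only then copies the old (still dirty) bitmap to userspace and frees it. The userspace sketch below mirrors that copy/publish/reclaim sequence with plain C11 atomics; every name in it (struct slots, current_slots, get_dirty_log()) is invented for illustration, and the SRCU grace period is only marked by a comment, so treat it as a sketch of the idea, not the kernel API.

/*
 * Illustrative userspace analogue (hypothetical names, C11, not the kernel
 * API): replace the published bitmap with a zeroed copy, then report and
 * reclaim the old one.  The SRCU grace period is only noted as a comment.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot  { unsigned long *dirty_bitmap; size_t nlongs; };
struct slots { struct slot slot[1]; };

static _Atomic(struct slots *) current_slots;

static int get_dirty_log(unsigned long *user_buf)
{
	struct slots *old = atomic_load(&current_slots);
	size_t nlongs = old->slot[0].nlongs;
	unsigned long *zeroed = calloc(nlongs, sizeof(*zeroed)); /* like vmalloc + memset */
	struct slots *fresh = malloc(sizeof(*fresh));            /* like kzalloc of kvm_memslots */

	if (!zeroed || !fresh) {
		free(zeroed);
		free(fresh);
		return -1;
	}

	*fresh = *old;                        /* memcpy(slots, kvm->memslots, ...) */
	fresh->slot[0].dirty_bitmap = zeroed; /* install the clean bitmap */

	/* publish the copy; stands in for rcu_assign_pointer(kvm->memslots, slots) */
	atomic_store_explicit(&current_slots, fresh, memory_order_release);

	/* a real implementation waits for readers here: synchronize_srcu_expedited() */

	/* hand the old, still-dirty bitmap to the caller, then reclaim it */
	memcpy(user_buf, old->slot[0].dirty_bitmap, nlongs * sizeof(unsigned long));
	free(old->slot[0].dirty_bitmap);      /* vfree(dirty_bitmap) */
	free(old);                            /* kfree(old_slots) */
	return 0;
}

int main(void)
{
	struct slots *init = malloc(sizeof(*init));
	unsigned long out[4] = { 0 };

	if (!init)
		return 1;
	init->slot[0].nlongs = 4;
	init->slot[0].dirty_bitmap = calloc(4, sizeof(unsigned long));
	if (!init->slot[0].dirty_bitmap)
		return 1;
	init->slot[0].dirty_bitmap[0] = 0x5;  /* pretend two pages were dirtied */
	atomic_store(&current_slots, init);

	if (get_dirty_log(out) == 0)
		printf("dirty word 0: %#lx\n", out[0]);  /* prints 0x5 */
	return 0;
}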