 arch/mips/kvm/Kconfig |  1 +
 arch/mips/kvm/mips.c  | 42 +++++++++++++++++++++++-------------------
 arch/mips/kvm/mmu.c   | 22 ++++++++++++++++++++++
 3 files changed, 46 insertions(+), 19 deletions(-)
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 7c56d6b124d1..85c4593b634a 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
 	select EXPORT_UASM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_MMIO
 	select SRCU
 	---help---
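
Selecting KVM_GENERIC_DIRTYLOG_READ_PROTECT opts the architecture into the generic kvm_get_dirty_log_protect() helper, which walks the dirty bitmap one word at a time, atomically snapshots and clears each word, and hands the word to an arch callback that write protects the pages it covers. Below is a minimal, runnable userspace model of that loop, not the kernel code itself; the _model names are hypothetical stand-ins.

#include <stdatomic.h>
#include <stdio.h>

#define NWORDS 2

/* Stand-in for the arch hook, kvm_arch_mmu_enable_log_dirty_pt_masked(). */
static void protect_mask_model(unsigned long gfn_offset, unsigned long mask)
{
	printf("write protect gfns at offset %lu, mask %#lx\n",
	       gfn_offset, mask);
}

int main(void)
{
	_Atomic unsigned long dirty[NWORDS] = { 0x36, 0 };	/* live dirty bitmap */
	unsigned long snap[NWORDS] = { 0 };			/* snapshot buffer */

	for (int i = 0; i < NWORDS; i++) {
		if (!atomic_load(&dirty[i]))
			continue;
		/* Step 1: snapshot the word and clear it atomically. */
		snap[i] = atomic_exchange(&dirty[i], 0);
		/* Step 2: write protect the pages this word covers. */
		protect_mask_model(i * 64UL, snap[i]);
	}
	/* Step 3 in the kernel copies snap[] out to userspace. */
	return 0;
}

The atomic exchange is what makes the snapshot safe against vcpus dirtying pages concurrently: a bit set after the exchange simply lands in the next log read.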
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 01f3fa1b9f0e..0b84b336ee4d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1086,42 +1086,46 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 	return r;
 }
 
-/* Get (and clear) the dirty memory log for a memory slot. */
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
+ * does not preclude user space subsequent dirty log read. Flushing TLB ensures
+ * writes will be marked dirty for next log read.
+ *
+ *   1. Take a snapshot of the bit and clear it if needed.
+ *   2. Write protect the corresponding page.
+ *   3. Copy the snapshot to the userspace.
+ *   4. Flush TLB's if needed.
+ */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	unsigned long ga, ga_end;
-	int is_dirty = 0;
+	bool is_dirty = false;
 	int r;
-	unsigned long n;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
-		goto out;
+	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 
-	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		slots = kvm_memslots(kvm);
 		memslot = id_to_memslot(slots, log->slot);
 
-		ga = memslot->base_gfn << PAGE_SHIFT;
-		ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
-			 ga_end);
-
-		n = kvm_dirty_bitmap_bytes(memslot);
-		memset(memslot->dirty_bitmap, 0, n);
+		/* Let implementation handle TLB/GVA invalidation */
+		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
 	}
 
-	r = 0;
-out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
-
 }
 
 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
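
This function sits behind the KVM_GET_DIRTY_LOG ioctl. For context, here is a minimal caller-side sketch of that ioctl; the read_dirty_log() wrapper is hypothetical, and vm_fd, slot and the bitmap are assumed to come from earlier VM setup.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int read_dirty_log(int vm_fd, int slot, void *bitmap)
{
	struct kvm_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;		/* memslot id to query */
	log.dirty_bitmap = bitmap;	/* one bit per page in the slot */

	/*
	 * On success the kernel has snapshotted and cleared the dirty
	 * bits, write protected the dirty pages, and (on MIPS, after
	 * this patch) flushed the shadow MMU for the slot.
	 */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}

Per the KVM API documentation, the bitmap must hold one bit per page in the slot, rounded up to a multiple of 64 bits.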
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 892fd0ede718..63a6d542ecb3 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -429,6 +429,28 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
 }
 
 /**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot associated with mask
+ * @gfn_offset:	The gfn offset in memory slot
+ * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
+ *		slot to be write protected
+ *
+ * Walks bits set in mask write protects the associated pte's. Caller must
+ * acquire @kvm->mmu_lock.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	gfn_t base_gfn = slot->base_gfn + gfn_offset;
+	gfn_t start = base_gfn + __ffs(mask);
+	gfn_t end = base_gfn + __fls(mask);
+
+	kvm_mips_mkclean_gpa_pt(kvm, start, end);
+}
+
+/**
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:	VCPU pointer.
  * @gpa:	Guest physical address of fault.
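
Because the generic code passes mask one bitmap word at a time, the new hook only needs to bound the dirty region: __ffs(mask) gives the lowest set bit and __fls(mask) the highest, and kvm_mips_mkclean_gpa_pt() write protects that whole inclusive gfn range, conservatively including any clean pages between the two. A standalone sketch with hypothetical values, using compiler builtins in place of the kernel helpers:

#include <stdio.h>

/* Userspace stand-ins for the kernel's __ffs()/__fls() bit helpers. */
static unsigned long ffs_model(unsigned long x)
{
	return __builtin_ctzl(x);			/* lowest set bit */
}

static unsigned long fls_model(unsigned long x)
{
	return 8 * sizeof(long) - 1 - __builtin_clzl(x);	/* highest set bit */
}

int main(void)
{
	unsigned long base_gfn = 0x1000;	/* slot->base_gfn + gfn_offset */
	unsigned long mask = 0x36;		/* bits 1, 2, 4 and 5 dirty */
	unsigned long start = base_gfn + ffs_model(mask);	/* 0x1001 */
	unsigned long end = base_gfn + fls_model(mask);		/* 0x1005 */

	/*
	 * kvm_mips_mkclean_gpa_pt() would write protect gfns
	 * 0x1001..0x1005 inclusive: one contiguous span that also
	 * covers the clean gfn at bit 3.
	 */
	printf("write protect gfns %#lx..%#lx\n", start, end);
	return 0;
}

Write protecting the occasional clean page in the middle of the span is harmless; it only costs an extra fault if the guest touches it again.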