author     Kai Huang <kai.huang@linux.intel.com>    2015-01-27 21:54:27 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>      2015-01-29 09:31:41 -0500
commit     88178fd4f7187bbe290c5d373fd44aabec891934 (patch)
tree       997b40341e4dccca84af8a570e33a493120f402d
parent     1c91cad42366ce0799ca17e7ad6995418741d012 (diff)
KVM: x86: Add new dirty logging kvm_x86_ops for PML
This patch adds new kvm_x86_ops dirty logging hooks to enable/disable dirty
logging for a particular memory slot, and to flush potentially logged dirty
GPAs before reporting slot->dirty_bitmap to userspace. KVM x86 common code
calls these hooks when they are available, so the PML logic can be kept
VMX-specific. SVM is not affected, as these hooks remain NULL there.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/include/asm/kvm_host.h | 25 +++++++++++++++++++++++++
 arch/x86/kvm/mmu.c              |  6 +++++-
 arch/x86/kvm/x86.c              | 71 ++++++++++++++++++++++++++++++++++-----
 3 files changed, 93 insertions(+), 9 deletions(-)
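
To make the dispatch pattern concrete before the diff itself, here is a
minimal, self-contained userspace sketch of the optional-hook scheme this
patch introduces: common code prefers the arch hook when it is non-NULL and
falls back to software write protection otherwise. All types and names below
are simplified stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel types. */
struct kvm { int id; };
struct kvm_memory_slot { unsigned long flags; };

/* Optional arch hook: NULL means "no hardware-assisted dirty logging". */
struct x86_ops {
	void (*slot_enable_log_dirty)(struct kvm *, struct kvm_memory_slot *);
};

static void sw_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	printf("fallback: write-protect all pages in the slot\n");
}

static void hw_enable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	printf("PML-style hook: clear D-bits instead of write-protecting\n");
}

/* The pattern the patch introduces: prefer the hook, else fall back. */
static void enable_dirty_logging(struct x86_ops *ops, struct kvm *kvm,
				 struct kvm_memory_slot *slot)
{
	if (ops->slot_enable_log_dirty)
		ops->slot_enable_log_dirty(kvm, slot);
	else
		sw_write_protect(kvm, slot);
}

int main(void)
{
	struct kvm vm = { 0 };
	struct kvm_memory_slot slot = { 0 };
	struct x86_ops svm_like = { 0 };              /* hook left NULL */
	struct x86_ops vmx_like = { hw_enable_log_dirty };

	enable_dirty_logging(&svm_like, &vm, &slot);  /* falls back */
	enable_dirty_logging(&vmx_like, &vm, &slot);  /* uses the hook */
	return 0;
}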
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 67a98d793bf2..57916ecb9b92 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -802,6 +802,31 @@ struct kvm_x86_ops {
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 
 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
+
+	/*
+	 * Arch-specific dirty logging hooks. These hooks are only supposed
+	 * to be valid if the specific arch has a hardware-accelerated dirty
+	 * logging mechanism. Currently this is only PML on VMX.
+	 *
+	 *  - slot_enable_log_dirty:
+	 *	called when enabling log dirty mode for the slot.
+	 *  - slot_disable_log_dirty:
+	 *	called when disabling log dirty mode for the slot.
+	 *	also called when the slot is created with log dirty disabled.
+	 *  - flush_log_dirty:
+	 *	called before reporting dirty_bitmap to userspace.
+	 *  - enable_log_dirty_pt_masked:
+	 *	called when re-enabling log dirty for the GFNs in the mask
+	 *	after the corresponding bits are cleared in slot->dirty_bitmap.
+	 */
+	void (*slot_enable_log_dirty)(struct kvm *kvm,
+				      struct kvm_memory_slot *slot);
+	void (*slot_disable_log_dirty)(struct kvm *kvm,
+				       struct kvm_memory_slot *slot);
+	void (*flush_log_dirty)(struct kvm *kvm);
+	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+					   struct kvm_memory_slot *slot,
+					   gfn_t offset, unsigned long mask);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ec62ecd2d513..cee759299a35 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1335,7 +1335,11 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				gfn_t gfn_offset, unsigned long mask)
 {
-	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	if (kvm_x86_ops->enable_log_dirty_pt_masked)
+		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+				mask);
+	else
+		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
 static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
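
For reference, a small standalone sketch of the (gfn_offset, mask) addressing
that enable_log_dirty_pt_masked shares with the kvm_mmu_write_protect_pt_masked
fallback: each set bit in mask selects one GFN relative to gfn_offset within
the slot. The base_gfn value below is made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long base_gfn = 0x100;  /* assumed slot base, illustrative */
	unsigned long gfn_offset = 64;   /* offset of the cleared bitmap word */
	unsigned long mask = 0x05;       /* bits 0 and 2 were dirty */

	while (mask) {
		unsigned long bit = __builtin_ctzl(mask);

		printf("re-enable logging for gfn 0x%lx\n",
		       base_gfn + gfn_offset + bit);
		mask &= mask - 1;        /* clear the lowest set bit */
	}
	return 0;
}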
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3a7fcff81af6..442ee7d90946 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3780,6 +3780,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	mutex_lock(&kvm->slots_lock);
 
+	/*
+	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+	 */
+	if (kvm_x86_ops->flush_log_dirty)
+		kvm_x86_ops->flush_log_dirty(kvm);
+
 	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 
 	/*
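
From the userspace side, this flush happens transparently inside the
KVM_GET_DIRTY_LOG ioctl. A hedged sketch of a caller follows; vm_fd, slot_id
and npages are assumed to come from earlier VM setup.

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Fetch the dirty bitmap for a slot. With this patch, the kernel flushes
 * any hardware-buffered (e.g. PML) dirty GPAs into slot->dirty_bitmap
 * before copying it out; the caller sees no difference in the ABI.
 */
static int get_dirty_bitmap(int vm_fd, uint32_t slot_id, size_t npages,
			    unsigned long **bitmap_out)
{
	struct kvm_dirty_log log;
	size_t bytes = ((npages + 63) / 64) * 8;  /* one bit per page */
	unsigned long *bitmap = calloc(1, bytes);

	if (!bitmap)
		return -1;

	memset(&log, 0, sizeof(log));
	log.slot = slot_id;
	log.dirty_bitmap = bitmap;

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return -1;
	}
	*bitmap_out = bitmap;
	return 0;
}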
@@ -7533,6 +7539,56 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
+				     struct kvm_memory_slot *new)
+{
+	/* Still write protect RO slot */
+	if (new->flags & KVM_MEM_READONLY) {
+		kvm_mmu_slot_remove_write_access(kvm, new);
+		return;
+	}
+
+	/*
+	 * Call kvm_x86_ops dirty logging hooks when they are valid.
+	 *
+	 * kvm_x86_ops->slot_disable_log_dirty is called when:
+	 *
+	 *  - KVM_MR_CREATE with dirty logging disabled
+	 *  - KVM_MR_FLAGS_ONLY with dirty logging disabled in the new flags
+	 *
+	 * The reason is that, in case of PML, we need to set the D-bit for
+	 * any slots with dirty logging disabled in order to eliminate
+	 * unnecessary GPA logging in the PML buffer (and potential PML
+	 * buffer full VMEXIT). This guarantees that leaving PML enabled for
+	 * the guest's lifetime won't incur any additional overhead from PML
+	 * when the guest is running with dirty logging disabled for its
+	 * memory slots.
+	 *
+	 * kvm_x86_ops->slot_enable_log_dirty is called when switching a
+	 * slot to dirty logging mode.
+	 *
+	 * If the kvm_x86_ops dirty logging hooks are invalid, use write
+	 * protection instead.
+	 *
+	 * In case of write protection:
+	 *
+	 * Write protect all pages for dirty logging.
+	 *
+	 * All the sptes, including the large sptes, which point to this
+	 * slot are set to read-only. We can not create any new large
+	 * spte on this slot until the end of the logging.
+	 *
+	 * See the comments in fast_page_fault().
+	 */
+	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		if (kvm_x86_ops->slot_enable_log_dirty)
+			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
+		else
+			kvm_mmu_slot_remove_write_access(kvm, new);
+	} else {
+		if (kvm_x86_ops->slot_disable_log_dirty)
+			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+	}
+}
+
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   const struct kvm_memory_slot *old,
@@ -7562,16 +7618,15 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	new = id_to_memslot(kvm->memslots, mem->slot);
 
 	/*
-	 * Write protect all pages for dirty logging.
-	 *
-	 * All the sptes including the large sptes which point to this
-	 * slot are set to readonly. We can not create any new large
-	 * spte on this slot until the end of the logging.
-	 *
-	 * See the comments in fast_page_fault().
+	 * Set up write protection and/or dirty logging for the new slot.
+	 *
+	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
+	 * slot have already been zapped, so no dirty logging work is needed
+	 * for the old slot. For KVM_MR_FLAGS_ONLY, the old slot is
+	 * essentially the same one as the new slot, and it is also covered
+	 * when dealing with the new slot.
 	 */
-	if ((change != KVM_MR_DELETE) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES))
-		kvm_mmu_slot_remove_write_access(kvm, new);
+	if (change != KVM_MR_DELETE)
+		kvm_mmu_slot_apply_flags(kvm, new);
 }
7576 7631
7577void kvm_arch_flush_shadow_all(struct kvm *kvm) 7632void kvm_arch_flush_shadow_all(struct kvm *kvm)
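
For completeness, the userspace operation that drives kvm_mmu_slot_apply_flags
is KVM_SET_USER_MEMORY_REGION: re-registering an existing slot with
KVM_MEM_LOG_DIRTY_PAGES toggled results in a KVM_MR_FLAGS_ONLY change, which
reaches slot_enable_log_dirty or slot_disable_log_dirty as described above. A
hedged sketch, with all parameters assumed from earlier VM setup:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * Toggle dirty logging on a memory slot. The slot must already exist with
 * the same gpa/size/hva; only the flags change here (KVM_MR_FLAGS_ONLY).
 */
static int set_dirty_logging(int vm_fd, uint32_t slot_id, uint64_t gpa,
			     uint64_t size, uint64_t hva, int enable)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot_id,
		.flags           = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0,
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}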