aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKai Huang <kai.huang@linux.intel.com>2015-01-27 21:54:23 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2015-01-29 09:30:38 -0500
commit3b0f1d01e501792d8d89ab4371bc9e8cd2a10032 (patch)
treed9c509027636a5bb3ba2f811dada48bddb1245e7
parentb0165f1b415daeedab78455aaac529aaec6007dd (diff)
KVM: Rename kvm_arch_mmu_write_protect_pt_masked to be more generic for log dirty
We don't have to write protect guest memory for dirty logging if architecture supports hardware dirty logging, such as PML on VMX, so rename it to be more generic.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/arm/kvm/mmu.c18
-rw-r--r--arch/x86/kvm/mmu.c21
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--virt/kvm/kvm_main.c2
4 files changed, 37 insertions, 6 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 74aeabaa3c4d..6034697ede3f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1081,7 +1081,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1081} 1081}
1082 1082
1083/** 1083/**
1084 * kvm_arch_mmu_write_protect_pt_masked() - write protect dirty pages 1084 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1085 * @kvm: The KVM pointer 1085 * @kvm: The KVM pointer
1086 * @slot: The memory slot associated with mask 1086 * @slot: The memory slot associated with mask
1087 * @gfn_offset: The gfn offset in memory slot 1087 * @gfn_offset: The gfn offset in memory slot
@@ -1091,7 +1091,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1091 * Walks bits set in mask write protects the associated pte's. Caller must 1091 * Walks bits set in mask write protects the associated pte's. Caller must
1092 * acquire kvm_mmu_lock. 1092 * acquire kvm_mmu_lock.
1093 */ 1093 */
1094void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm, 1094static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1095 struct kvm_memory_slot *slot, 1095 struct kvm_memory_slot *slot,
1096 gfn_t gfn_offset, unsigned long mask) 1096 gfn_t gfn_offset, unsigned long mask)
1097{ 1097{
@@ -1102,6 +1102,20 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
1102 stage2_wp_range(kvm, start, end); 1102 stage2_wp_range(kvm, start, end);
1103} 1103}
1104 1104
1105/*
1106 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1107 * dirty pages.
1108 *
1109 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1110 * enable dirty logging for them.
1111 */
1112void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1113 struct kvm_memory_slot *slot,
1114 gfn_t gfn_offset, unsigned long mask)
1115{
1116 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1117}
1118
1105static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 1119static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1106 struct kvm_memory_slot *memslot, unsigned long hva, 1120 struct kvm_memory_slot *memslot, unsigned long hva,
1107 unsigned long fault_status) 1121 unsigned long fault_status)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0ed9f795e4f0..b18e65ce3683 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1216,7 +1216,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
1216} 1216}
1217 1217
1218/** 1218/**
1219 * kvm_arch_mmu_write_protect_pt_masked - write protect selected PT level pages 1219 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1220 * @kvm: kvm instance 1220 * @kvm: kvm instance
1221 * @slot: slot to protect 1221 * @slot: slot to protect
1222 * @gfn_offset: start of the BITS_PER_LONG pages we care about 1222 * @gfn_offset: start of the BITS_PER_LONG pages we care about
@@ -1225,7 +1225,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
1225 * Used when we do not need to care about huge page mappings: e.g. during dirty 1225 * Used when we do not need to care about huge page mappings: e.g. during dirty
1226 * logging we do not have any such mappings. 1226 * logging we do not have any such mappings.
1227 */ 1227 */
1228void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm, 1228static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1229 struct kvm_memory_slot *slot, 1229 struct kvm_memory_slot *slot,
1230 gfn_t gfn_offset, unsigned long mask) 1230 gfn_t gfn_offset, unsigned long mask)
1231{ 1231{
@@ -1241,6 +1241,23 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
1241 } 1241 }
1242} 1242}
1243 1243
1244/**
1245 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1246 * PT level pages.
1247 *
1248 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1249 * enable dirty logging for them.
1250 *
1251 * Used when we do not need to care about huge page mappings: e.g. during dirty
1252 * logging we do not have any such mappings.
1253 */
1254void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1255 struct kvm_memory_slot *slot,
1256 gfn_t gfn_offset, unsigned long mask)
1257{
1258 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1259}
1260
1244static bool rmap_write_protect(struct kvm *kvm, u64 gfn) 1261static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
1245{ 1262{
1246 struct kvm_memory_slot *slot; 1263 struct kvm_memory_slot *slot;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7d6719522f1f..32d057571bf6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -615,7 +615,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
615int kvm_get_dirty_log_protect(struct kvm *kvm, 615int kvm_get_dirty_log_protect(struct kvm *kvm,
616 struct kvm_dirty_log *log, bool *is_dirty); 616 struct kvm_dirty_log *log, bool *is_dirty);
617 617
618void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm, 618void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
619 struct kvm_memory_slot *slot, 619 struct kvm_memory_slot *slot,
620 gfn_t gfn_offset, 620 gfn_t gfn_offset,
621 unsigned long mask); 621 unsigned long mask);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a8490f084483..0c281760a1c5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1059,7 +1059,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
1059 dirty_bitmap_buffer[i] = mask; 1059 dirty_bitmap_buffer[i] = mask;
1060 1060
1061 offset = i * BITS_PER_LONG; 1061 offset = i * BITS_PER_LONG;
1062 kvm_arch_mmu_write_protect_pt_masked(kvm, memslot, offset, 1062 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
1063 mask); 1063 mask);
1064 } 1064 }
1065 1065