author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>   2012-03-01 05:32:16 -0500
committer  Avi Kivity <avi@redhat.com>                         2012-04-08 05:49:58 -0400
commit     5dc99b2380d59b8aeafa98791f92b96400ed3187 (patch)
tree       eba3731e113e14df9bd2e1ece5a3493dd5e606c0 /arch/x86
parent     a0ed46073c14f66dbf0707aaa7588b78da83d7c6 (diff)
KVM: Avoid checking huge page mappings in get_dirty_log()
Such huge page mappings are dropped when we enable dirty logging, and no new ones are created until we stop the logging.

For this we introduce a new function which can be used to write protect a range of PT level pages: although we do not need to care about a range of pages at this point, the following patch will need this feature to optimize the write protection of many pages.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
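For illustration only (not part of the patch): the heart of the new helper is the standard "iterate over the set bits of a word" idiom, combining a find-first-set with "mask &= mask - 1". A minimal, self-contained C sketch of that idiom, using GCC's __builtin_ctzl() in place of the kernel's __ffs() and a printf() stand-in for __rmap_write_protect(), could look like this:

    #include <stdio.h>

    /* Hypothetical stand-in for __rmap_write_protect(): just report the page. */
    static void protect_one(unsigned long gfn)
    {
            printf("write protecting gfn %lu\n", gfn);
    }

    /*
     * Walk every set bit of mask, lowest first, the way the new
     * kvm_mmu_write_protect_pt_masked() does: __builtin_ctzl() plays the
     * role of the kernel's __ffs(), and "mask &= mask - 1" clears the bit
     * that was just handled.
     */
    static void protect_masked(unsigned long gfn_offset, unsigned long mask)
    {
            while (mask) {
                    protect_one(gfn_offset + (unsigned long)__builtin_ctzl(mask));
                    mask &= mask - 1;       /* clear the lowest set bit */
            }
    }

    int main(void)
    {
            /* Bits 0, 3 and 5 set: pages 100, 103 and 105 get protected. */
            protect_masked(100, 0x29);
            return 0;
    }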
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  5
-rw-r--r--  arch/x86/kvm/mmu.c              | 40
-rw-r--r--  arch/x86/kvm/x86.c              |  8
3 files changed, 36 insertions, 17 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e216ba066e79..f624ca72ea24 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -712,8 +712,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                               struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                     struct kvm_memory_slot *slot,
+                                     gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c8b5694d1a48..dc5f2459db6c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1037,27 +1037,47 @@ static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level
 	return write_protected;
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                               struct kvm_memory_slot *slot)
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                     struct kvm_memory_slot *slot,
+                                     gfn_t gfn_offset, unsigned long mask)
 {
 	unsigned long *rmapp;
-	int i, write_protected = 0;
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(kvm, rmapp, i);
-	}
+	while (mask) {
+		rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+		__rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
 
-	return write_protected;
+		/* clear the first set bit */
+		mask &= mask - 1;
+	}
 }
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
+	unsigned long *rmapp;
+	int i;
+	int write_protected = 0;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		rmapp = __gfn_to_rmap(gfn, i, slot);
+		write_protected |= __rmap_write_protect(kvm, rmapp, i);
+	}
+
+	return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 99b738028fc0..813ebf1e55a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3095,13 +3095,11 @@ static void write_protect_slot(struct kvm *kvm,
 
 	/* Not many dirty pages compared to # of shadow pages. */
 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
-		unsigned long gfn_offset;
+		gfn_t offset;
 
-		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
-			unsigned long gfn = memslot->base_gfn + gfn_offset;
+		for_each_set_bit(offset, dirty_bitmap, memslot->npages)
+			kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, 1);
 
-			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-		}
 		kvm_flush_remote_tlbs(kvm);
 	} else
 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
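As a closing illustration (hypothetical, not the follow-up patch itself): the x86.c hunk above still calls the new helper once per dirty page with mask == 1, but the interface lets a caller hand over a whole bitmap word at a time. A self-contained C sketch of such a word-by-word walk, with a printf() stand-in for kvm_mmu_write_protect_pt_masked() and a hand-rolled BITS_PER_LONG, might look like this:

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

    /* Hypothetical stand-in for kvm_mmu_write_protect_pt_masked(). */
    static void write_protect_pt_masked(unsigned long gfn_offset, unsigned long mask)
    {
            while (mask) {
                    printf("write protecting page %lu\n",
                           gfn_offset + (unsigned long)__builtin_ctzl(mask));
                    mask &= mask - 1;       /* clear the lowest set bit */
            }
    }

    /*
     * Walk the dirty bitmap one word at a time: every non-zero word covers
     * up to BITS_PER_LONG pages but costs only a single call into the
     * helper, instead of one call per dirty page as in the hunk above.
     */
    static void write_protect_dirty(const unsigned long *dirty_bitmap,
                                    unsigned long npages)
    {
            unsigned long i;
            unsigned long nr_words = (npages + BITS_PER_LONG - 1) / BITS_PER_LONG;

            for (i = 0; i < nr_words; i++)
                    if (dirty_bitmap[i])
                            write_protect_pt_masked(i * BITS_PER_LONG,
                                                    dirty_bitmap[i]);
    }

    int main(void)
    {
            /* Pages 0, 2 and BITS_PER_LONG are dirty. */
            unsigned long dirty_bitmap[2] = { 0x5UL, 0x1UL };

            write_protect_dirty(dirty_bitmap, 2 * BITS_PER_LONG);
            return 0;
    }

This per-word batching is the kind of optimization the commit message anticipates for the following patch.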