path: root/arch/x86/kvm/mmu.c
author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>    2011-11-14 04:24:50 -0500
committer  Avi Kivity <avi@redhat.com>                          2011-12-27 04:17:20 -0500
commit     95d4c16ce78cb6b7549a09159c409d52ddd18dae (patch)
tree       1291405b107b4caa495454855baeeea5b9baa5e8 /arch/x86/kvm/mmu.c
parent     7850ac5420803996e2960d15b924021f28e0dffc (diff)
KVM: Optimize dirty logging by rmap_write_protect()
Currently, write protecting a slot needs to walk all the shadow pages and check the ones which have a pte mapping a page in it. The walk is overly heavy when there are not many dirty pages in that slot, and checking all the shadow pages results in unwanted cache pollution.

To mitigate this problem, we use rmap_write_protect() and check only the sptes which can be reached from the gfns marked in the dirty bitmap, when the number of dirty pages is less than the number of shadow pages.

This criterion is reasonable in its meaning and worked well in our tests: write protection became several times faster than before when the ratio of dirty pages was low, and was not worse even when the ratio was near the criterion.

Note that the locking for this write protection becomes fine grained. The reason why this is safe is described in the comments.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
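For context, the criterion above can be sketched from the caller's side roughly as follows. This is an illustration only, not part of the mmu.c diff below: the helper name write_protect_slot(), its slot_id parameter, and the use of kvm->arch.n_used_mmu_pages as the shadow-page count are assumptions made for the sketch.

/*
 * Illustrative sketch (assumed caller, not taken from this diff): pick
 * between per-gfn rmap write protection and a full shadow-page walk
 * depending on how many pages in the slot are dirty.
 */
static void write_protect_slot(struct kvm *kvm,
                               struct kvm_memory_slot *memslot,
                               unsigned long *dirty_bitmap,
                               unsigned long nr_dirty_pages,
                               int slot_id)
{
        if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
                /* Few dirty pages: touch only sptes reachable from dirty gfns. */
                unsigned long gfn_offset;

                for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
                        unsigned long gfn = memslot->base_gfn + gfn_offset;

                        /* Fine-grained locking: mmu_lock is taken per gfn. */
                        spin_lock(&kvm->mmu_lock);
                        kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
                        spin_unlock(&kvm->mmu_lock);
                }
        } else {
                /* Many dirty pages: fall back to walking all shadow pages. */
                spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, slot_id);
                spin_unlock(&kvm->mmu_lock);
        }
}

Passing the memslot down to kvm_mmu_rmap_write_protect() also lets such a caller reuse the slot it already holds instead of looking it up with gfn_to_memslot() for every dirty gfn, which is what the mmu.c change below makes possible.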
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fa71085f75a3..aecdea265f7e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1023,15 +1023,13 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 	rmap_remove(kvm, sptep);
 }
 
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot)
 {
-	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 	int i, write_protected = 0;
 
-	slot = gfn_to_memslot(kvm, gfn);
-
 	rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
@@ -1066,6 +1064,14 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	return write_protected;
 }
 
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+}
+
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			   unsigned long data)
 {