author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>    2011-11-14 04:22:28 -0500
committer  Avi Kivity <avi@redhat.com>    2011-12-27 04:17:17 -0500
commit     9b9b1492364758de82c19c36f07baa9ae162c7e5 (patch)
tree       2d600f1bdcc36831597cdcb779f434d668d9d392 /arch/x86/kvm/mmu.c
parent     d6eebf8b80316ea61718dc115cd6a20c16195327 (diff)
KVM: MMU: Split gfn_to_rmap() into two functions
rmap_write_protect() calls gfn_to_rmap() for each level with gfn fixed. This
results in calling gfn_to_memslot() repeatedly with that gfn. This patch
introduces __gfn_to_rmap() which takes the slot as an argument to avoid this.

This is also needed for the following dirty logging optimization.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
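The gist of the change, as a standalone sketch rather than the actual kernel code (the struct and helpers below are simplified stand-ins for kvm_memory_slot, gfn_to_memslot() and __gfn_to_rmap()): resolve the slot once per gfn, then derive each level's rmap head from that already-known slot.

#include <stdio.h>

/* Simplified stand-in for struct kvm_memory_slot. */
struct memslot {
        unsigned long base_gfn;
        unsigned long rmap[64];         /* one rmap head per 4K page in the slot */
};

static struct memslot example_slot = { .base_gfn = 0x1000 };

/* Stand-in for gfn_to_memslot(): the per-gfn lookup worth doing only once. */
static struct memslot *lookup_slot(unsigned long gfn)
{
        (void)gfn;                      /* a real lookup would search the memslot array */
        return &example_slot;
}

/* Stand-in for __gfn_to_rmap(): cheap once the slot is already known. */
static unsigned long *slot_rmap(struct memslot *slot, unsigned long gfn, int level)
{
        (void)level;                    /* higher levels would use large-page info instead */
        return &slot->rmap[gfn - slot->base_gfn];
}

int main(void)
{
        unsigned long gfn = 0x1010;
        struct memslot *slot = lookup_slot(gfn);        /* looked up once ... */
        int level;

        for (level = 1; level <= 3; level++)            /* ... reused for every level */
                printf("level %d rmap head: %p\n", level,
                       (void *)slot_rmap(slot, gfn, level));

        return 0;
}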
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 973f25480afa..fa71085f75a3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -958,23 +958,29 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
 	}
 }
 
-/*
- * Take gfn and return the reverse mapping to it.
- */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+				    struct kvm_memory_slot *slot)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 
-	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
 	linfo = lpage_info_slot(gfn, slot, level);
-
 	return &linfo->rmap_pde;
 }
 
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	return __gfn_to_rmap(kvm, gfn, level, slot);
+}
+
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *cache;
@@ -1019,12 +1025,14 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
+	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 	int i, write_protected = 0;
 
-	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
+	slot = gfn_to_memslot(kvm, gfn);
 
+	rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
@@ -1039,7 +1047,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		rmapp = gfn_to_rmap(kvm, gfn, i);
+		rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
 		spte = rmap_next(kvm, rmapp, NULL);
 		while (spte) {
 			BUG_ON(!(*spte & PT_PRESENT_MASK));