author    Marcelo Tosatti <mtosatti@redhat.com>  2008-09-23 12:18:30 -0400
committer Avi Kivity <avi@redhat.com>            2008-10-15 08:25:16 -0400
commit    1e73f9dd885957bf0c7bb5e63b350d5aeb06b726 (patch)
tree      e8b0a494efa53b8bae28722cd0e896be9431e30d /arch/x86/kvm/mmu.c
parent    93a423e7045cf3cf69f960ff307edda1afcd7b41 (diff)
KVM: MMU: split mmu_set_spte
Split the spte entry creation code into a new set_spte function.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
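In outline, the split gives set_spte() a return value: it reports nonzero when it had to strip write access because the gfn is covered by a shadow page, and mmu_set_spte() turns that report into *ptwrite on a write fault. Below is a minimal standalone C sketch of that control flow; the *_sketch names, stub gfn_t type, and the gfn_is_shadowed flag are illustrative stand-ins for this note, not kernel code (see the diff below for the real signatures).

/*
 * Standalone sketch (not kernel code) of the control flow after the
 * split: set_spte() reports whether it had to write-protect the
 * mapping, and mmu_set_spte() turns that report into *ptwrite.
 */
#include <stdio.h>

typedef unsigned long gfn_t;

/* Mirrors set_spte()'s new contract: returns nonzero when write
 * access was stripped because the gfn is covered by a shadow page
 * (the "marking ro" branch in the patch, where ret = 1). */
static int set_spte_sketch(gfn_t gfn, int gfn_is_shadowed)
{
        if (gfn_is_shadowed)
                return 1;
        return 0;
}

/* Mirrors mmu_set_spte(): caller-visible behaviour is unchanged;
 * *ptwrite is still raised only for a write fault on a protected gfn. */
static void mmu_set_spte_sketch(gfn_t gfn, int gfn_is_shadowed,
                                int write_fault, int *ptwrite)
{
        if (set_spte_sketch(gfn, gfn_is_shadowed))
                if (write_fault)
                        *ptwrite = 1;
}

int main(void)
{
        int ptwrite = 0;

        mmu_set_spte_sketch(0x1234, 1, 1, &ptwrite);
        printf("write fault on shadowed gfn -> ptwrite = %d\n", ptwrite);
        return 0;
}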
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  101
1 file changed, 57 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5779a2323e23..9ad4cc553893 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1148,44 +1148,13 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return page;
 }
 
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
-			 unsigned pt_access, unsigned pte_access,
-			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+		    unsigned pte_access, int user_fault,
+		    int write_fault, int dirty, int largepage,
+		    gfn_t gfn, pfn_t pfn, bool speculative)
 {
 	u64 spte;
-	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*shadow_pte);
-
-	pgprintk("%s: spte %llx access %x write_fault %d"
-		 " user_fault %d gfn %lx\n",
-		 __func__, *shadow_pte, pt_access,
-		 write_fault, user_fault, gfn);
-
-	if (is_rmap_pte(*shadow_pte)) {
-		/*
-		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
-		 * the parent of the now unreachable PTE.
-		 */
-		if (largepage && !is_large_pte(*shadow_pte)) {
-			struct kvm_mmu_page *child;
-			u64 pte = *shadow_pte;
-
-			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(child, shadow_pte);
-		} else if (pfn != spte_to_pfn(*shadow_pte)) {
-			pgprintk("hfn old %lx new %lx\n",
-				 spte_to_pfn(*shadow_pte), pfn);
-			rmap_remove(vcpu->kvm, shadow_pte);
-		} else {
-			if (largepage)
-				was_rmapped = is_large_pte(*shadow_pte);
-			else
-				was_rmapped = 1;
-		}
-	}
-
+	int ret = 0;
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1218,26 +1187,70 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
+			ret = 1;
 			pte_access &= ~ACC_WRITE_MASK;
 			if (is_writeble_pte(spte)) {
 				spte &= ~PT_WRITABLE_MASK;
 				kvm_x86_ops->tlb_flush(vcpu);
 			}
-			if (write_fault)
-				*ptwrite = 1;
 		}
 	}
 
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
-	pgprintk("%s: setting spte %llx\n", __func__, spte);
-	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
-		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
-		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
 	set_shadow_pte(shadow_pte, spte);
-	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
-	    && (spte & PT_PRESENT_MASK))
+	return ret;
+}
+
+
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+			 unsigned pt_access, unsigned pte_access,
+			 int user_fault, int write_fault, int dirty,
+			 int *ptwrite, int largepage, gfn_t gfn,
+			 pfn_t pfn, bool speculative)
+{
+	int was_rmapped = 0;
+	int was_writeble = is_writeble_pte(*shadow_pte);
+
+	pgprintk("%s: spte %llx access %x write_fault %d"
+		 " user_fault %d gfn %lx\n",
+		 __func__, *shadow_pte, pt_access,
+		 write_fault, user_fault, gfn);
+
+	if (is_rmap_pte(*shadow_pte)) {
+		/*
+		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+		 * the parent of the now unreachable PTE.
+		 */
+		if (largepage && !is_large_pte(*shadow_pte)) {
+			struct kvm_mmu_page *child;
+			u64 pte = *shadow_pte;
+
+			child = page_header(pte & PT64_BASE_ADDR_MASK);
+			mmu_page_remove_parent_pte(child, shadow_pte);
+		} else if (pfn != spte_to_pfn(*shadow_pte)) {
+			pgprintk("hfn old %lx new %lx\n",
+				 spte_to_pfn(*shadow_pte), pfn);
+			rmap_remove(vcpu->kvm, shadow_pte);
+		} else {
+			if (largepage)
+				was_rmapped = is_large_pte(*shadow_pte);
+			else
+				was_rmapped = 1;
+		}
+	}
+	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+		      dirty, largepage, gfn, pfn, speculative))
+		if (write_fault)
+			*ptwrite = 1;
+
+	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
+		 is_large_pte(*shadow_pte)? "2MB" : "4kB",
+		 is_present_pte(*shadow_pte)?"RW":"R", gfn,
+		 *shadow_pte, shadow_pte);
+	if (!was_rmapped && is_large_pte(*shadow_pte))
 		++vcpu->kvm->stat.lpages;
 
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);