author	Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:32 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:18 -0400
commit	38187c830cab84daecb41169948467f1f19317e3 (patch)
tree	e2566ec17957f604db89ae0f580aa94f80bad592 /arch/x86/kvm/mmu.c
parent	a378b4e64c0fef2d9e53214db167878b7673a7a3 (diff)
KVM: MMU: do not write-protect large mappings
There is not much point in write-protecting large mappings. This can only happen when a page is shadowed during the window between is_largepage_backed and mmu_lock acquisition. Zap the entry instead, so the next page fault will find a shadowed page via is_largepage_backed and fall back to 4k translations.

Simplifies out-of-sync shadow.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
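Before the diff, a minimal, self-contained C sketch of the decision this patch introduces in set_spte(). Everything here is a simplified stand-in for illustration, not the real KVM code: make_spte(), the stubbed has_wrprotected_page() and shadow_page_exists() helpers, and the mask values are hypothetical. It only models the new control flow, in which a would-be writable large mapping over an already-shadowed gfn is zapped to the nonpresent pattern instead of being write-protected, so the next fault rebuilds it with 4k translations.

/* Minimal model of the post-patch decision; stand-ins only, not KVM code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK	(1ULL << 1)
#define NONPRESENT_PTE		0ULL	/* stand-in for shadow_trap_nonpresent_pte */

/* Stand-in: is a shadow page already covering this gfn (so it must stay RO)? */
static bool has_wrprotected_page(uint64_t gfn) { (void)gfn; return true; }
/* Stand-in for kvm_mmu_lookup_page() returning a shadow page. */
static bool shadow_page_exists(uint64_t gfn) { (void)gfn; return false; }

static uint64_t make_spte(uint64_t spte, uint64_t gfn, bool largepage, bool want_write)
{
	if (!want_write)
		return spte;

	if (largepage && has_wrprotected_page(gfn)) {
		/* New behaviour: do not write-protect a large mapping over a
		 * shadowed page; zap it so the next fault falls back to 4k. */
		return NONPRESENT_PTE;
	}

	spte |= PT_WRITABLE_MASK;
	if (shadow_page_exists(gfn)) {
		/* 4k case, as before: shadowed page, so mark the spte read-only. */
		spte &= ~PT_WRITABLE_MASK;
	}
	return spte;
}

int main(void)
{
	printf("large + shadowed : %#llx (zapped)\n",
	       (unsigned long long)make_spte(0x1003, 42, true, true));
	printf("4k, not shadowed : %#llx (stays writable)\n",
	       (unsigned long long)make_spte(0x1003, 42, false, true));
	return 0;
}

The goto set_pte path in the real patch plays the same role as returning the nonpresent value here: the entry is dropped outright, and write protection is only ever applied to 4k sptes.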
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 23752ef0839c..731e6fe9cb07 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1180,11 +1180,16 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
 		struct kvm_mmu_page *shadow;
 
+		if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
+			ret = 1;
+			spte = shadow_trap_nonpresent_pte;
+			goto set_pte;
+		}
+
 		spte |= PT_WRITABLE_MASK;
 
 		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-		if (shadow ||
-		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
+		if (shadow) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
 			ret = 1;
@@ -1197,6 +1202,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
+set_pte:
 	set_shadow_pte(shadow_pte, spte);
 	return ret;
 }