author	Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:37 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:23 -0400
commit	0738541396be165995c7f2387746eb0b47024fec (patch)
tree	4a92962a64b6b08fbb242e74e2d00773fa3543b3 /arch
parent	ad8cfbe3fffdc09704f0808fde3934855620d545 (diff)
KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
kvm_mmu_zap_page() will soon zap the unsynced children of a page as well. Restart the list walk in that case, since the walker's saved "next" pointer may now refer to a page that was just zapped.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
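For illustration only, the following is a minimal, self-contained userspace C sketch of the restart-the-walk pattern this patch introduces; the names here (struct page_s, bucket, zap_page, zap_gfn) are made up and are not the kernel identifiers in the diff below. The point is that a "safe" iterator only pre-fetches the next pointer, so once the zap routine can free additional entries, the caller must restart from the head of the list whenever zap reports it did so.

/*
 * Sketch (not kernel code): zap_page() may free more nodes than the one
 * passed in, so zap_gfn() restarts its walk when told the pre-fetched
 * next pointer can no longer be trusted.
 */
#include <stdio.h>
#include <stdlib.h>

struct page_s {
	int gfn;                /* frame number this entry shadows */
	int unsync_child_gfn;   /* gfn of a child zapped along with us, -1 if none */
	struct page_s *next;
};

static struct page_s *bucket;   /* single hash bucket, newest first */

static void add_page(int gfn, int unsync_child_gfn)
{
	struct page_s *p = malloc(sizeof(*p));

	p->gfn = gfn;
	p->unsync_child_gfn = unsync_child_gfn;
	p->next = bucket;
	bucket = p;
}

/* Free @sp and possibly one other page.  Return nonzero if the caller's
 * saved next pointer may be stale. */
static int zap_page(struct page_s *sp)
{
	int child_gfn = sp->unsync_child_gfn;
	int zapped_extra = 0;
	struct page_s **pp = &bucket;

	while (*pp) {
		struct page_s *p = *pp;

		if (p == sp || (child_gfn >= 0 && p->gfn == child_gfn)) {
			*pp = p->next;
			if (p != sp)
				zapped_extra = 1;
			free(p);
			continue;
		}
		pp = &p->next;
	}
	return zapped_extra;
}

/* Zap every page shadowing @gfn, restarting the walk when needed.
 * Restarting may revisit already-seen entries, which is harmless here. */
static void zap_gfn(int gfn)
{
	struct page_s *sp, *n;

	for (sp = bucket; sp; sp = n) {
		n = sp->next;            /* pre-fetched, like a "safe" iterator */
		if (sp->gfn == gfn && zap_page(sp))
			n = bucket;      /* n may be stale: restart from the head */
	}
}

int main(void)
{
	add_page(3, -1);
	add_page(2, 3);          /* zapping this gfn-2 page also takes out gfn 3 */
	add_page(2, -1);
	add_page(1, -1);

	zap_gfn(2);
	for (struct page_s *p = bucket; p; p = p->next)
		printf("remaining gfn %d\n", p->gfn);   /* only gfn 1 remains */
	return 0;
}

The same reasoning is behind "n = bucket->first" in the hash-bucket walks and the container_of() restart in kvm_mmu_zap_all() in the diff below.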
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b82abee78f17..c9b4b902527b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1078,7 +1078,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	++kvm->stat.mmu_shadow_zapped;
 	kvm_mmu_page_unlink_children(kvm, sp);
@@ -1095,6 +1095,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 		kvm_reload_remote_mmus(kvm);
 	}
 	kvm_mmu_reset_last_pte_updated(kvm);
+	return 0;
 }
 
 /*
@@ -1147,8 +1148,9 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
-			kvm_mmu_zap_page(kvm, sp);
 			r = 1;
+			if (kvm_mmu_zap_page(kvm, sp))
+				n = bucket->first;
 		}
 	return r;
 }
@@ -1992,7 +1994,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, sp->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, sp);
+			if (kvm_mmu_zap_page(vcpu->kvm, sp))
+				n = bucket->first;
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
@@ -2226,7 +2229,9 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 
 	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-		kvm_mmu_zap_page(kvm, sp);
+		if (kvm_mmu_zap_page(kvm, sp))
+			node = container_of(kvm->arch.active_mmu_pages.next,
+					    struct kvm_mmu_page, link);
 	spin_unlock(&kvm->mmu_lock);
 
 	kvm_flush_remote_tlbs(kvm);