author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-07-15 23:25:17 -0400
committer	Avi Kivity <avi@redhat.com>			2010-08-01 23:40:58 -0400
commit		9ed5520dd3c9cb79c25f95fce9c57b87637d0fb7 (patch)
tree		32a983f23b3aa4dba5dea64a57f1b96126afe92a /arch/x86/kvm/mmu.c
parent		daa3db693ce925a14b7e17ab6f306dc0e6a5342c (diff)
KVM: MMU: fix page dirty tracking lost while sync page
In the sync-page path, if spte.writable changes, page dirty tracking can be lost. For example: assume spte.writable = 0 in an unsync page. When the page is synced, the spte is mapped writable (spte.writable = 1). Later the guest writes spte.gfn, so the page at spte.gfn is dirty. The guest then changes the mapping back to read-only, and after the next sync spte.writable = 0 again.

So when the host releases the spte, it sees spte.writable = 0 and does not mark the page dirty.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
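As a concrete illustration of the sequence above, here is a minimal user-space sketch (not KVM code: spte_writable and page_dirty are hypothetical stand-ins for the real spte state and for kvm_set_pfn_dirty()) showing why the dirty bit must be captured at the moment a writable spte is replaced by a read-only one:

/*
 * Illustrative sketch only; the names below are simplified
 * stand-ins, not the real KVM MMU state or helpers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool spte_writable;	/* stands in for spte.writable */
static bool page_dirty;		/* stands in for the pfn's dirty state */

/*
 * The fix: when a writable spte is replaced by a read-only one,
 * the guest may have written through the old mapping, so record
 * the page as dirty before the writable bit is lost.
 */
static void set_spte_sketch(bool new_writable)
{
	if (spte_writable && !new_writable)
		page_dirty = true;	/* kvm_set_pfn_dirty() in the patch */
	spte_writable = new_writable;
}

int main(void)
{
	spte_writable = false;		/* unsync page, read-only */
	set_spte_sketch(true);		/* sync: mapped writable */
	/* guest writes spte.gfn through the writable mapping here */
	set_spte_sketch(false);		/* guest makes it read-only; resync */
	printf("dirty=%d\n", page_dirty);	/* 1 with the fix, 0 without */
	return 0;
}

With the transition handled in set_spte(), the release path no longer needs to remember whether the spte used to be writable, which is why the mmu_set_spte() hunk below can call kvm_release_pfn_clean() unconditionally.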
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b3b916ebea..a04756a26fe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1985,6 +1985,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
+	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
+		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
@@ -1998,7 +2000,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
-	int was_writable = is_writable_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
@@ -2048,15 +2049,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, sptep, gfn);
-		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
-	} else {
-		if (was_writable)
-			kvm_release_pfn_dirty(pfn);
-		else
-			kvm_release_pfn_clean(pfn);
 	}
+	kvm_release_pfn_clean(pfn);
 	if (speculative) {
 		vcpu->arch.last_pte_updated = sptep;
 		vcpu->arch.last_pte_gfn = gfn;