author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-09-22 04:55:36 -0400
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:16:53 -0500
commit	d01f8d5e02cc79998e3160f7ad545f77891b00e5 (patch)
tree	2378c32a3a6bae9b18a64a640e5e917eefb42385 /arch/x86
parent	6f6fbe98c3a9f3e9d69cd354a0459989e594e707 (diff)
KVM: MMU: do not mark accessed bit on pte write path
In the current code, the accessed bit is always set when a page fault occurs, so there is no need to set it again on the pte write path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
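Why the removal is safe can be sketched with a small user-space toy model (this is not kernel code; the TOY_* bits and toy_set_spte() below are made-up stand-ins for the set_spte() behaviour visible in the mmu.c hunk): the fault path already sets the accessed bit for any non-speculative mapping, so setting it again on the pte write path changes nothing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy spte bits for illustration only; the real code uses
 * PT_PRESENT_MASK and shadow_accessed_mask. */
#define TOY_PRESENT  (1ULL << 0)
#define TOY_ACCESSED (1ULL << 5)

/* Models the fault path: set_spte() marks the mapping accessed
 * unless it was installed speculatively (prefetched). */
static uint64_t toy_set_spte(bool speculative)
{
	uint64_t spte = TOY_PRESENT;

	if (!speculative)
		spte |= TOY_ACCESSED;
	return spte;
}

int main(void)
{
	uint64_t spte = toy_set_spte(false);	/* ordinary guest fault */

	/* What the removed pte-write-path hook effectively did: */
	int already_set = !!(spte & TOY_ACCESSED);
	spte |= TOY_ACCESSED;			/* redundant for faulted-in sptes */

	printf("accessed bit before write-path hook: %d\n", already_set);
	return 0;
}

The hunks below remove the now-redundant helper and the last_pte_gfn field it needed.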
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	 1
-rw-r--r--	arch/x86/kvm/mmu.c	22
2 files changed, 1 insertion(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4ceefa9567ed..f8ab0d760231 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -356,7 +356,6 @@ struct kvm_vcpu_arch {
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
 	u64 *last_pte_updated;
-	gfn_t last_pte_gfn;
 
 	struct fpu guest_fpu;
 	u64 xcr0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a22eb81b4ca..b432a71a1839 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2207,11 +2207,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
 		return 0;
 
-	/*
-	 * We don't set the accessed bit, since we sometimes want to see
-	 * whether the guest actually used the pte (in order to detect
-	 * demand paging).
-	 */
 	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
@@ -2362,10 +2357,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative) {
+	if (speculative)
 		vcpu->arch.last_pte_updated = sptep;
-		vcpu->arch.last_pte_gfn = gfn;
-	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3533,18 +3526,6 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	if (spte
-	    && vcpu->arch.last_pte_gfn == gfn
-	    && shadow_accessed_mask
-	    && !(*spte & shadow_accessed_mask)
-	    && is_shadow_present_pte(*spte))
-		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
-}
-
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
 		       bool guest_initiated)
@@ -3615,7 +3596,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 	if (guest_initiated) {
-		kvm_mmu_access_page(vcpu, gfn);
 		if (gfn == vcpu->arch.last_pt_write_gfn
 		    && !last_updated_pte_accessed(vcpu)) {
 			++vcpu->arch.last_pt_write_count;