author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:24:39 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:27 -0400
commit	640d9b0dbe9f744ac8fd517a8f6afe238f8f525b
tree	ff4543527ee35018f26d686e5c46a4c100dda0ef /arch/x86/kvm/mmu.c
parent	bebb106a5afa32efdf5332ed4a40bf4d6d06b56e
KVM: MMU: optimize to handle dirty bit
If the dirty bit is not set, we can make the pte access read-only to avoid
handling the dirty bit everywhere.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
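The idea, in a hedged sketch: rather than threading a 'dirty' flag down into
set_spte() and clearing ACC_WRITE_MASK there, the guest-PTE walker can drop
write permission once, at the point where it derives pte_access from the
gpte. The helper below is illustrative only (gpte_access_sketch is a made-up
name, and the companion paging_tmpl.h change is not shown on this page, which
is filtered to mmu.c); the mask values mirror the x86 PTE bits and KVM's
ACC_* flags of that era.

/*
 * Illustrative sketch only -- not part of this diff.  Mask values mirror
 * the x86 PTE bits and KVM's ACC_* flags (arch/x86/kvm/mmu.h of the time).
 */
#define PT_WRITABLE_MASK	(1ULL << 1)
#define PT_USER_MASK		(1ULL << 2)
#define PT_DIRTY_MASK		(1ULL << 6)

#define ACC_EXEC_MASK		1
#define ACC_WRITE_MASK		PT_WRITABLE_MASK
#define ACC_USER_MASK		PT_USER_MASK

/* Hypothetical helper: derive pte_access from a guest PTE in one place. */
static unsigned gpte_access_sketch(unsigned long long gpte)
{
	/* W and U bits of the gpte line up with ACC_WRITE/ACC_USER. */
	unsigned access = ACC_EXEC_MASK |
			  (gpte & (PT_WRITABLE_MASK | PT_USER_MASK));

	/*
	 * Dirty bit clear: map read-only so hardware faults on the first
	 * write, giving KVM a chance to set the guest dirty bit then.
	 */
	if (!(gpte & PT_DIRTY_MASK))
		access &= ~ACC_WRITE_MASK;

	return access;
}

With pte_access pre-masked like this, every set_spte()/mmu_set_spte() call
site below loses one parameter, and the read-only decision lives in a single
place.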
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d1986b7dcec7..98812c25727b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1923,7 +1923,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
-		    int write_fault, int dirty, int level,
+		    int write_fault, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
@@ -1938,8 +1938,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
-	if (!dirty)
-		pte_access &= ~ACC_WRITE_MASK;
+
 	if (pte_access & ACC_EXEC_MASK)
 		spte |= shadow_x_mask;
 	else
@@ -2023,7 +2022,7 @@ done:
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
-			 int user_fault, int write_fault, int dirty,
+			 int user_fault, int write_fault,
 			 int *ptwrite, int level, gfn_t gfn,
 			 pfn_t pfn, bool speculative,
 			 bool host_writable)
@@ -2059,7 +2058,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-		      dirty, level, gfn, pfn, speculative, true,
+		      level, gfn, pfn, speculative, true,
 		      host_writable)) {
 		if (write_fault)
 			*ptwrite = 1;
@@ -2129,7 +2128,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 
 	for (i = 0; i < ret; i++, gfn++, start++)
 		mmu_set_spte(vcpu, start, ACC_ALL,
-			     access, 0, 0, 1, NULL,
+			     access, 0, 0, NULL,
 			     sp->role.level, gfn,
 			     page_to_pfn(pages[i]), true, true);
 
@@ -2193,7 +2192,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		unsigned pte_access = ACC_ALL;
 
 		mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
-			     0, write, 1, &pt_write,
+			     0, write, &pt_write,
 			     level, gfn, pfn, prefault, map_writable);
 		direct_pte_prefetch(vcpu, iterator.sptep);
 		++vcpu->stat.pf_fixed;