about summary refs log tree commit diff stats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2008-03-18 05:05:52 -0400
committerAvi Kivity <avi@qumranet.com>2008-04-27 05:00:33 -0400
commit947da53830690cbd77d7f2b625d0df1f161ffd54 (patch)
tree32db5b7e97b5df34ada31ac9de8f3250b90dfd55 /arch/x86/kvm
parent97646202bc3f190dfcb48a3d506ea2445717d392 (diff)
KVM: MMU: Set the accessed bit on non-speculative shadow ptes
If we populate a shadow pte due to a fault (and not speculatively due to a pte write) then we can set the accessed bit on it, as we know it will be set immediately on the next guest instruction. This saves a read-modify-write operation.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--arch/x86/kvm/mmu.c8
-rw-r--r--arch/x86/kvm/paging_tmpl.h4
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 072e9422c914..a5872b3c466d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1020,7 +1020,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1020 unsigned pt_access, unsigned pte_access, 1020 unsigned pt_access, unsigned pte_access,
1021 int user_fault, int write_fault, int dirty, 1021 int user_fault, int write_fault, int dirty,
1022 int *ptwrite, int largepage, gfn_t gfn, 1022 int *ptwrite, int largepage, gfn_t gfn,
1023 struct page *page) 1023 struct page *page, bool speculative)
1024{ 1024{
1025 u64 spte; 1025 u64 spte;
1026 int was_rmapped = 0; 1026 int was_rmapped = 0;
@@ -1061,6 +1061,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1061 * demand paging). 1061 * demand paging).
1062 */ 1062 */
1063 spte = PT_PRESENT_MASK | PT_DIRTY_MASK; 1063 spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
1064 if (!speculative)
1065 pte_access |= PT_ACCESSED_MASK;
1064 if (!dirty) 1066 if (!dirty)
1065 pte_access &= ~ACC_WRITE_MASK; 1067 pte_access &= ~ACC_WRITE_MASK;
1066 if (!(pte_access & ACC_EXEC_MASK)) 1068 if (!(pte_access & ACC_EXEC_MASK))
@@ -1148,13 +1150,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1148 1150
1149 if (level == 1) { 1151 if (level == 1) {
1150 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, 1152 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1151 0, write, 1, &pt_write, 0, gfn, page); 1153 0, write, 1, &pt_write, 0, gfn, page, false);
1152 return pt_write; 1154 return pt_write;
1153 } 1155 }
1154 1156
1155 if (largepage && level == 2) { 1157 if (largepage && level == 2) {
1156 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, 1158 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1157 0, write, 1, &pt_write, 1, gfn, page); 1159 0, write, 1, &pt_write, 1, gfn, page, false);
1158 return pt_write; 1160 return pt_write;
1159 } 1161 }
1160 1162
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 57abbd091143..e9ae5dba724e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -266,7 +266,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
266 get_page(npage); 266 get_page(npage);
267 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 267 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
268 gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte), 268 gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
269 npage); 269 npage, true);
270} 270}
271 271
272/* 272/*
@@ -349,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
349 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, 349 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
350 user_fault, write_fault, 350 user_fault, write_fault,
351 walker->ptes[walker->level-1] & PT_DIRTY_MASK, 351 walker->ptes[walker->level-1] & PT_DIRTY_MASK,
352 ptwrite, largepage, walker->gfn, page); 352 ptwrite, largepage, walker->gfn, page, false);
353 353
354 return shadow_ent; 354 return shadow_ent;
355} 355}