diff options
author | Avi Kivity <avi@qumranet.com> | 2008-03-18 05:05:52 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-04-27 05:00:33 -0400 |
commit | 947da53830690cbd77d7f2b625d0df1f161ffd54 (patch) | |
tree | 32db5b7e97b5df34ada31ac9de8f3250b90dfd55 /arch/x86/kvm/mmu.c | |
parent | 97646202bc3f190dfcb48a3d506ea2445717d392 (diff) |
KVM: MMU: Set the accessed bit on non-speculative shadow ptes
If we populate a shadow pte due to a fault (and not speculatively due to a
pte write) then we can set the accessed bit on it, as we know it will be
set immediately on the next guest instruction. This saves a read-modify-write
operation.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 072e9422c914..a5872b3c466d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1020,7 +1020,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1020 | unsigned pt_access, unsigned pte_access, | 1020 | unsigned pt_access, unsigned pte_access, |
1021 | int user_fault, int write_fault, int dirty, | 1021 | int user_fault, int write_fault, int dirty, |
1022 | int *ptwrite, int largepage, gfn_t gfn, | 1022 | int *ptwrite, int largepage, gfn_t gfn, |
1023 | struct page *page) | 1023 | struct page *page, bool speculative) |
1024 | { | 1024 | { |
1025 | u64 spte; | 1025 | u64 spte; |
1026 | int was_rmapped = 0; | 1026 | int was_rmapped = 0; |
@@ -1061,6 +1061,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1061 | * demand paging). | 1061 | * demand paging). |
1062 | */ | 1062 | */ |
1063 | spte = PT_PRESENT_MASK | PT_DIRTY_MASK; | 1063 | spte = PT_PRESENT_MASK | PT_DIRTY_MASK; |
1064 | if (!speculative) | ||
1065 | pte_access |= PT_ACCESSED_MASK; | ||
1064 | if (!dirty) | 1066 | if (!dirty) |
1065 | pte_access &= ~ACC_WRITE_MASK; | 1067 | pte_access &= ~ACC_WRITE_MASK; |
1066 | if (!(pte_access & ACC_EXEC_MASK)) | 1068 | if (!(pte_access & ACC_EXEC_MASK)) |
@@ -1148,13 +1150,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, | |||
1148 | 1150 | ||
1149 | if (level == 1) { | 1151 | if (level == 1) { |
1150 | mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, | 1152 | mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, |
1151 | 0, write, 1, &pt_write, 0, gfn, page); | 1153 | 0, write, 1, &pt_write, 0, gfn, page, false); |
1152 | return pt_write; | 1154 | return pt_write; |
1153 | } | 1155 | } |
1154 | 1156 | ||
1155 | if (largepage && level == 2) { | 1157 | if (largepage && level == 2) { |
1156 | mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, | 1158 | mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, |
1157 | 0, write, 1, &pt_write, 1, gfn, page); | 1159 | 0, write, 1, &pt_write, 1, gfn, page, false); |
1158 | return pt_write; | 1160 | return pt_write; |
1159 | } | 1161 | } |
1160 | 1162 | ||