author		Avi Kivity <avi@qumranet.com>	2008-02-26 15:12:10 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-03-04 08:19:49 -0500
commit		f7d9c7b7b902f9f532738d47593d9679b0b182d9 (patch)
tree		61af54605ead13b71f664a0ce4776720d10a3ef1 /arch/x86/kvm/paging_tmpl.h
parent		8c35f237fb5664d30aa90448c3d6cea0cbb43f35 (diff)
KVM: MMU: Fix race when instantiating a shadow pte
For improved concurrency, the guest walk is performed concurrently with other
vcpus. This means that we need to revalidate the guest ptes once we have
write-protected the guest page tables, at which point they can no longer be
modified.

The current code attempts to avoid this check if the shadow page table is not
new, on the assumption that if it has existed before, the guest could not have
modified the pte without the shadow lock. However the assumption is incorrect,
as the racing vcpu could have modified the pte, then instantiated the shadow
page, before our vcpu regains control:

  vcpu0                                         vcpu1

  fault
  walk pte
                                                modify pte
                                                fault in same pagetable
                                                instantiate shadow page
  lookup shadow page
  conclude it is old
  instantiate spte based on stale guest pte

We could do something clever with generation counters, but a test run by
Marcelo suggests this is unnecessary and we can just do the revalidation
unconditionally. The pte will be in the processor cache and the check can be
quite fast.

Signed-off-by: Avi Kivity <avi@qumranet.com>
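The fix reduces to a snapshot-and-revalidate pattern: capture the pte during
the lockless guest walk, write-protect the guest page table, then atomically
re-read the pte and compare it with the snapshot. Below is a minimal,
self-contained userspace sketch of that pattern; every name in it is
hypothetical (read_guest_pte() merely stands in for kvm_read_guest_atomic()),
so treat it as an illustration of the idea rather than the kernel code.

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pt_element_t;

	/* Stand-in for the pte as it lives in guest memory. */
	static pt_element_t guest_pte;

	/* Models kvm_read_guest_atomic(): re-read the pte from the
	 * (by now write-protected) guest page table. */
	static pt_element_t read_guest_pte(void)
	{
		return __atomic_load_n(&guest_pte, __ATOMIC_ACQUIRE);
	}

	/* The check this commit makes unconditional: compare the pte
	 * captured by the lockless walk against a fresh read.  A
	 * mismatch means a racing vcpu rewrote the pte in the window,
	 * so the caller must not build an spte from the stale value. */
	static int walk_still_valid(pt_element_t walked_pte)
	{
		return read_guest_pte() == walked_pte;
	}

	int main(void)
	{
		guest_pte = 0x1003;		   /* present, writable, frame 1 */
		pt_element_t walked = guest_pte;   /* value seen by the guest walk */

		guest_pte = 0x2003;		   /* racing vcpu modifies the pte */

		printf("walk %s\n", walk_still_valid(walked) ? "valid" : "stale");
		return 0;
	}

Running the sketch prints "walk stale", mirroring the vcpu0/vcpu1
interleaving above: the unconditional re-read catches the racing
modification that the old new_page shortcut would have missed.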
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	5	++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 5588fa594b61..ecc0856268c4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -300,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		u64 shadow_pte;
 		int metaphysical;
 		gfn_t table_gfn;
-		bool new_page = 0;
 
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
@@ -322,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, access,
-					       shadow_ent, &new_page);
-		if (new_page && !metaphysical) {
+					       shadow_ent);
+		if (!metaphysical) {
 			int r;
 			pt_element_t curr_pte;
 			r = kvm_read_guest_atomic(vcpu->kvm,
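The second hunk is cut off mid-call by the diff's three lines of context. For
orientation, here is a sketch of how the now-unconditional revalidation block
plausibly continues; the walker->pte_gpa[] and walker->ptes[] field names and
the return-NULL-on-mismatch behavior are assumptions inferred from kernels of
that era, not part of this diff:

		if (!metaphysical) {
			int r;
			pt_element_t curr_pte;

			/* Re-read the guest pte now that its page table is
			 * write-protected; a mismatch with the value captured
			 * during the walk means another vcpu raced us.
			 * (walker->pte_gpa[]/walker->ptes[] are assumed names.) */
			r = kvm_read_guest_atomic(vcpu->kvm,
						  walker->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != walker->ptes[level - 2])
				return NULL;	/* abort: the fault is retried */
		}

Aborting the fetch makes the caller retry the fault, and the fresh guest walk
then picks up the modified pte.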