author    Avi Kivity <avi@redhat.com>    2010-07-13 07:27:10 -0400
committer Avi Kivity <avi@redhat.com>    2010-08-01 23:40:47 -0400
commit    5991b33237b7fc7dd9f62ae04998c42217d444a7 (patch)
tree      dadadd3c7329e073f64af5e13260b4d732d35cb3 /arch/x86/kvm
parent    0b3c933302262d83018dd5f69656bca9f28a0cd3 (diff)
KVM: MMU: Validate all gptes during fetch, not just those used for new pages
Currently, when we fetch an spte, we only verify that gptes match those
that the walker saw if we build new shadow pages for them.

However, this misses the following race:

    vcpu1               vcpu2

    walk
                        change gpte
                        walk
                        instantiate sp

    fetch existing sp

Fix by validating every gpte, regardless of whether it is used for
building a new sp or not.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
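The check the patch leans on is the FNAME(gpte_changed) helper introduced by
the parent commit (0b3c9333). A minimal sketch of its logic follows, assuming
gw->pte_gpa[] records the guest-physical address of each gpte the walker
visited (gw->ptes[] appears in the diff below; pte_gpa is an assumption here),
and using kvm_read_guest_atomic(), KVM's existing non-sleeping guest-memory
accessor:

/*
 * Sketch only: an approximation of FNAME(gpte_changed), not the exact
 * tree contents. Atomically re-read the gpte for this level and
 * compare it with the snapshot the walker took.
 */
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	int r;

	/* gw->pte_gpa[] holding per-level gpte addresses is assumed */
	r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 1],
				  &curr_pte, sizeof(curr_pte));
	/* a failed read counts as "changed": do not trust the walk */
	return r || curr_pte != gw->ptes[level - 1];
}

With this in place, fetch() can revalidate at every level on the way down, so
a gpte that vcpu2 modified between vcpu1's walk and its fetch is caught even
when the corresponding shadow page already exists.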
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 368e4cb6233b..8cb85f9c8adb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -319,10 +319,11 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 int *ptwrite, pfn_t pfn)
 {
 	unsigned access = gw->pt_access;
-	struct kvm_mmu_page *sp;
+	struct kvm_mmu_page *sp = NULL;
 	u64 *sptep = NULL;
 	int uninitialized_var(level);
 	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
+	int top_level;
 	unsigned direct_access;
 	struct kvm_shadow_walk_iterator iterator;
 
@@ -333,6 +334,18 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!dirty)
 		direct_access &= ~ACC_WRITE_MASK;
 
+	top_level = vcpu->arch.mmu.root_level;
+	if (top_level == PT32E_ROOT_LEVEL)
+		top_level = PT32_ROOT_LEVEL;
+	/*
+	 * Verify that the top-level gpte is still there.  Since the page
+	 * is a root page, it is either write protected (and cannot be
+	 * changed from now on) or it is invalid (in which case, we don't
+	 * really care if it changes underneath us after this point).
+	 */
+	if (FNAME(gpte_changed)(vcpu, gw, top_level))
+		goto out_gpte_changed;
+
 	for (shadow_walk_init(&iterator, vcpu, addr);
 	     shadow_walk_okay(&iterator) && iterator.level > gw->level;
 	     shadow_walk_next(&iterator)) {
@@ -343,12 +356,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		drop_large_spte(vcpu, sptep);
 
-		if (is_shadow_present_pte(*sptep))
-			continue;
-
-		table_gfn = gw->table_gfn[level - 2];
-		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-				      false, access, sptep);
+		sp = NULL;
+		if (!is_shadow_present_pte(*sptep)) {
+			table_gfn = gw->table_gfn[level - 2];
+			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					      false, access, sptep);
+		}
 
 		/*
 		 * Verify that the gpte in the page we've just write
@@ -357,7 +370,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		if (FNAME(gpte_changed)(vcpu, gw, level - 1))
 			goto out_gpte_changed;
 
-		link_shadow_page(sptep, sp);
+		if (sp)
+			link_shadow_page(sptep, sp);
 	}
 
 	for (;
@@ -392,7 +406,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	return sptep;
 
 out_gpte_changed:
-	kvm_mmu_put_page(sp, sptep);
+	if (sp)
+		kvm_mmu_put_page(sp, sptep);
 	kvm_release_pfn_clean(pfn);
 	return NULL;
 }
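Taken together, the descent loop in FNAME(fetch) now reads roughly as follows;
this is a simplified sketch of the post-patch control flow, with spte
construction and leaf handling elided and kvm_mmu_get_page() arguments
abbreviated:

	for (shadow_walk_init(&iterator, vcpu, addr);
	     shadow_walk_okay(&iterator) && iterator.level > gw->level;
	     shadow_walk_next(&iterator)) {
		sp = NULL;
		if (!is_shadow_present_pte(*sptep))
			/* only build a shadow page when one is missing */
			sp = kvm_mmu_get_page(vcpu, ..., sptep);

		/* validate this level's gpte even if the sp already existed */
		if (FNAME(gpte_changed)(vcpu, gw, level - 1))
			goto out_gpte_changed;

		/* publish the new page only after the gpte checked out */
		if (sp)
			link_shadow_page(sptep, sp);
	}

Ordering the gpte_changed() check before link_shadow_page() means a freshly
built sp never becomes reachable through a stale gpte, while the existing-sp
path skips the link but still gets the check, which is the point of the fix.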