author	Avi Kivity <avi@redhat.com>	2010-07-13 07:27:09 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 23:40:45 -0400
commit	0b3c933302262d83018dd5f69656bca9f28a0cd3 (patch)
tree	809d6c97623ee87baa195ded38fe98a0a3df8638 /arch
parent	39c8c672a18c52048343d7531dfb2dcf3431ee74 (diff)
KVM: MMU: Simplify spte fetch() function
Partition the function into three sections:

 - fetching indirect shadow pages (host_level > guest_level)
 - fetching direct shadow pages (page_level < host_level <= guest_level)
 - the final spte (page_level == host_level)

instead of the current spaghetti.

A slight change from the original code is that we call validate_direct_spte()
more often: previously we called it only for gw->level, now we also call it
for lower levels.  The change should have no effect.

[xiao: fix regression caused by validate_direct_spte() called too late]

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
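In outline, the rewritten fetch() makes the three sections explicit in its control flow. The following is a condensed sketch only, using the identifiers from the diff below; loop bodies and the mmu_set_spte() arguments are elided, and the gpte_changed/out_gpte_changed error path is omitted:

/* 1. Indirect shadow pages: shadow levels above the guest's own level. */
for (shadow_walk_init(&iterator, vcpu, addr);
     shadow_walk_okay(&iterator) && iterator.level > gw->level;
     shadow_walk_next(&iterator)) {
	/* shadow the guest page table at gw->table_gfn[level - 2] */
}

/* 2. Direct shadow pages: a large guest page backed by smaller host pages. */
for (;
     shadow_walk_okay(&iterator) && iterator.level > hlevel;
     shadow_walk_next(&iterator)) {
	/* map a direct sp covering gw->gfn at this level */
}

/* 3. The final spte at the host page level. */
mmu_set_spte(vcpu, iterator.sptep, /* ... */);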
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	93
1 file changed, 49 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e1c1f9eb1cc1..368e4cb6233b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -321,9 +321,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp;
 	u64 *sptep = NULL;
-	int direct;
-	gfn_t table_gfn;
-	int level;
+	int uninitialized_var(level);
 	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
 	unsigned direct_access;
 	struct kvm_shadow_walk_iterator iterator;
@@ -335,61 +333,68 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!dirty)
 		direct_access &= ~ACC_WRITE_MASK;
 
-	for_each_shadow_entry(vcpu, addr, iterator) {
+	for (shadow_walk_init(&iterator, vcpu, addr);
+	     shadow_walk_okay(&iterator) && iterator.level > gw->level;
+	     shadow_walk_next(&iterator)) {
+		gfn_t table_gfn;
+
 		level = iterator.level;
 		sptep = iterator.sptep;
-		if (iterator.level == hlevel) {
-			mmu_set_spte(vcpu, sptep, access,
-				     gw->pte_access & access,
-				     user_fault, write_fault,
-				     dirty, ptwrite, level,
-				     gw->gfn, pfn, false, true);
-			break;
-		}
-
-		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
-		    && level == gw->level)
-			validate_direct_spte(vcpu, sptep, direct_access);
 
 		drop_large_spte(vcpu, sptep);
 
 		if (is_shadow_present_pte(*sptep))
 			continue;
 
-		if (level <= gw->level) {
-			direct = 1;
-			access = direct_access;
-
-			/*
-			 * It is a large guest pages backed by small host pages,
-			 * So we set @direct(@sp->role.direct)=1, and set
-			 * @table_gfn(@sp->gfn)=the base page frame for linear
-			 * translations.
-			 */
-			table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
-			access &= gw->pte_access;
-		} else {
-			direct = 0;
-			table_gfn = gw->table_gfn[level - 2];
-		}
+		table_gfn = gw->table_gfn[level - 2];
 		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-				      direct, access, sptep);
-		if (!direct)
-			/*
-			 * Verify that the gpte in the page we've just write
-			 * protected is still there.
-			 */
-			if (FNAME(gpte_changed)(vcpu, gw, level - 1)) {
-				kvm_mmu_put_page(sp, sptep);
-				kvm_release_pfn_clean(pfn);
-				sptep = NULL;
-				break;
-			}
+				      false, access, sptep);
+
+		/*
+		 * Verify that the gpte in the page we've just write
+		 * protected is still there.
+		 */
+		if (FNAME(gpte_changed)(vcpu, gw, level - 1))
+			goto out_gpte_changed;
+
+		link_shadow_page(sptep, sp);
+	}
+
+	for (;
+	     shadow_walk_okay(&iterator) && iterator.level > hlevel;
+	     shadow_walk_next(&iterator)) {
+		gfn_t direct_gfn;
 
+		level = iterator.level;
+		sptep = iterator.sptep;
+
+		validate_direct_spte(vcpu, sptep, direct_access);
+
+		drop_large_spte(vcpu, sptep);
+
+		if (is_shadow_present_pte(*sptep))
+			continue;
+
+		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+
+		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
+				      true, direct_access, sptep);
 		link_shadow_page(sptep, sp);
 	}
 
+	sptep = iterator.sptep;
+	level = iterator.level;
+
+	mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+		     user_fault, write_fault, dirty, ptwrite, level,
+		     gw->gfn, pfn, false, true);
+
 	return sptep;
+
+out_gpte_changed:
+	kvm_mmu_put_page(sp, sptep);
+	kvm_release_pfn_clean(pfn);
+	return NULL;
 }
 
 /*