author     Avi Kivity <avi@redhat.com>    2010-07-13 07:27:11 -0400
committer  Avi Kivity <avi@redhat.com>    2010-08-01 23:40:48 -0400
commit     24157aaf833261e68e5a398fa54bd15e4fa1d0b7 (patch)
tree       3c351cc93f55bd7eef4a1069fc019940e8065fb8 /arch/x86/kvm
parent     5991b33237b7fc7dd9f62ae04998c42217d444a7 (diff)
KVM: MMU: Eliminate redundant temporaries in FNAME(fetch)
'level' and 'sptep' are aliases for 'iterator.level' and 'iterator.sptep'; there is
no need for them.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
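
For context, the change leans on the fact that the shadow-walk iterator already exposes the current level and spte pointer at every step of the loop. The standalone sketch below is not the kernel's real API: struct walk_iterator and walk_init/walk_okay/walk_next are simplified stand-ins for struct kvm_shadow_walk_iterator and shadow_walk_init/shadow_walk_okay/shadow_walk_next. It only illustrates why separate 'level' and 'sptep' locals add nothing; the loop body can read it.level and it.sptep directly.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Simplified stand-in for struct kvm_shadow_walk_iterator. */
struct walk_iterator {
	int level;	/* current paging level, counts down towards 1 */
	u64 *sptep;	/* pointer to the shadow PTE at this level */
};

static void walk_init(struct walk_iterator *it, u64 *root, int top_level)
{
	it->level = top_level;
	it->sptep = root;
}

static int walk_okay(const struct walk_iterator *it)
{
	return it->level >= 1;
}

static void walk_next(struct walk_iterator *it)
{
	/* The real iterator descends into the next page table here. */
	it->level--;
	it->sptep++;
}

int main(void)
{
	u64 fake_sptes[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct walk_iterator it;

	/*
	 * No 'level = it.level' / 'sptep = it.sptep' copies are needed:
	 * the iterator's own fields are valid on every iteration.
	 */
	for (walk_init(&it, fake_sptes, 4); walk_okay(&it); walk_next(&it))
		printf("level %d, spte %#llx\n", it.level,
		       (unsigned long long)*it.sptep);

	return 0;
}

The rename from 'iterator' to 'it' presumably also keeps the it.level/it.sptep accesses inside the kernel's 80-column line limit once the locals are gone.
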
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h   59
1 file changed, 24 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 8cb85f9c8adb..d9a2742014e3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -320,12 +320,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
-	u64 *sptep = NULL;
-	int uninitialized_var(level);
 	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
 	int top_level;
 	unsigned direct_access;
-	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_shadow_walk_iterator it;
 
 	if (!is_present_gpte(gw->ptes[gw->level - 1]))
 		return NULL;
@@ -346,68 +344,59 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
-	for (shadow_walk_init(&iterator, vcpu, addr);
-	     shadow_walk_okay(&iterator) && iterator.level > gw->level;
-	     shadow_walk_next(&iterator)) {
+	for (shadow_walk_init(&it, vcpu, addr);
+	     shadow_walk_okay(&it) && it.level > gw->level;
+	     shadow_walk_next(&it)) {
 		gfn_t table_gfn;
 
-		level = iterator.level;
-		sptep = iterator.sptep;
-
-		drop_large_spte(vcpu, sptep);
+		drop_large_spte(vcpu, it.sptep);
 
 		sp = NULL;
-		if (!is_shadow_present_pte(*sptep)) {
-			table_gfn = gw->table_gfn[level - 2];
-			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-					      false, access, sptep);
+		if (!is_shadow_present_pte(*it.sptep)) {
+			table_gfn = gw->table_gfn[it.level - 2];
+			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
+					      false, access, it.sptep);
 		}
 
 		/*
 		 * Verify that the gpte in the page we've just write
 		 * protected is still there.
 		 */
-		if (FNAME(gpte_changed)(vcpu, gw, level - 1))
+		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
 			goto out_gpte_changed;
 
 		if (sp)
-			link_shadow_page(sptep, sp);
+			link_shadow_page(it.sptep, sp);
 	}
 
 	for (;
-	     shadow_walk_okay(&iterator) && iterator.level > hlevel;
-	     shadow_walk_next(&iterator)) {
+	     shadow_walk_okay(&it) && it.level > hlevel;
+	     shadow_walk_next(&it)) {
 		gfn_t direct_gfn;
 
-		level = iterator.level;
-		sptep = iterator.sptep;
+		validate_direct_spte(vcpu, it.sptep, direct_access);
 
-		validate_direct_spte(vcpu, sptep, direct_access);
+		drop_large_spte(vcpu, it.sptep);
 
-		drop_large_spte(vcpu, sptep);
-
-		if (is_shadow_present_pte(*sptep))
+		if (is_shadow_present_pte(*it.sptep))
 			continue;
 
-		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 
-		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
-				      true, direct_access, sptep);
-		link_shadow_page(sptep, sp);
+		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
+				      true, direct_access, it.sptep);
+		link_shadow_page(it.sptep, sp);
 	}
 
-	sptep = iterator.sptep;
-	level = iterator.level;
-
-	mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
-		     user_fault, write_fault, dirty, ptwrite, level,
+	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
+		     user_fault, write_fault, dirty, ptwrite, it.level,
 		     gw->gfn, pfn, false, true);
 
-	return sptep;
+	return it.sptep;
 
 out_gpte_changed:
 	if (sp)
-		kvm_mmu_put_page(sp, sptep);
+		kvm_mmu_put_page(sp, it.sptep);
 	kvm_release_pfn_clean(pfn);
 	return NULL;
 }