 arch/x86/kvm/paging_tmpl.h | 18 ------------------
 1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
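
For context, the hunks above delete invlpg's speculative re-shadowing of the guest PTE after the shadow entry is zapped. The deleted code first had to locate that guest PTE by guest-physical address, derived from the shadow page containing sptep. A minimal annotated sketch of that computation, using the identifiers from the removed lines (the comments are explanatory additions, assuming the usual shadow-MMU layout where a shadow page mirrors one guest page table):

	/*
	 * page_header() maps a shadow PTE pointer back to the
	 * struct kvm_mmu_page that contains it.  sp->gfn is the guest
	 * frame number of the guest page table this shadow page
	 * mirrors, and an entry has the same index in both tables, so
	 * the guest PTE's offset within that frame is the shadow-entry
	 * index scaled by the guest PTE size.
	 */
	struct kvm_mmu_page *sp = page_header(__pa(sptep));
	gpa_t pte_gpa = (sp->gfn << PAGE_SHIFT);

	pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

With the prefetch removed, the next guest access through the invalidated mapping simply takes a page fault and is re-shadowed by the regular fault path.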
