aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 146f295ee322..d43867c33bc4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4481,9 +4481,11 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	pfn = spte_to_pfn(*sptep);
 
 	/*
-	 * Only EPT supported for now; otherwise, one would need to
-	 * find out efficiently whether the guest page tables are
-	 * also using huge pages.
+	 * We cannot do huge page mapping for indirect shadow pages,
+	 * which are found on the last rmap (level = 1) when not using
+	 * tdp; such shadow pages are synced with the page table in
+	 * the guest, and the guest page table is using 4K page size
+	 * mapping if the indirect sp has level = 1.
 	 */
 	if (sp->role.direct &&
 	    !kvm_is_reserved_pfn(pfn) &&
@@ -4504,19 +4506,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	bool flush = false;
 	unsigned long *rmapp;
 	unsigned long last_index, index;
-	gfn_t gfn_start, gfn_end;
 
 	spin_lock(&kvm->mmu_lock);
 
-	gfn_start = memslot->base_gfn;
-	gfn_end = memslot->base_gfn + memslot->npages - 1;
-
-	if (gfn_start >= gfn_end)
-		goto out;
-
 	rmapp = memslot->arch.rmap[0];
-	last_index = gfn_to_index(gfn_end, memslot->base_gfn,
-				PT_PAGE_TABLE_LEVEL);
+	last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
+			memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
 
 	for (index = 0; index <= last_index; ++index, ++rmapp) {
 		if (*rmapp)
@@ -4534,7 +4529,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 
-out:
 	spin_unlock(&kvm->mmu_lock);
 }
 