author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-11-10 03:33:56 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-11-26 08:40:45 -0500
commit		d3fccc7ef831d1d829b4da5eaa081db55b1e38f3 (patch)
tree		b6c81a616eb4a7a43a6fa4641c6554f4d25955b2 /arch/x86/kvm/mmu.c
parent		bb55e9b131d70ab9e30d73ab1342ad4907f9e0de (diff)
kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.

The problem being addressed by the patch above was that some ARM code based the memory mapping attributes of a pfn on the return value of kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should be mapped as device memory.

However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin, and the existing non-ARM users were already using it in a way which suggests that its name should probably have been 'kvm_is_reserved_pfn' from the beginning, e.g., whether or not to call get_page/put_page on it etc. This means that returning false for the zero page is a mistake and the patch above should be reverted.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
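For reference, the renamed helper is defined in virt/kvm/kvm_main.c and is therefore outside this diffstat-limited view. The sketch below is reconstructed from the description above (revert of the is_zero_pfn() special case plus the rename), not part of this patch:

	/* Sketch: kvm_is_reserved_pfn() after the revert. A pfn counts as
	 * reserved if its struct page is marked PageReserved, or if it has
	 * no struct page at all (!pfn_valid). The zero page is PageReserved,
	 * so it is reported as reserved again, as intended by this revert. */
	bool kvm_is_reserved_pfn(pfn_t pfn)
	{
		if (pfn_valid(pfn))
			return PageReserved(pfn_to_page(pfn));

		return true;
	}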
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	 * kvm mmu, before reclaiming the page, we should
 	 * unmap it from mmu first.
 	 */
-	WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_mmio_pfn(pfn));
+			kvm_is_reserved_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
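As an illustration of the get_page/put_page point made in the commit message, a typical non-ARM caller (sketched here; it lives outside this diff and is shown only under that assumption) drops a page reference only when the pfn is not reserved:

	/* Sketch of a caller such as kvm_release_pfn_clean(): reserved pfns
	 * (including the zero page, once this revert is applied) never had
	 * get_page() taken on them, so put_page() must be skipped for them. */
	void kvm_release_pfn_clean(pfn_t pfn)
	{
		if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
			put_page(pfn_to_page(pfn));
	}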