author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2014-11-10 03:33:56 -0500
committer Marc Zyngier <marc.zyngier@arm.com>         2014-11-25 08:57:26 -0500
commit    bf4bea8e9a9058319a19f8c2710a6f0ef2459983 (patch)
tree      192e21f7d1509b7ea3303ac64bc21f9123bffcd4
parent    07a9748c78cfc39b54f06125a216b67b9c8f09ed (diff)
kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.

The problem being addressed by the patch above was that some ARM code based the memory mapping attributes of a pfn on the return value of kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should be mapped as device memory.

However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin, and the existing non-ARM users were already using it in a way which suggests that its name should probably have been 'kvm_is_reserved_pfn' from the beginning, e.g., whether or not to call get_page/put_page on it etc. This means that returning false for the zero page is a mistake and the patch above should be reverted.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
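To make the behavioural change concrete, here is a small stand-alone C model of the predicate before and after this patch. It is not kernel code: the pfn constants and the stubbed pfn_valid()/is_zero_pfn()/page_reserved() helpers are made up for illustration, with page_reserved() standing in for PageReserved(pfn_to_page(pfn)). Since the zero page is marked reserved, the old helper reported it as "not MMIO", whereas the new one reports it as reserved, which is what callers like kvm_release_pfn_clean() rely on when deciding whether to call put_page().

/*
 * Stand-alone model (not kernel code) of the predicate change in this
 * patch.  The pfn values and stubbed helpers are hypothetical; they
 * only exist to show why returning false for the zero page broke the
 * get_page()/put_page() style users mentioned above.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZERO_PFN 1UL   /* hypothetical pfn of the shared zero page */
#define ANON_PFN 2UL   /* hypothetical pfn of an ordinary anon page */
#define MMIO_PFN 3UL   /* hypothetical pfn with no struct page */

static bool pfn_valid(unsigned long pfn)   { return pfn != MMIO_PFN; }
static bool is_zero_pfn(unsigned long pfn) { return pfn == ZERO_PFN; }
/* The zero page is marked PG_reserved; ordinary anon pages are not. */
static bool page_reserved(unsigned long pfn) { return pfn == ZERO_PFN; }

/* Old helper (after 85c8555ff0): the zero page reported as "not MMIO". */
static bool kvm_is_mmio_pfn(unsigned long pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && page_reserved(pfn);
        return true;
}

/* New helper: reserved pages, including the zero page, report true. */
static bool kvm_is_reserved_pfn(unsigned long pfn)
{
        if (pfn_valid(pfn))
                return page_reserved(pfn);
        return true;
}

int main(void)
{
        unsigned long pfns[] = { ZERO_PFN, ANON_PFN, MMIO_PFN };

        for (size_t i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
                unsigned long pfn = pfns[i];
                /* Callers such as kvm_release_pfn_clean() skip put_page()
                 * when the helper returns true; with the old helper the
                 * zero page was not skipped. */
                printf("pfn %lu: old=%d new=%d\n", pfn,
                       kvm_is_mmio_pfn(pfn), kvm_is_reserved_pfn(pfn));
        }
        return 0;
}

Built with any C compiler, the model prints old=0 new=1 for the zero-page pfn and identical results for the other two cases, matching the revert described above.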
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c  |  2
-rw-r--r--  arch/x86/kvm/mmu.c        |  6
-rw-r--r--  include/linux/kvm_host.h  |  2
-rw-r--r--  virt/kvm/kvm_main.c       | 16
4 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ec6b9acb6bea..dbe46f43884d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
-		if (!kvm_is_mmio_pfn(pfn)) {
+		if (!kvm_is_reserved_pfn(pfn)) {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
 					pfn << PAGE_SHIFT,
 				_PAGE_AR_RWX | _PAGE_MA_WB);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	 * kvm mmu, before reclaiming the page, we should
 	 * unmap it from mmu first.
 	 */
-	WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_mmio_pfn(pfn));
+			kvm_is_reserved_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ea53b04993f2..a6059bdf7b03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25ffac9e947d..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);