author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2014-11-10 03:33:56 -0500
committer Marc Zyngier <marc.zyngier@arm.com>         2014-11-25 08:57:26 -0500
commit    bf4bea8e9a9058319a19f8c2710a6f0ef2459983 (patch)
tree      192e21f7d1509b7ea3303ac64bc21f9123bffcd4 /virt/kvm
parent    07a9748c78cfc39b54f06125a216b67b9c8f09ed (diff)
kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.

The problem being addressed by the patch above was that some ARM code based the memory mapping attributes of a pfn on the return value of kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should be mapped as device memory.

However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin, and the existing non-ARM users were already using it in a way which suggests that its name should probably have been 'kvm_is_reserved_pfn' from the beginning, e.g., whether or not to call get_page/put_page on it etc. This means that returning false for the zero page is a mistake and the patch above should be reverted.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
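To make the refcounting point concrete, here is a short illustrative sketch (kernel-style C, not part of the patch; the *_reverted/*_restored helper names are invented for side-by-side comparison, the real code is in the diff below). Callers such as kvm_release_pfn_clean() and kvm_get_pfn() gate put_page()/get_page() on this predicate, so carving out the zero page changes reference-counting behaviour at every such call site:

/* Illustrative comparison only; see kvm_main.c hunks below for the real change. */
#include <linux/mm.h>
#include <linux/kvm_host.h>

/* Behaviour being reverted: the zero page was reported as non-reserved. */
static bool kvm_is_reserved_pfn_reverted(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
	return true;
}

/* Behaviour restored by this patch: any reserved page, zero page included. */
static bool kvm_is_reserved_pfn_restored(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));
	return true;
}

/*
 * Sketch of a refcount-oriented caller (modelled on kvm_release_pfn_clean):
 * with the reverted variant the zero page would take this put_page() path,
 * which is exactly the get_page/put_page usage the commit message refers to.
 */
static void release_pfn_sketch(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn_restored(pfn))
		put_page(pfn_to_page(pfn));
}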
Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/kvm_main.c   16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25ffac9e947d..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);