 arch/powerpc/kvm/e500_tlb.c |  1 -
 arch/x86/kvm/mmu.c          |  7 +++----
 arch/x86/kvm/mmu_audit.c    |  4 +---
 arch/x86/kvm/paging_tmpl.h  |  8 ++------
 virt/kvm/iommu.c            |  1 -
 virt/kvm/kvm_main.c         | 14 ++++++++------
 6 files changed, 14 insertions(+), 21 deletions(-)
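Every hunk below repeats one pattern: the error-path call to kvm_release_pfn_clean() is dropped, and kvm_release_pfn_clean() itself now warns when handed an error pfn (see the virt/kvm/kvm_main.c hunks). A minimal before/after sketch of the caller-side contract this patch appears to establish, using the helpers that the hunks themselves use (gfn_to_pfn_atomic(), is_error_pfn(), kvm_release_pfn_clean()). map_gfn_old() and map_gfn_new() are hypothetical illustrations, not functions from this patch, and the snippet is kernel-style sketch code, not standalone buildable code:

/*
 * Hypothetical caller, illustrating the pattern *before* this patch:
 * an error pfn still carried a page reference, so every error path
 * had to release it as well.
 */
static void map_gfn_old(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);     /* drop the error pfn too */
                return;
        }
        /* ... use the pfn ... */
        kvm_release_pfn_clean(pfn);
}

/*
 * The same hypothetical caller *after* this patch: an error pfn is a
 * bare error code with no page reference behind it, so it must simply
 * not be released; kvm_release_pfn_clean() now WARNs if it sees one.
 */
static void map_gfn_new(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn))
                return;
        /* ... use the pfn ... */
        kvm_release_pfn_clean(pfn);
}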
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c8f6c5826742..09ce5ac128f8 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         if (is_error_pfn(pfn)) {
                 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                 (long)gfn);
-                kvm_release_pfn_clean(pfn);
                 return;
         }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d3cdf69da513..9651c2cd0005 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                 rmap_recycle(vcpu, sptep, gfn);
                 }
         }
-        kvm_release_pfn_clean(pfn);
+
+        if (!is_error_pfn(pfn))
+                kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-        kvm_release_pfn_clean(pfn);
         if (pfn == KVM_PFN_ERR_HWPOISON) {
                 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
                 return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
         if (!async)
                 return false; /* *pfn has correct page already */
 
-        kvm_release_pfn_clean(*pfn);
-
         if (!prefault && can_do_async_pf(vcpu)) {
                 trace_kvm_try_async_get_page(gva, gfn);
                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ca403f9bb0f2..daff69e21150 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
         gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
         pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-        if (is_error_pfn(pfn)) {
-                kvm_release_pfn_clean(pfn);
+        if (is_error_pfn(pfn))
                 return;
-        }
 
         hpa = pfn << PAGE_SHIFT;
         if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bb7cf01cae76..bf8c42bf50fe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
         pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
         pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
         pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-        if (mmu_invalid_pfn(pfn)) {
-                kvm_release_pfn_clean(pfn);
+        if (mmu_invalid_pfn(pfn))
                 return;
-        }
 
         /*
          * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                 gfn = gpte_to_gfn(gpte);
                 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                               pte_access & ACC_WRITE_MASK);
-                if (mmu_invalid_pfn(pfn)) {
-                        kvm_release_pfn_clean(pfn);
+                if (mmu_invalid_pfn(pfn))
                         break;
-                }
 
                 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                              NULL, PT_PAGE_TABLE_LEVEL, gfn,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 6a67bea4019c..037cb6730e68 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                  */
                 pfn = kvm_pin_pages(slot, gfn, page_size);
                 if (is_error_pfn(pfn)) {
-                        kvm_release_pfn_clean(pfn);
                         gfn += 1;
                         continue;
                 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 93d3c6e063c8..eafba99d1070 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -102,9 +102,6 @@ static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-        if (is_error_pfn(pfn))
-                return false;
-
         if (pfn_valid(pfn)) {
                 int reserved;
                 struct page *tail = pfn_to_page(pfn);
@@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-        WARN_ON(kvm_is_mmio_pfn(pfn));
+        if (is_error_pfn(pfn))
+                return KVM_ERR_PTR_BAD_PAGE;
 
-        if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+        if (kvm_is_mmio_pfn(pfn)) {
+                WARN_ON(1);
                 return KVM_ERR_PTR_BAD_PAGE;
+        }
 
         return pfn_to_page(pfn);
 }
@@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-        if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+        WARN_ON(is_error_pfn(pfn));
+
+        if (!kvm_is_mmio_pfn(pfn))
                 put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
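One apparent design choice, reading the virt/kvm/kvm_main.c hunks: rather than silently tolerating error pfns, kvm_release_pfn_clean() keeps working for valid pfns but makes misuse loud with WARN_ON(is_error_pfn(pfn)), while kvm_pfn_to_page() filters error pfns before the MMIO check so that only a genuinely unexpected MMIO pfn trips its WARN_ON(1). Likewise, dropping the is_error_pfn() test from kvm_is_mmio_pfn() suggests that callers are now expected to reject error pfns before asking any other question about a pfn.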