path: root/arch/x86/kvm
author    Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2012-08-03 03:42:10 -0400
committer Avi Kivity <avi@redhat.com>                        2012-08-06 09:04:57 -0400
commit    cb9aaa30b133574b646d9d4766ef08a843211393 (patch)
tree      214d3b1dd115573d1aea11d2f4dafe525a1ac674 /arch/x86/kvm
parent    6cede2e6794be6b0649f62d3681e0c4aff5a9270 (diff)
KVM: do not release the error pfn
After commit a2766325cf9f9, the error pfn is replaced by the error code, so it no longer needs to be released.

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c           7
-rw-r--r--  arch/x86/kvm/mmu_audit.c     4
-rw-r--r--  arch/x86/kvm/paging_tmpl.h   8
3 files changed, 6 insertions, 13 deletions
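The caller-side convention after this change, as a minimal sketch (the wrapper example_map_gfn() is hypothetical; gfn_to_pfn(), is_error_pfn() and kvm_release_pfn_clean() are the KVM helpers touched in the diff below): an error pfn is only an encoded error value, not a pinned page, so it must never be passed to the release helper.

#include <linux/kvm_host.h>

/* Hypothetical caller, illustrating the post-a2766325cf9f9 rule. */
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

        if (is_error_pfn(pfn))
                return -EFAULT; /* nothing was pinned, so nothing to release */

        /* ... install the mapping using pfn ... */

        kvm_release_pfn_clean(pfn); /* real pfns still hold a reference */
        return 0;
}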
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d3cdf69da513..9651c2cd0005 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                 rmap_recycle(vcpu, sptep, gfn);
                         }
                 }
-        kvm_release_pfn_clean(pfn);
+
+        if (!is_error_pfn(pfn))
+                kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-        kvm_release_pfn_clean(pfn);
         if (pfn == KVM_PFN_ERR_HWPOISON) {
                 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
                 return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
         if (!async)
                 return false; /* *pfn has correct page already */
 
-        kvm_release_pfn_clean(*pfn);
-
         if (!prefault && can_do_async_pf(vcpu)) {
                 trace_kvm_try_async_get_page(gva, gfn);
                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ca403f9bb0f2..daff69e21150 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
         gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
         pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-        if (is_error_pfn(pfn)) {
-                kvm_release_pfn_clean(pfn);
+        if (is_error_pfn(pfn))
                 return;
-        }
 
         hpa = pfn << PAGE_SHIFT;
         if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bb7cf01cae76..bf8c42bf50fe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
         pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
         pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
         pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-        if (mmu_invalid_pfn(pfn)) {
-                kvm_release_pfn_clean(pfn);
+        if (mmu_invalid_pfn(pfn))
                 return;
-        }
 
         /*
          * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                 gfn = gpte_to_gfn(gpte);
                 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                               pte_access & ACC_WRITE_MASK);
-                if (mmu_invalid_pfn(pfn)) {
-                        kvm_release_pfn_clean(pfn);
+                if (mmu_invalid_pfn(pfn))
                         break;
-                }
 
                 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                              NULL, PT_PAGE_TABLE_LEVEL, gfn,