about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/e500_tlb.c2
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--include/linux/kvm_host.h28
-rw-r--r--virt/kvm/iommu.c4
-rw-r--r--virt/kvm/kvm_main.c6
9 files changed, 32 insertions, 20 deletions
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index d1107a9b5d13..00e619bf608e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -155,7 +155,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
155 155
156 /* Get host physical address for gpa */ 156 /* Get host physical address for gpa */
157 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 157 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
158 if (is_error_pfn(hpaddr)) { 158 if (is_error_noslot_pfn(hpaddr)) {
159 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 159 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
160 orig_pte->eaddr); 160 orig_pte->eaddr);
161 r = -EINVAL; 161 r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index d0205a545a81..ead58e317294 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -93,7 +93,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
93 93
94 /* Get host physical address for gpa */ 94 /* Get host physical address for gpa */
95 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 95 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
96 if (is_error_pfn(hpaddr)) { 96 if (is_error_noslot_pfn(hpaddr)) {
97 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); 97 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
98 r = -EINVAL; 98 r = -EINVAL;
99 goto out; 99 goto out;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c73389477d17..6305ee692ef7 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -524,7 +524,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
524 if (likely(!pfnmap)) { 524 if (likely(!pfnmap)) {
525 unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); 525 unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
526 pfn = gfn_to_pfn_memslot(slot, gfn); 526 pfn = gfn_to_pfn_memslot(slot, gfn);
527 if (is_error_pfn(pfn)) { 527 if (is_error_noslot_pfn(pfn)) {
528 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", 528 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
529 (long)gfn); 529 (long)gfn);
530 return; 530 return;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aabb1289ff04..b875a9ed9b8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2699,7 +2699,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2699 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done 2699 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2700 * here. 2700 * here.
2701 */ 2701 */
2702 if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && 2702 if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
2703 level == PT_PAGE_TABLE_LEVEL && 2703 level == PT_PAGE_TABLE_LEVEL &&
2704 PageTransCompound(pfn_to_page(pfn)) && 2704 PageTransCompound(pfn_to_page(pfn)) &&
2705 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { 2705 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
@@ -2733,7 +2733,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2733 bool ret = true; 2733 bool ret = true;
2734 2734
2735 /* The pfn is invalid, report the error! */ 2735 /* The pfn is invalid, report the error! */
2736 if (unlikely(is_invalid_pfn(pfn))) { 2736 if (unlikely(is_error_pfn(pfn))) {
2737 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); 2737 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2738 goto exit; 2738 goto exit;
2739 } 2739 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d17decaf1db9..891eb6d93b8b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -323,7 +323,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
323 protect_clean_gpte(&pte_access, gpte); 323 protect_clean_gpte(&pte_access, gpte);
324 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, 324 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
325 no_dirty_log && (pte_access & ACC_WRITE_MASK)); 325 no_dirty_log && (pte_access & ACC_WRITE_MASK));
326 if (is_invalid_pfn(pfn)) 326 if (is_error_pfn(pfn))
327 return false; 327 return false;
328 328
329 /* 329 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6e5f069bee30..49fa1f0e59bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4504,7 +4504,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4504 * instruction -> ... 4504 * instruction -> ...
4505 */ 4505 */
4506 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 4506 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
4507 if (!is_error_pfn(pfn)) { 4507 if (!is_error_noslot_pfn(pfn)) {
4508 kvm_release_pfn_clean(pfn); 4508 kvm_release_pfn_clean(pfn);
4509 return true; 4509 return true;
4510 } 4510 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 82e2c783a21e..99a47627e046 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -58,28 +58,40 @@
58 58
59/* 59/*
60 * For the normal pfn, the highest 12 bits should be zero, 60 * For the normal pfn, the highest 12 bits should be zero,
61 * so we can mask these bits to indicate the error. 61 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
62 * mask bit 63 to indicate the noslot pfn.
62 */ 63 */
63#define KVM_PFN_ERR_MASK (0xfffULL << 52) 64#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
65#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
66#define KVM_PFN_NOSLOT (0x1ULL << 63)
64 67
65#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) 68#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
66#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) 69#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
67#define KVM_PFN_ERR_BAD (KVM_PFN_ERR_MASK + 2) 70#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
68#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 3)
69 71
72/*
 73 * error pfns indicate that the gfn is in slot but failed to
 74 * translate it to pfn on host.
75 */
70static inline bool is_error_pfn(pfn_t pfn) 76static inline bool is_error_pfn(pfn_t pfn)
71{ 77{
72 return !!(pfn & KVM_PFN_ERR_MASK); 78 return !!(pfn & KVM_PFN_ERR_MASK);
73} 79}
74 80
75static inline bool is_noslot_pfn(pfn_t pfn) 81/*
82 * error_noslot pfns indicate that the gfn can not be
83 * translated to pfn - it is not in slot or failed to
84 * translate it to pfn.
85 */
86static inline bool is_error_noslot_pfn(pfn_t pfn)
76{ 87{
77 return pfn == KVM_PFN_ERR_BAD; 88 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
78} 89}
79 90
80static inline bool is_invalid_pfn(pfn_t pfn) 91/* noslot pfn indicates that the gfn is not in slot. */
92static inline bool is_noslot_pfn(pfn_t pfn)
81{ 93{
82 return !is_noslot_pfn(pfn) && is_error_pfn(pfn); 94 return pfn == KVM_PFN_NOSLOT;
83} 95}
84 96
85#define KVM_HVA_ERR_BAD (PAGE_OFFSET) 97#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 18e1e30019e3..4a340cb23013 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -52,7 +52,7 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
52 end_gfn = gfn + (size >> PAGE_SHIFT); 52 end_gfn = gfn + (size >> PAGE_SHIFT);
53 gfn += 1; 53 gfn += 1;
54 54
55 if (is_error_pfn(pfn)) 55 if (is_error_noslot_pfn(pfn))
56 return pfn; 56 return pfn;
57 57
58 while (gfn < end_gfn) 58 while (gfn < end_gfn)
@@ -106,7 +106,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
106 * important because we unmap and unpin in 4kb steps later. 106 * important because we unmap and unpin in 4kb steps later.
107 */ 107 */
108 pfn = kvm_pin_pages(slot, gfn, page_size); 108 pfn = kvm_pin_pages(slot, gfn, page_size);
109 if (is_error_pfn(pfn)) { 109 if (is_error_noslot_pfn(pfn)) {
110 gfn += 1; 110 gfn += 1;
111 continue; 111 continue;
112 } 112 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index be70035fd42a..2fb73191801f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1208,7 +1208,7 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
1208 return KVM_PFN_ERR_RO_FAULT; 1208 return KVM_PFN_ERR_RO_FAULT;
1209 1209
1210 if (kvm_is_error_hva(addr)) 1210 if (kvm_is_error_hva(addr))
1211 return KVM_PFN_ERR_BAD; 1211 return KVM_PFN_NOSLOT;
1212 1212
1213 /* Do not map writable pfn in the readonly memslot. */ 1213 /* Do not map writable pfn in the readonly memslot. */
1214 if (writable && memslot_is_readonly(slot)) { 1214 if (writable && memslot_is_readonly(slot)) {
@@ -1290,7 +1290,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1290 1290
1291static struct page *kvm_pfn_to_page(pfn_t pfn) 1291static struct page *kvm_pfn_to_page(pfn_t pfn)
1292{ 1292{
1293 if (is_error_pfn(pfn)) 1293 if (is_error_noslot_pfn(pfn))
1294 return KVM_ERR_PTR_BAD_PAGE; 1294 return KVM_ERR_PTR_BAD_PAGE;
1295 1295
1296 if (kvm_is_mmio_pfn(pfn)) { 1296 if (kvm_is_mmio_pfn(pfn)) {
@@ -1322,7 +1322,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1322 1322
1323void kvm_release_pfn_clean(pfn_t pfn) 1323void kvm_release_pfn_clean(pfn_t pfn)
1324{ 1324{
1325 if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn)) 1325 if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
1326 put_page(pfn_to_page(pfn)); 1326 put_page(pfn_to_page(pfn));
1327} 1327}
1328EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1328EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);