aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2012-08-03 03:41:22 -0400
committerAvi Kivity <avi@redhat.com>2012-08-06 09:04:55 -0400
commit6cede2e6794be6b0649f62d3681e0c4aff5a9270 (patch)
treeb05fdcfd0db7974c7ed7f036d07520ac4c12202f
parent9a592a953880fd6981955e69c1476ce541d9bd16 (diff)
KVM: introduce KVM_ERR_PTR_BAD_PAGE
It is used to eliminate the overhead of the function call and to clean up the code. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--include/linux/kvm_host.h9
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c13
3 files changed, 9 insertions, 15 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e2dcc7cb2284..ce7c32950f4e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -68,6 +68,13 @@ static inline int is_invalid_pfn(pfn_t pfn)
68 return !is_noslot_pfn(pfn) && is_error_pfn(pfn); 68 return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
69} 69}
70 70
71#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
72
73static inline int is_error_page(struct page *page)
74{
75 return IS_ERR(page);
76}
77
71/* 78/*
72 * vcpu->requests bit members 79 * vcpu->requests bit members
73 */ 80 */
@@ -409,7 +416,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
409 return slot; 416 return slot;
410} 417}
411 418
412int is_error_page(struct page *page);
413int kvm_is_error_hva(unsigned long addr); 419int kvm_is_error_hva(unsigned long addr);
414int kvm_set_memory_region(struct kvm *kvm, 420int kvm_set_memory_region(struct kvm *kvm,
415 struct kvm_userspace_memory_region *mem, 421 struct kvm_userspace_memory_region *mem,
@@ -436,7 +442,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm);
436int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, 442int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
437 int nr_pages); 443 int nr_pages);
438 444
439struct page *get_bad_page(void);
440struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 445struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
441unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 446unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
442void kvm_release_page_clean(struct page *page); 447void kvm_release_page_clean(struct page *page);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 79722782d9d7..56f553391896 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -203,7 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
203 if (!work) 203 if (!work)
204 return -ENOMEM; 204 return -ENOMEM;
205 205
206 work->page = get_bad_page(); 206 work->page = KVM_ERR_PTR_BAD_PAGE;
207 INIT_LIST_HEAD(&work->queue); /* for list_del to work */ 207 INIT_LIST_HEAD(&work->queue); /* for list_del to work */
208 208
209 spin_lock(&vcpu->async_pf.lock); 209 spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eb73e5f13678..93d3c6e063c8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -922,17 +922,6 @@ void kvm_disable_largepages(void)
922} 922}
923EXPORT_SYMBOL_GPL(kvm_disable_largepages); 923EXPORT_SYMBOL_GPL(kvm_disable_largepages);
924 924
925int is_error_page(struct page *page)
926{
927 return IS_ERR(page);
928}
929EXPORT_SYMBOL_GPL(is_error_page);
930
931struct page *get_bad_page(void)
932{
933 return ERR_PTR(-ENOENT);
934}
935
936static inline unsigned long bad_hva(void) 925static inline unsigned long bad_hva(void)
937{ 926{
938 return PAGE_OFFSET; 927 return PAGE_OFFSET;
@@ -1179,7 +1168,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
1179 WARN_ON(kvm_is_mmio_pfn(pfn)); 1168 WARN_ON(kvm_is_mmio_pfn(pfn));
1180 1169
1181 if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn)) 1170 if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
1182 return get_bad_page(); 1171 return KVM_ERR_PTR_BAD_PAGE;
1183 1172
1184 return pfn_to_page(pfn); 1173 return pfn_to_page(pfn);
1185} 1174}