aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm/kvm_main.c
diff options
context:
space:
mode:
authorBen-Ami Yassour <benami@il.ibm.com>2008-07-28 12:26:24 -0400
committerAvi Kivity <avi@qumranet.com>2008-10-15 04:15:17 -0400
commitcbff90a7caa49507d399c9a55ba4a411e840bfb4 (patch)
treebadd7eb07475ace6098f0bff66ce296c5ac0f7b7 /virt/kvm/kvm_main.c
parent0293615f3fb9886b6b23800c121be293bb7483e9 (diff)
KVM: direct mmio pfn check
Userspace may specify memory slots that are backed by mmio pages rather than normal RAM. In some cases it is not enough to identify these mmio pages by pfn_valid(). This patch adds a check of PageReserved as well.

Signed-off-by: Ben-Ami Yassour <benami@il.ibm.com>
Signed-off-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--virt/kvm/kvm_main.c22
1 files changed, 15 insertions, 7 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7dd9b0b85e4e..5eb96c7c8d7a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -76,6 +76,14 @@ static inline int valid_vcpu(int n)
76 return likely(n >= 0 && n < KVM_MAX_VCPUS); 76 return likely(n >= 0 && n < KVM_MAX_VCPUS);
77} 77}
78 78
79static inline int is_mmio_pfn(pfn_t pfn)
80{
81 if (pfn_valid(pfn))
82 return PageReserved(pfn_to_page(pfn));
83
84 return true;
85}
86
79/* 87/*
80 * Switches to specified vcpu, until a matching vcpu_put() 88 * Switches to specified vcpu, until a matching vcpu_put()
81 */ 89 */
@@ -740,7 +748,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
740 } 748 }
741 749
742 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 750 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
743 BUG_ON(pfn_valid(pfn)); 751 BUG_ON(!is_mmio_pfn(pfn));
744 } else 752 } else
745 pfn = page_to_pfn(page[0]); 753 pfn = page_to_pfn(page[0]);
746 754
@@ -754,10 +762,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
754 pfn_t pfn; 762 pfn_t pfn;
755 763
756 pfn = gfn_to_pfn(kvm, gfn); 764 pfn = gfn_to_pfn(kvm, gfn);
757 if (pfn_valid(pfn)) 765 if (!is_mmio_pfn(pfn))
758 return pfn_to_page(pfn); 766 return pfn_to_page(pfn);
759 767
760 WARN_ON(!pfn_valid(pfn)); 768 WARN_ON(is_mmio_pfn(pfn));
761 769
762 get_page(bad_page); 770 get_page(bad_page);
763 return bad_page; 771 return bad_page;
@@ -773,7 +781,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
773 781
774void kvm_release_pfn_clean(pfn_t pfn) 782void kvm_release_pfn_clean(pfn_t pfn)
775{ 783{
776 if (pfn_valid(pfn)) 784 if (!is_mmio_pfn(pfn))
777 put_page(pfn_to_page(pfn)); 785 put_page(pfn_to_page(pfn));
778} 786}
779EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 787EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -799,7 +807,7 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
799 807
800void kvm_set_pfn_dirty(pfn_t pfn) 808void kvm_set_pfn_dirty(pfn_t pfn)
801{ 809{
802 if (pfn_valid(pfn)) { 810 if (!is_mmio_pfn(pfn)) {
803 struct page *page = pfn_to_page(pfn); 811 struct page *page = pfn_to_page(pfn);
804 if (!PageReserved(page)) 812 if (!PageReserved(page))
805 SetPageDirty(page); 813 SetPageDirty(page);
@@ -809,14 +817,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
809 817
810void kvm_set_pfn_accessed(pfn_t pfn) 818void kvm_set_pfn_accessed(pfn_t pfn)
811{ 819{
812 if (pfn_valid(pfn)) 820 if (!is_mmio_pfn(pfn))
813 mark_page_accessed(pfn_to_page(pfn)); 821 mark_page_accessed(pfn_to_page(pfn));
814} 822}
815EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 823EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
816 824
817void kvm_get_pfn(pfn_t pfn) 825void kvm_get_pfn(pfn_t pfn)
818{ 826{
819 if (pfn_valid(pfn)) 827 if (!is_mmio_pfn(pfn))
820 get_page(pfn_to_page(pfn)); 828 get_page(pfn_to_page(pfn));
821} 829}
822EXPORT_SYMBOL_GPL(kvm_get_pfn); 830EXPORT_SYMBOL_GPL(kvm_get_pfn);