author	Gleb Natapov <gleb@redhat.com>	2010-07-07 13:16:45 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 23:40:33 -0400
commit	edba23e51578f7cb6781461568489fc1825db4ac (patch)
tree	54ce6b22998c1bf7c40cfa43e3ebd8b9df271dcb /virt
parent	fa7bff8f8a7d3de61c0473d0b6dc5a0f4fdc6ac9 (diff)
KVM: Return EFAULT from kvm ioctl when guest accesses bad area
Currently, if the guest accesses an address that belongs to a memory slot but is not backed by a page, or the page is read-only, KVM treats it like an MMIO access. Remove that capability. It was never part of the interface and should not be relied upon.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
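For context, the observable effect is on the ioctl path: with this change the core code hands back the new fault_pfn for a slot address it cannot back with a page, and is_error_pfn()/is_fault_pfn() recognize it, so arch code can turn the fault into -EFAULT instead of emulating MMIO. The snippet below is a minimal sketch of such a consumer, written only to illustrate the intent of the commit message; the helper name handle_guest_page_fault and its surrounding logic are assumptions for illustration and are not part of this patch, which touches only virt/.

	/*
	 * Hypothetical arch-side consumer (sketch only, not part of this diff).
	 * gfn_to_pfn() now returns fault_pfn when the slot address has no
	 * backing page, and is_error_pfn()/is_fault_pfn() recognize it.
	 */
	static int handle_guest_page_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

		if (is_error_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			if (is_fault_pfn(pfn))
				return -EFAULT;	/* propagated out of the KVM_RUN ioctl */
			return 1;		/* other error pages (bad/hwpoison) handled elsewhere */
		}

		/* ... map the pfn into the shadow/NPT/EPT page tables ... */
		kvm_release_pfn_clean(pfn);
		return 0;
	}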
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 630d1224f187..b78b794c1039 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -96,6 +96,9 @@ static bool largepages_enabled = true;
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;
 
+static struct page *fault_page;
+static pfn_t fault_pfn;
+
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -815,13 +818,13 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
 int is_error_page(struct page *page)
 {
-	return page == bad_page || page == hwpoison_page;
+	return page == bad_page || page == hwpoison_page || page == fault_page;
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
 int is_error_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn || pfn == hwpoison_pfn;
+	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);
 
@@ -831,6 +834,12 @@ int is_hwpoison_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
 
+int is_fault_pfn(pfn_t pfn)
+{
+	return pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_fault_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -959,8 +968,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
 			up_read(&current->mm->mmap_sem);
-			get_page(bad_page);
-			return page_to_pfn(bad_page);
+			get_page(fault_page);
+			return page_to_pfn(fault_page);
 		}
 
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -2226,6 +2235,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 	hwpoison_pfn = page_to_pfn(hwpoison_page);
 
+	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	if (fault_page == NULL) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
+	fault_pfn = page_to_pfn(fault_page);
+
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
 		goto out_free_0;
@@ -2298,6 +2316,8 @@ out_free_1:
 out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
+	if (fault_page)
+		__free_page(fault_page);
 	if (hwpoison_page)
 		__free_page(hwpoison_page);
 	__free_page(bad_page);
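From userspace, the change described in the commit title is visible as an ioctl failure rather than an MMIO exit: a VMM whose memory slot covers an address range that cannot actually be backed by a page will now see KVM_RUN fail with EFAULT. The sketch below shows how a caller might detect this; setup and other error handling are elided, and nothing here beyond the EFAULT return is prescribed by the patch itself.

	/*
	 * Userspace view (sketch): KVM_RUN now fails with errno == EFAULT
	 * instead of reporting an MMIO exit for an unbackable slot address.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int run_vcpu(int vcpu_fd)
	{
		int ret = ioctl(vcpu_fd, KVM_RUN, 0);

		if (ret < 0 && errno == EFAULT) {
			fprintf(stderr, "guest touched a slot address with no backing page\n");
			return -1;
		}
		return ret;
	}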