aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm/kvm_main.c
diff options
context:
space:
mode:
authorHuang Ying <ying.huang@intel.com>2010-05-31 02:28:19 -0400
committerAvi Kivity <avi@redhat.com>2010-08-01 03:35:26 -0400
commitbf998156d24bcb127318ad5bf531ac3bdfcd6449 (patch)
tree616c19474d7cb626ff9eebc54f6753563a4322cd /virt/kvm/kvm_main.c
parent540ad6b62b3a188a53b51cac81d8a60d40e29fbd (diff)
KVM: Avoid killing userspace through guest SRAO MCE on unmapped pages
In common cases, a guest SRAO MCE will cause the corresponding poisoned page to be un-mapped and SIGBUS to be sent to QEMU-KVM, and QEMU-KVM will then relay the MCE to the guest OS. But it has been reported that if the poisoned page is accessed in the guest after unmapping and before the MCE is relayed to the guest OS, userspace will be killed. The reason is as follows. Because the poisoned page has been un-mapped, a guest access will cause a guest exit and kvm_mmu_page_fault will be called. kvm_mmu_page_fault cannot get the poisoned page for the fault address, so kernel and user space MMIO processing are tried in turn. In user MMIO processing, the poisoned page is accessed again, and then userspace is killed by force_sig_info. To fix the bug, kvm_mmu_page_fault sends a HWPOISON signal to QEMU-KVM and does not try kernel and user space MMIO processing for the poisoned page. [xiao: fix warning introduced by avi] Reported-by: Max Asbock <masbock@linux.vnet.ibm.com> Signed-off-by: Huang Ying <ying.huang@intel.com> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--virt/kvm/kvm_main.c30
1 file changed, 28 insertions, 2 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f032806a212f..187aa8d984a7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -92,6 +92,9 @@ static bool kvm_rebooting;
92 92
93static bool largepages_enabled = true; 93static bool largepages_enabled = true;
94 94
95struct page *hwpoison_page;
96pfn_t hwpoison_pfn;
97
95inline int kvm_is_mmio_pfn(pfn_t pfn) 98inline int kvm_is_mmio_pfn(pfn_t pfn)
96{ 99{
97 if (pfn_valid(pfn)) { 100 if (pfn_valid(pfn)) {
@@ -810,16 +813,22 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
810 813
811int is_error_page(struct page *page) 814int is_error_page(struct page *page)
812{ 815{
813 return page == bad_page; 816 return page == bad_page || page == hwpoison_page;
814} 817}
815EXPORT_SYMBOL_GPL(is_error_page); 818EXPORT_SYMBOL_GPL(is_error_page);
816 819
817int is_error_pfn(pfn_t pfn) 820int is_error_pfn(pfn_t pfn)
818{ 821{
819 return pfn == bad_pfn; 822 return pfn == bad_pfn || pfn == hwpoison_pfn;
820} 823}
821EXPORT_SYMBOL_GPL(is_error_pfn); 824EXPORT_SYMBOL_GPL(is_error_pfn);
822 825
826int is_hwpoison_pfn(pfn_t pfn)
827{
828 return pfn == hwpoison_pfn;
829}
830EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
831
823static inline unsigned long bad_hva(void) 832static inline unsigned long bad_hva(void)
824{ 833{
825 return PAGE_OFFSET; 834 return PAGE_OFFSET;
@@ -945,6 +954,11 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
945 if (unlikely(npages != 1)) { 954 if (unlikely(npages != 1)) {
946 struct vm_area_struct *vma; 955 struct vm_area_struct *vma;
947 956
957 if (is_hwpoison_address(addr)) {
958 get_page(hwpoison_page);
959 return page_to_pfn(hwpoison_page);
960 }
961
948 down_read(&current->mm->mmap_sem); 962 down_read(&current->mm->mmap_sem);
949 vma = find_vma(current->mm, addr); 963 vma = find_vma(current->mm, addr);
950 964
@@ -2197,6 +2211,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2197 2211
2198 bad_pfn = page_to_pfn(bad_page); 2212 bad_pfn = page_to_pfn(bad_page);
2199 2213
2214 hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2215
2216 if (hwpoison_page == NULL) {
2217 r = -ENOMEM;
2218 goto out_free_0;
2219 }
2220
2221 hwpoison_pfn = page_to_pfn(hwpoison_page);
2222
2200 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 2223 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2201 r = -ENOMEM; 2224 r = -ENOMEM;
2202 goto out_free_0; 2225 goto out_free_0;
@@ -2269,6 +2292,8 @@ out_free_1:
2269out_free_0a: 2292out_free_0a:
2270 free_cpumask_var(cpus_hardware_enabled); 2293 free_cpumask_var(cpus_hardware_enabled);
2271out_free_0: 2294out_free_0:
2295 if (hwpoison_page)
2296 __free_page(hwpoison_page);
2272 __free_page(bad_page); 2297 __free_page(bad_page);
2273out: 2298out:
2274 kvm_arch_exit(); 2299 kvm_arch_exit();
@@ -2290,6 +2315,7 @@ void kvm_exit(void)
2290 kvm_arch_hardware_unsetup(); 2315 kvm_arch_hardware_unsetup();
2291 kvm_arch_exit(); 2316 kvm_arch_exit();
2292 free_cpumask_var(cpus_hardware_enabled); 2317 free_cpumask_var(cpus_hardware_enabled);
2318 __free_page(hwpoison_page);
2293 __free_page(bad_page); 2319 __free_page(bad_page);
2294} 2320}
2295EXPORT_SYMBOL_GPL(kvm_exit); 2321EXPORT_SYMBOL_GPL(kvm_exit);