author	Huang Ying <ying.huang@intel.com>	2010-05-31 02:28:19 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:35:26 -0400
commit	bf998156d24bcb127318ad5bf531ac3bdfcd6449
tree	616c19474d7cb626ff9eebc54f6753563a4322cd /arch/x86/kvm/mmu.c
parent	540ad6b62b3a188a53b51cac81d8a60d40e29fbd
KVM: Avoid killing userspace through guest SRAO MCE on unmapped pages
In the common case, a guest SRAO MCE causes the corresponding poisoned page to be unmapped and a SIGBUS to be sent to QEMU-KVM, which then relays the MCE to the guest OS. But it has been reported that if the poisoned page is accessed in the guest after the unmapping and before the MCE is relayed to the guest OS, userspace is killed.

The reason is as follows. Because the poisoned page has been unmapped, the guest access causes a guest exit, and kvm_mmu_page_fault() is called. kvm_mmu_page_fault() cannot get the poisoned page for the fault address, so kernel and then user-space MMIO processing are tried in turn. During user-space MMIO processing, the poisoned page is accessed again, and userspace is killed by force_sig_info().

To fix the bug, have kvm_mmu_page_fault() send the HWPOISON signal (SIGBUS) to QEMU-KVM itself, and skip kernel and user-space MMIO processing for the poisoned page.

[xiao: fix warning introduced by avi]

Reported-by: Max Asbock <masbock@linux.vnet.ibm.com>
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
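For reference, a minimal sketch of the userspace side of this handshake. This is an assumed illustration, not QEMU-KVM's actual code: the kernel's memory-failure path delivers SIGBUS with si_code BUS_MCEERR_AR for a synchronous access to an already-poisoned page (BUS_MCEERR_AO for the asynchronous case) and si_addr set to the poisoned host virtual address; relay_mce_to_guest() is a hypothetical hook standing in for the VMM's hva-to-gpa translation and MCE injection.

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stddef.h>

	extern void relay_mce_to_guest(void *poisoned_hva);	/* hypothetical */

	static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
	{
		(void)sig; (void)ucontext;
		/* Memory-failure SIGBUS carries the poisoned hva in si_addr */
		if (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO)
			relay_mce_to_guest(si->si_addr);
	}

	void install_sigbus_handler(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = sigbus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);
	}

With such a handler installed, the SIGBUS raised by the fixed kvm_mmu_page_fault() path lands in the vcpu thread instead of killing the process, and the VMM can forward the error to the guest.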
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b1ed0a1a5913..b666d8d106a9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -32,6 +32,7 @@
 #include <linux/compiler.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
@@ -1960,6 +1961,27 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 	return pt_write;
 }
 
+static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn)
+{
+	char buf[1];
+	void __user *hva;
+	int r;
+
+	/* Touch the page, so send SIGBUS */
+	hva = (void __user *)gfn_to_hva(kvm, gfn);
+	r = copy_from_user(buf, hva, 1);
+}
+
+static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+{
+	kvm_release_pfn_clean(pfn);
+	if (is_hwpoison_pfn(pfn)) {
+		kvm_send_hwpoison_signal(kvm, gfn);
+		return 0;
+	}
+	return 1;
+}
+
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int r;
@@ -1983,10 +2005,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 
 	/* mmio */
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
-		return 1;
-	}
+	if (is_error_pfn(pfn))
+		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2198,10 +2218,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
-		return 1;
-	}
+	if (is_error_pfn(pfn))
+		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
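The whole scheme relies on a kernel guarantee: touching a hwpoisoned hva, as kvm_send_hwpoison_signal() does via copy_from_user(), makes the memory-failure code deliver SIGBUS with BUS_MCEERR_AR to the current task, i.e. the faulting vcpu thread. A standalone sketch of that behavior, assuming a kernel with CONFIG_MEMORY_FAILURE and a process with CAP_SYS_ADMIN (MADV_HWPOISON is a fault-injection aid, used here as a stand-in for a real SRAO event):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void bus_handler(int sig, siginfo_t *si, void *ucontext)
	{
		(void)sig; (void)si; (void)ucontext;
		/* async-signal-safe reporting only */
		static const char msg[] = "SIGBUS on poisoned page, as expected\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
		_exit(0);
	}

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		struct sigaction sa = { 0 };
		volatile char *p;

		sa.sa_sigaction = bus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);

		p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		p[0] = 1;			/* fault the page in */

		/* unmap + mark the page hwpoisoned, like a real SRAO event */
		if (madvise((void *)p, psz, MADV_HWPOISON)) {
			perror("madvise(MADV_HWPOISON)");
			return 1;
		}

		(void)p[0];			/* touch it again -> SIGBUS */
		fprintf(stderr, "no SIGBUS delivered?\n");
		return 1;
	}

Run as root: the second access never completes normally; the handler fires with si_addr pointing at the poisoned page, which is exactly what QEMU-KVM observes once this patch is applied.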