author     Nadav Amit <namit@cs.technion.ac.il>       2014-09-15 20:24:05 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>        2014-10-24 07:21:08 -0400
commit     854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 (patch)
tree       a970da7943352914676e83f159aedaa4dda79db7 /arch/x86/kvm
parent     c3351dfabf5c78fb5ddc79d0f7b65ebd9e441337 (diff)
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture, since they
would #GP anyway). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP
if a non-canonical address is written on Intel but not on AMD (which ignores
the top 32 bits).
Accordingly, this patch injects a #GP for the MSRs that behave identically on
Intel and AMD. To eliminate the differences between the architectures, the
value written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is made canonical
before the write, instead of injecting a #GP.
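For reference (not part of this patch): a 64-bit linear address is canonical
when bits 63:48 are all copies of bit 47, assuming 48-bit virtual addresses.
The is_noncanonical_address()/get_canonical() helpers called in the x86.c hunk
below boil down to such a sign extension of bit 47; a minimal sketch of
equivalent helpers, under that 48-bit assumption:

#include <linux/types.h>

/* Sign-extend bit 47 into bits 63:48 (48-bit virtual addresses assumed). */
static inline u64 get_canonical(u64 la)
{
	return ((s64)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;	/* every 32-bit address is canonical */
#endif
}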
Some references from Intel and AMD manuals:
According to the Intel SDM description of the WRMSR instruction, #GP is
expected on WRMSR "If the source register contains a non-canonical address and
ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE,
IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP,
IA32_SYSENTER_ESP."
According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
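As a concrete, standalone illustration (hypothetical example value, not from
the patch) of what the canonicalization does to a SYSENTER MSR write, again
assuming 48-bit virtual addresses:

#include <stdint.h>
#include <stdio.h>

/* Same operation as the get_canonical() call in the patch:
 * copy bit 47 into bits 63:48. */
static uint64_t canonicalize(uint64_t la)
{
	return (uint64_t)(((int64_t)la << 16) >> 16);
}

int main(void)
{
	uint64_t bad = 0x1234800000000000ULL;	/* bits 63:48 do not match bit 47 */

	/* prints: 0x1234800000000000 -> 0xffff800000000000 */
	printf("%#llx -> %#llx\n", (unsigned long long)bad,
	       (unsigned long long)canonicalize(bad));
	return 0;
}

With the patch, this sign extension happens transparently in kvm_set_msr()
before the value reaches vendor code, so the guest-visible behavior matches
AMD hardware while VM entry on Intel still sees a canonical value.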
This patch fixes CVE-2014-3610.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/svm.c |  2
-rw-r--r--  arch/x86/kvm/vmx.c |  2
-rw-r--r--  arch/x86/kvm/x86.c | 27
3 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 65510f624dfe..00bed2c5e948 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3251,7 +3251,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 	msr.host_initiated = false;
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, &msr)) {
+	if (kvm_set_msr(&svm->vcpu, &msr)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0acac81f198b..148020a7dd98 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5291,7 +5291,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	msr.data = data;
 	msr.index = ecx;
 	msr.host_initiated = false;
-	if (vmx_set_msr(vcpu, &msr) != 0) {
+	if (kvm_set_msr(vcpu, &msr) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 34c8f94331f8..5a7195573a32 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -987,7 +987,6 @@ void kvm_enable_efer_bits(u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
-
 /*
  * Writes msr value into into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -995,8 +994,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
+	switch (msr->index) {
+	case MSR_FS_BASE:
+	case MSR_GS_BASE:
+	case MSR_KERNEL_GS_BASE:
+	case MSR_CSTAR:
+	case MSR_LSTAR:
+		if (is_noncanonical_address(msr->data))
+			return 1;
+		break;
+	case MSR_IA32_SYSENTER_EIP:
+	case MSR_IA32_SYSENTER_ESP:
+		/*
+		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
+		 * non-canonical address is written on Intel but not on
+		 * AMD (which ignores the top 32-bits, because it does
+		 * not implement 64-bit SYSENTER).
+		 *
+		 * 64-bit code should hence be able to write a non-canonical
+		 * value on AMD.  Making the address canonical ensures that
+		 * vmentry does not fail on Intel after writing a non-canonical
+		 * value, and that something deterministic happens if the guest
+		 * invokes 64-bit SYSENTER.
+		 */
+		msr->data = get_canonical(msr->data);
+	}
 	return kvm_x86_ops->set_msr(vcpu, msr);
 }
+EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention