author		Andi Kleen <ak@linux.intel.com>	2009-06-08 05:37:09 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 05:27:08 -0400
commit		a0861c02a981c943573478ea13b29b1fb958ee5b
tree		a98fb24b4cc4cc7fb58037be64fd4cc42c35bf38 /arch/x86/kvm/vmx.c
parent		56b237e31abf4d6dbc6e2a0214049b9a23be4883
KVM: Add VT-x machine check support
VT-x needs an explicit MC vector intercept to handle machine checks
in the hypervisor. It also has a dedicated exit reason for machine
checks that happen during VM entry.

Add both intercepts and forward the events to the Linux machine check
handler. Always make it look like user space was interrupted, because
the machine check handler treats kernel and user space differently.
Thanks to Jiang Yunhong for help and testing.
Cc: stable@kernel.org
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 48 insertions(+), 2 deletions(-)
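In short, after this patch a machine check can reach the host's do_machine_check() through two VT-x paths, which the hunks below wire up. A sketch of the dispatch logic, assuming the patch's is_machine_check() helper and the EXIT_REASON_* constants from asm/vmx.h (the helper name mce_needs_host_handling is illustrative, not from the patch):

static bool mce_needs_host_handling(u32 exit_reason, u32 exit_intr_info)
{
	/* Path 1: the guest raised #MC; the new MC_VECTOR bit in the
	 * exception bitmap turns it into an EXCEPTION_NMI vmexit. */
	if (exit_reason == EXIT_REASON_EXCEPTION_NMI &&
	    is_machine_check(exit_intr_info))
		return true;
	/* Path 2: the machine check hit during VM entry itself and is
	 * reported with a dedicated exit reason. */
	return exit_reason == EXIT_REASON_MCE_DURING_VMENTRY;
}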
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c379a3472fa9..32d6ae8fb60e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
 #include <asm/desc.h>
 #include <asm/vmx.h>
 #include <asm/virtext.h>
+#include <asm/mce.h>
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -97,6 +98,7 @@ struct vcpu_vmx {
 	int soft_vnmi_blocked;
 	ktime_t entry_time;
 	s64 vnmi_blocked_time;
+	u32 exit_reason;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -214,6 +216,13 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int is_machine_check(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+			     INTR_INFO_VALID_MASK)) ==
+		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+}
+
 static inline int cpu_has_vmx_msr_bitmap(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
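For reference, the VM-exit interruption-information field packs the vector in bits 7:0, the event type in bits 10:8, and a valid flag in bit 31 (per the Intel SDM; the mask names come from asm/vmx.h, and MC_VECTOR is 18). is_machine_check() therefore matches exactly one encoding: a valid hardware exception with vector 18. As an illustration, the one value it accepts:

	u32 mc_info = INTR_INFO_VALID_MASK	/* bit 31: valid */
		    | INTR_TYPE_HARD_EXCEPTION	/* type 3 in bits 10:8 */
		    | MC_VECTOR;		/* vector 18: #MC */
	/* is_machine_check(mc_info) != 0; any other type or vector fails */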
@@ -485,7 +494,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
 
-	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
+	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
 	if (!vcpu->fpu_active)
 		eb |= 1u << NM_VECTOR;
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
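This one-line change is what makes path 1 possible: each bit in the VMX exception bitmap corresponds to one exception vector, and a set bit forces a VM exit when that vector would otherwise be delivered to the guest. With bit MC_VECTOR (18) set, a guest #MC becomes an EXIT_REASON_EXCEPTION_NMI exit. The function (unchanged outside this hunk) ends by writing the mask into the VMCS:

	vmcs_write32(EXCEPTION_BITMAP, eb);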
@@ -2582,6 +2591,31 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/*
+ * Trigger machine check on the host. We assume all the MSRs are already set up
+ * by the CPU and that we still run on the same CPU as the MCE occurred on.
+ * We pass a fake environment to the machine check handler because we want
+ * the guest to be always treated like user space, no matter what context
+ * it used internally.
+ */
+static void kvm_machine_check(void)
+{
+#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+	struct pt_regs regs = {
+		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
+		.flags = X86_EFLAGS_IF,
+	};
+
+	do_machine_check(&regs, 0);
+#endif
+}
+
+static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	/* already handled by vcpu_run */
+	return 1;
+}
+
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
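The faked register frame above is the heart of the "always looks like user space" trick from the changelog: the 64-bit machine check handler inspects the saved CS to classify the interrupted context, and a kernel-context #MC is handled far more pessimistically (typically a panic) than a user-context one, which can often be contained by killing the affected task. A simplified sketch of the mode check the handler relies on (the real kernel uses user_mode(); the name below is illustrative):

static inline int interrupted_user_mode(struct pt_regs *regs)
{
	/* CPL lives in the low two bits of CS; the faked .cs = 3 reads
	 * as ring 3, i.e. user mode. */
	return (regs->cs & 3) == 3;
}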
@@ -2593,6 +2627,9 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vect_info = vmx->idt_vectoring_info;
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
+	if (is_machine_check(intr_info))
+		return handle_machine_check(vcpu, kvm_run);
+
 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
 	    !is_page_fault(intr_info))
 		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
@@ -3166,6 +3203,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
 	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
+	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -3177,8 +3215,8 @@ static const int kvm_vmx_max_exit_handlers =
  */
 static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
 	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
@@ -3263,6 +3301,14 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
 	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
+	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+
+	/* Handle machine checks before interrupts are enabled */
+	if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+	    || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
+	    && is_machine_check(exit_intr_info)))
+		kvm_machine_check();
+
 	/* We need to handle NMIs before interrupts are enabled */
 	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
 	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
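Putting the hunks together, the intended flow after a machine check, reconstructed from the patch (a sketch, not commit text):

/*
 * vmx_vcpu_run()
 *   -> vmx_complete_interrupts()       runs with interrupts still off
 *        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
 *        kvm_machine_check();          forwards #MC to do_machine_check()
 * ...interrupts are re-enabled...
 * vmx_handle_exit()
 *   -> handle_machine_check()          the event was already handled;
 *                                      returns 1 so the guest resumes
 */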