author		Avi Kivity <avi@qumranet.com>	2008-05-13 06:23:38 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-07-20 05:41:43 -0400
commit		4ecac3fd6dc2629ad76a658a486f081c44aef10e
tree		a078acb4c626432ac8cf887b911d912f4a316d06	/arch/x86/kvm/vmx.c
parent		1b7fcd3263e5f12dba43d27b64e1578bec070c28
KVM: Handle virtualization instruction #UD faults during reboot
KVM turns off hardware virtualization extensions during reboot, in order
to disassociate the memory used by the virtualization extensions from the
processor, and in order to have the system in a consistent state.
Unfortunately, virtual machines may still be running while this goes on,
and once virtualization extensions are turned off, any virtualization
instruction will #UD on execution.
Fix by adding an exception handler to virtualization instructions; if we get
an exception during reboot, we simply spin waiting for the reset to complete.
If it's a true exception, BUG() so we can have our stack trace.
Signed-off-by: Avi Kivity <avi@qumranet.com>
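
The handler that the wrapped instructions fault into is added outside this
file, so it does not appear in the diff below (the diffstat is limited to
arch/x86/kvm/vmx.c). A minimal sketch of the spin-or-BUG logic the message
describes, assuming a kvm_rebooting flag that the reboot path sets before
turning the extensions off:

	/*
	 * Reached via the exception-table fixup when a wrapped
	 * virtualization instruction raises #UD.  During reboot the
	 * fault is expected: spin until the reset takes effect.
	 * Otherwise it is a genuine bug, so report it with a trace.
	 */
	asmlinkage void kvm_handle_fault_on_reboot(void)
	{
		if (kvm_rebooting)
			/* spin while reset goes on */
			while (true)
				;
		/* a real #UD outside of reboot: we want the stack trace */
		BUG();
	}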
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fff8e23433d6..b80b4d141637 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -30,6 +30,8 @@
 #include <asm/io.h>
 #include <asm/desc.h>
 
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -278,7 +280,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 		u64 gva;
 	} operand = { vpid, 0, gva };
 
-	asm volatile (ASM_VMX_INVVPID
+	asm volatile (__ex(ASM_VMX_INVVPID)
 		  /* CF==1 or ZF==1 --> rc = -1 */
 		  "; ja 1f ; ud2 ; 1:"
 		  : : "a"(&operand), "c"(ext) : "cc", "memory");
@@ -290,7 +292,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
 		u64 eptp, gpa;
 	} operand = {eptp, gpa};
 
-	asm volatile (ASM_VMX_INVEPT
+	asm volatile (__ex(ASM_VMX_INVEPT)
 			/* CF==1 or ZF==1 --> rc = -1 */
 			"; ja 1f ; ud2 ; 1:\n"
 			: : "a" (&operand), "c" (ext) : "cc", "memory");
@@ -311,7 +313,7 @@ static void vmcs_clear(struct vmcs *vmcs)
 	u64 phys_addr = __pa(vmcs);
 	u8 error;
 
-	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
+	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
 		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
 		      : "cc", "memory");
 	if (error)
@@ -378,7 +380,7 @@ static unsigned long vmcs_readl(unsigned long field)
 {
 	unsigned long value;
 
-	asm volatile (ASM_VMX_VMREAD_RDX_RAX
+	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
 		      : "=a"(value) : "d"(field) : "cc");
 	return value;
 }
@@ -413,7 +415,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
 {
 	u8 error;
 
-	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
+	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
 		      : "=q"(error) : "a"(value), "d"(field) : "cc");
 	if (unlikely(error))
 		vmwrite_error(field, value);
@@ -621,7 +623,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		u8 error;
 
 		per_cpu(current_vmcs, cpu) = vmx->vmcs;
-		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
+		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
 			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
 			      : "cc");
 		if (error)
@@ -1030,13 +1032,14 @@ static void hardware_enable(void *garbage)
 		       MSR_IA32_FEATURE_CONTROL_LOCKED |
 		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
+	asm volatile (ASM_VMX_VMXON_RAX
+		      : : "a"(&phys_addr), "m"(phys_addr)
 		      : "memory", "cc");
 }
 
 static void hardware_disable(void *garbage)
 {
-	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
 	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
@@ -2834,7 +2837,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		"push %%edx; push %%ebp;"
 		"push %%ecx \n\t"
 #endif
-		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
+		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
 		/* Check if vmlaunch of vmresume is needed */
 		"cmpl $0, %c[launched](%0) \n\t"
 		/* Load guest registers. Don't clobber flags. */
@@ -2869,9 +2872,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 		/* Enter guest mode */
 		"jne .Llaunched \n\t"
-		ASM_VMX_VMLAUNCH "\n\t"
+		__ex(ASM_VMX_VMLAUNCH) "\n\t"
 		"jmp .Lkvm_vmx_return \n\t"
-		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
 		".Lkvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
 #ifdef CONFIG_X86_64
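
The __ex() wrapper expands through __kvm_handle_fault_on_reboot(), a macro
defined in a shared KVM header and therefore also outside this diff. One
plausible shape for it, built on the kernel's __ex_table fixup machinery
(the label names and section details are illustrative rather than quoted
from the companion patch):

	/*
	 * Emit the instruction at a local label and register an
	 * exception-table entry for it.  On a fault, the fixup stub
	 * pushes the faulting address (so a later BUG() has a useful
	 * trace) and jumps to kvm_handle_fault_on_reboot().
	 */
	#define __kvm_handle_fault_on_reboot(insn)		\
		"666: " insn "\n\t"				\
		".pushsection .fixup, \"ax\"\n"			\
		"667: push $666b\n\t"				\
		"jmp kvm_handle_fault_on_reboot\n\t"		\
		".popsection\n\t"				\
		".pushsection __ex_table, \"a\"\n\t"		\
		_ASM_PTR " 666b, 667b\n\t"			\
		".popsection"

With this in place, a VMX instruction that faults after hardware_disable()
has run spins in the handler instead of oopsing, while the same fault at
any other time still BUG()s with a full stack trace.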