diff options
| -rw-r--r-- | arch/x86/kvm/vmx.c | 67 |
1 files changed, 67 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 94833e2fe78c..1a30fd5c3fb2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <asm/i387.h> | 42 | #include <asm/i387.h> |
| 43 | #include <asm/xcr.h> | 43 | #include <asm/xcr.h> |
| 44 | #include <asm/perf_event.h> | 44 | #include <asm/perf_event.h> |
| 45 | #include <asm/kexec.h> | ||
| 45 | 46 | ||
| 46 | #include "trace.h" | 47 | #include "trace.h" |
| 47 | 48 | ||
| @@ -987,6 +988,46 @@ static void vmcs_load(struct vmcs *vmcs) | |||
| 987 | vmcs, phys_addr); | 988 | vmcs, phys_addr); |
| 988 | } | 989 | } |
| 989 | 990 | ||
| 991 | #ifdef CONFIG_KEXEC | ||
| 992 | /* | ||
| 993 | * This bitmap is used to indicate whether the vmclear | ||
| 994 | * operation is enabled on each cpu. All disabled by | ||
| 995 | * default. | ||
| 996 | */ | ||
| 997 | static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; | ||
| 998 | |||
| 999 | static inline void crash_enable_local_vmclear(int cpu) | ||
| 1000 | { | ||
| 1001 | cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | static inline void crash_disable_local_vmclear(int cpu) | ||
| 1005 | { | ||
| 1006 | cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); | ||
| 1007 | } | ||
| 1008 | |||
| 1009 | static inline int crash_local_vmclear_enabled(int cpu) | ||
| 1010 | { | ||
| 1011 | return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | static void crash_vmclear_local_loaded_vmcss(void) | ||
| 1015 | { | ||
| 1016 | int cpu = raw_smp_processor_id(); | ||
| 1017 | struct loaded_vmcs *v; | ||
| 1018 | |||
| 1019 | if (!crash_local_vmclear_enabled(cpu)) | ||
| 1020 | return; | ||
| 1021 | |||
| 1022 | list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), | ||
| 1023 | loaded_vmcss_on_cpu_link) | ||
| 1024 | vmcs_clear(v->vmcs); | ||
| 1025 | } | ||
| 1026 | #else | ||
| 1027 | static inline void crash_enable_local_vmclear(int cpu) { } | ||
| 1028 | static inline void crash_disable_local_vmclear(int cpu) { } | ||
| 1029 | #endif /* CONFIG_KEXEC */ | ||
| 1030 | |||
| 990 | static void __loaded_vmcs_clear(void *arg) | 1031 | static void __loaded_vmcs_clear(void *arg) |
| 991 | { | 1032 | { |
| 992 | struct loaded_vmcs *loaded_vmcs = arg; | 1033 | struct loaded_vmcs *loaded_vmcs = arg; |
| @@ -996,6 +1037,7 @@ static void __loaded_vmcs_clear(void *arg) | |||
| 996 | return; /* vcpu migration can race with cpu offline */ | 1037 | return; /* vcpu migration can race with cpu offline */ |
| 997 | if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) | 1038 | if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) |
| 998 | per_cpu(current_vmcs, cpu) = NULL; | 1039 | per_cpu(current_vmcs, cpu) = NULL; |
| 1040 | crash_disable_local_vmclear(cpu); | ||
| 999 | list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); | 1041 | list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); |
| 1000 | 1042 | ||
| 1001 | /* | 1043 | /* |
| @@ -1007,6 +1049,7 @@ static void __loaded_vmcs_clear(void *arg) | |||
| 1007 | smp_wmb(); | 1049 | smp_wmb(); |
| 1008 | 1050 | ||
| 1009 | loaded_vmcs_init(loaded_vmcs); | 1051 | loaded_vmcs_init(loaded_vmcs); |
| 1052 | crash_enable_local_vmclear(cpu); | ||
| 1010 | } | 1053 | } |
| 1011 | 1054 | ||
| 1012 | static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) | 1055 | static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) |
| @@ -1530,6 +1573,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 1530 | 1573 | ||
| 1531 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | 1574 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
| 1532 | local_irq_disable(); | 1575 | local_irq_disable(); |
| 1576 | crash_disable_local_vmclear(cpu); | ||
| 1533 | 1577 | ||
| 1534 | /* | 1578 | /* |
| 1535 | * Read loaded_vmcs->cpu should be before fetching | 1579 | * Read loaded_vmcs->cpu should be before fetching |
| @@ -1540,6 +1584,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 1540 | 1584 | ||
| 1541 | list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, | 1585 | list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, |
| 1542 | &per_cpu(loaded_vmcss_on_cpu, cpu)); | 1586 | &per_cpu(loaded_vmcss_on_cpu, cpu)); |
| 1587 | crash_enable_local_vmclear(cpu); | ||
| 1543 | local_irq_enable(); | 1588 | local_irq_enable(); |
| 1544 | 1589 | ||
| 1545 | /* | 1590 | /* |
| @@ -2353,6 +2398,18 @@ static int hardware_enable(void *garbage) | |||
| 2353 | return -EBUSY; | 2398 | return -EBUSY; |
| 2354 | 2399 | ||
| 2355 | INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); | 2400 | INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); |
| 2401 | |||
| 2402 | /* | ||
| 2403 | * Now we can enable the vmclear operation in kdump | ||
| 2404 | * since the loaded_vmcss_on_cpu list on this cpu | ||
| 2405 | * has been initialized. | ||
| 2406 | * | ||
| 2407 | * Though the cpu is not in VMX operation now, there | ||
| 2408 | * is no problem to enable the vmclear operation | ||
| 2409 | * because the loaded_vmcss_on_cpu list is empty! | ||
| 2410 | */ | ||
| 2411 | crash_enable_local_vmclear(cpu); | ||
| 2412 | |||
| 2356 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | 2413 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
| 2357 | 2414 | ||
| 2358 | test_bits = FEATURE_CONTROL_LOCKED; | 2415 | test_bits = FEATURE_CONTROL_LOCKED; |
| @@ -7383,6 +7440,11 @@ static int __init vmx_init(void) | |||
| 7383 | if (r) | 7440 | if (r) |
| 7384 | goto out3; | 7441 | goto out3; |
| 7385 | 7442 | ||
| 7443 | #ifdef CONFIG_KEXEC | ||
| 7444 | rcu_assign_pointer(crash_vmclear_loaded_vmcss, | ||
| 7445 | crash_vmclear_local_loaded_vmcss); | ||
| 7446 | #endif | ||
| 7447 | |||
| 7386 | vmx_disable_intercept_for_msr(MSR_FS_BASE, false); | 7448 | vmx_disable_intercept_for_msr(MSR_FS_BASE, false); |
| 7387 | vmx_disable_intercept_for_msr(MSR_GS_BASE, false); | 7449 | vmx_disable_intercept_for_msr(MSR_GS_BASE, false); |
| 7388 | vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); | 7450 | vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); |
| @@ -7420,6 +7482,11 @@ static void __exit vmx_exit(void) | |||
| 7420 | free_page((unsigned long)vmx_io_bitmap_b); | 7482 | free_page((unsigned long)vmx_io_bitmap_b); |
| 7421 | free_page((unsigned long)vmx_io_bitmap_a); | 7483 | free_page((unsigned long)vmx_io_bitmap_a); |
| 7422 | 7484 | ||
| 7485 | #ifdef CONFIG_KEXEC | ||
| 7486 | rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL); | ||
| 7487 | synchronize_rcu(); | ||
| 7488 | #endif | ||
| 7489 | |||
| 7423 | kvm_exit(); | 7490 | kvm_exit(); |
| 7424 | } | 7491 | } |
| 7425 | 7492 | ||
