 arch/x86/kvm/vmx.c | 24 ++++++++++++++++++++++--
 arch/x86/kvm/x86.c | 27 ---------------------------
 2 files changed, 22 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b80b4d141637..4d179d106376 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -55,6 +55,7 @@ struct vmcs {
 
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
+	struct list_head local_vcpus_link;
 	int launched;
 	u8 fail;
 	u32 idt_vectoring_info;
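
The new local_vcpus_link field is an intrusive list node: the struct list_head is embedded in struct vcpu_vmx itself, and the containing vcpu is recovered from a node pointer by offset arithmetic. The following userspace sketch (hypothetical names, not kernel code, modeled on <linux/list.h>) shows the pattern; fake_vcpu stands in for struct vcpu_vmx and vcpus_on_cpu for the per-cpu list head added below.

#include <stddef.h>
#include <stdio.h>

/* Minimal doubly linked list, modeled on the kernel's <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Hypothetical stand-in for struct vcpu_vmx. */
struct fake_vcpu {
	int id;
	struct list_head local_vcpus_link;	/* embedded link node */
};

int main(void)
{
	struct list_head vcpus_on_cpu;		/* one per-cpu head */
	struct fake_vcpu v = { .id = 7 };

	INIT_LIST_HEAD(&vcpus_on_cpu);
	list_add(&v.local_vcpus_link, &vcpus_on_cpu);

	/* Recover the containing vcpu from the raw list node. */
	for (struct list_head *p = vcpus_on_cpu.next; p != &vcpus_on_cpu;
	     p = p->next) {
		struct fake_vcpu *cur =
			container_of(p, struct fake_vcpu, local_vcpus_link);
		printf("vcpu %d is loaded on this cpu\n", cur->id);
	}

	list_del(&v.local_vcpus_link);
	return 0;
}

Intrusive lists need no allocation to link or unlink, which is why this list can be manipulated from an IPI handler, where allocation is forbidden.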
@@ -93,6 +94,7 @@ static int init_rmode(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
@@ -331,6 +333,9 @@ static void __vcpu_clear(void *arg)
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
 	rdtscll(vmx->vcpu.arch.host_tsc);
+	list_del(&vmx->local_vcpus_link);
+	vmx->vcpu.cpu = -1;
+	vmx->launched = 0;
 }
 
 static void vcpu_clear(struct vcpu_vmx *vmx)
@@ -338,7 +343,6 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 	if (vmx->vcpu.cpu == -1)
 		return;
 	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
-	vmx->launched = 0;
 }
 
 static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
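
Moving vmx->launched = 0 out of vcpu_clear() and into __vcpu_clear() matters because __vcpu_clear() gains a second caller below (vmclear_local_vcpus()) that does not go through vcpu_clear(); resetting the cached state in the shared callee keeps both paths consistent. A minimal userspace model of the two call paths (hypothetical; a direct call stands in for the smp_call_function_single() IPI):

#include <stdio.h>

/* Hypothetical model of the cached-state fields reset by __vcpu_clear(). */
struct vcpu {
	int cpu;	/* last cpu the vmcs was loaded on, -1 if none */
	int launched;
};

/* Shared callee: after this patch, __vcpu_clear() resets all cached
 * state itself, so every caller sees the same post-condition. */
static void __vcpu_clear(struct vcpu *v)
{
	v->cpu = -1;
	v->launched = 0;
}

/* Path 1: vcpu_clear() reaches __vcpu_clear() via a targeted IPI
 * (modeled here as a direct call). */
static void vcpu_clear(struct vcpu *v)
{
	if (v->cpu == -1)
		return;
	__vcpu_clear(v);	/* kernel: smp_call_function_single() */
}

/* Path 2, new in this patch: hardware_disable() walks the per-cpu list
 * and calls __vcpu_clear() directly, bypassing vcpu_clear(). */
static void vmclear_local_vcpus(struct vcpu *v)
{
	__vcpu_clear(v);
}

int main(void)
{
	struct vcpu a = { .cpu = 3, .launched = 1 };
	struct vcpu b = { .cpu = 5, .launched = 1 };

	vcpu_clear(&a);
	vmclear_local_vcpus(&b);
	/* Both paths leave the vcpu fully invalidated. */
	printf("a: cpu=%d launched=%d\n", a.cpu, a.launched);
	printf("b: cpu=%d launched=%d\n", b.cpu, b.launched);
	return 0;
}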
@@ -617,6 +621,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		vcpu_clear(vmx);
 		kvm_migrate_timers(vcpu);
 		vpid_sync_vcpu_all(vmx);
+		local_irq_disable();
+		list_add(&vmx->local_vcpus_link,
+			 &per_cpu(vcpus_on_cpu, cpu));
+		local_irq_enable();
 	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
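
The list_add() is bracketed by local_irq_disable()/local_irq_enable() because the only other writer of this per-cpu list is __vcpu_clear(), which arrives via IPI, i.e. in interrupt context on the same cpu; masking interrupts is therefore sufficient and no lock is needed. A rough userspace analogue (hypothetical, with a signal handler standing in for the IPI):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t handler_ran;

/* Stands in for __vcpu_clear() arriving by IPI. */
static void ipi_handler(int sig)
{
	(void)sig;
	handler_ran = 1;	/* the real handler would list_del() here */
}

int main(void)
{
	sigset_t block, old;

	signal(SIGALRM, ipi_handler);
	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

	/* Analogue of local_irq_disable(): delivery is deferred. */
	sigprocmask(SIG_BLOCK, &block, &old);
	raise(SIGALRM);
	printf("in critical section, handler_ran = %d\n", (int)handler_ran);
	/* ... this is where list_add() would safely run ... */

	/* Analogue of local_irq_enable(): the pending signal fires now. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	printf("after enabling, handler_ran = %d\n", (int)handler_ran);
	return 0;
}

In the kernel the same reasoning means plain list_add()/list_del() suffice: both writers run on the local cpu, one with interrupts disabled and one in the interrupt handler itself.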
@@ -1022,6 +1030,7 @@ static void hardware_enable(void *garbage)
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 	u64 old;
 
+	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
 	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
 	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
 		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
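
INIT_LIST_HEAD() runs in hardware_enable() so each cpu's list is empty before VMXON can load any VMCS on it. Conceptually, DEFINE_PER_CPU(struct list_head, vcpus_on_cpu) gives every cpu its own independent head, as in this hypothetical userspace model (NR_CPUS is assumed for illustration):

#include <stdio.h>

#define NR_CPUS 4	/* assumed, for illustration only */

struct list_head {
	struct list_head *next, *prev;
};

/* Model of DEFINE_PER_CPU(struct list_head, vcpus_on_cpu): one
 * independent head per cpu, here just an array indexed by cpu id. */
static struct list_head vcpus_on_cpu[NR_CPUS];

/* Analogue of the INIT_LIST_HEAD() call in hardware_enable(), which
 * the kernel runs on each cpu as VMX is switched on. */
static void hardware_enable(int cpu)
{
	vcpus_on_cpu[cpu].next = &vcpus_on_cpu[cpu];
	vcpus_on_cpu[cpu].prev = &vcpus_on_cpu[cpu];
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		hardware_enable(cpu);
		printf("cpu %d: list %s\n", cpu,
		       vcpus_on_cpu[cpu].next == &vcpus_on_cpu[cpu]
		       ? "empty" : "non-empty");
	}
	return 0;
}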
@@ -1037,8 +1046,19 @@ static void hardware_enable(void *garbage)
 		      : "memory", "cc");
 }
 
+static void vmclear_local_vcpus(void)
+{
+	int cpu = raw_smp_processor_id();
+	struct vcpu_vmx *vmx, *n;
+
+	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
+				 local_vcpus_link)
+		__vcpu_clear(vmx);
+}
+
 static void hardware_disable(void *garbage)
 {
+	vmclear_local_vcpus();
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
 	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
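
vmclear_local_vcpus() runs before VMXOFF, while VMCLEAR is still a legal instruction, and it must use list_for_each_entry_safe() because the loop body, __vcpu_clear(), deletes the current entry from the very list being walked; the _safe variant caches the successor before invoking the body. A userspace sketch of the same deletion-safe idiom (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* Stands in for __vcpu_clear(): unlinks and releases the current node. */
static void clear_one(struct node **head, struct node *n)
{
	for (struct node **pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			break;
		}
	}
	free(n);
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* The _safe idiom: grab the successor before the body frees n,
	 * which is exactly why list_for_each_entry_safe() carries the
	 * extra cursor variable. */
	for (struct node *n = head, *next; n; n = next) {
		next = n->next;
		printf("clearing vcpu %d\n", n->id);
		clear_one(&head, n);
	}
	return 0;
}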
@@ -2967,7 +2987,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vmx, 1);
+		vcpu_clear(vmx);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
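
With the per-cpu bookkeeping in place, a VMCS can only be cached on vmx->vcpu.cpu, so vmx_free_vmcs() no longer needs to broadcast __vcpu_clear to every cpu; vcpu_clear() sends one targeted IPI, or none at all if the vcpu was never loaded. A toy model of the saving (hypothetical, NR_CPUS assumed):

#include <stdio.h>

#define NR_CPUS 8	/* assumed for illustration */

static int ipis;

static void vmclear_on(int cpu)
{
	ipis++;
	printf("VMCLEAR on cpu %d\n", cpu);
}

int main(void)
{
	int vcpu_cpu = 2;	/* last cpu this vmcs was loaded on */

	/* Before: on_each_cpu(__vcpu_clear, vmx, 1) hit every cpu. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		vmclear_on(cpu);
	printf("broadcast cost: %d IPIs\n", ipis);

	/* After: vcpu_clear(vmx) targets only the owning cpu. */
	ipis = 0;
	if (vcpu_cpu != -1)
		vmclear_on(vcpu_cpu);
	printf("targeted cost: %d IPI(s)\n", ipis);
	return 0;
}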
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59084a3981c0..8c14ddcaba70 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -823,33 +823,6 @@ out:
  */
 void decache_vcpus_on_cpu(int cpu)
 {
-	struct kvm *vm;
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	spin_lock(&kvm_lock);
-	list_for_each_entry(vm, &vm_list, vm_list)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = vm->vcpus[i];
-			if (!vcpu)
-				continue;
-			/*
-			 * If the vcpu is locked, then it is running on some
-			 * other cpu and therefore it is not cached on the
-			 * cpu in question.
-			 *
-			 * If it's not locked, check the last cpu it executed
-			 * on.
-			 */
-			if (mutex_trylock(&vcpu->mutex)) {
-				if (vcpu->cpu == cpu) {
-					kvm_x86_ops->vcpu_decache(vcpu);
-					vcpu->cpu = -1;
-				}
-				mutex_unlock(&vcpu->mutex);
-			}
-		}
-	spin_unlock(&kvm_lock);
 }
 
 int kvm_dev_ioctl_check_extension(long ext)