about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorSheng Yang <sheng@linux.intel.com>2010-06-30 00:25:15 -0400
committerAvi Kivity <avi@redhat.com>2010-08-01 03:47:21 -0400
commitf5f48ee15c2ee3e44cf429e34b16c6fa9b900246 (patch)
tree75496197219d9aeedd3317fa007cc3b2e414c5da /arch
parentcf3e3d3e19868ca01da163200bbfc687523df0fc (diff)
KVM: VMX: Execute WBINVD to keep data consistency with assigned devices
Some guest device drivers may leverage "Non-Snoop" I/O and explicitly execute WBINVD or CLFLUSH on a RAM region. Since migration may occur before the WBINVD or CLFLUSH, we need to maintain data consistency either by: 1: flushing the cache (wbinvd) when the guest is scheduled out if there is no wbinvd exit, or 2: executing wbinvd on all dirty physical CPUs when the guest's wbinvd exits. Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com> Signed-off-by: Sheng Yang <sheng@linux.intel.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h6
-rw-r--r--arch/x86/kvm/emulate.c5
-rw-r--r--arch/x86/kvm/svm.c7
-rw-r--r--arch/x86/kvm/vmx.c10
-rw-r--r--arch/x86/kvm/x86.c41
5 files changed, 67 insertions, 2 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a57cdeacc4d2..2bda62485c4c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/mmu_notifier.h> 16#include <linux/mmu_notifier.h>
17#include <linux/tracepoint.h> 17#include <linux/tracepoint.h>
18#include <linux/cpumask.h>
18 19
19#include <linux/kvm.h> 20#include <linux/kvm.h>
20#include <linux/kvm_para.h> 21#include <linux/kvm_para.h>
@@ -358,6 +359,8 @@ struct kvm_vcpu_arch {
358 359
359 /* fields used by HYPER-V emulation */ 360 /* fields used by HYPER-V emulation */
360 u64 hv_vapic; 361 u64 hv_vapic;
362
363 cpumask_var_t wbinvd_dirty_mask;
361}; 364};
362 365
363struct kvm_arch { 366struct kvm_arch {
@@ -514,6 +517,8 @@ struct kvm_x86_ops {
514 517
515 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry); 518 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
516 519
520 bool (*has_wbinvd_exit)(void);
521
517 const struct trace_print_flags *exit_reasons_str; 522 const struct trace_print_flags *exit_reasons_str;
518}; 523};
519 524
@@ -571,6 +576,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
571int kvm_emulate_halt(struct kvm_vcpu *vcpu); 576int kvm_emulate_halt(struct kvm_vcpu *vcpu);
572int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); 577int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
573int emulate_clts(struct kvm_vcpu *vcpu); 578int emulate_clts(struct kvm_vcpu *vcpu);
579int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
574 580
575void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 581void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
576int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); 582int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index abb8cec420a2..e8bdddc4509e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3138,8 +3138,11 @@ twobyte_insn:
3138 emulate_clts(ctxt->vcpu); 3138 emulate_clts(ctxt->vcpu);
3139 c->dst.type = OP_NONE; 3139 c->dst.type = OP_NONE;
3140 break; 3140 break;
3141 case 0x08: /* invd */
3142 case 0x09: /* wbinvd */ 3141 case 0x09: /* wbinvd */
3142 kvm_emulate_wbinvd(ctxt->vcpu);
3143 c->dst.type = OP_NONE;
3144 break;
3145 case 0x08: /* invd */
3143 case 0x0d: /* GrpP (prefetch) */ 3146 case 0x0d: /* GrpP (prefetch) */
3144 case 0x18: /* Grp16 (prefetch/nop) */ 3147 case 0x18: /* Grp16 (prefetch/nop) */
3145 c->dst.type = OP_NONE; 3148 c->dst.type = OP_NONE;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 587b99d37d44..56c9b6bd7655 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3424,6 +3424,11 @@ static bool svm_rdtscp_supported(void)
3424 return false; 3424 return false;
3425} 3425}
3426 3426
3427static bool svm_has_wbinvd_exit(void)
3428{
3429 return true;
3430}
3431
3427static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) 3432static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
3428{ 3433{
3429 struct vcpu_svm *svm = to_svm(vcpu); 3434 struct vcpu_svm *svm = to_svm(vcpu);
@@ -3508,6 +3513,8 @@ static struct kvm_x86_ops svm_x86_ops = {
3508 .rdtscp_supported = svm_rdtscp_supported, 3513 .rdtscp_supported = svm_rdtscp_supported,
3509 3514
3510 .set_supported_cpuid = svm_set_supported_cpuid, 3515 .set_supported_cpuid = svm_set_supported_cpuid,
3516
3517 .has_wbinvd_exit = svm_has_wbinvd_exit,
3511}; 3518};
3512 3519
3513static int __init svm_init(void) 3520static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 661c6e199b4a..4dfb1dc09c88 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -412,6 +412,12 @@ static inline bool cpu_has_virtual_nmis(void)
412 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; 412 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
413} 413}
414 414
415static inline bool cpu_has_vmx_wbinvd_exit(void)
416{
417 return vmcs_config.cpu_based_2nd_exec_ctrl &
418 SECONDARY_EXEC_WBINVD_EXITING;
419}
420
415static inline bool report_flexpriority(void) 421static inline bool report_flexpriority(void)
416{ 422{
417 return flexpriority_enabled; 423 return flexpriority_enabled;
@@ -3397,7 +3403,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
3397static int handle_wbinvd(struct kvm_vcpu *vcpu) 3403static int handle_wbinvd(struct kvm_vcpu *vcpu)
3398{ 3404{
3399 skip_emulated_instruction(vcpu); 3405 skip_emulated_instruction(vcpu);
3400 /* TODO: Add support for VT-d/pass-through device */ 3406 kvm_emulate_wbinvd(vcpu);
3401 return 1; 3407 return 1;
3402} 3408}
3403 3409
@@ -4347,6 +4353,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
4347 .rdtscp_supported = vmx_rdtscp_supported, 4353 .rdtscp_supported = vmx_rdtscp_supported,
4348 4354
4349 .set_supported_cpuid = vmx_set_supported_cpuid, 4355 .set_supported_cpuid = vmx_set_supported_cpuid,
4356
4357 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
4350}; 4358};
4351 4359
4352static int __init vmx_init(void) 4360static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 27322d341232..3d72fc067059 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1783,8 +1783,28 @@ out:
1783 return r; 1783 return r;
1784} 1784}
1785 1785
1786static void wbinvd_ipi(void *garbage)
1787{
1788 wbinvd();
1789}
1790
1791static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
1792{
1793 return vcpu->kvm->arch.iommu_domain &&
1794 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
1795}
1796
1786void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1797void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1787{ 1798{
1799 /* Address WBINVD may be executed by guest */
1800 if (need_emulate_wbinvd(vcpu)) {
1801 if (kvm_x86_ops->has_wbinvd_exit())
1802 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
1803 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
1804 smp_call_function_single(vcpu->cpu,
1805 wbinvd_ipi, NULL, 1);
1806 }
1807
1788 kvm_x86_ops->vcpu_load(vcpu, cpu); 1808 kvm_x86_ops->vcpu_load(vcpu, cpu);
1789 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) { 1809 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1790 unsigned long khz = cpufreq_quick_get(cpu); 1810 unsigned long khz = cpufreq_quick_get(cpu);
@@ -3660,6 +3680,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3660 return X86EMUL_CONTINUE; 3680 return X86EMUL_CONTINUE;
3661} 3681}
3662 3682
3683int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3684{
3685 if (!need_emulate_wbinvd(vcpu))
3686 return X86EMUL_CONTINUE;
3687
3688 if (kvm_x86_ops->has_wbinvd_exit()) {
3689 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3690 wbinvd_ipi, NULL, 1);
3691 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3692 }
3693 wbinvd();
3694 return X86EMUL_CONTINUE;
3695}
3696EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
3697
3663int emulate_clts(struct kvm_vcpu *vcpu) 3698int emulate_clts(struct kvm_vcpu *vcpu)
3664{ 3699{
3665 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); 3700 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
@@ -5263,6 +5298,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5263 vcpu->arch.time_page = NULL; 5298 vcpu->arch.time_page = NULL;
5264 } 5299 }
5265 5300
5301 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5266 fx_free(vcpu); 5302 fx_free(vcpu);
5267 kvm_x86_ops->vcpu_free(vcpu); 5303 kvm_x86_ops->vcpu_free(vcpu);
5268} 5304}
@@ -5392,7 +5428,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5392 } 5428 }
5393 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 5429 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5394 5430
5431 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
5432 goto fail_free_mce_banks;
5433
5395 return 0; 5434 return 0;
5435fail_free_mce_banks:
5436 kfree(vcpu->arch.mce_banks);
5396fail_free_lapic: 5437fail_free_lapic:
5397 kvm_free_lapic(vcpu); 5438 kvm_free_lapic(vcpu);
5398fail_mmu_destroy: 5439fail_mmu_destroy: