Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	31
1 file changed, 24 insertions(+), 7 deletions(-)
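
The patch below converts the MSR-write paths to take a single struct msr_data
instead of separate (index, data) arguments, adding a host_initiated flag so
handlers can distinguish writes coming from the VMM (ioctl paths, vcpu setup)
from writes emulated on behalf of the guest (WRMSR). A minimal sketch of the
structure, inferred from the three fields this diff assigns; the real
definition lives in a KVM header (likely arch/x86/include/asm/kvm_host.h),
which is outside the scope of this diffstat:

struct msr_data {
	bool host_initiated;	/* true: write originates from the VMM/ioctl path */
	u32 index;		/* MSR number, e.g. MSR_IA32_TSC */
	u64 data;		/* 64-bit value being written */
};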
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd766214d9da..95f66136f2d5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -890,9 +890,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr);
 }
 
 /*
@@ -900,7 +900,12 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  */
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return kvm_set_msr(vcpu, index, *data);
+	struct msr_data msr;
+
+	msr.data = *data;
+	msr.index = index;
+	msr.host_initiated = true;
+	return kvm_set_msr(vcpu, &msr);
 }
 
 #ifdef CONFIG_X86_64
@@ -1130,13 +1135,14 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 #endif
 }
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
 	s64 usdiff;
 	bool matched;
+	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1857,9 +1863,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	bool pr = false;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr) {
 	case MSR_EFER:
@@ -4531,7 +4539,12 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	struct msr_data msr;
+
+	msr.data = data;
+	msr.index = msr_index;
+	msr.host_initiated = false;
+	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -6375,11 +6388,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	int r;
+	struct msr_data msr;
 
 	r = vcpu_load(vcpu);
 	if (r)
 		return r;
-	kvm_write_tsc(vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);
 
 	return r;
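
With the flag threaded through, the producers in this diff mark their writes
explicitly: do_set_msr (the ioctl path) and kvm_arch_vcpu_postcreate pass
host_initiated = true, while emulator_set_msr (guest WRMSR emulation) passes
false. kvm_write_tsc itself only consumes msr->data here; a hypothetical
consumer of the flag, just to illustrate the pattern it enables (the function
name, the field, and the policy are made up, not part of this diff):

static int set_host_managed_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	/*
	 * Hypothetical policy: only the VMM may write this MSR. Refusing
	 * a guest-initiated write (non-zero return) typically results in
	 * a #GP being injected into the guest further up the call chain.
	 */
	if (!msr->host_initiated)
		return 1;
	vcpu->arch.some_saved_value = msr->data;	/* field is made up */
	return 0;
}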