diff options
| author | Will Auld <will.auld.intel@gmail.com> | 2012-11-29 15:42:12 -0500 |
|---|---|---|
| committer | Marcelo Tosatti <mtosatti@redhat.com> | 2012-11-30 15:26:12 -0500 |
| commit | 8fe8ab46be06fcd9abfe6fe9928fd95b54ab079a (patch) | |
| tree | 38c355d8451076da9259f45db598763f91aaf941 | |
| parent | 5419369ed6bd4cf711fdda5e52a5999b940413f5 (diff) | |
KVM: x86: Add code to track call origin for msr assignment
In order to track who initiated the call (host or guest) to modify an MSR
value, I have changed the function call parameters along the call path. The
specific change is to add a struct pointer parameter that points to (index,
data, caller) information rather than having this information passed as
individual parameters.
The initial use for this capability is for updating the IA32_TSC_ADJUST MSR
while setting the TSC value. It is anticipated that this capability will be
useful for other tasks as well.
Signed-off-by: Will Auld <will.auld@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
| -rw-r--r-- | arch/x86/include/asm/kvm_host.h | 12 | ||||
| -rw-r--r-- | arch/x86/kvm/svm.c | 14 | ||||
| -rw-r--r-- | arch/x86/kvm/vmx.c | 18 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.c | 31 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.h | 2 |
5 files changed, 56 insertions, 21 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9fb6d8da7a43..56c5dca9d78d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -620,6 +620,12 @@ struct kvm_vcpu_stat { | |||
| 620 | 620 | ||
| 621 | struct x86_instruction_info; | 621 | struct x86_instruction_info; |
| 622 | 622 | ||
| 623 | struct msr_data { | ||
| 624 | bool host_initiated; | ||
| 625 | u32 index; | ||
| 626 | u64 data; | ||
| 627 | }; | ||
| 628 | |||
| 623 | struct kvm_x86_ops { | 629 | struct kvm_x86_ops { |
| 624 | int (*cpu_has_kvm_support)(void); /* __init */ | 630 | int (*cpu_has_kvm_support)(void); /* __init */ |
| 625 | int (*disabled_by_bios)(void); /* __init */ | 631 | int (*disabled_by_bios)(void); /* __init */ |
| @@ -642,7 +648,7 @@ struct kvm_x86_ops { | |||
| 642 | 648 | ||
| 643 | void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); | 649 | void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); |
| 644 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 650 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
| 645 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 651 | int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 646 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 652 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
| 647 | void (*get_segment)(struct kvm_vcpu *vcpu, | 653 | void (*get_segment)(struct kvm_vcpu *vcpu, |
| 648 | struct kvm_segment *var, int seg); | 654 | struct kvm_segment *var, int seg); |
| @@ -793,7 +799,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu, | |||
| 793 | 799 | ||
| 794 | void kvm_enable_efer_bits(u64); | 800 | void kvm_enable_efer_bits(u64); |
| 795 | int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); | 801 | int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); |
| 796 | int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 802 | int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 797 | 803 | ||
| 798 | struct x86_emulate_ctxt; | 804 | struct x86_emulate_ctxt; |
| 799 | 805 | ||
| @@ -820,7 +826,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |||
| 820 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); | 826 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); |
| 821 | 827 | ||
| 822 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | 828 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); |
| 823 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | 829 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 824 | 830 | ||
| 825 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); | 831 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); |
| 826 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | 832 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 161a5fa66d82..fc22e58d23b7 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -3127,13 +3127,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) | |||
| 3127 | return 0; | 3127 | return 0; |
| 3128 | } | 3128 | } |
| 3129 | 3129 | ||
| 3130 | static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | 3130 | static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
| 3131 | { | 3131 | { |
| 3132 | struct vcpu_svm *svm = to_svm(vcpu); | 3132 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3133 | 3133 | ||
| 3134 | u32 ecx = msr->index; | ||
| 3135 | u64 data = msr->data; | ||
| 3134 | switch (ecx) { | 3136 | switch (ecx) { |
| 3135 | case MSR_IA32_TSC: | 3137 | case MSR_IA32_TSC: |
| 3136 | kvm_write_tsc(vcpu, data); | 3138 | kvm_write_tsc(vcpu, msr); |
| 3137 | break; | 3139 | break; |
| 3138 | case MSR_STAR: | 3140 | case MSR_STAR: |
| 3139 | svm->vmcb->save.star = data; | 3141 | svm->vmcb->save.star = data; |
| @@ -3188,20 +3190,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | |||
| 3188 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); | 3190 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
| 3189 | break; | 3191 | break; |
| 3190 | default: | 3192 | default: |
| 3191 | return kvm_set_msr_common(vcpu, ecx, data); | 3193 | return kvm_set_msr_common(vcpu, msr); |
| 3192 | } | 3194 | } |
| 3193 | return 0; | 3195 | return 0; |
| 3194 | } | 3196 | } |
| 3195 | 3197 | ||
| 3196 | static int wrmsr_interception(struct vcpu_svm *svm) | 3198 | static int wrmsr_interception(struct vcpu_svm *svm) |
| 3197 | { | 3199 | { |
| 3200 | struct msr_data msr; | ||
| 3198 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | 3201 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; |
| 3199 | u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) | 3202 | u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) |
| 3200 | | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); | 3203 | | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); |
| 3201 | 3204 | ||
| 3205 | msr.data = data; | ||
| 3206 | msr.index = ecx; | ||
| 3207 | msr.host_initiated = false; | ||
| 3202 | 3208 | ||
| 3203 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; | 3209 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; |
| 3204 | if (svm_set_msr(&svm->vcpu, ecx, data)) { | 3210 | if (svm_set_msr(&svm->vcpu, &msr)) { |
| 3205 | trace_kvm_msr_write_ex(ecx, data); | 3211 | trace_kvm_msr_write_ex(ecx, data); |
| 3206 | kvm_inject_gp(&svm->vcpu, 0); | 3212 | kvm_inject_gp(&svm->vcpu, 0); |
| 3207 | } else { | 3213 | } else { |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3606154bbc8b..45ffa32352f1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -2220,15 +2220,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
| 2220 | * Returns 0 on success, non-0 otherwise. | 2220 | * Returns 0 on success, non-0 otherwise. |
| 2221 | * Assumes vcpu_load() was already called. | 2221 | * Assumes vcpu_load() was already called. |
| 2222 | */ | 2222 | */ |
| 2223 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | 2223 | static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
| 2224 | { | 2224 | { |
| 2225 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2225 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2226 | struct shared_msr_entry *msr; | 2226 | struct shared_msr_entry *msr; |
| 2227 | int ret = 0; | 2227 | int ret = 0; |
| 2228 | u32 msr_index = msr_info->index; | ||
| 2229 | u64 data = msr_info->data; | ||
| 2228 | 2230 | ||
| 2229 | switch (msr_index) { | 2231 | switch (msr_index) { |
| 2230 | case MSR_EFER: | 2232 | case MSR_EFER: |
| 2231 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 2233 | ret = kvm_set_msr_common(vcpu, msr_info); |
| 2232 | break; | 2234 | break; |
| 2233 | #ifdef CONFIG_X86_64 | 2235 | #ifdef CONFIG_X86_64 |
| 2234 | case MSR_FS_BASE: | 2236 | case MSR_FS_BASE: |
| @@ -2254,7 +2256,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
| 2254 | vmcs_writel(GUEST_SYSENTER_ESP, data); | 2256 | vmcs_writel(GUEST_SYSENTER_ESP, data); |
| 2255 | break; | 2257 | break; |
| 2256 | case MSR_IA32_TSC: | 2258 | case MSR_IA32_TSC: |
| 2257 | kvm_write_tsc(vcpu, data); | 2259 | kvm_write_tsc(vcpu, msr_info); |
| 2258 | break; | 2260 | break; |
| 2259 | case MSR_IA32_CR_PAT: | 2261 | case MSR_IA32_CR_PAT: |
| 2260 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | 2262 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { |
| @@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
| 2262 | vcpu->arch.pat = data; | 2264 | vcpu->arch.pat = data; |
| 2263 | break; | 2265 | break; |
| 2264 | } | 2266 | } |
| 2265 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 2267 | ret = kvm_set_msr_common(vcpu, msr_info); |
| 2266 | break; | 2268 | break; |
| 2267 | case MSR_TSC_AUX: | 2269 | case MSR_TSC_AUX: |
| 2268 | if (!vmx->rdtscp_enabled) | 2270 | if (!vmx->rdtscp_enabled) |
| @@ -2285,7 +2287,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
| 2285 | } | 2287 | } |
| 2286 | break; | 2288 | break; |
| 2287 | } | 2289 | } |
| 2288 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 2290 | ret = kvm_set_msr_common(vcpu, msr_info); |
| 2289 | } | 2291 | } |
| 2290 | 2292 | ||
| 2291 | return ret; | 2293 | return ret; |
| @@ -4648,11 +4650,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu) | |||
| 4648 | 4650 | ||
| 4649 | static int handle_wrmsr(struct kvm_vcpu *vcpu) | 4651 | static int handle_wrmsr(struct kvm_vcpu *vcpu) |
| 4650 | { | 4652 | { |
| 4653 | struct msr_data msr; | ||
| 4651 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | 4654 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
| 4652 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | 4655 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) |
| 4653 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); | 4656 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); |
| 4654 | 4657 | ||
| 4655 | if (vmx_set_msr(vcpu, ecx, data) != 0) { | 4658 | msr.data = data; |
| 4659 | msr.index = ecx; | ||
| 4660 | msr.host_initiated = false; | ||
| 4661 | if (vmx_set_msr(vcpu, &msr) != 0) { | ||
| 4656 | trace_kvm_msr_write_ex(ecx, data); | 4662 | trace_kvm_msr_write_ex(ecx, data); |
| 4657 | kvm_inject_gp(vcpu, 0); | 4663 | kvm_inject_gp(vcpu, 0); |
| 4658 | return 1; | 4664 | return 1; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fd766214d9da..95f66136f2d5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -890,9 +890,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); | |||
| 890 | * Returns 0 on success, non-0 otherwise. | 890 | * Returns 0 on success, non-0 otherwise. |
| 891 | * Assumes vcpu_load() was already called. | 891 | * Assumes vcpu_load() was already called. |
| 892 | */ | 892 | */ |
| 893 | int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | 893 | int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
| 894 | { | 894 | { |
| 895 | return kvm_x86_ops->set_msr(vcpu, msr_index, data); | 895 | return kvm_x86_ops->set_msr(vcpu, msr); |
| 896 | } | 896 | } |
| 897 | 897 | ||
| 898 | /* | 898 | /* |
| @@ -900,7 +900,12 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
| 900 | */ | 900 | */ |
| 901 | static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | 901 | static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) |
| 902 | { | 902 | { |
| 903 | return kvm_set_msr(vcpu, index, *data); | 903 | struct msr_data msr; |
| 904 | |||
| 905 | msr.data = *data; | ||
| 906 | msr.index = index; | ||
| 907 | msr.host_initiated = true; | ||
| 908 | return kvm_set_msr(vcpu, &msr); | ||
| 904 | } | 909 | } |
| 905 | 910 | ||
| 906 | #ifdef CONFIG_X86_64 | 911 | #ifdef CONFIG_X86_64 |
| @@ -1130,13 +1135,14 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) | |||
| 1130 | #endif | 1135 | #endif |
| 1131 | } | 1136 | } |
| 1132 | 1137 | ||
| 1133 | void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) | 1138 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) |
| 1134 | { | 1139 | { |
| 1135 | struct kvm *kvm = vcpu->kvm; | 1140 | struct kvm *kvm = vcpu->kvm; |
| 1136 | u64 offset, ns, elapsed; | 1141 | u64 offset, ns, elapsed; |
| 1137 | unsigned long flags; | 1142 | unsigned long flags; |
| 1138 | s64 usdiff; | 1143 | s64 usdiff; |
| 1139 | bool matched; | 1144 | bool matched; |
| 1145 | u64 data = msr->data; | ||
| 1140 | 1146 | ||
| 1141 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); | 1147 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); |
| 1142 | offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); | 1148 | offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); |
| @@ -1857,9 +1863,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu) | |||
| 1857 | &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); | 1863 | &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); |
| 1858 | } | 1864 | } |
| 1859 | 1865 | ||
| 1860 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 1866 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
| 1861 | { | 1867 | { |
| 1862 | bool pr = false; | 1868 | bool pr = false; |
| 1869 | u32 msr = msr_info->index; | ||
| 1870 | u64 data = msr_info->data; | ||
| 1863 | 1871 | ||
| 1864 | switch (msr) { | 1872 | switch (msr) { |
| 1865 | case MSR_EFER: | 1873 | case MSR_EFER: |
| @@ -4531,7 +4539,12 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, | |||
| 4531 | static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, | 4539 | static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, |
| 4532 | u32 msr_index, u64 data) | 4540 | u32 msr_index, u64 data) |
| 4533 | { | 4541 | { |
| 4534 | return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); | 4542 | struct msr_data msr; |
| 4543 | |||
| 4544 | msr.data = data; | ||
| 4545 | msr.index = msr_index; | ||
| 4546 | msr.host_initiated = false; | ||
| 4547 | return kvm_set_msr(emul_to_vcpu(ctxt), &msr); | ||
| 4535 | } | 4548 | } |
| 4536 | 4549 | ||
| 4537 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, | 4550 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, |
| @@ -6375,11 +6388,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
| 6375 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | 6388 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
| 6376 | { | 6389 | { |
| 6377 | int r; | 6390 | int r; |
| 6391 | struct msr_data msr; | ||
| 6378 | 6392 | ||
| 6379 | r = vcpu_load(vcpu); | 6393 | r = vcpu_load(vcpu); |
| 6380 | if (r) | 6394 | if (r) |
| 6381 | return r; | 6395 | return r; |
| 6382 | kvm_write_tsc(vcpu, 0); | 6396 | msr.data = 0x0; |
| 6397 | msr.index = MSR_IA32_TSC; | ||
| 6398 | msr.host_initiated = true; | ||
| 6399 | kvm_write_tsc(vcpu, &msr); | ||
| 6383 | vcpu_put(vcpu); | 6400 | vcpu_put(vcpu); |
| 6384 | 6401 | ||
| 6385 | return r; | 6402 | return r; |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 2b5219c12ac8..e224f7a671b6 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
| @@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | |||
| 112 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 112 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
| 113 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); | 113 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); |
| 114 | 114 | ||
| 115 | void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data); | 115 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 116 | 116 | ||
| 117 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, | 117 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, |
| 118 | gva_t addr, void *val, unsigned int bytes, | 118 | gva_t addr, void *val, unsigned int bytes, |
