author    Nadav Har'El <nyh@il.ibm.com>  2011-05-25 16:15:39 -0400
committer Avi Kivity <avi@redhat.com>    2011-07-12 04:45:19 -0400
commit    7991825b8558a719eb7cfb93c4458d767ae1f2eb
tree      6d9a09a6175a3f607737680525ba9486ac95dbab
parent    36cf24e01e9eba8c9ea201202762081ced2f8cdf
KVM: nVMX: Additional TSC-offset handling
In the unlikely case that L1 does not capture MSR_IA32_TSC, L0 needs to emulate this MSR write by L2 by modifying vmcs02.tsc_offset. We also need to set vmcs12.tsc_offset, for this change to survive the next nested entry (see prepare_vmcs02()).

Additionally, we also need to modify vmx_adjust_tsc_offset: the semantics of this function are that the TSC of all guests on this vcpu, L1 and possibly several L2s, needs to be adjusted. To do this, we need to adjust vmcs01's tsc_offset (this offset will also apply to each L2 we enter). We can't set vmcs01 now, so we have to remember this adjustment and apply it when we later exit to L1.

Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
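The arithmetic behind the vmcs12 update in the first hunk is easier to see in isolation. A minimal sketch, assuming prepare_vmcs02() composes the offset L2 sees as the sum of the vmcs01 and vmcs12 offsets; the helper names below are illustrative only and are not kernel identifiers:

#include <stdint.h>

/*
 * Assumed relationship: TSC seen by L2 = host TSC + vmcs01.tsc_offset
 *                                                 + vmcs12.tsc_offset
 */
static uint64_t l2_total_tsc_offset(uint64_t vmcs01_tsc_offset,
                                    uint64_t vmcs12_tsc_offset)
{
        return vmcs01_tsc_offset + vmcs12_tsc_offset;
}

/*
 * If L0 emulates an L2 MSR_IA32_TSC write that results in the total
 * offset "offset", the vmcs12 field must become
 * offset - vmcs01.tsc_offset, so that the next nested entry rebuilds
 * the same total.
 */
static uint64_t vmcs12_offset_after_write(uint64_t offset,
                                          uint64_t vmcs01_tsc_offset)
{
        return offset - vmcs01_tsc_offset;
}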
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/vmx.c | 12 ++++++++++++
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 10b22619e7ec..afc1f06907ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1766,12 +1766,24 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vmcs_write64(TSC_OFFSET, offset);
+	if (is_guest_mode(vcpu))
+		/*
+		 * We're here if L1 chose not to trap the TSC MSR. Since
+		 * prepare_vmcs12() does not copy tsc_offset, we need to also
+		 * set the vmcs12 field here.
+		 */
+		get_vmcs12(vcpu)->tsc_offset = offset -
+			to_vmx(vcpu)->nested.vmcs01_tsc_offset;
 }
 
 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
+	if (is_guest_mode(vcpu)) {
+		/* Even when running L2, the adjustment needs to apply to L1 */
+		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
+	}
 }
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
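The bookkeeping in the second hunk can be summarized with a small stand-alone model; the struct and its fields are stand-ins for the real VMCS state (only vmcs01_tsc_offset mirrors a field actually touched by the patch), not kernel identifiers:

#include <stdint.h>

struct tsc_offsets {
        uint64_t active;              /* TSC_OFFSET in the currently loaded VMCS */
        uint64_t vmcs01_tsc_offset;   /* remembered offset for L1 */
        int      guest_mode;          /* non-zero while L2 is running */
};

/*
 * Mirrors the patched logic: always adjust the offset in the active
 * VMCS; while in guest mode, also adjust the remembered L1 offset so
 * the change reaches vmcs01 when we later exit to L1.
 */
static void adjust_tsc_offset(struct tsc_offsets *t, int64_t adjustment)
{
        t->active += adjustment;
        if (t->guest_mode)
                t->vmcs01_tsc_offset += adjustment;
}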