aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-02-19 10:23:06 -0500
committerAvi Kivity <avi@redhat.com>2010-04-25 05:34:22 -0400
commit88ab24adc7142506c8583ac36a34fa388300b750 (patch)
treea3501849792f4fb681d0ccb6dc4135e455b9b57e /arch/x86/kvm/svm.c
parent4c7da8cb43c09e71a405b5aeaa58a1dbac3c39e9 (diff)
KVM: SVM: Don't sync nested cr8 to lapic and back
This patch makes syncing of the guest tpr to the lapic conditional on !nested. Otherwise a nested guest using the TPR could freeze the guest. Another important change this patch introduces is that the cr8 intercept bits are no longer ORed at vmrun emulation if the guest sets VINTR_MASKING in its VMCB. The reason is that nested cr8 accesses always need to be handled by the nested hypervisor, because they change the shadow version of the tpr. Cc: stable@kernel.org Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c46
1 file changed, 31 insertions, 15 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4459c477af9..481bd0ee5f7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1832,21 +1832,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1832 svm->vmcb->save.dr6 = nested_vmcb->save.dr6; 1832 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1833 svm->vmcb->save.cpl = nested_vmcb->save.cpl; 1833 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1834 1834
1835 /* We don't want a nested guest to be more powerful than the guest,
1836 so all intercepts are ORed */
1837 svm->vmcb->control.intercept_cr_read |=
1838 nested_vmcb->control.intercept_cr_read;
1839 svm->vmcb->control.intercept_cr_write |=
1840 nested_vmcb->control.intercept_cr_write;
1841 svm->vmcb->control.intercept_dr_read |=
1842 nested_vmcb->control.intercept_dr_read;
1843 svm->vmcb->control.intercept_dr_write |=
1844 nested_vmcb->control.intercept_dr_write;
1845 svm->vmcb->control.intercept_exceptions |=
1846 nested_vmcb->control.intercept_exceptions;
1847
1848 svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1849
1850 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; 1835 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1851 1836
1852 /* cache intercepts */ 1837 /* cache intercepts */
@@ -1864,6 +1849,28 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1864 else 1849 else
1865 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; 1850 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1866 1851
1852 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
1853 /* We only want the cr8 intercept bits of the guest */
1854 svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
1855 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1856 }
1857
1858 /* We don't want a nested guest to be more powerful than the guest,
1859 so all intercepts are ORed */
1860 svm->vmcb->control.intercept_cr_read |=
1861 nested_vmcb->control.intercept_cr_read;
1862 svm->vmcb->control.intercept_cr_write |=
1863 nested_vmcb->control.intercept_cr_write;
1864 svm->vmcb->control.intercept_dr_read |=
1865 nested_vmcb->control.intercept_dr_read;
1866 svm->vmcb->control.intercept_dr_write |=
1867 nested_vmcb->control.intercept_dr_write;
1868 svm->vmcb->control.intercept_exceptions |=
1869 nested_vmcb->control.intercept_exceptions;
1870
1871 svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1872
1873 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
1867 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; 1874 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1868 svm->vmcb->control.int_state = nested_vmcb->control.int_state; 1875 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1869 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; 1876 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
@@ -2526,6 +2533,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
2526{ 2533{
2527 struct vcpu_svm *svm = to_svm(vcpu); 2534 struct vcpu_svm *svm = to_svm(vcpu);
2528 2535
2536 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2537 return;
2538
2529 if (irr == -1) 2539 if (irr == -1)
2530 return; 2540 return;
2531 2541
@@ -2629,6 +2639,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
2629{ 2639{
2630 struct vcpu_svm *svm = to_svm(vcpu); 2640 struct vcpu_svm *svm = to_svm(vcpu);
2631 2641
2642 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2643 return;
2644
2632 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) { 2645 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
2633 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; 2646 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
2634 kvm_set_cr8(vcpu, cr8); 2647 kvm_set_cr8(vcpu, cr8);
@@ -2640,6 +2653,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
2640 struct vcpu_svm *svm = to_svm(vcpu); 2653 struct vcpu_svm *svm = to_svm(vcpu);
2641 u64 cr8; 2654 u64 cr8;
2642 2655
2656 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2657 return;
2658
2643 cr8 = kvm_get_cr8(vcpu); 2659 cr8 = kvm_get_cr8(vcpu);
2644 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; 2660 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
2645 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; 2661 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;