aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-11-29 11:51:48 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:30:06 -0500
commit2030753de70a8aed39543ed09c2360665b3af481 (patch)
treedb9974733e876b8c430739c7ab9f9dfda27acf33
parentec9e60b21977007e3dfacc2b8fe3a8fbb9276b51 (diff)
KVM: SVM: Make Use of the generic guest-mode functions
This patch replaces the is_nested logic in the SVM module with the generic notion of guest-mode. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--arch/x86/kvm/svm.c44
1 file changed, 21 insertions, 23 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b985cb81a573..2ae94b540358 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -192,11 +192,6 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
192 return container_of(vcpu, struct vcpu_svm, vcpu); 192 return container_of(vcpu, struct vcpu_svm, vcpu);
193} 193}
194 194
195static inline bool is_nested(struct vcpu_svm *svm)
196{
197 return svm->nested.vmcb;
198}
199
200static inline void enable_gif(struct vcpu_svm *svm) 195static inline void enable_gif(struct vcpu_svm *svm)
201{ 196{
202 svm->vcpu.arch.hflags |= HF_GIF_MASK; 197 svm->vcpu.arch.hflags |= HF_GIF_MASK;
@@ -727,7 +722,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
727 struct vcpu_svm *svm = to_svm(vcpu); 722 struct vcpu_svm *svm = to_svm(vcpu);
728 u64 g_tsc_offset = 0; 723 u64 g_tsc_offset = 0;
729 724
730 if (is_nested(svm)) { 725 if (is_guest_mode(vcpu)) {
731 g_tsc_offset = svm->vmcb->control.tsc_offset - 726 g_tsc_offset = svm->vmcb->control.tsc_offset -
732 svm->nested.hsave->control.tsc_offset; 727 svm->nested.hsave->control.tsc_offset;
733 svm->nested.hsave->control.tsc_offset = offset; 728 svm->nested.hsave->control.tsc_offset = offset;
@@ -741,7 +736,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
741 struct vcpu_svm *svm = to_svm(vcpu); 736 struct vcpu_svm *svm = to_svm(vcpu);
742 737
743 svm->vmcb->control.tsc_offset += adjustment; 738 svm->vmcb->control.tsc_offset += adjustment;
744 if (is_nested(svm)) 739 if (is_guest_mode(vcpu))
745 svm->nested.hsave->control.tsc_offset += adjustment; 740 svm->nested.hsave->control.tsc_offset += adjustment;
746} 741}
747 742
@@ -1209,7 +1204,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
1209 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { 1204 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
1210 vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK; 1205 vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
1211 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK; 1206 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
1212 if (is_nested(svm)) { 1207 if (is_guest_mode(&svm->vcpu)) {
1213 struct vmcb *hsave = svm->nested.hsave; 1208 struct vmcb *hsave = svm->nested.hsave;
1214 1209
1215 hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK; 1210 hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
@@ -1220,7 +1215,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
1220 } else { 1215 } else {
1221 svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK; 1216 svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
1222 svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK; 1217 svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
1223 if (is_nested(svm)) { 1218 if (is_guest_mode(&svm->vcpu)) {
1224 struct vmcb *hsave = svm->nested.hsave; 1219 struct vmcb *hsave = svm->nested.hsave;
1225 1220
1226 hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK; 1221 hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
@@ -1233,7 +1228,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1233{ 1228{
1234 struct vcpu_svm *svm = to_svm(vcpu); 1229 struct vcpu_svm *svm = to_svm(vcpu);
1235 1230
1236 if (is_nested(svm)) { 1231 if (is_guest_mode(vcpu)) {
1237 /* 1232 /*
1238 * We are here because we run in nested mode, the host kvm 1233 * We are here because we run in nested mode, the host kvm
1239 * intercepts cr0 writes but the l1 hypervisor does not. 1234 * intercepts cr0 writes but the l1 hypervisor does not.
@@ -1471,7 +1466,7 @@ static void svm_fpu_activate(struct kvm_vcpu *vcpu)
1471 struct vcpu_svm *svm = to_svm(vcpu); 1466 struct vcpu_svm *svm = to_svm(vcpu);
1472 u32 excp; 1467 u32 excp;
1473 1468
1474 if (is_nested(svm)) { 1469 if (is_guest_mode(vcpu)) {
1475 u32 h_excp, n_excp; 1470 u32 h_excp, n_excp;
1476 1471
1477 h_excp = svm->nested.hsave->control.intercept_exceptions; 1472 h_excp = svm->nested.hsave->control.intercept_exceptions;
@@ -1701,7 +1696,7 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1701{ 1696{
1702 int vmexit; 1697 int vmexit;
1703 1698
1704 if (!is_nested(svm)) 1699 if (!is_guest_mode(&svm->vcpu))
1705 return 0; 1700 return 0;
1706 1701
1707 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; 1702 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
@@ -1719,7 +1714,7 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1719/* This function returns true if it is safe to enable the irq window */ 1714/* This function returns true if it is safe to enable the irq window */
1720static inline bool nested_svm_intr(struct vcpu_svm *svm) 1715static inline bool nested_svm_intr(struct vcpu_svm *svm)
1721{ 1716{
1722 if (!is_nested(svm)) 1717 if (!is_guest_mode(&svm->vcpu))
1723 return true; 1718 return true;
1724 1719
1725 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) 1720 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
@@ -1758,7 +1753,7 @@ static inline bool nested_svm_intr(struct vcpu_svm *svm)
1758/* This function returns true if it is safe to enable the nmi window */ 1753/* This function returns true if it is safe to enable the nmi window */
1759static inline bool nested_svm_nmi(struct vcpu_svm *svm) 1754static inline bool nested_svm_nmi(struct vcpu_svm *svm)
1760{ 1755{
1761 if (!is_nested(svm)) 1756 if (!is_guest_mode(&svm->vcpu))
1762 return true; 1757 return true;
1763 1758
1764 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) 1759 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
@@ -1995,7 +1990,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1995 if (!nested_vmcb) 1990 if (!nested_vmcb)
1996 return 1; 1991 return 1;
1997 1992
1998 /* Exit nested SVM mode */ 1993 /* Exit Guest-Mode */
1994 leave_guest_mode(&svm->vcpu);
1999 svm->nested.vmcb = 0; 1995 svm->nested.vmcb = 0;
2000 1996
2001 /* Give the current vmcb to the guest */ 1997 /* Give the current vmcb to the guest */
@@ -2303,7 +2299,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
2303 2299
2304 nested_svm_unmap(page); 2300 nested_svm_unmap(page);
2305 2301
2306 /* nested_vmcb is our indicator if nested SVM is activated */ 2302 /* Enter Guest-Mode */
2303 enter_guest_mode(&svm->vcpu);
2304
2307 svm->nested.vmcb = vmcb_gpa; 2305 svm->nested.vmcb = vmcb_gpa;
2308 2306
2309 enable_gif(svm); 2307 enable_gif(svm);
@@ -2589,7 +2587,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2589 case MSR_IA32_TSC: { 2587 case MSR_IA32_TSC: {
2590 u64 tsc_offset; 2588 u64 tsc_offset;
2591 2589
2592 if (is_nested(svm)) 2590 if (is_guest_mode(vcpu))
2593 tsc_offset = svm->nested.hsave->control.tsc_offset; 2591 tsc_offset = svm->nested.hsave->control.tsc_offset;
2594 else 2592 else
2595 tsc_offset = svm->vmcb->control.tsc_offset; 2593 tsc_offset = svm->vmcb->control.tsc_offset;
@@ -3003,7 +3001,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
3003 return 1; 3001 return 1;
3004 } 3002 }
3005 3003
3006 if (is_nested(svm)) { 3004 if (is_guest_mode(vcpu)) {
3007 int vmexit; 3005 int vmexit;
3008 3006
3009 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, 3007 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
@@ -3110,7 +3108,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3110{ 3108{
3111 struct vcpu_svm *svm = to_svm(vcpu); 3109 struct vcpu_svm *svm = to_svm(vcpu);
3112 3110
3113 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) 3111 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3114 return; 3112 return;
3115 3113
3116 if (irr == -1) 3114 if (irr == -1)
@@ -3164,7 +3162,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3164 3162
3165 ret = !!(vmcb->save.rflags & X86_EFLAGS_IF); 3163 ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
3166 3164
3167 if (is_nested(svm)) 3165 if (is_guest_mode(vcpu))
3168 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); 3166 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3169 3167
3170 return ret; 3168 return ret;
@@ -3221,7 +3219,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3221{ 3219{
3222 struct vcpu_svm *svm = to_svm(vcpu); 3220 struct vcpu_svm *svm = to_svm(vcpu);
3223 3221
3224 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) 3222 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3225 return; 3223 return;
3226 3224
3227 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) { 3225 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
@@ -3235,7 +3233,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3235 struct vcpu_svm *svm = to_svm(vcpu); 3233 struct vcpu_svm *svm = to_svm(vcpu);
3236 u64 cr8; 3234 u64 cr8;
3237 3235
3238 if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) 3236 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3239 return; 3237 return;
3240 3238
3241 cr8 = kvm_get_cr8(vcpu); 3239 cr8 = kvm_get_cr8(vcpu);
@@ -3621,7 +3619,7 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
3621 struct vcpu_svm *svm = to_svm(vcpu); 3619 struct vcpu_svm *svm = to_svm(vcpu);
3622 3620
3623 svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; 3621 svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
3624 if (is_nested(svm)) 3622 if (is_guest_mode(vcpu))
3625 svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR; 3623 svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
3626 update_cr0_intercept(svm); 3624 update_cr0_intercept(svm);
3627} 3625}