author		Joerg Roedel <joerg.roedel@amd.com>	2010-02-19 10:23:08 -0500
committer	Avi Kivity <avi@redhat.com>		2010-04-25 05:34:28 -0400
commit		66a562f7e2576cde384ec813b481404d8f54f4c6 (patch)
tree		ea64df4f210caf8b0e375932a422174f95f658b8 /arch/x86/kvm/svm.c
parent		06fc7772690dec2a0e3814633357babf8f63af41 (diff)
KVM: SVM: Make lazy FPU switching work with nested svm
The new lazy FPU switching code may disable CR0 intercepts
when running nested. This is a bug because the nested
hypervisor may still want to intercept CR0, and clearing its
intercepts breaks it in this situation. This patch fixes the
issue and makes lazy FPU switching work with nested SVM.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
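
Before the diff itself, a minimal standalone sketch of the merge that the
fixed update_cr0_intercept() performs (see the first two hunks below).
Everything here is an illustrative stand-in, not kernel code: toy_vmcb,
toy_vcpu and toy_update_cr0_intercept are hypothetical names, and
INTERCEPT_CR0_MASK is reduced to a single bit. Only the merge logic mirrors
the patch: when KVM may drop its own CR0 intercepts, the bits the nested
(L1) hypervisor requested must be OR-ed back into the active VMCB instead
of being cleared along with them.

/* Illustrative stand-ins only; not the kernel's types or encodings. */
#include <stdio.h>

#define INTERCEPT_CR0_MASK 0x1u	/* single toy bit, not the real mask */

struct toy_vmcb {
	unsigned int intercept_cr_read;
	unsigned int intercept_cr_write;
};

struct toy_vcpu {
	struct toy_vmcb vmcb;	/* VMCB actually executed (L2's while nested) */
	struct toy_vmcb hsave;	/* host (L1) state saved across VMRUN */
	unsigned int nested_cr_read;	/* CR0 intercepts the L1 hypervisor set */
	unsigned int nested_cr_write;
	int nested;
	int fpu_active;
};

static void toy_update_cr0_intercept(struct toy_vcpu *v, int cr0_matches)
{
	if (cr0_matches && v->fpu_active) {
		/* KVM no longer needs the intercept for itself ... */
		v->vmcb.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
		v->vmcb.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
		if (v->nested) {
			v->hsave.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
			v->hsave.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
			/* ... but L1's own intercepts must survive */
			v->vmcb.intercept_cr_read |= v->nested_cr_read;
			v->vmcb.intercept_cr_write |= v->nested_cr_write;
		}
	} else {
		v->vmcb.intercept_cr_read |= INTERCEPT_CR0_MASK;
		v->vmcb.intercept_cr_write |= INTERCEPT_CR0_MASK;
		if (v->nested) {
			v->hsave.intercept_cr_read |= INTERCEPT_CR0_MASK;
			v->hsave.intercept_cr_write |= INTERCEPT_CR0_MASK;
		}
	}
}

int main(void)
{
	struct toy_vcpu v = {
		.nested = 1,
		.fpu_active = 1,
		.nested_cr_read = INTERCEPT_CR0_MASK, /* L1 intercepts CR0 reads */
	};

	toy_update_cr0_intercept(&v, 1 /* gcr0 == *hcr0 */);
	printf("read intercept kept for L1: %s\n",
	       (v.vmcb.intercept_cr_read & INTERCEPT_CR0_MASK) ? "yes" : "no");
	return 0;
}

Without the re-assert step (the pre-patch behaviour), the printf above
would report "no" and the L1 hypervisor would silently lose its CR0
intercept.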
Diffstat (limited to 'arch/x86/kvm/svm.c')
 arch/x86/kvm/svm.c | 43 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)
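
For reading the hunks below: while an L2 guest runs, svm->vmcb is the VMCB
actually executed, svm->nested.hsave holds the host (L1) state saved across
VMRUN, and svm->nested.intercept_* caches the intercept bits the L1
hypervisor programmed for L2. A trimmed-down view of the fields the patch
touches; the field names are taken from the diff, but the integer widths
and the full layout of the real struct nested_state in svm.c are
assumptions here:

/* Assumed, trimmed-down sketch; the real struct nested_state has more fields. */
struct nested_state {
	struct vmcb *hsave;		/* L1 state saved while L2 runs */
	u16 intercept_cr_read;		/* CR read intercepts requested by L1 */
	u16 intercept_cr_write;		/* CR write intercepts requested by L1 */
	u32 intercept_exceptions;	/* exception intercepts requested by L1 */
};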
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8ace0b0da933..a8ec53fe74f5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -979,6 +979,7 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
+	struct vmcb *vmcb = svm->vmcb;
 	ulong gcr0 = svm->vcpu.arch.cr0;
 	u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -990,11 +991,25 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
 
 
 	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+		vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+		vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+		if (is_nested(svm)) {
+			struct vmcb *hsave = svm->nested.hsave;
+
+			hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
+			hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+			vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
+			vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
+		}
 	} else {
 		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
 		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+		if (is_nested(svm)) {
+			struct vmcb *hsave = svm->nested.hsave;
+
+			hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+			hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+		}
 	}
 }
 
@@ -1269,7 +1284,22 @@ static int ud_interception(struct vcpu_svm *svm)
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+	u32 excp;
+
+	if (is_nested(svm)) {
+		u32 h_excp, n_excp;
+
+		h_excp  = svm->nested.hsave->control.intercept_exceptions;
+		n_excp  = svm->nested.intercept_exceptions;
+		h_excp &= ~(1 << NM_VECTOR);
+		excp    = h_excp | n_excp;
+	} else {
+		excp  = svm->vmcb->control.intercept_exceptions;
+		excp &= ~(1 << NM_VECTOR);
+	}
+
+	svm->vmcb->control.intercept_exceptions = excp;
+
 	svm->vcpu.fpu_active = 1;
 	update_cr0_intercept(svm);
 }
@@ -1513,6 +1543,9 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 		if (!npt_enabled)
 			return NESTED_EXIT_HOST;
 		break;
+	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+		nm_interception(svm);
+		break;
 	default:
 		break;
 	}
@@ -2980,8 +3013,10 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	update_cr0_intercept(svm);
 	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+	if (is_nested(svm))
+		svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+	update_cr0_intercept(svm);
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
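
The svm_fpu_activate() hunk applies the same union-not-overwrite rule to
the exception intercept mask: clearing the #NM intercept
(device-not-available, vector 7) for lazy FPU restore must only clear the
host's share, with L1's requested bits OR-ed back in. A standalone sketch
with a hypothetical helper name (fpu_activate_excp) and plain unsigned
masks:

#include <stdio.h>

#define NM_VECTOR 7	/* #NM, device-not-available */

/* Hypothetical helper mirroring the mask computation in svm_fpu_activate(). */
static unsigned int fpu_activate_excp(int nested,
				      unsigned int host_excp,
				      unsigned int nested_excp)
{
	if (nested) {
		/* drop #NM only from the host's share, keep L1's bits */
		host_excp &= ~(1u << NM_VECTOR);
		return host_excp | nested_excp;
	}
	return host_excp & ~(1u << NM_VECTOR);
}

int main(void)
{
	/* L1 itself intercepts #NM, so the bit must survive activation */
	unsigned int excp = fpu_activate_excp(1, 1u << NM_VECTOR,
					      1u << NM_VECTOR);
	printf("#NM still intercepted for L1: %s\n",
	       (excp & (1u << NM_VECTOR)) ? "yes" : "no");
	return 0;
}

Correspondingly, svm_fpu_deactivate() now sets the #NM bit in the saved L1
VMCB as well, so the intercept survives a nested #VMEXIT, and calls
update_cr0_intercept() only after the masks are final; the new case in
nested_svm_exit_special() lets the host react to a #NM exit (via
nm_interception()) even while L2 is running.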