diff options
author | Avi Kivity <avi@redhat.com> | 2010-01-06 03:55:27 -0500 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-03-01 10:35:51 -0500 |
commit | d225157bc6a442b1214882635fbf287d7d0e8133 (patch) | |
tree | edea55c1e777faa364c6c379fff136ded71b001f | |
parent | 888f9f3e0cfa32baf05b3840f0248f5502292a0f (diff) |
KVM: SVM: Selective cr0 intercept
If two conditions apply:
- no bits outside TS and EM differ between the host and guest cr0
- the fpu is active
then we can activate the selective cr0 write intercept and drop the
unconditional cr0 read and write intercept, and allow the guest to run
with the host fpu state. This reduces the number of cr0 exits caused by the
guest's own fpu management (TS toggling) while the guest fpu is loaded.
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r-- | arch/x86/kvm/svm.c | 32 |
1 files changed, 26 insertions, 6 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 27273ed24c41..83c7ab1bdad8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -571,6 +571,7 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
571 | control->intercept = (1ULL << INTERCEPT_INTR) | | 571 | control->intercept = (1ULL << INTERCEPT_INTR) | |
572 | (1ULL << INTERCEPT_NMI) | | 572 | (1ULL << INTERCEPT_NMI) | |
573 | (1ULL << INTERCEPT_SMI) | | 573 | (1ULL << INTERCEPT_SMI) | |
574 | (1ULL << INTERCEPT_SELECTIVE_CR0) | | ||
574 | (1ULL << INTERCEPT_CPUID) | | 575 | (1ULL << INTERCEPT_CPUID) | |
575 | (1ULL << INTERCEPT_INVD) | | 576 | (1ULL << INTERCEPT_INVD) | |
576 | (1ULL << INTERCEPT_HLT) | | 577 | (1ULL << INTERCEPT_HLT) | |
@@ -963,6 +964,27 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | |||
963 | { | 964 | { |
964 | } | 965 | } |
965 | 966 | ||
/*
 * Recompute the guest's view of CR0 in the VMCB and decide whether CR0
 * read/write exits can be dropped.
 *
 * If the guest and host CR0 agree on every bit outside
 * SVM_CR0_SELECTIVE_MASK (per the commit message: TS and EM) and the guest
 * FPU is active, the unconditional CR0 read/write intercepts are cleared;
 * the selective CR0 write intercept (enabled in init_vmcb) still catches
 * writes that change the non-selective bits.  Otherwise both intercepts
 * are (re)enabled.
 */
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		/* Guest FPU not loaded: force the selective bits on (keeps
		 * TS set so the guest faults into nm_interception). */
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		/* Guest FPU loaded: let the guest's own selective bits
		 * show through in the VMCB copy of cr0. */
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);


	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		/* Guest and effective cr0 identical: CR0 accesses need no
		 * unconditional exit. */
		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
	} else {
		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
	}
}
987 | |||
966 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 988 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
967 | { | 989 | { |
968 | struct vcpu_svm *svm = to_svm(vcpu); | 990 | struct vcpu_svm *svm = to_svm(vcpu); |
@@ -994,6 +1016,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
994 | */ | 1016 | */ |
995 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | 1017 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
996 | svm->vmcb->save.cr0 = cr0; | 1018 | svm->vmcb->save.cr0 = cr0; |
1019 | update_cr0_intercept(svm); | ||
997 | } | 1020 | } |
998 | 1021 | ||
999 | static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | 1022 | static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
@@ -1239,11 +1262,8 @@ static int ud_interception(struct vcpu_svm *svm) | |||
/*
 * #NM (device-not-available) exit handler: the guest touched the FPU while
 * it was lazily unloaded.  Stop intercepting #NM, mark the guest FPU
 * active, and let update_cr0_intercept() refresh the VMCB cr0 copy and
 * drop the CR0 intercepts if possible (replaces the previous open-coded
 * TS-bit fixup here).  Returns 1 to resume the guest.
 */
static int nm_interception(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	svm->vcpu.fpu_active = 1;
	update_cr0_intercept(svm);

	return 1;
}
@@ -2296,7 +2316,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
2296 | [SVM_EXIT_READ_CR3] = emulate_on_interception, | 2316 | [SVM_EXIT_READ_CR3] = emulate_on_interception, |
2297 | [SVM_EXIT_READ_CR4] = emulate_on_interception, | 2317 | [SVM_EXIT_READ_CR4] = emulate_on_interception, |
2298 | [SVM_EXIT_READ_CR8] = emulate_on_interception, | 2318 | [SVM_EXIT_READ_CR8] = emulate_on_interception, |
2299 | /* for now: */ | 2319 | [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, |
2300 | [SVM_EXIT_WRITE_CR0] = emulate_on_interception, | 2320 | [SVM_EXIT_WRITE_CR0] = emulate_on_interception, |
2301 | [SVM_EXIT_WRITE_CR3] = emulate_on_interception, | 2321 | [SVM_EXIT_WRITE_CR3] = emulate_on_interception, |
2302 | [SVM_EXIT_WRITE_CR4] = emulate_on_interception, | 2322 | [SVM_EXIT_WRITE_CR4] = emulate_on_interception, |
@@ -2914,8 +2934,8 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) | |||
2914 | return; | 2934 | return; |
2915 | } | 2935 | } |
2916 | 2936 | ||
2937 | update_cr0_intercept(svm); | ||
2917 | svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; | 2938 | svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; |
2918 | svm->vmcb->save.cr0 |= X86_CR0_TS; | ||
2919 | } | 2939 | } |
2920 | 2940 | ||
2921 | static struct kvm_x86_ops svm_x86_ops = { | 2941 | static struct kvm_x86_ops svm_x86_ops = { |