author		Eugene Korenevsky <ekorenevsky@gmail.com>	2015-03-29 16:56:44 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>		2015-04-08 04:46:58 -0400
commit		92d71bc6951364c20f7d8c70a83cd93a68a63ea7 (patch)
tree		1633f3257a8c1f644f5db589f7a380297c3af63a /arch/x86
parent		9090422f1ca5270795738549cf91a4ae7cb47662 (diff)
KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
After the speed-up of cpuid_maxphyaddr(), it can be called frequently:
instead of a heavyweight enumeration of CPUID entries, it returns a cached,
pre-computed value, and it is now inlined. Caching its result in the callers
is therefore unnecessary and can be removed.
Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Message-Id: <20150329205644.GA1258@gnote>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
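
For context, the parent commit (9090422f1ca5) is what made the helper cheap: cpuid_maxphyaddr() no longer enumerates the guest CPUID entries on every call but returns a value pre-computed when the guest CPUID is updated. A minimal sketch of that inline accessor is shown below; it paraphrases arch/x86/kvm/cpuid.h as of the parent commit and is not part of this patch:

/*
 * Sketch of the sped-up helper introduced by the parent commit; the field
 * name and placement follow arch/x86/kvm/cpuid.{h,c} as of that change and
 * may differ slightly from the exact source.
 */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	/* Pre-computed once at CPUID update time; no CPUID enumeration here. */
	return vcpu->arch.maxphyaddr;
}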
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/vmx.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6f770e875936..ddce07e8bef8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8866,9 +8866,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 				       unsigned long count_field,
-				       unsigned long addr_field,
-				       int maxphyaddr)
+				       unsigned long addr_field)
 {
+	int maxphyaddr;
 	u64 count, addr;
 
 	if (vmcs12_read_any(vcpu, count_field, &count) ||
@@ -8878,6 +8878,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 	}
 	if (count == 0)
 		return 0;
+	maxphyaddr = cpuid_maxphyaddr(vcpu);
 	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
 	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
 		pr_warn_ratelimited(
@@ -8891,19 +8892,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
 						struct vmcs12 *vmcs12)
 {
-	int maxphyaddr;
-
 	if (vmcs12->vm_exit_msr_load_count == 0 &&
 	    vmcs12->vm_exit_msr_store_count == 0 &&
 	    vmcs12->vm_entry_msr_load_count == 0)
 		return 0; /* Fast path */
-	maxphyaddr = cpuid_maxphyaddr(vcpu);
 	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-					VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+					VM_EXIT_MSR_LOAD_ADDR) ||
 	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-					VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+					VM_EXIT_MSR_STORE_ADDR) ||
 	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-					VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+					VM_ENTRY_MSR_LOAD_ADDR))
 		return -EINVAL;
 	return 0;
 }
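
Assembled from the hunks above, nested_vmx_check_msr_switch() reads roughly as follows after this patch. The lines between the hunks (the second vmcs12_read_any() call, its error handling, and the exact warning message) are not shown in the diff and are filled in here as a best-effort sketch, so they may differ from the actual tree:

/*
 * nested_vmx_check_msr_switch() after this patch, reconstructed from the
 * hunks above.  The error path and warning text between the hunks are a
 * sketch and may not match the source verbatim.
 */
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       unsigned long count_field,
				       unsigned long addr_field)
{
	int maxphyaddr;
	u64 count, addr;

	if (vmcs12_read_any(vcpu, count_field, &count) ||
	    vmcs12_read_any(vcpu, addr_field, &addr)) {
		WARN_ON(1);
		return -EINVAL;
	}
	if (count == 0)
		return 0;
	/* Looked up only past the fast path; cheap now that it is cached. */
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
		pr_warn_ratelimited("nVMX: invalid MSR switch area (0x%lx, %llu, 0x%08llx)\n",
				    addr_field, count, addr);
		return -EINVAL;
	}
	return 0;
}

The point of the change is visible here: because each call is now a single cached-field read, the helper fetches MAXPHYADDR itself, after its own count == 0 fast path, instead of having nested_vmx_check_msr_switch_controls() compute it up front and thread it through as an extra parameter.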