aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-04-19 06:22:48 -0400
committerAvi Kivity <avi@qumranet.com>2007-05-03 03:52:30 -0400
commite38aea3e9330624d19a233c05f3e69c57519edd5 (patch)
tree68daabdafa535043ab68d01b80642fbf6a4c0977 /drivers
parent2345df8c555ecb92c0c36172c07d5ac321a92dc7 (diff)
KVM: VMX: Don't switch 64-bit msrs for 32-bit guests
Some msrs are only used by x86_64 instructions, and are therefore not needed when the guest is in legacy mode. By not bothering to switch them, we reduce vmexit latency by 2400 cycles (from about 8800) when running a 32-bit guest on a 64-bit host. Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/kvm/vmx.c58
1 file changed, 42 insertions, 16 deletions
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 3745e6ccc5b4..6270df58e055 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -80,6 +80,9 @@ static const u32 vmx_msr_index[] = {
80 80
81#ifdef CONFIG_X86_64 81#ifdef CONFIG_X86_64
82static unsigned msr_offset_kernel_gs_base; 82static unsigned msr_offset_kernel_gs_base;
83#define NR_64BIT_MSRS 4
84#else
85#define NR_64BIT_MSRS 0
83#endif 86#endif
84 87
85static inline int is_page_fault(u32 intr_info) 88static inline int is_page_fault(u32 intr_info)
@@ -301,6 +304,32 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
301} 304}
302 305
303/* 306/*
307 * Set up the vmcs to automatically save and restore system
308 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
309 * mode, as fiddling with msrs is very expensive.
310 */
311static void setup_msrs(struct kvm_vcpu *vcpu)
312{
313 int nr_skip, nr_good_msrs;
314
315 if (is_long_mode(vcpu))
316 nr_skip = NR_BAD_MSRS;
317 else
318 nr_skip = NR_64BIT_MSRS;
319 nr_good_msrs = vcpu->nmsrs - nr_skip;
320
321 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
322 virt_to_phys(vcpu->guest_msrs + nr_skip));
323 vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
324 virt_to_phys(vcpu->guest_msrs + nr_skip));
325 vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
326 virt_to_phys(vcpu->host_msrs + nr_skip));
327 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
328 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
329 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
330}
331
332/*
304 * reads and returns guest's timestamp counter "register" 333 * reads and returns guest's timestamp counter "register"
305 * guest_tsc = host_tsc + tsc_offset -- 21.3 334 * guest_tsc = host_tsc + tsc_offset -- 21.3
306 */ 335 */
@@ -825,6 +854,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
825 854
826 msr->data = efer & ~EFER_LME; 855 msr->data = efer & ~EFER_LME;
827 } 856 }
857 setup_msrs(vcpu);
828} 858}
829 859
830#endif 860#endif
@@ -988,7 +1018,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
988 struct descriptor_table dt; 1018 struct descriptor_table dt;
989 int i; 1019 int i;
990 int ret = 0; 1020 int ret = 0;
991 int nr_good_msrs;
992 extern asmlinkage void kvm_vmx_return(void); 1021 extern asmlinkage void kvm_vmx_return(void);
993 1022
994 if (!init_rmode_tss(vcpu->kvm)) { 1023 if (!init_rmode_tss(vcpu->kvm)) {
@@ -1140,19 +1169,10 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1140 ++vcpu->nmsrs; 1169 ++vcpu->nmsrs;
1141 } 1170 }
1142 1171
1143 nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS; 1172 setup_msrs(vcpu);
1144 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR, 1173
1145 virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
1146 vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
1147 virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
1148 vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
1149 virt_to_phys(vcpu->host_msrs + NR_BAD_MSRS));
1150 vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS, 1174 vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
1151 (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */ 1175 (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
1152 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
1153 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
1154 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
1155
1156 1176
1157 /* 22.2.1, 20.8.1 */ 1177 /* 22.2.1, 20.8.1 */
1158 vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS, 1178 vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
@@ -1769,9 +1789,11 @@ again:
1769 fx_restore(vcpu->guest_fx_image); 1789 fx_restore(vcpu->guest_fx_image);
1770 1790
1771#ifdef CONFIG_X86_64 1791#ifdef CONFIG_X86_64
1772 save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1); 1792 if (is_long_mode(vcpu)) {
1793 save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
1794 load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1795 }
1773#endif 1796#endif
1774 load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1775 1797
1776 asm ( 1798 asm (
1777 /* Store host registers */ 1799 /* Store host registers */
@@ -1915,8 +1937,12 @@ again:
1915 } 1937 }
1916 ++kvm_stat.exits; 1938 ++kvm_stat.exits;
1917 1939
1918 save_msrs(vcpu->guest_msrs, NR_BAD_MSRS); 1940#ifdef CONFIG_X86_64
1919 load_msrs(vcpu->host_msrs, NR_BAD_MSRS); 1941 if (is_long_mode(vcpu)) {
1942 save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1943 load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
1944 }
1945#endif
1920 1946
1921 fx_save(vcpu->guest_fx_image); 1947 fx_save(vcpu->guest_fx_image);
1922 fx_restore(vcpu->host_fx_image); 1948 fx_restore(vcpu->host_fx_image);