author		Avi Kivity <avi@redhat.com>	2012-05-13 12:53:24 -0400
committer	Marcelo Tosatti <mtosatti@redhat.com>	2012-05-16 15:03:19 -0400
commit		b2da15ac26a0c00fc0d399a2bc5cf3c4e15f0b4f (patch)
tree		7cf2f30e20976877b7bcef1b76a7441f474f062d /arch/x86
parent		512d5649e8dc3ed36f2ebf0818da64a4d4c2544a (diff)
KVM: VMX: Optimize %ds, %es reload
On x86_64, we can defer %ds and %es reload to the heavyweight context
switch, since nothing in the lightweight paths uses the host %ds or %es
(they are ignored by the processor). Furthermore we can avoid the load
if the segments are null, by letting the hardware load the null segments
for us. This is the expected case.

On i386, we could avoid the reload entirely, since the entry.S paths
take care of reload, except for the SYSEXIT path which leaves %ds and
%es set to __USER_DS. So we set them to the same values as well.

Saves about 70 cycles out of 1600 (around 4%; noisy measurements).

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
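As an aside, the "expected case" is easy to check from userspace: a
64-bit Linux process normally runs with null %ds and %es. A minimal
standalone sketch (not part of this patch; assumes gcc or clang on
x86_64 Linux) that prints the live selectors:

#include <stdio.h>

int main(void)
{
	unsigned short ds, es;

	/* Read the live segment selectors; on a typical 64-bit Linux
	 * process both are expected to be null (0). */
	asm volatile("mov %%ds, %0" : "=r"(ds));
	asm volatile("mov %%es, %0" : "=r"(es));
	printf("ds=%#x es=%#x\n", ds, es);
	return 0;
}

When both print 0, the exit path in the diff below leaves the
hardware-loaded null selectors in place and skips both loadsegment()
calls.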
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/vmx.c	36
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f2ee016e1004..32eb58866292 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -393,6 +393,9 @@ struct vcpu_vmx {
 	struct {
 		int loaded;
 		u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+		u16 ds_sel, es_sel;
+#endif
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
 	} host_state;
@@ -1418,6 +1421,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	}
 
+#ifdef CONFIG_X86_64
+	savesegment(ds, vmx->host_state.ds_sel);
+	savesegment(es, vmx->host_state.es_sel);
+#endif
+
 #ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1457,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	}
 	if (vmx->host_state.fs_reload_needed)
 		loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+		loadsegment(ds, vmx->host_state.ds_sel);
+		loadsegment(es, vmx->host_state.es_sel);
+	}
+#else
+	/*
+	 * The sysexit path does not restore ds/es, so we must set them to
+	 * a reasonable value ourselves.
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
 	reload_tss();
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3640,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+#ifdef CONFIG_X86_64
+	/*
+	 * Load null selectors, so we can avoid reloading them in
+	 * __vmx_load_host_state(), in case userspace uses the null selectors
+	 * too (the expected case).
+	 */
+	vmcs_write16(HOST_DS_SELECTOR, 0);
+	vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+#endif
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
@@ -6102,10 +6133,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u16 _ds, _es;
 
-	savesegment(ds, _ds);
-	savesegment(es, _es);
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 		if (vmcs12->idt_vectoring_info_field &
@@ -6266,8 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	loadsegment(ds, _ds);
-	loadsegment(es, _es);
 	vmx->loaded_vmcs->launched = 1;
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
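A note on the fast-path test in __vmx_load_host_state() above: OR-ing
the two saved selectors lets a single unlikely() branch cover both
registers. A standalone sketch of the same idiom (hypothetical helper
name, for illustration only):

#include <stdbool.h>

/* True when at least one saved selector is non-null and must be
 * reloaded; the both-null case (typical 64-bit userspace) costs only
 * one untaken branch. */
static bool segment_reload_needed(unsigned short ds_sel,
				  unsigned short es_sel)
{
	return (ds_sel | es_sel) != 0;
}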