author    Laurent Vivier <Laurent.Vivier@bull.net>  2007-08-23 10:33:11 -0400
committer Avi Kivity <avi@qumranet.com>             2007-10-13 04:18:24 -0400
commit    152d3f2f246ce3c2a0cf2fc6c2214663cd99aa83
tree      e499626bab90fc75a9c0cc97e96c68a7fd4bbaa3 /drivers/kvm/vmx.c
parent    d39dba54ce71ab3234c387219b175dc36d37f85a
KVM: VMX: Split segments reload in vmx_load_host_state()
vmx_load_host_state() bundles the fs, gs, ldt, and tss reloads into a
single operation, in the hope that reloading is infrequent.  With smp
guests, fs reloads are frequent because threads use fs.  Unbundle the
reloads so that the common fs reload no longer forces the expensive gs
reload.

Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Avi Kivity <avi@qumranet.com>
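To make the before/after behavior concrete, here is a minimal user-space
sketch (an illustration, not kernel code) of the split this patch makes:
the single fs_gs_ldt_reload_needed flag becomes fs_reload_needed and
gs_ldt_reload_needed, so the frequent fs-only case skips the gs/ldt work,
and reload_tss() moves out of the conditional.  The printf bodies stand
in for the real segment loads, and the gs-base preservation done under
local_irq_save() in the real code is elided.

/* Standalone sketch of the unbundled reload path. */
#include <stdio.h>

struct host_state {
	int loaded;
	unsigned short fs_sel, gs_sel, ldt_sel;
	int gs_ldt_reload_needed;   /* rare: gs or ldt was dirtied */
	int fs_reload_needed;       /* frequent: threads dirty fs  */
};

/* Stand-ins for the real load_fs()/load_ldt()/reload_tss(). */
static void load_fs(unsigned short sel)  { printf("reload fs  = %#x\n", (unsigned)sel); }
static void load_ldt(unsigned short sel) { printf("reload ldt = %#x\n", (unsigned)sel); }
static void reload_tss(void)             { printf("reload tss\n"); }

static void vmx_load_host_state(struct host_state *hs)
{
	if (!hs->loaded)
		return;
	hs->loaded = 0;

	/* Cheap, common case: only fs was dirtied. */
	if (hs->fs_reload_needed)
		load_fs(hs->fs_sel);

	/* Expensive, rare case (gs-base preservation elided here). */
	if (hs->gs_ldt_reload_needed)
		load_ldt(hs->ldt_sel);

	/* As in the patched function, tss is reloaded unconditionally. */
	reload_tss();
}

int main(void)
{
	/* Typical smp-guest exit: a thread dirtied fs; gs/ldt are clean. */
	struct host_state hs = {
		.loaded = 1,
		.fs_sel = 0x3b,            /* arbitrary example selector */
		.fs_reload_needed = 1,
		.gs_ldt_reload_needed = 0,
	};
	vmx_load_host_state(&hs);          /* prints only the fs reload */
	return 0;
}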
Diffstat (limited to 'drivers/kvm/vmx.c')
 drivers/kvm/vmx.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b40066854c14..d63e82e5dbf8 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -54,7 +54,8 @@ struct vcpu_vmx {
 	struct {
 		int loaded;
 		u16 fs_sel, gs_sel, ldt_sel;
-		int fs_gs_ldt_reload_needed;
+		int gs_ldt_reload_needed;
+		int fs_reload_needed;
 	}host_state;
 
 };
@@ -353,20 +354,21 @@ static void vmx_save_host_state(struct vcpu_vmx *vmx)
 	 * allow segment selectors with cpl > 0 or ti == 1.
 	 */
 	vmx->host_state.ldt_sel = read_ldt();
-	vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
 	vmx->host_state.fs_sel = read_fs();
-	if (!(vmx->host_state.fs_sel & 7))
+	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
-	else {
+		vmx->host_state.fs_reload_needed = 0;
+	} else {
 		vmcs_write16(HOST_FS_SELECTOR, 0);
-		vmx->host_state.fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.fs_reload_needed = 1;
 	}
 	vmx->host_state.gs_sel = read_gs();
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
 		vmcs_write16(HOST_GS_SELECTOR, 0);
-		vmx->host_state.fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.gs_ldt_reload_needed = 1;
 	}
 
 #ifdef CONFIG_X86_64
@@ -396,9 +398,10 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 		return;
 
 	vmx->host_state.loaded = 0;
-	if (vmx->host_state.fs_gs_ldt_reload_needed) {
-		load_ldt(vmx->host_state.ldt_sel);
+	if (vmx->host_state.fs_reload_needed)
 		load_fs(vmx->host_state.fs_sel);
+	if (vmx->host_state.gs_ldt_reload_needed) {
+		load_ldt(vmx->host_state.ldt_sel);
 		/*
 		 * If we have to reload gs, we must take care to
 		 * preserve our gs base.
@@ -409,9 +412,8 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
 		local_irq_restore(flags);
-
-		reload_tss();
 	}
+	reload_tss();
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
 	if (msr_efer_need_save_restore(vmx))
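One detail worth noting in vmx_save_host_state() above: the (sel & 7)
tests work because the low three bits of an x86 segment selector are the
RPL (bits 1:0) and the TI flag (bit 2), and the VMCS host-state selector
fields must have those bits clear.  Any selector with (sel & 7) != 0
therefore cannot be restored by the vm-exit itself and must be reloaded
by hand, which is what the reload_needed flags record.  A small
standalone decoder (illustrative selector values, not kernel code):

#include <stdio.h>

/* Decode the low bits of an x86 segment selector. */
static void classify(unsigned short sel)
{
	unsigned rpl = sel & 3;         /* requested privilege level     */
	unsigned ti  = (sel >> 2) & 1;  /* table indicator: 0=GDT, 1=LDT */

	printf("sel %#06x: index=%u ti=%u rpl=%u -> %s\n",
	       (unsigned)sel, (unsigned)(sel >> 3), ti, rpl,
	       (sel & 7) ? "write 0, reload by hand after vm-exit"
	                 : "restorable via HOST_xS_SELECTOR");
}

int main(void)
{
	classify(0x10);  /* GDT, rpl 0: the vm-exit can restore it */
	classify(0x3b);  /* GDT, rpl 3: manual reload needed       */
	classify(0x07);  /* LDT (ti=1): manual reload needed       */
	return 0;
}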