author    Avi Kivity <avi@qumranet.com>    2007-08-28 20:48:05 -0400
committer Avi Kivity <avi@qumranet.com>    2008-01-30 10:52:47 -0500
commit    51c6cf662b4b361a09fbd324f4c67875d9bcfbea (patch)
tree      8b16dd35e100af2feec42306348896a6ef198a50 /drivers/kvm/vmx.c
parent    3427318fd2244737a466a06a93c5fe579852f871 (diff)
KVM: VMX: Further reduce efer reloads
KVM avoids reloading the EFER MSR when the difference between the guest and host values consists only of the long mode bits (which are switched by hardware) and the NX bit (which is emulated by the KVM MMU). This patch additionally allows KVM to ignore SCE (syscall enable) when the guest is running in 32-bit mode: the syscall instruction is not available in 32-bit mode on Intel processors, so the SCE bit is effectively meaningless there.

Signed-off-by: Avi Kivity <avi@qumranet.com>
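The mask logic the patch introduces can be read in isolation from the new load_transition_efer() in the diff below. As a minimal standalone sketch of that decision (the helpers efer_ignore_bits() and efer_reload_needed() are hypothetical names introduced here only for illustration, not part of the patch):

static u64 efer_ignore_bits(u64 guest_efer)
{
	/* NX is emulated by the KVM MMU, so a guest/host mismatch is fine. */
	u64 ignore_bits = EFER_NX | EFER_SCE;

#ifdef CONFIG_X86_64
	/* Hardware switches LMA and LME on VM entry/exit. */
	ignore_bits |= EFER_LMA | EFER_LME;
	/* On Intel, SCE only takes effect once the guest is in long mode. */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	return ignore_bits;
}

static int efer_reload_needed(u64 guest_efer, u64 host_efer)
{
	u64 ignore_bits = efer_ignore_bits(guest_efer);

	/* Reload EFER only when bits that neither hardware nor the MMU
	 * already handles actually differ between guest and host. */
	return (guest_efer & ~ignore_bits) != (host_efer & ~ignore_bits);
}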
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--    drivers/kvm/vmx.c    61
1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 91768d5dbfb9..8eb49e055ec0 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -57,6 +57,7 @@ struct vcpu_vmx {
 		u16 fs_sel, gs_sel, ldt_sel;
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
+		int guest_efer_loaded;
 	}host_state;
 
 };
@@ -74,8 +75,6 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
74static struct page *vmx_io_bitmap_a; 75static struct page *vmx_io_bitmap_a;
75static struct page *vmx_io_bitmap_b; 76static struct page *vmx_io_bitmap_b;
76 77
77#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
78
79static struct vmcs_config { 78static struct vmcs_config {
80 int size; 79 int size;
81 int order; 80 int order;
@@ -138,18 +137,6 @@ static void save_msrs(struct kvm_msr_entry *e, int n)
 		rdmsrl(e[i].index, e[i].data);
 }
 
-static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
-{
-	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
-}
-
-static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
-{
-	int efer_offset = vmx->msr_offset_efer;
-	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
-		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
-}
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -351,16 +338,42 @@ static void reload_tss(void)
 
 static void load_transition_efer(struct vcpu_vmx *vmx)
 {
-	u64 trans_efer;
 	int efer_offset = vmx->msr_offset_efer;
+	u64 host_efer = vmx->host_msrs[efer_offset].data;
+	u64 guest_efer = vmx->guest_msrs[efer_offset].data;
+	u64 ignore_bits;
+
+	if (efer_offset < 0)
+		return;
+	/*
+	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
+	 * outside long mode
+	 */
+	ignore_bits = EFER_NX | EFER_SCE;
+#ifdef CONFIG_X86_64
+	ignore_bits |= EFER_LMA | EFER_LME;
+	/* SCE is meaningful only in long mode on Intel */
+	if (guest_efer & EFER_LMA)
+		ignore_bits &= ~(u64)EFER_SCE;
+#endif
+	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
+		return;
 
-	trans_efer = vmx->host_msrs[efer_offset].data;
-	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
-	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
-	wrmsrl(MSR_EFER, trans_efer);
+	vmx->host_state.guest_efer_loaded = 1;
+	guest_efer &= ~ignore_bits;
+	guest_efer |= host_efer & ignore_bits;
+	wrmsrl(MSR_EFER, guest_efer);
 	vmx->vcpu.stat.efer_reload++;
 }
 
+static void reload_host_efer(struct vcpu_vmx *vmx)
+{
+	if (vmx->host_state.guest_efer_loaded) {
+		vmx->host_state.guest_efer_loaded = 0;
+		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+	}
+}
+
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -406,8 +419,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vmx))
-		load_transition_efer(vmx);
+	load_transition_efer(vmx);
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -436,8 +448,7 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 		reload_tss();
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vmx))
-		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+	reload_host_efer(vmx);
 }
 
 /*
@@ -727,8 +738,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 #ifdef CONFIG_X86_64
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
-		if (vmx->host_state.loaded)
+		if (vmx->host_state.loaded) {
+			reload_host_efer(vmx);
 			load_transition_efer(vmx);
+		}
 		break;
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
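A worked example of what the change saves: on a typical 64-bit host (SCE, LME, LMA and NX all set) running a guest still in 32-bit mode (EFER = 0), the old EFER_SAVE_RESTORE_BITS test saw the SCE mismatch and reloaded EFER on every world switch; with SCE ignored outside long mode, the significant bits match and the wrmsr is skipped. The standalone program below checks that arithmetic (userspace C using the architectural EFER bit positions; an illustration only, not code from the patch):

#include <stdio.h>
#include <stdint.h>

/* Architectural EFER bit positions (AMD64 / Intel 64). */
#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)
#define EFER_NX  (1ULL << 11)

static uint64_t efer_ignore_bits(uint64_t guest_efer)
{
	uint64_t ignore_bits = EFER_NX | EFER_SCE;

	/* 64-bit host assumed: hardware switches LMA/LME, as in the patch. */
	ignore_bits |= EFER_LMA | EFER_LME;
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~EFER_SCE;
	return ignore_bits;
}

int main(void)
{
	uint64_t host_efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
	uint64_t guest32_efer = 0;                      /* 32-bit guest */
	uint64_t guest64_efer = EFER_LME | EFER_LMA;    /* long-mode guest, SCE off */
	uint64_t ignore;

	ignore = efer_ignore_bits(guest32_efer);
	printf("32-bit guest, reload needed: %d\n",
	       (guest32_efer & ~ignore) != (host_efer & ~ignore));  /* 0 */

	ignore = efer_ignore_bits(guest64_efer);
	printf("long-mode guest, reload needed: %d\n",
	       (guest64_efer & ~ignore) != (host_efer & ~ignore));  /* 1: SCE differs */
	return 0;
}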