aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-04-19 07:28:44 -0400
committerAvi Kivity <avi@qumranet.com>2007-05-03 03:52:30 -0400
commit4d56c8a787aefb2e3fc4ac4be966db96c14d1ad8 (patch)
tree99db1bc065832b2685bd5edb75cd7c1019011ead /drivers
parent35cc7f971188366f5a5c0d5da1456bb38cef5da9 (diff)
KVM: VMX: Only save/restore MSR_K6_STAR if necessary
Intel hosts only support syscall/sysret in long mode (and only if efer.sce is enabled), so only reload the related MSR_K6_STAR if the guest will actually be able to use it. This reduces vmexit cost by about 500 cycles (6400 -> 5870) on my setup. Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/kvm/vmx.c16
1 files changed, 16 insertions, 0 deletions
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b61d4dd804e3..37537af126d1 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -69,6 +69,10 @@ static struct kvm_vmx_segment_field {
69 VMX_SEGMENT_FIELD(LDTR), 69 VMX_SEGMENT_FIELD(LDTR),
70}; 70};
71 71
72/*
73 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
74 * away by decrementing the array size.
75 */
72static const u32 vmx_msr_index[] = { 76static const u32 vmx_msr_index[] = {
73#ifdef CONFIG_X86_64 77#ifdef CONFIG_X86_64
74 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, 78 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
@@ -323,6 +327,18 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
323 nr_skip = NR_64BIT_MSRS; 327 nr_skip = NR_64BIT_MSRS;
324 nr_good_msrs = vcpu->nmsrs - nr_skip; 328 nr_good_msrs = vcpu->nmsrs - nr_skip;
325 329
330 /*
331 * MSR_K6_STAR is only needed on long mode guests, and only
332 * if efer.sce is enabled.
333 */
334 if (find_msr_entry(vcpu, MSR_K6_STAR)) {
335 --nr_good_msrs;
336#ifdef CONFIG_X86_64
337 if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
338 ++nr_good_msrs;
339#endif
340 }
341
326 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR, 342 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
327 virt_to_phys(vcpu->guest_msrs + nr_skip)); 343 virt_to_phys(vcpu->guest_msrs + nr_skip));
328 vmcs_writel(VM_EXIT_MSR_STORE_ADDR, 344 vmcs_writel(VM_EXIT_MSR_STORE_ADDR,