Diffstat (limited to 'drivers/kvm')

 -rw-r--r--  drivers/kvm/kvm.h      |   1
 -rw-r--r--  drivers/kvm/kvm_main.c |   1
 -rw-r--r--  drivers/kvm/vmx.c      | 105
 3 files changed, 62 insertions, 45 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1fafa..7facebd1911d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -252,6 +252,7 @@ struct kvm_stat {
 	u32 halt_exits;
 	u32 request_irq_exits;
 	u32 irq_exits;
+	u32 light_exits;
 };
 
 struct kvm_vcpu {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07adb04e..7d682586423b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -72,6 +72,7 @@ static struct kvm_stats_debugfs_item {
 	{ "halt_exits", STAT_OFFSET(halt_exits) },
 	{ "request_irq", STAT_OFFSET(request_irq_exits) },
 	{ "irq_exits", STAT_OFFSET(irq_exits) },
+	{ "light_exits", STAT_OFFSET(light_exits) },
 	{ NULL }
 };
 
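With the debugfs entry above in place, the new counter appears alongside the existing KVM statistics; assuming debugfs is mounted at the usual /sys/kernel/debug, it should be readable as /sys/kernel/debug/kvm/light_exits and counts guest exits that take the lightweight re-entry path added to vmx.c below.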
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 52bd5f079df1..84ce0c0930a0 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -483,6 +483,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
+	case MSR_LSTAR:
+	case MSR_SYSCALL_MASK:
+		msr = find_msr_entry(vcpu, msr_index);
+		if (msr)
+			msr->data = data;
+		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1820,7 +1827,7 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int fs_gs_ldt_reload_needed;
 	int r;
 
-again:
+preempted:
 	/*
 	 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
@@ -1851,13 +1858,6 @@ again:
 	if (vcpu->guest_debug.enabled)
 		kvm_guest_debug_pre(vcpu);
 
-	kvm_load_guest_fpu(vcpu);
-
-	/*
-	 * Loading guest fpu may have cleared host cr0.ts
-	 */
-	vmcs_writel(HOST_CR0, read_cr0());
-
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
 		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
@@ -1865,6 +1865,14 @@ again:
 	}
 #endif
 
+again:
+	kvm_load_guest_fpu(vcpu);
+
+	/*
+	 * Loading guest fpu may have cleared host cr0.ts
+	 */
+	vmcs_writel(HOST_CR0, read_cr0());
+
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
@@ -1984,36 +1992,8 @@ again:
 		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
 	      : "cc", "memory" );
 
-	/*
-	 * Reload segment selectors ASAP. (it's needed for a functional
-	 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
-	 * relies on having 0 in %gs for the CPU PDA to work.)
-	 */
-	if (fs_gs_ldt_reload_needed) {
-		load_ldt(ldt_sel);
-		load_fs(fs_sel);
-		/*
-		 * If we have to reload gs, we must take care to
-		 * preserve our gs base.
-		 */
-		local_irq_disable();
-		load_gs(gs_sel);
-#ifdef CONFIG_X86_64
-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
-		local_irq_enable();
-
-		reload_tss();
-	}
 	++vcpu->stat.exits;
 
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
-
 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -2035,24 +2015,59 @@ again:
 		if (r > 0) {
 			/* Give scheduler a change to reschedule. */
 			if (signal_pending(current)) {
-				++vcpu->stat.signal_exits;
-				post_kvm_run_save(vcpu, kvm_run);
+				r = -EINTR;
 				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
+				++vcpu->stat.signal_exits;
+				goto out;
 			}
 
 			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-				++vcpu->stat.request_irq_exits;
-				post_kvm_run_save(vcpu, kvm_run);
+				r = -EINTR;
 				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
+				++vcpu->stat.request_irq_exits;
+				goto out;
+			}
+			if (!need_resched()) {
+				++vcpu->stat.light_exits;
+				goto again;
 			}
-
-			kvm_resched(vcpu);
-			goto again;
 		}
 	}
 
+out:
+	/*
+	 * Reload segment selectors ASAP. (it's needed for a functional
+	 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
+	 * relies on having 0 in %gs for the CPU PDA to work.)
+	 */
+	if (fs_gs_ldt_reload_needed) {
+		load_ldt(ldt_sel);
+		load_fs(fs_sel);
+		/*
+		 * If we have to reload gs, we must take care to
+		 * preserve our gs base.
+		 */
+		local_irq_disable();
+		load_gs(gs_sel);
+#ifdef CONFIG_X86_64
+		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+		local_irq_enable();
+
+		reload_tss();
+	}
+#ifdef CONFIG_X86_64
+	if (is_long_mode(vcpu)) {
+		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
+	}
+#endif
+
+	if (r > 0) {
+		kvm_resched(vcpu);
+		goto preempted;
+	}
+
 	post_kvm_run_save(vcpu, kvm_run);
 	return r;
 }
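For readers following the reworked control flow in vmx_vcpu_run(), the sketch below is a small, self-contained C model of the three labels the patch introduces (preempted, again, out). It is only an illustration of the structure, not kernel code: every helper in it (heavy_host_state_save(), light_guest_entry_prep(), and so on) is a made-up stand-in for the real host-state save/restore, guest entry, and exit handling. What it demonstrates is that the heavyweight host-state work now brackets a tight inner loop, and only a pending signal, an interrupt-window request from userspace, or need_resched() forces a trip back through the heavyweight path.

/*
 * Illustrative model of the reworked vmx_vcpu_run() control flow.
 * All helpers are hypothetical stand-ins, not the kernel API; only
 * the label structure (preempted / again / out) mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static long exits, light_exits;

static void heavy_host_state_save(void)       { puts("preempted: save host fs/gs/ldt, kernel_gs_base"); }
static void light_guest_entry_prep(void)      { puts("again:     load guest fpu, refresh HOST_CR0"); }
static void enter_guest_and_handle_exit(void) { puts("           vmlaunch/vmresume, handle the exit"); }
static void heavy_host_state_restore(void)    { puts("out:       reload segments, TSS, host MSRs"); }

static bool signal_or_irq_window_pending(void) { return false; }
static bool need_resched_now(void)             { return exits % 4 == 3; }

static int run_vcpu_model(int max_rounds)
{
	int r = 1;	/* >0 means "keep running the guest", as in kvm_handle_exit() */

preempted:
	heavy_host_state_save();	/* done once per heavyweight (re)entry */

again:
	light_guest_entry_prep();	/* the only per-exit work on the light path */
	enter_guest_and_handle_exit();
	++exits;

	if (r > 0) {
		if (signal_or_irq_window_pending()) {
			r = -1;			/* -EINTR in the real code */
			goto out;
		}
		if (!need_resched_now()) {
			++light_exits;		/* lightweight exit: skip the restore below */
			goto again;
		}
	}

out:
	heavy_host_state_restore();		/* reached only on the heavyweight path */
	if (r > 0 && --max_rounds > 0)
		goto preempted;			/* reschedule point, then redo the heavy setup */
	return r;
}

int main(void)
{
	run_vcpu_model(3);
	printf("exits=%ld light_exits=%ld\n", exits, light_exits);
	return 0;
}

In this model, light_exits simply counts how often the goto again branch is taken instead of the full out/preempted round trip, which is what the new statistic measures in the patch.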