diff options
author | Avi Kivity <avi@qumranet.com> | 2007-05-02 09:54:03 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-07-16 05:05:39 -0400 |
commit | 33ed6329210f3ad0638306bfa46cd3aaf5a5f929 (patch) | |
tree | 6b5b4570641fe071b3544ded46a223a11e9f338f /drivers/kvm/vmx.c | |
parent | 7494c0ccbb8fa0903bcb1ced89cc2b79c3624974 (diff) |
KVM: Fix potential guest state leak into host
The lightweight vmexit path avoids saving and reloading certain host
state. However, in certain cases lightweight vmexit handling can call schedule(),
which requires reloading the host state.
So we store the host state in the vcpu structure, and reload it when we
relinquish the vcpu.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r-- | drivers/kvm/vmx.c | 160 |
1 file changed, 89 insertions(+), 71 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index 49cadd31120b..677b38c4444a 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c | |||
@@ -237,6 +237,93 @@ static void vmcs_set_bits(unsigned long field, u32 mask) | |||
237 | vmcs_writel(field, vmcs_readl(field) | mask); | 237 | vmcs_writel(field, vmcs_readl(field) | mask); |
238 | } | 238 | } |
239 | 239 | ||
240 | static void reload_tss(void) | ||
241 | { | ||
242 | #ifndef CONFIG_X86_64 | ||
243 | |||
244 | /* | ||
245 | * VT restores TR but not its size. Useless. | ||
246 | */ | ||
247 | struct descriptor_table gdt; | ||
248 | struct segment_descriptor *descs; | ||
249 | |||
250 | get_gdt(&gdt); | ||
251 | descs = (void *)gdt.base; | ||
252 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | ||
253 | load_TR_desc(); | ||
254 | #endif | ||
255 | } | ||
256 | |||
257 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | ||
258 | { | ||
259 | struct vmx_host_state *hs = &vcpu->vmx_host_state; | ||
260 | |||
261 | if (hs->loaded) | ||
262 | return; | ||
263 | |||
264 | hs->loaded = 1; | ||
265 | /* | ||
266 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | ||
267 | * allow segment selectors with cpl > 0 or ti == 1. | ||
268 | */ | ||
269 | hs->ldt_sel = read_ldt(); | ||
270 | hs->fs_gs_ldt_reload_needed = hs->ldt_sel; | ||
271 | hs->fs_sel = read_fs(); | ||
272 | if (!(hs->fs_sel & 7)) | ||
273 | vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel); | ||
274 | else { | ||
275 | vmcs_write16(HOST_FS_SELECTOR, 0); | ||
276 | hs->fs_gs_ldt_reload_needed = 1; | ||
277 | } | ||
278 | hs->gs_sel = read_gs(); | ||
279 | if (!(hs->gs_sel & 7)) | ||
280 | vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel); | ||
281 | else { | ||
282 | vmcs_write16(HOST_GS_SELECTOR, 0); | ||
283 | hs->fs_gs_ldt_reload_needed = 1; | ||
284 | } | ||
285 | |||
286 | #ifdef CONFIG_X86_64 | ||
287 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | ||
288 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | ||
289 | #else | ||
290 | vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel)); | ||
291 | vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel)); | ||
292 | #endif | ||
293 | } | ||
294 | |||
295 | static void vmx_load_host_state(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | struct vmx_host_state *hs = &vcpu->vmx_host_state; | ||
298 | |||
299 | if (!hs->loaded) | ||
300 | return; | ||
301 | |||
302 | hs->loaded = 0; | ||
303 | if (hs->fs_gs_ldt_reload_needed) { | ||
304 | load_ldt(hs->ldt_sel); | ||
305 | load_fs(hs->fs_sel); | ||
306 | /* | ||
307 | * If we have to reload gs, we must take care to | ||
308 | * preserve our gs base. | ||
309 | */ | ||
310 | local_irq_disable(); | ||
311 | load_gs(hs->gs_sel); | ||
312 | #ifdef CONFIG_X86_64 | ||
313 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | ||
314 | #endif | ||
315 | local_irq_enable(); | ||
316 | |||
317 | reload_tss(); | ||
318 | } | ||
319 | #ifdef CONFIG_X86_64 | ||
320 | if (is_long_mode(vcpu)) { | ||
321 | save_msrs(vcpu->guest_msrs, NR_BAD_MSRS); | ||
322 | load_msrs(vcpu->host_msrs, NR_BAD_MSRS); | ||
323 | } | ||
324 | #endif | ||
325 | } | ||
326 | |||
240 | /* | 327 | /* |
241 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes | 328 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes |
242 | * vcpu mutex is already taken. | 329 | * vcpu mutex is already taken. |
@@ -283,6 +370,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu) | |||
283 | 370 | ||
284 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) | 371 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) |
285 | { | 372 | { |
373 | vmx_load_host_state(vcpu); | ||
286 | kvm_put_guest_fpu(vcpu); | 374 | kvm_put_guest_fpu(vcpu); |
287 | put_cpu(); | 375 | put_cpu(); |
288 | } | 376 | } |
@@ -397,23 +485,6 @@ static void guest_write_tsc(u64 guest_tsc) | |||
397 | vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); | 485 | vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); |
398 | } | 486 | } |
399 | 487 | ||
400 | static void reload_tss(void) | ||
401 | { | ||
402 | #ifndef CONFIG_X86_64 | ||
403 | |||
404 | /* | ||
405 | * VT restores TR but not its size. Useless. | ||
406 | */ | ||
407 | struct descriptor_table gdt; | ||
408 | struct segment_descriptor *descs; | ||
409 | |||
410 | get_gdt(&gdt); | ||
411 | descs = (void *)gdt.base; | ||
412 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | ||
413 | load_TR_desc(); | ||
414 | #endif | ||
415 | } | ||
416 | |||
417 | /* | 488 | /* |
418 | * Reads an msr value (of 'msr_index') into 'pdata'. | 489 | * Reads an msr value (of 'msr_index') into 'pdata'. |
419 | * Returns 0 on success, non-0 otherwise. | 490 | * Returns 0 on success, non-0 otherwise. |
@@ -1823,40 +1894,9 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, | |||
1823 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1894 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1824 | { | 1895 | { |
1825 | u8 fail; | 1896 | u8 fail; |
1826 | u16 fs_sel, gs_sel, ldt_sel; | ||
1827 | int fs_gs_ldt_reload_needed; | ||
1828 | int r; | 1897 | int r; |
1829 | 1898 | ||
1830 | preempted: | 1899 | preempted: |
1831 | /* | ||
1832 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | ||
1833 | * allow segment selectors with cpl > 0 or ti == 1. | ||
1834 | */ | ||
1835 | ldt_sel = read_ldt(); | ||
1836 | fs_gs_ldt_reload_needed = ldt_sel; | ||
1837 | fs_sel = read_fs(); | ||
1838 | if (!(fs_sel & 7)) | ||
1839 | vmcs_write16(HOST_FS_SELECTOR, fs_sel); | ||
1840 | else { | ||
1841 | vmcs_write16(HOST_FS_SELECTOR, 0); | ||
1842 | fs_gs_ldt_reload_needed = 1; | ||
1843 | } | ||
1844 | gs_sel = read_gs(); | ||
1845 | if (!(gs_sel & 7)) | ||
1846 | vmcs_write16(HOST_GS_SELECTOR, gs_sel); | ||
1847 | else { | ||
1848 | vmcs_write16(HOST_GS_SELECTOR, 0); | ||
1849 | fs_gs_ldt_reload_needed = 1; | ||
1850 | } | ||
1851 | |||
1852 | #ifdef CONFIG_X86_64 | ||
1853 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | ||
1854 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | ||
1855 | #else | ||
1856 | vmcs_writel(HOST_FS_BASE, segment_base(fs_sel)); | ||
1857 | vmcs_writel(HOST_GS_BASE, segment_base(gs_sel)); | ||
1858 | #endif | ||
1859 | |||
1860 | if (!vcpu->mmio_read_completed) | 1900 | if (!vcpu->mmio_read_completed) |
1861 | do_interrupt_requests(vcpu, kvm_run); | 1901 | do_interrupt_requests(vcpu, kvm_run); |
1862 | 1902 | ||
@@ -1871,6 +1911,7 @@ preempted: | |||
1871 | #endif | 1911 | #endif |
1872 | 1912 | ||
1873 | again: | 1913 | again: |
1914 | vmx_save_host_state(vcpu); | ||
1874 | kvm_load_guest_fpu(vcpu); | 1915 | kvm_load_guest_fpu(vcpu); |
1875 | 1916 | ||
1876 | /* | 1917 | /* |
@@ -2040,29 +2081,6 @@ again: | |||
2040 | } | 2081 | } |
2041 | 2082 | ||
2042 | out: | 2083 | out: |
2043 | if (fs_gs_ldt_reload_needed) { | ||
2044 | load_ldt(ldt_sel); | ||
2045 | load_fs(fs_sel); | ||
2046 | /* | ||
2047 | * If we have to reload gs, we must take care to | ||
2048 | * preserve our gs base. | ||
2049 | */ | ||
2050 | local_irq_disable(); | ||
2051 | load_gs(gs_sel); | ||
2052 | #ifdef CONFIG_X86_64 | ||
2053 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | ||
2054 | #endif | ||
2055 | local_irq_enable(); | ||
2056 | |||
2057 | reload_tss(); | ||
2058 | } | ||
2059 | #ifdef CONFIG_X86_64 | ||
2060 | if (is_long_mode(vcpu)) { | ||
2061 | save_msrs(vcpu->guest_msrs, NR_BAD_MSRS); | ||
2062 | load_msrs(vcpu->host_msrs, NR_BAD_MSRS); | ||
2063 | } | ||
2064 | #endif | ||
2065 | |||
2066 | if (r > 0) { | 2084 | if (r > 0) { |
2067 | kvm_resched(vcpu); | 2085 | kvm_resched(vcpu); |
2068 | goto preempted; | 2086 | goto preempted; |