Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 378 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 229 insertions(+), 149 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index edca080407a5..859a01a07dbf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
 #include <linux/moduleparam.h>
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
+#include <linux/tboot.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -98,6 +99,8 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
+#define NR_AUTOLOAD_MSRS 1
+
 struct vmcs {
 	u32 revision_id;
 	u32 abort;
@@ -125,6 +128,11 @@ struct vcpu_vmx {
 	u64 msr_guest_kernel_gs_base;
 #endif
 	struct vmcs *vmcs;
+	struct msr_autoload {
+		unsigned nr;
+		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+	} msr_autoload;
 	struct {
 		int loaded;
 		u16 fs_sel, gs_sel, ldt_sel;
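
Note: the new msr_autoload arrays back the VMX MSR-load areas: on VM entry the CPU loads every MSR listed in the guest array, and on VM exit it loads every MSR listed in the host array. Entries are always added to both arrays together, so a single nr is kept and written to both VMCS count fields. For reference, each slot is the 16-byte record format the hardware consumes; this mirrors struct vmx_msr_entry from asm/vmx.h, though treat the exact declaration below as illustrative:

struct vmx_msr_entry {
	u32 index;	/* MSR number */
	u32 reserved;	/* must be zero */
	u64 value;	/* value loaded on VM entry / VM exit */
} __aligned(16);
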
@@ -234,56 +242,56 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
 		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
 		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
 		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
 }
 
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
 {
 	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
 }
 
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl &
 		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -303,80 +311,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
 
 static inline bool cpu_has_vmx_ept_execute_only(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_uncacheable(void)
 {
-	return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+	return vmx_capability.ept & VMX_EPTP_UC_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_writeback(void)
 {
-	return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+	return vmx_capability.ept & VMX_EPTP_WB_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_2m_page(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_1g_page(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
 }
 
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
 }
 
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
 }
 
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
 		SECONDARY_EXEC_ENABLE_EPT;
 }
 
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
 		SECONDARY_EXEC_UNRESTRICTED_GUEST;
 }
 
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
 		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }
 
-static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
 	return flexpriority_enabled && irqchip_in_kernel(kvm);
 }
 
-static inline int cpu_has_vmx_vpid(void)
+static inline bool cpu_has_vmx_vpid(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
 		SECONDARY_EXEC_ENABLE_VPID;
 }
 
-static inline int cpu_has_vmx_rdtscp(void)
+static inline bool cpu_has_vmx_rdtscp(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
 		SECONDARY_EXEC_RDTSCP;
 }
 
-static inline int cpu_has_virtual_nmis(void)
+static inline bool cpu_has_virtual_nmis(void)
 {
 	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
 }
@@ -595,16 +603,56 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+	unsigned i;
+	struct msr_autoload *m = &vmx->msr_autoload;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			break;
+
+	if (i == m->nr)
+		return;
+	--m->nr;
+	m->guest[i] = m->guest[m->nr];
+	m->host[i] = m->host[m->nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+				  u64 guest_val, u64 host_val)
+{
+	unsigned i;
+	struct msr_autoload *m = &vmx->msr_autoload;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			break;
+
+	if (i == m->nr) {
+		++m->nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	}
+
+	m->guest[i].index = msr;
+	m->guest[i].value = guest_val;
+	m->host[i].index = msr;
+	m->host[i].value = host_val;
+}
+
 static void reload_tss(void)
 {
 	/*
 	 * VT restores TR but not its size.  Useless.
 	 */
-	struct descriptor_table gdt;
+	struct desc_ptr gdt;
 	struct desc_struct *descs;
 
-	kvm_get_gdt(&gdt);
-	descs = (void *)gdt.base;
+	native_store_gdt(&gdt);
+	descs = (void *)gdt.address;
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
 	load_TR_desc();
 }
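
Note: clear_atomic_switch_msr() removes an entry by overwriting it with the last element and shrinking the count; the hardware does not care about ordering within the load area, so swap-with-last deletion keeps removal O(1). Also worth noting: add_atomic_switch_msr() has no capacity check, which is safe only while MSR_EFER is the sole user and NR_AUTOLOAD_MSRS is 1. A minimal standalone user-space model of the same deletion pattern (names hypothetical):

#include <stdio.h>

struct entry { unsigned index; unsigned long long value; };

static struct entry tbl[4];
static unsigned nr;

/* Remove the entry for 'index' by swapping in the last element. */
static void remove_entry(unsigned index)
{
	unsigned i;

	for (i = 0; i < nr; ++i)
		if (tbl[i].index == index)
			break;
	if (i == nr)		/* not present: nothing to do */
		return;
	tbl[i] = tbl[--nr];	/* O(1) unordered delete */
}

int main(void)
{
	tbl[nr++] = (struct entry){ .index = 1, .value = 10 };
	tbl[nr++] = (struct entry){ .index = 2, .value = 20 };
	tbl[nr++] = (struct entry){ .index = 3, .value = 30 };
	remove_entry(1);
	for (unsigned i = 0; i < nr; ++i)
		printf("%u -> %llu\n", tbl[i].index, tbl[i].value);
	return 0;	/* prints "3 -> 30" then "2 -> 20" */
}
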
@@ -631,9 +679,57 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	guest_efer |= host_efer & ignore_bits;
 	vmx->guest_msrs[efer_offset].data = guest_efer;
 	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+	clear_atomic_switch_msr(vmx, MSR_EFER);
+	/* On ept, can't emulate nx, and must switch nx atomically */
+	if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+		guest_efer = vmx->vcpu.arch.efer;
+		if (!(guest_efer & EFER_LMA))
+			guest_efer &= ~EFER_LME;
+		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+		return false;
+	}
+
 	return true;
 }
 
+static unsigned long segment_base(u16 selector)
+{
+	struct desc_ptr gdt;
+	struct desc_struct *d;
+	unsigned long table_base;
+	unsigned long v;
+
+	if (!(selector & ~3))
+		return 0;
+
+	native_store_gdt(&gdt);
+	table_base = gdt.address;
+
+	if (selector & 4) {		/* from ldt */
+		u16 ldt_selector = kvm_read_ldt();
+
+		if (!(ldt_selector & ~3))
+			return 0;
+
+		table_base = segment_base(ldt_selector);
+	}
+	d = (struct desc_struct *)(table_base + (selector & ~7));
+	v = get_desc_base(d);
+#ifdef CONFIG_X86_64
+	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+#endif
+	return v;
+}
+
+static inline unsigned long kvm_read_tr_base(void)
+{
+	u16 tr;
+	asm("str %0" : "=g"(tr));
+	return segment_base(tr);
+}
+
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
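
Note: as the in-diff comment says, with EPT enabled guest page faults are not trapped, so a guest/host mismatch in EFER.NX cannot be papered over in software; when the two disagree on NX, the whole EFER is switched through the atomic autoload area instead of the cheaper shared-MSR path. The LMA/LME adjustment matches the architectural rule that a guest not in long mode must not enter with LME visible. A standalone sketch of just that computation (bit positions from the architectural EFER layout):

#include <stdint.h>
#include <stdio.h>

#define EFER_LME (1ULL << 8)	/* long mode enable */
#define EFER_LMA (1ULL << 10)	/* long mode active */
#define EFER_NX  (1ULL << 11)	/* no-execute enable */

/* Returns the EFER value to load on VM entry, given the guest's view. */
static uint64_t entry_efer(uint64_t guest_efer)
{
	if (!(guest_efer & EFER_LMA))
		guest_efer &= ~EFER_LME;	/* not in long mode: hide LME */
	return guest_efer;
}

int main(void)
{
	/* 32-bit guest that set LME but has not activated long mode yet */
	uint64_t e = entry_efer(EFER_LME | EFER_NX);
	printf("%#llx\n", (unsigned long long)e);	/* 0x800: NX only */
	return 0;
}
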
@@ -758,7 +854,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (vcpu->cpu != cpu) {
-		struct descriptor_table dt;
+		struct desc_ptr dt;
 		unsigned long sysenter_esp;
 
 		vcpu->cpu = cpu;
@@ -767,8 +863,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * processors.
 		 */
 		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-		kvm_get_gdt(&dt);
-		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+		native_store_gdt(&dt);
+		vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -846,9 +942,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	int ret = 0;
 
 	if (interruptibility & GUEST_INTR_STATE_STI)
-		ret |= X86_SHADOW_INT_STI;
+		ret |= KVM_X86_SHADOW_INT_STI;
 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-		ret |= X86_SHADOW_INT_MOV_SS;
+		ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
 	return ret & mask;
 }
@@ -860,9 +956,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 
 	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
 
-	if (mask & X86_SHADOW_INT_MOV_SS)
+	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
 		interruptibility |= GUEST_INTR_STATE_MOV_SS;
-	if (mask & X86_SHADOW_INT_STI)
+	else if (mask & KVM_X86_SHADOW_INT_STI)
 		interruptibility |= GUEST_INTR_STATE_STI;
 
 	if ((interruptibility != interruptibility_old))
@@ -882,7 +978,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code)
+				bool has_error_code, u32 error_code,
+				bool reinject)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info = nr | INTR_INFO_VALID_MASK;
@@ -1176,9 +1273,16 @@ static __init int vmx_disabled_by_bios(void)
 	u64 msr;
 
 	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
-	return (msr & (FEATURE_CONTROL_LOCKED |
-		       FEATURE_CONTROL_VMXON_ENABLED))
-	    == FEATURE_CONTROL_LOCKED;
+	if (msr & FEATURE_CONTROL_LOCKED) {
+		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+			&& tboot_enabled())
+			return 1;
+		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+			&& !tboot_enabled())
+			return 1;
+	}
+
+	return 0;
 	/* locked but not enabled */
 }
 
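
Note: IA32_FEATURE_CONTROL carries separate enable bits for VMXON inside and outside SMX operation, and a tboot (Intel TXT) launched kernel runs inside SMX, so the "disabled by BIOS" test must check the bit matching the current mode. A user-space model of the predicate (bit positions per the SDM; the helper name is made up):

#include <stdbool.h>
#include <stdint.h>

#define FC_LOCKED	(1ULL << 0)	/* MSR locked by BIOS/firmware */
#define FC_VMXON_SMX	(1ULL << 1)	/* VMXON allowed inside SMX */
#define FC_VMXON_NORMAL	(1ULL << 2)	/* VMXON allowed outside SMX */

/* True if BIOS locked the MSR without enabling VMX for our mode. */
static bool vmx_disabled(uint64_t msr, bool in_smx)
{
	if (!(msr & FC_LOCKED))
		return false;	/* unlocked: hardware_enable() can set it */
	return !(msr & (in_smx ? FC_VMXON_SMX : FC_VMXON_NORMAL));
}
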
@@ -1186,21 +1290,23 @@ static int hardware_enable(void *garbage)
 {
 	int cpu = raw_smp_processor_id();
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
-	u64 old;
+	u64 old, test_bits;
 
 	if (read_cr4() & X86_CR4_VMXE)
 		return -EBUSY;
 
 	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
 	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-	if ((old & (FEATURE_CONTROL_LOCKED |
-		    FEATURE_CONTROL_VMXON_ENABLED))
-	    != (FEATURE_CONTROL_LOCKED |
-		FEATURE_CONTROL_VMXON_ENABLED))
+
+	test_bits = FEATURE_CONTROL_LOCKED;
+	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+	if (tboot_enabled())
+		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+	if ((old & test_bits) != test_bits) {
 		/* enable and lock */
-		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
-		       FEATURE_CONTROL_LOCKED |
-		       FEATURE_CONTROL_VMXON_ENABLED);
+		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+	}
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
 	asm volatile (ASM_VMX_VMXON_RAX
 		      : : "a"(&phys_addr), "m"(phys_addr)
@@ -1521,7 +1627,7 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 	struct kvm_memslots *slots;
 	gfn_t base_gfn;
 
-	slots = rcu_dereference(kvm->memslots);
+	slots = kvm_memslots(kvm);
 	base_gfn = kvm->memslots->memslots[0].base_gfn +
 		 kvm->memslots->memslots[0].npages - 3;
 	return base_gfn << PAGE_SHIFT;
@@ -1649,6 +1755,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
 		     & ~VM_ENTRY_IA32E_MODE);
+	vmx_set_efer(vcpu, vcpu->arch.efer);
 }
 
 #endif
@@ -1934,28 +2041,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 	*l = (ar >> 13) & 1;
 }
 
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-	dt->base = vmcs_readl(GUEST_IDTR_BASE);
+	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+	dt->address = vmcs_readl(GUEST_IDTR_BASE);
 }
 
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-	vmcs_writel(GUEST_IDTR_BASE, dt->base);
+	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+	vmcs_writel(GUEST_IDTR_BASE, dt->address);
 }
 
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-	dt->base = vmcs_readl(GUEST_GDTR_BASE);
+	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+	dt->address = vmcs_readl(GUEST_GDTR_BASE);
 }
 
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-	vmcs_writel(GUEST_GDTR_BASE, dt->base);
+	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+	vmcs_writel(GUEST_GDTR_BASE, dt->address);
 }
 
 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2296,6 +2403,16 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
 	spin_unlock(&vmx_vpid_lock);
 }
 
+static void free_vpid(struct vcpu_vmx *vmx)
+{
+	if (!enable_vpid)
+		return;
+	spin_lock(&vmx_vpid_lock);
+	if (vmx->vpid != 0)
+		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
+	spin_unlock(&vmx_vpid_lock);
+}
+
 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 {
 	int f = sizeof(unsigned long);
@@ -2334,7 +2451,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	u32 junk;
 	u64 host_pat, tsc_this, tsc_base;
 	unsigned long a;
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 	int i;
 	unsigned long kvm_vmx_return;
 	u32 exec_control;
@@ -2415,14 +2532,16 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
-	kvm_get_idt(&dt);
-	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+	native_store_idt(&dt);
+	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
 	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
 	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
 
 	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
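
Note: the load-area physical addresses are programmed once here, while both counts start at zero and only ever change through the add/clear helpers. A sketch of the resulting invariant and the run-time usage, built from functions that appear earlier in this diff (not literal kernel code):

/*
 * Setup-time invariant:
 *   VM_ENTRY_MSR_LOAD_ADDR -> __pa(vmx->msr_autoload.guest)  (fixed)
 *   VM_EXIT_MSR_LOAD_ADDR  -> __pa(vmx->msr_autoload.host)   (fixed)
 *   VM_ENTRY_MSR_LOAD_COUNT == VM_EXIT_MSR_LOAD_COUNT == msr_autoload.nr
 *
 * so switching EFER atomically later is just:
 */
add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
/* ...and returning to the lightweight shared-MSR path is: */
clear_atomic_switch_msr(vmx, MSR_EFER);
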
@@ -2947,22 +3066,20 @@ static int handle_io(struct kvm_vcpu *vcpu)
 	int size, in, string;
 	unsigned port;
 
-	++vcpu->stat.io_exits;
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	string = (exit_qualification & 16) != 0;
+	in = (exit_qualification & 8) != 0;
 
-	if (string) {
-		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
-			return 0;
-		return 1;
-	}
+	++vcpu->stat.io_exits;
 
-	size = (exit_qualification & 7) + 1;
-	in = (exit_qualification & 8) != 0;
-	port = exit_qualification >> 16;
+	if (string || in)
+		return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
 
+	port = exit_qualification >> 16;
+	size = (exit_qualification & 7) + 1;
 	skip_emulated_instruction(vcpu);
-	return kvm_emulate_pio(vcpu, in, size, port);
+
+	return kvm_fast_pio_out(vcpu, size, port);
 }
 
 static void
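
Note: direction is now decoded up front so that IN, which must deposit a result in the guest's RAX after userspace completes the I/O, falls back to the full emulator alongside string I/O, while the simple OUT case takes the new kvm_fast_pio_out() path. A standalone model of the exit-qualification fields used above, per the SDM's "Exit Qualification for I/O Instructions":

#include <stdint.h>
#include <stdio.h>

struct io_exit {
	int size;	/* access width in bytes: 1, 2 or 4 */
	int in;		/* 1 = IN/INS, 0 = OUT/OUTS */
	int string;	/* 1 = INS/OUTS */
	unsigned port;	/* I/O port number */
};

static struct io_exit decode_io(uint64_t q)
{
	struct io_exit e;

	e.size   = (q & 7) + 1;		/* bits 2:0 hold width - 1 */
	e.in     = (q >> 3) & 1;	/* bit 3: direction */
	e.string = (q >> 4) & 1;	/* bit 4: string instruction */
	e.port   = (q >> 16) & 0xffff;	/* bits 31:16: port */
	return e;
}

int main(void)
{
	/* e.g. "outb %al, $0x70": width 1, OUT, non-string, port 0x70 */
	struct io_exit e = decode_io(0x70ULL << 16);
	printf("size=%d in=%d string=%d port=%#x\n",
	       e.size, e.in, e.string, e.port);
	return 0;
}
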
@@ -3053,19 +3170,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
-	if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return -1;
-	}
-	return 0;
-}
-
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
-	unsigned long val;
 	int dr, reg;
 
 	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3100,67 +3207,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
 	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
 	if (exit_qualification & TYPE_MOV_FROM_DR) {
-		switch (dr) {
-		case 0 ... 3:
-			val = vcpu->arch.db[dr];
-			break;
-		case 4:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		case 6:
-			val = vcpu->arch.dr6;
-			break;
-		case 5:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		default: /* 7 */
-			val = vcpu->arch.dr7;
-			break;
-		}
-		kvm_register_write(vcpu, reg, val);
-	} else {
-		val = vcpu->arch.regs[reg];
-		switch (dr) {
-		case 0 ... 3:
-			vcpu->arch.db[dr] = val;
-			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-				vcpu->arch.eff_db[dr] = val;
-			break;
-		case 4:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		case 6:
-			if (val & 0xffffffff00000000ULL) {
-				kvm_inject_gp(vcpu, 0);
-				return 1;
-			}
-			vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
-			break;
-		case 5:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		default: /* 7 */
-			if (val & 0xffffffff00000000ULL) {
-				kvm_inject_gp(vcpu, 0);
-				return 1;
-			}
-			vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-				vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-				vcpu->arch.switch_db_regs =
-					(val & DR7_BP_EN_MASK);
-			}
-			break;
-		}
-	}
+		unsigned long val;
+		if (!kvm_get_dr(vcpu, dr, &val))
+			kvm_register_write(vcpu, reg, val);
+	} else
+		kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	vmcs_writel(GUEST_DR7, val);
+}
+
 static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
 	kvm_emulate_cpuid(vcpu);
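
Note: the open-coded DR4/DR5 aliasing checks (which raise #UD when CR4.DE is set), the reserved-upper-bit checks on DR6/DR7 writes, and the breakpoint bookkeeping all move into kvm_get_dr()/kvm_set_dr() in common x86 code, so VMX and SVM can no longer drift apart; the new vmx_set_dr7() hook is what common code uses to push a validated DR7 into the VMCS. A condensed standalone model of the validation that moved (constants per the architectural debug-register layout; helper name made up):

#include <stdbool.h>
#include <stdint.h>

#define DR_UPPER_RESERVED 0xffffffff00000000ULL

/*
 * Returns false (and sets *fault) when a MOV to the given debug
 * register must fault instead of completing.
 */
static bool dr_write_ok(int dr, uint64_t val, bool cr4_de, const char **fault)
{
	if ((dr == 4 || dr == 5) && cr4_de) {
		*fault = "#UD";	/* alias DR6/DR7 only when CR4.DE = 0 */
		return false;
	}
	if (dr >= 4 && (val & DR_UPPER_RESERVED)) {
		*fault = "#GP";	/* DR6/DR7 high 32 bits are reserved */
		return false;
	}
	return true;
}
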
@@ -3292,6 +3352,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification;
+	bool has_error_code = false;
+	u32 error_code = 0;
 	u16 tss_selector;
 	int reason, type, idt_v;
 
@@ -3314,6 +3376,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 			kvm_clear_interrupt_queue(vcpu);
 			break;
 		case INTR_TYPE_HARD_EXCEPTION:
+			if (vmx->idt_vectoring_info &
+			    VECTORING_INFO_DELIVER_CODE_MASK) {
+				has_error_code = true;
+				error_code =
+					vmcs_read32(IDT_VECTORING_ERROR_CODE);
+			}
+			/* fall through */
 		case INTR_TYPE_SOFT_EXCEPTION:
 			kvm_clear_exception_queue(vcpu);
 			break;
@@ -3328,8 +3397,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 		       type != INTR_TYPE_NMI_INTR))
 		skip_emulated_instruction(vcpu);
 
-	if (!kvm_task_switch(vcpu, tss_selector, reason))
+	if (kvm_task_switch(vcpu, tss_selector, reason,
+			    has_error_code, error_code) == EMULATE_FAIL) {
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		vcpu->run->internal.ndata = 0;
 		return 0;
+	}
 
 	/* clear all local breakpoint enable flags */
 	vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
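
Note: when a task switch is triggered by a hard exception that was being delivered with an error code (signalled by the deliver-error-code bit in the IDT-vectoring info field), that code must be handed to the emulator so it can be pushed on the new task's stack; kvm_task_switch() grew the two extra parameters for exactly this, and an EMULATE_FAIL now surfaces as KVM_EXIT_INTERNAL_ERROR instead of silently returning. A standalone model of the IDT-vectoring decode used above (field layout per the SDM):

#include <stdbool.h>
#include <stdint.h>

#define VECTORING_VALID		(1u << 31)	/* info field is valid */
#define VECTORING_DELIVER_CODE	(1u << 11)	/* error code pending */
#define VECTORING_TYPE(x)	(((x) >> 8) & 7)
#define VECTORING_VECTOR(x)	((x) & 0xff)

#define TYPE_HARD_EXCEPTION	3

/* True when a pending error code must accompany the task switch. */
static bool needs_error_code(uint32_t idt_vectoring_info)
{
	return (idt_vectoring_info & VECTORING_VALID) &&
	       VECTORING_TYPE(idt_vectoring_info) == TYPE_HARD_EXCEPTION &&
	       (idt_vectoring_info & VECTORING_DELIVER_CODE);
}
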
@@ -3574,7 +3648,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+	trace_kvm_exit(exit_reason, vcpu);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -3923,10 +3997,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	spin_lock(&vmx_vpid_lock);
-	if (vmx->vpid != 0)
-		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
-	spin_unlock(&vmx_vpid_lock);
+	free_vpid(vmx);
 	vmx_free_vmcs(vcpu);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
@@ -3988,6 +4059,7 @@ free_msrs:
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
+	free_vpid(vmx);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 	return ERR_PTR(err);
 }
@@ -4118,6 +4190,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -4154,6 +4230,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
+	.set_dr7 = vmx_set_dr7,
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
@@ -4189,6 +4266,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cpuid_update = vmx_cpuid_update,
 
 	.rdtscp_supported = vmx_rdtscp_supported,
+
+	.set_supported_cpuid = vmx_set_supported_cpuid,
 };
 
 static int __init vmx_init(void)
4194 | static int __init vmx_init(void) | 4273 | static int __init vmx_init(void) |
@@ -4236,7 +4315,8 @@ static int __init vmx_init(void) | |||
4236 | 4315 | ||
4237 | set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ | 4316 | set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ |
4238 | 4317 | ||
4239 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); | 4318 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
4319 | __alignof__(struct vcpu_vmx), THIS_MODULE); | ||
4240 | if (r) | 4320 | if (r) |
4241 | goto out3; | 4321 | goto out3; |
4242 | 4322 | ||