Diffstat (limited to 'drivers/kvm/vmx.c')

-rw-r--r--  drivers/kvm/vmx.c  67
1 file changed, 46 insertions, 21 deletions
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index dc99191dbb4a..93e5bb2c40e3 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -42,6 +42,7 @@ static struct page *vmx_io_bitmap_b;
 #else
 #define HOST_IS_64 0
 #endif
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
 
 static struct vmcs_descriptor {
         int size;
@@ -85,6 +86,18 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+        return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+        int efer_offset = vcpu->msr_offset_efer;
+        return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+                msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
+
 static inline int is_page_fault(u32 intr_info)
 {
         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
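For illustration only (not part of the patch), a standalone C sketch of what these two helpers compute: both reduce an EFER image to its EFER_SCE bit, so the predicate fires only when host and guest disagree on SCE. The struct and the bit positions below are mocks of the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)   /* EFER.SCE: syscall enable (bit 0) */
#define EFER_LME (1ULL << 8)   /* EFER.LME: long mode enable (bit 8) */
#define EFER_SAVE_RESTORE_BITS ((uint64_t)EFER_SCE)

struct vmx_msr_entry { uint32_t index; uint64_t data; };  /* mocked */

static uint64_t msr_efer_save_restore_bits(struct vmx_msr_entry msr)
{
        return msr.data & EFER_SAVE_RESTORE_BITS;
}

int main(void)
{
        /* sample images: host has SCE set, guest has it clear */
        struct vmx_msr_entry host  = { .data = EFER_SCE | EFER_LME };
        struct vmx_msr_entry guest = { .data = EFER_LME };

        printf("need save/restore: %d\n",
               msr_efer_save_restore_bits(host) !=
               msr_efer_save_restore_bits(guest));      /* prints 1 */
        return 0;
}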
@@ -265,6 +278,19 @@ static void reload_tss(void)
 #endif
 }
 
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+        u64 trans_efer;
+        int efer_offset = vcpu->msr_offset_efer;
+
+        trans_efer = vcpu->host_msrs[efer_offset].data;
+        trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+        trans_efer |= msr_efer_save_restore_bits(
+                                vcpu->guest_msrs[efer_offset]);
+        wrmsrl(MSR_EFER, trans_efer);
+        vcpu->stat.efer_reload++;
+}
+
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
         struct vmx_host_state *hs = &vcpu->vmx_host_state;
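A worked example of the bit arithmetic (hypothetical register values; standalone mock, not kernel code): the transition EFER is the host value with only the SCE bit replaced by the guest's, so a guest that clears SCE still runs with the host's LME/LMA/NX intact.

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)
#define EFER_NX  (1ULL << 11)
#define EFER_SAVE_RESTORE_BITS ((uint64_t)EFER_SCE)

int main(void)
{
        uint64_t host  = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
        uint64_t guest = EFER_LME | EFER_LMA;           /* guest SCE clear */
        uint64_t trans = host;

        trans &= ~EFER_SAVE_RESTORE_BITS;               /* drop host's SCE */
        trans |= guest & EFER_SAVE_RESTORE_BITS;        /* adopt guest's SCE */
        printf("trans_efer = %#llx\n", (unsigned long long)trans);
        /* 0xd01 -> 0xd00: LME|LMA|NX kept from host, SCE taken from guest */
        return 0;
}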
@@ -308,6 +334,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
         }
 #endif
         load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+        if (msr_efer_need_save_restore(vcpu))
+                load_transition_efer(vcpu);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
@@ -336,6 +364,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
         }
         save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
         load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+        if (msr_efer_need_save_restore(vcpu))
+                load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
 }
 
 /*
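Note the asymmetry: the exit path restores a single array slot rather than replaying the whole MSR list. A minimal mock (hypothetical wrmsrl/load_msrs stand-ins; the MSR numbers are the architectural ones, the data values are samples) showing how the pointer arithmetic selects just the EFER entry:

#include <stdint.h>
#include <stdio.h>

struct vmx_msr_entry { uint32_t index; uint64_t data; };  /* mocked */

static void wrmsr_mock(uint32_t index, uint64_t data)   /* stands in for wrmsrl() */
{
        printf("wrmsr %#x <- %#llx\n", index, (unsigned long long)data);
}

static void load_msrs(struct vmx_msr_entry *e, int n)
{
        for (int i = 0; i < n; i++)
                wrmsr_mock(e[i].index, e[i].data);
}

int main(void)
{
        struct vmx_msr_entry host_msrs[] = {
                { 0xc0000081, 0x23001000000000ULL },    /* MSR_STAR, sample */
                { 0xc0000080, 0xd01 },                  /* MSR_EFER, sample */
        };
        int msr_offset_efer = 1;

        /* as in vmx_load_host_state(): touch only the EFER slot when the
         * save/restore bits differ, instead of reloading every MSR */
        load_msrs(host_msrs + msr_offset_efer, 1);
        return 0;
}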
@@ -477,11 +507,13 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-        int index, save_nmsrs;
+        int save_nmsrs;
 
         save_nmsrs = 0;
 #ifdef CONFIG_X86_64
         if (is_long_mode(vcpu)) {
+                int index;
+
                 index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
                 if (index >= 0)
                         move_msr_up(vcpu, index, save_nmsrs++);
@@ -509,22 +541,7 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
         vcpu->msr_offset_kernel_gs_base =
                 __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
 #endif
-        index = __find_msr_index(vcpu, MSR_EFER);
-        if (index >= 0)
-                save_nmsrs = 1;
-        else {
-                save_nmsrs = 0;
-                index = 0;
-        }
-        vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-                    virt_to_phys(vcpu->guest_msrs + index));
-        vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-                    virt_to_phys(vcpu->guest_msrs + index));
-        vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-                    virt_to_phys(vcpu->host_msrs + index));
-        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, save_nmsrs);
-        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, save_nmsrs);
-        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, save_nmsrs);
+        vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
 }
 
 /*
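setup_msrs() now ends by caching the EFER slot in vcpu->msr_offset_efer instead of programming the VMCS autoload area (the three count fields are zeroed once in vmx_vcpu_setup(), later in this diff). A hedged sketch of the kind of lookup __find_msr_index() performs, assuming a linear scan that returns -1 when the MSR is absent:

#include <stdint.h>
#include <stdio.h>

#define MSR_EFER 0xc0000080u

struct vmx_msr_entry { uint32_t index; uint64_t data; };  /* mocked */

/* hypothetical stand-in for __find_msr_index(): one linear scan of the
 * cached MSR array; memoizing the result lets the hot entry/exit paths
 * index host_msrs[]/guest_msrs[] directly instead of re-searching */
static int find_msr_index(const struct vmx_msr_entry *msrs, int nmsrs,
                          uint32_t msr)
{
        for (int i = 0; i < nmsrs; i++)
                if (msrs[i].index == msr)
                        return i;
        return -1;              /* absent, e.g. EFER on a 32-bit host */
}

int main(void)
{
        struct vmx_msr_entry guest_msrs[] = {
                { 0xc0000081, 0 },      /* MSR_STAR */
                { MSR_EFER,   0 },
        };
        int msr_offset_efer = find_msr_index(guest_msrs, 2, MSR_EFER);

        printf("msr_offset_efer = %d\n", msr_offset_efer);     /* 1 */
        return 0;
}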
@@ -611,10 +628,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
         struct vmx_msr_entry *msr;
+        int ret = 0;
+
         switch (msr_index) {
 #ifdef CONFIG_X86_64
         case MSR_EFER:
-                return kvm_set_msr_common(vcpu, msr_index, data);
+                ret = kvm_set_msr_common(vcpu, msr_index, data);
+                if (vcpu->vmx_host_state.loaded)
+                        load_transition_efer(vcpu);
+                break;
         case MSR_FS_BASE:
                 vmcs_writel(GUEST_FS_BASE, data);
                 break;
@@ -639,13 +661,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                 if (msr) {
                         msr->data = data;
                         if (vcpu->vmx_host_state.loaded)
-                                load_msrs(vcpu->guest_msrs,vcpu->save_nmsrs);
+                                load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
                         break;
                 }
-                return kvm_set_msr_common(vcpu, msr_index, data);
+                ret = kvm_set_msr_common(vcpu, msr_index, data);
         }
 
-        return 0;
+        return ret;
 }
 
 /*
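The rewritten EFER case also keeps live hardware state coherent: when the vcpu's host state is currently loaded, the cached write must reach the physical MSR at once rather than waiting for the next heavyweight switch. A standalone mock of that pattern (every name and field below is hypothetical):

#include <stdint.h>
#include <stdio.h>

struct vcpu_mock { int loaded; uint64_t guest_efer; uint64_t hw_efer; };

static void load_transition_efer_mock(struct vcpu_mock *v)
{
        v->hw_efer = v->guest_efer;     /* stands in for wrmsrl(MSR_EFER, ...) */
}

static int set_efer_mock(struct vcpu_mock *v, uint64_t data)
{
        int ret = 0;                    /* accumulate instead of early return */

        v->guest_efer = data;           /* stands in for kvm_set_msr_common() */
        if (v->loaded)                  /* guest state live on this CPU? */
                load_transition_efer_mock(v);
        return ret;
}

int main(void)
{
        struct vcpu_mock v = { .loaded = 1 };

        set_efer_mock(&v, 0x901);       /* sample EFER: NX|LME|SCE */
        printf("hw_efer = %#llx\n", (unsigned long long)v.hw_efer);
        return 0;
}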
@@ -1326,6 +1348,9 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 
         asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
 
         rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
         vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);