Diffstat (limited to 'drivers/kvm')

 drivers/kvm/kvm.h |   4 +
 drivers/kvm/vmx.c | 128 ++++++++++++++++++++++++++++++-----------------
 2 files changed, 76 insertions(+), 56 deletions(-)
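In brief (inferred from the hunks below; the commit message itself is not part of this view): the patch drops the compile-time NR_64BIT_MSRS/NR_BAD_MSRS bookkeeping in vmx.c and instead decides per vcpu, in setup_msrs(), exactly which MSRs must be saved and restored by hand around guest entry. The chosen entries are swapped to the front of the guest/host MSR arrays and counted in the new vcpu->save_nmsrs field, and the old file-scope msr_offset_kernel_gs_base becomes a per-vcpu field.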
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index fc4a6c1235f0..c252efed49d9 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -288,6 +288,10 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	int save_nmsrs;
+#ifdef CONFIG_X86_64
+	int msr_offset_kernel_gs_base;
+#endif
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
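Both new fields are consumed later in this patch; condensed from the vmx.c hunks below, the two call sites end up as:

	/* vmx_save_host_state(): switch the CPU to the guest's MSR values */
	save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
	load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);

	/* vmx_load_host_state(): switch back to the host's MSR values */
	save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
	load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);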
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index a05bfa085877..872ca0381fbe 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -85,19 +85,6 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -118,13 +105,23 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
 	int i;
 
 	for (i = 0; i < vcpu->nmsrs; ++i)
 		if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
+			return i;
+	return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+	int i;
+
+	i = __find_msr_index(vcpu, msr);
+	if (i >= 0)
+		return &vcpu->guest_msrs[i];
 	return NULL;
 }
 
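The split exists because setup_msrs() (further down) needs array positions it can hand to move_msr_up(), while other callers still want the entry itself. Both usages, condensed from later hunks in this same patch:

	/* by position, for reordering: */
	index = __find_msr_index(vcpu, MSR_LSTAR);
	if (index >= 0)
		move_msr_up(vcpu, index, save_nmsrs++);

	/* by entry, for data access: */
	msr = find_msr_entry(vcpu, msr_index);
	if (msr)
		msr->data = data;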
@@ -307,10 +304,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
 	}
 #endif
+	load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
@@ -337,12 +334,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 
 		reload_tss();
 	}
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+	load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
 }
 
 /*
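For reference, save_msrs() and load_msrs() are untouched by this patch; elsewhere in vmx.c of this era they are (roughly) plain rdmsrl()/wrmsrl() loops over the first n entries:

	static void load_msrs(struct vmx_msr_entry *e, int n)
	{
		int i;

		for (i = 0; i < n; ++i)
			wrmsrl(e[i].index, e[i].data);
	}

	static void save_msrs(struct vmx_msr_entry *e, int n)
	{
		int i;

		for (i = 0; i < n; ++i)
			rdmsrl(e[i].index, e[i].data);
	}

So load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs) writes exactly the first save_nmsrs guest values into the CPU, which is why setup_msrs() swaps the interesting MSRs to the front of the array.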
@@ -464,41 +457,74 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 }
 
 /*
+ * Swap MSR entry in host/guest MSR entry array.
+ */
+void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+{
+	struct vmx_msr_entry tmp;
+	tmp = vcpu->guest_msrs[to];
+	vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
+	vcpu->guest_msrs[from] = tmp;
+	tmp = vcpu->host_msrs[to];
+	vcpu->host_msrs[to] = vcpu->host_msrs[from];
+	vcpu->host_msrs[from] = tmp;
+}
+
+/*
  * Set up the vmcs to automatically save and restore system
  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
  * mode, as fiddling with msrs is very expensive.
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
+	int index, save_nmsrs;
 
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
+	save_nmsrs = 0;
+#ifdef CONFIG_X86_64
+	if (is_long_mode(vcpu)) {
+		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_LSTAR);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_CSTAR);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		/*
+		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * if efer.sce is enabled.
+		 */
+		index = __find_msr_index(vcpu, MSR_K6_STAR);
+		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
+			move_msr_up(vcpu, index, save_nmsrs++);
+	}
+#endif
+	vcpu->save_nmsrs = save_nmsrs;
 
-	/*
-	 * MSR_K6_STAR is only needed on long mode guests, and only
-	 * if efer.sce is enabled.
-	 */
-	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-		++nr_good_msrs;
+	vcpu->msr_offset_kernel_gs_base =
+		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
 #endif
+	index = __find_msr_index(vcpu, MSR_EFER);
+	if (index >= 0)
+		save_nmsrs = 1;
+	else {
+		save_nmsrs = 0;
+		index = 0;
 	}
-
 	vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+		    virt_to_phys(vcpu->host_msrs + index));
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, save_nmsrs);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, save_nmsrs);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, save_nmsrs);
 }
 
 /*
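Net effect of the new setup_msrs(): on a long-mode guest, up to five MSRs (SYSCALL_MASK, LSTAR, CSTAR, KERNEL_GS_BASE and, if EFER.SCE is set, K6_STAR) are swapped to the front of both arrays and counted in save_nmsrs, while the VMCS autoload area is pointed at the single MSR_EFER entry (count 1 if present, else 0). A standalone sketch of the swap-to-front selection, with toy names and values rather than kernel code:

	#include <stdio.h>

	struct msr_entry { unsigned index; unsigned long long data; };

	static int find_index(const struct msr_entry *a, int n, unsigned msr)
	{
		int i;

		for (i = 0; i < n; ++i)
			if (a[i].index == msr)
				return i;
		return -1;
	}

	static void swap_up(struct msr_entry *a, int from, int to)
	{
		struct msr_entry tmp = a[to];

		a[to] = a[from];
		a[from] = tmp;
	}

	int main(void)
	{
		/* Toy MSR numbers standing in for the architectural ones. */
		enum { STAR = 1, LSTAR = 2, CSTAR = 3, EFER = 4, KERNEL_GS_BASE = 5 };
		struct msr_entry msrs[] = {
			{ STAR, 0 }, { LSTAR, 0 }, { CSTAR, 0 },
			{ EFER, 0 }, { KERNEL_GS_BASE, 0 },
		};
		unsigned wanted[] = { LSTAR, CSTAR, KERNEL_GS_BASE, STAR };
		int n = (int)(sizeof(msrs) / sizeof(msrs[0]));
		int nwanted = (int)(sizeof(wanted) / sizeof(wanted[0]));
		int save_nmsrs = 0, i;

		/* Swap each wanted MSR to the next free slot at the front. */
		for (i = 0; i < nwanted; ++i) {
			int idx = find_index(msrs, n, wanted[i]);
			if (idx >= 0)
				swap_up(msrs, idx, save_nmsrs++);
		}

		/* Entries [0, save_nmsrs) are the manually switched set;
		 * everything past them (here, EFER) can be handled
		 * elsewhere, e.g. by the VMCS autoload area. */
		for (i = 0; i < n; ++i)
			printf("slot %d: msr %u%s\n", i, msrs[i].index,
			       i < save_nmsrs ? "  (save/restore by hand)" : "");
		return 0;
	}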
@@ -595,14 +621,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
-	case MSR_LSTAR:
-	case MSR_SYSCALL_MASK:
-		msr = find_msr_entry(vcpu, msr_index);
-		if (msr)
-			msr->data = data;
-		if (vcpu->vmx_host_state.loaded)
-			load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -620,6 +638,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
+			if (vcpu->vmx_host_state.loaded)
+				load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
 			break;
 		}
 		return kvm_set_msr_common(vcpu, msr_index, data);
@@ -1331,10 +1351,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->host_msrs[j].reserved = 0;
 		vcpu->host_msrs[j].data = data;
 		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
-		if (index == MSR_KERNEL_GS_BASE)
-			msr_offset_kernel_gs_base = j;
-#endif
 		++vcpu->nmsrs;
 	}
 