author	Gleb Natapov <gleb@redhat.com>	2013-11-25 08:37:13 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2013-12-12 04:49:49 -0500
commit	2961e8764faad212234e93907a370a7c36a67da5 (patch)
tree	468ce4c80d9aacc4ac67c10d6f04c625fe8a6c78 /arch/x86/kvm/vmx.c
parent	8494bd0e86271fb21581d27e3c5d6a369b5208ca (diff)
KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest entry, but most of the time this can be avoided since the values do not change. Keep a copy of the fields in memory to avoid unnecessary vmcs reads.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
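For illustration, a minimal sketch of the caching idiom the patch introduces, shown for the VM_ENTRY_CONTROLS side only (VM_EXIT_CONTROLS gets identical treatment). The last value written to the field is kept in a per-vCPU shadow, so reads are served from memory and a VMWRITE is issued only when the value actually changes; the names match the helpers added in the diff below.

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);	/* real VMWRITE */
	vmx->vm_entry_controls_shadow = val;	/* remember what the VMCS now holds */
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_entry_controls_shadow != val)	/* skip redundant VMWRITEs */
		vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;	/* no VMREAD at all */
}

Callers toggle individual bits through setbit/clearbit wrappers built on get/set, so e.g. vmx_set_efer() and exit_lmode() no longer need a VMREAD-modify-VMWRITE sequence. Since the shadow describes the currently loaded VMCS, nested_vmx_vmexit() re-seeds it with a vmcs_read32() after switching from vmcs02 back to vmcs01.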
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	112
1 file changed, 85 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c252f35..1024689ac717 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -418,6 +418,8 @@ struct vcpu_vmx {
 	u64 msr_host_kernel_gs_base;
 	u64 msr_guest_kernel_gs_base;
 #endif
+	u32 vm_entry_controls_shadow;
+	u32 vm_exit_controls_shadow;
 	/*
 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
 	 * non-nested (L1) guest, it always points to vmcs01. For a nested
@@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
 	vmcs_writel(field, vmcs_readl(field) | mask);
 }
 
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+	vmcs_write32(VM_ENTRY_CONTROLS, val);
+	vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+	if (vmx->vm_entry_controls_shadow != val)
+		vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+	return vmx->vm_entry_controls_shadow;
+}
+
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+	vmcs_write32(VM_EXIT_CONTROLS, val);
+	vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+	if (vmx->vm_exit_controls_shadow != val)
+		vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+	return vmx->vm_exit_controls_shadow;
+}
+
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
+
 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 {
 	vmx->segment_cache.bitmask = 0;
@@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
-static void clear_atomic_switch_msr_special(unsigned long entry,
-		unsigned long exit)
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+		unsigned long entry, unsigned long exit)
 {
-	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
-	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+	vm_entry_controls_clearbit(vmx, entry);
+	vm_exit_controls_clearbit(vmx, exit);
 }
 
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
@@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	switch (msr) {
 	case MSR_EFER:
 		if (cpu_has_load_ia32_efer) {
-			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+			clear_atomic_switch_msr_special(vmx,
+					VM_ENTRY_LOAD_IA32_EFER,
 					VM_EXIT_LOAD_IA32_EFER);
 			return;
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (cpu_has_load_perf_global_ctrl) {
-			clear_atomic_switch_msr_special(
+			clear_atomic_switch_msr_special(vmx,
 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
 			return;
@@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
-static void add_atomic_switch_msr_special(unsigned long entry,
-		unsigned long exit, unsigned long guest_val_vmcs,
-		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+		unsigned long entry, unsigned long exit,
+		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+		u64 guest_val, u64 host_val)
 {
 	vmcs_write64(guest_val_vmcs, guest_val);
 	vmcs_write64(host_val_vmcs, host_val);
-	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
-	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+	vm_entry_controls_setbit(vmx, entry);
+	vm_exit_controls_setbit(vmx, exit);
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
@@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	switch (msr) {
 	case MSR_EFER:
 		if (cpu_has_load_ia32_efer) {
-			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+			add_atomic_switch_msr_special(vmx,
+					VM_ENTRY_LOAD_IA32_EFER,
 					VM_EXIT_LOAD_IA32_EFER,
 					GUEST_IA32_EFER,
 					HOST_IA32_EFER,
@@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (cpu_has_load_perf_global_ctrl) {
-			add_atomic_switch_msr_special(
+			add_atomic_switch_msr_special(vmx,
 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
 					GUEST_IA32_PERF_GLOBAL_CTRL,
@@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	vmx_load_host_state(to_vmx(vcpu));
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-			     vmcs_read32(VM_ENTRY_CONTROLS) |
-			     VM_ENTRY_IA32E_MODE);
+		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 		msr->data = efer;
 	} else {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-			     vmcs_read32(VM_ENTRY_CONTROLS) &
-			     ~VM_ENTRY_IA32E_MODE);
+		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 
 		msr->data = efer & ~EFER_LME;
 	}
@@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vmcs_write32(VM_ENTRY_CONTROLS,
-		     vmcs_read32(VM_ENTRY_CONTROLS)
-		     & ~VM_ENTRY_IA32E_MODE);
+	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
@@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		++vmx->nmsrs;
 	}
 
-	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+
+	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
 
 	/* 22.2.1, 20.8.1 */
-	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	set_cr4_guest_host_mask(vmx);
@@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	exit_control = vmcs_config.vmexit_ctrl;
 	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
 		exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
-	vmcs_write32(VM_EXIT_CONTROLS, exit_control);
+	vm_exit_controls_init(vmx, exit_control);
 
 	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
 	 * emulated by vmx_set_efer(), below.
 	 */
-	vmcs_write32(VM_ENTRY_CONTROLS,
+	vm_entry_controls_init(vmx,
 		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
 			~VM_ENTRY_IA32E_MODE) |
 		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
@@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	vmcs12->vm_entry_controls =
 		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
-		(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
+		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
 
 	/* TODO: These cannot have changed unless we have MSR bitmaps and
 	 * the relevant bit asks not to trap the change */
@@ -8390,6 +8446,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 	vcpu->cpu = cpu;
 	put_cpu();
 
+	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
+	vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
 	vmx_segment_cache_clear(vmx);
 
 	/* if no vmcs02 cache requested, remove the one we used */