diff options
author | Avi Kivity <avi@redhat.com> | 2009-05-31 15:58:47 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-09-10 01:32:46 -0400 |
commit | 6de4f3ada40b336522250a7832a0cc4de8856589 (patch) | |
tree | 90920846774aa0fb0fb47ac245fcf5f8b73afcee /arch/x86/kvm/vmx.c | |
parent | 8f5d549f028056d6ad6044f2d9e27ecf361d955e (diff) |
KVM: Cache pdptrs
Instead of reloading the pdptrs on every entry and exit (vmcs writes on vmx,
guest memory access on svm), extract them on demand.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 22 |
1 file changed, 18 insertions, 4 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 752465f98bfd..d726dec69529 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -161,6 +161,8 @@ static struct kvm_vmx_segment_field { | |||
161 | VMX_SEGMENT_FIELD(LDTR), | 161 | VMX_SEGMENT_FIELD(LDTR), |
162 | }; | 162 | }; |
163 | 163 | ||
164 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu); | ||
165 | |||
164 | /* | 166 | /* |
165 | * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it | 167 | * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it |
166 | * away by decrementing the array size. | 168 | * away by decrementing the array size. |
@@ -1047,6 +1049,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) | |||
1047 | case VCPU_REGS_RIP: | 1049 | case VCPU_REGS_RIP: |
1048 | vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); | 1050 | vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); |
1049 | break; | 1051 | break; |
1052 | case VCPU_EXREG_PDPTR: | ||
1053 | if (enable_ept) | ||
1054 | ept_save_pdptrs(vcpu); | ||
1055 | break; | ||
1050 | default: | 1056 | default: |
1051 | break; | 1057 | break; |
1052 | } | 1058 | } |
@@ -1546,6 +1552,10 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | |||
1546 | 1552 | ||
1547 | static void ept_load_pdptrs(struct kvm_vcpu *vcpu) | 1553 | static void ept_load_pdptrs(struct kvm_vcpu *vcpu) |
1548 | { | 1554 | { |
1555 | if (!test_bit(VCPU_EXREG_PDPTR, | ||
1556 | (unsigned long *)&vcpu->arch.regs_dirty)) | ||
1557 | return; | ||
1558 | |||
1549 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { | 1559 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { |
1550 | vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); | 1560 | vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); |
1551 | vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); | 1561 | vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); |
@@ -1562,6 +1572,11 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu) | |||
1562 | vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); | 1572 | vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); |
1563 | vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); | 1573 | vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); |
1564 | } | 1574 | } |
1575 | |||
1576 | __set_bit(VCPU_EXREG_PDPTR, | ||
1577 | (unsigned long *)&vcpu->arch.regs_avail); | ||
1578 | __set_bit(VCPU_EXREG_PDPTR, | ||
1579 | (unsigned long *)&vcpu->arch.regs_dirty); | ||
1565 | } | 1580 | } |
1566 | 1581 | ||
1567 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); | 1582 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
@@ -3255,10 +3270,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3255 | 3270 | ||
3256 | /* Access CR3 don't cause VMExit in paging mode, so we need | 3271 | /* Access CR3 don't cause VMExit in paging mode, so we need |
3257 | * to sync with guest real CR3. */ | 3272 | * to sync with guest real CR3. */ |
3258 | if (enable_ept && is_paging(vcpu)) { | 3273 | if (enable_ept && is_paging(vcpu)) |
3259 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | 3274 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
3260 | ept_save_pdptrs(vcpu); | ||
3261 | } | ||
3262 | 3275 | ||
3263 | if (unlikely(vmx->fail)) { | 3276 | if (unlikely(vmx->fail)) { |
3264 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 3277 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
@@ -3567,7 +3580,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3567 | #endif | 3580 | #endif |
3568 | ); | 3581 | ); |
3569 | 3582 | ||
3570 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); | 3583 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) |
3584 | | (1 << VCPU_EXREG_PDPTR)); | ||
3571 | vcpu->arch.regs_dirty = 0; | 3585 | vcpu->arch.regs_dirty = 0; |
3572 | 3586 | ||
3573 | get_debugreg(vcpu->arch.dr6, 6); | 3587 | get_debugreg(vcpu->arch.dr6, 6); |