author     Avi Kivity <avi@redhat.com>   2009-05-31 15:58:47 -0400
committer  Avi Kivity <avi@redhat.com>   2009-09-10 01:32:46 -0400
commit     6de4f3ada40b336522250a7832a0cc4de8856589 (patch)
tree       90920846774aa0fb0fb47ac245fcf5f8b73afcee /arch
parent     8f5d549f028056d6ad6044f2d9e27ecf361d955e (diff)
KVM: Cache pdptrs
Instead of reloading the pdptrs on every entry and exit (vmcs writes on vmx,
guest memory access on svm), extract them on demand.
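
The idea reduces to a lazy accessor gated by a per-register "available" bit: the vendor code is asked for the PDPTRs only on the first access after a VM exit, and later reads are served from the cache. Below is a minimal, self-contained C sketch of that pattern; the struct, the constant EXREG_PDPTR_BIT, and the fake vendor_cache_pdptrs()/pdptr_read() helpers are simplified stand-ins for illustration, not the kernel's actual definitions.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal sketch of the lazy-caching scheme (simplified stand-ins, not the
 * kernel's real structures).  The register is re-read from the vendor code
 * (VMCS on vmx, guest memory on svm) only on the first access after an exit.
 */
#define EXREG_PDPTR_BIT  (1UL << 16)	/* stands in for VCPU_EXREG_PDPTR */

struct vcpu {
	uint64_t pdptrs[4];		/* cached guest PDPTEs */
	unsigned long regs_avail;	/* bit set => cache holds a valid value */
	unsigned long regs_dirty;	/* bit set => must be written back before entry */
};

/* Stand-in for the vendor hook (vmx_cache_reg()/svm_cache_reg() in the patch). */
static void vendor_cache_pdptrs(struct vcpu *vcpu)
{
	for (int i = 0; i < 4; i++)
		vcpu->pdptrs[i] = (0x1000ULL * (i + 1)) | 1;	/* fake "present" entries */
	vcpu->regs_avail |= EXREG_PDPTR_BIT;
}

/* Analogue of kvm_pdptr_read(): fetch on demand, then serve from the cache. */
static uint64_t pdptr_read(struct vcpu *vcpu, int index)
{
	if (!(vcpu->regs_avail & EXREG_PDPTR_BIT))
		vendor_cache_pdptrs(vcpu);
	return vcpu->pdptrs[index];
}

int main(void)
{
	struct vcpu vcpu = { .regs_avail = 0, .regs_dirty = 0 };

	/* vcpu_run() only invalidates the cache after an exit... */
	vcpu.regs_avail &= ~EXREG_PDPTR_BIT;

	/* ...so the expensive reload happens here, and only if someone asks. */
	printf("pdptr[0] = %#llx\n", (unsigned long long)pdptr_read(&vcpu, 0));
	printf("pdptr[2] = %#llx\n", (unsigned long long)pdptr_read(&vcpu, 2));	/* cache hit */
	return 0;
}
```

The regs_dirty bit is the mirror image on the write-back side: in the patch, ept_save_pdptrs() and load_pdptrs() set it, and ept_load_pdptrs() flushes the cached values back to the VMCS only when it is set.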
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  4
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h   |  9
-rw-r--r--  arch/x86/kvm/mmu.c              |  7
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      |  2
-rw-r--r--  arch/x86/kvm/svm.c              | 24
-rw-r--r--  arch/x86/kvm/vmx.c              | 22
-rw-r--r--  arch/x86/kvm/x86.c              |  8
7 files changed, 63 insertions, 13 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 81c68f630b14..1cc901ec4ba5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -120,6 +120,10 @@ enum kvm_reg {
 	NR_VCPU_REGS
 };
 
+enum kvm_reg_ex {
+	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+};
+
 enum {
 	VCPU_SREG_ES,
 	VCPU_SREG_CS,
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 1ff819dce7d3..7bcc5b6a4403 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -29,4 +29,13 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
 }
 
+static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
+{
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+
+	return vcpu->arch.pdptrs[index];
+}
+
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0ef5bb2b4043..8ee67e3fb9d0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
  */
 
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -1954,6 +1955,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
 	int direct = 0;
+	u64 pdptr;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1981,11 +1983,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+			pdptr = kvm_pdptr_read(vcpu, i);
+			if (!is_present_pte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
-			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+			root_gfn = pdptr >> PAGE_SHIFT;
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		if (mmu_check_root(vcpu, root_gfn))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 67785f635399..4cb1dbfd7c2a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -131,7 +131,7 @@ walk:
 	pte = vcpu->arch.cr3;
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
+		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
 		if (!is_present_pte(pte))
 			goto not_present;
 		--walker->level;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 522e69597a16..7749b0692cb2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -777,6 +777,18 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
+static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	switch (reg) {
+	case VCPU_EXREG_PDPTR:
+		BUG_ON(!npt_enabled);
+		load_pdptrs(vcpu, vcpu->arch.cr3);
+		break;
+	default:
+		BUG();
+	}
+}
+
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
@@ -2285,12 +2297,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	vcpu->arch.cr3 = svm->vmcb->save.cr3;
-	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
-			kvm_inject_gp(vcpu, 0);
-			return 1;
-		}
-	}
 	if (mmu_reload) {
 		kvm_mmu_reset_context(vcpu);
 		kvm_mmu_load(vcpu);
@@ -2641,6 +2647,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	svm->next_rip = 0;
 
+	if (npt_enabled) {
+		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
+		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
+	}
+
 	svm_complete_interrupts(svm);
 }
 
@@ -2749,6 +2760,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_gdt = svm_set_gdt,
 	.get_dr = svm_get_dr,
 	.set_dr = svm_set_dr,
+	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 752465f98bfd..d726dec69529 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -161,6 +161,8 @@ static struct kvm_vmx_segment_field {
 	VMX_SEGMENT_FIELD(LDTR),
 };
 
+static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
+
 /*
  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
@@ -1047,6 +1049,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	case VCPU_REGS_RIP:
 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 		break;
+	case VCPU_EXREG_PDPTR:
+		if (enable_ept)
+			ept_save_pdptrs(vcpu);
+		break;
 	default:
 		break;
 	}
@@ -1546,6 +1552,10 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_dirty))
+		return;
+
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
 		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
 		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
@@ -1562,6 +1572,11 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 		vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
 		vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
+
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
@@ -3255,10 +3270,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu)) {
+	if (enable_ept && is_paging(vcpu))
 		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-		ept_save_pdptrs(vcpu);
-	}
 
 	if (unlikely(vmx->fail)) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -3567,7 +3580,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	);
 
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+				  | (1 << VCPU_EXREG_PDPTR));
 	vcpu->arch.regs_dirty = 0;
 
 	get_debugreg(vcpu->arch.dr6, 6);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 75e9df097845..2ad8c97f58cc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -246,6 +246,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	ret = 1;
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_dirty);
 out:
 
 	return ret;
@@ -261,6 +265,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_avail))
+		return true;
+
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;