author     Avi Kivity <avi@redhat.com>  2010-03-15 07:59:53 -0400
committer  Avi Kivity <avi@redhat.com>  2010-05-17 05:15:37 -0400
commit     72016f3a4221799a0b1fdf443ef6e29db572a9bb (patch)
tree       ba1d110deb5e714ea39bd3c3ea4b1dcbd81352e9 /arch/x86/kvm/mmu.c
parent     d57e2c0740bbdd768dcbafe58cf62174f31d7c2d (diff)
KVM: MMU: Consolidate two guest pte reads in kvm_mmu_pte_write()
kvm_mmu_pte_write() reads guest ptes on two different occasions, both to
allow a 32-bit pae guest to update a pte with 4-byte writes. Consolidate
these into a single read, which also allows us to consolidate another read
from an invlpg speculating a gpte into the shadow page table.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
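
To see why a single read suffices, here is a minimal user-space sketch of the consolidated path. It is a sketch only: guest_ram, read_guest() and the little-endian overlay are hypothetical stand-ins for guest memory and kvm_read_guest(), not kernel APIs. The key observation is that by the time kvm_mmu_pte_write() runs, the emulated write has already been committed to guest memory, so one aligned 8-byte read recovers both halves of a PAE gpte and the old read-then-memcpy overlay is unnecessary.

/* Minimal sketch, not kernel code: how one aligned 8-byte read
 * reconstructs a 64-bit PAE gpte that a 32-bit guest updates with
 * two 4-byte writes.  guest_ram and read_guest() are hypothetical
 * stand-ins for guest memory and kvm_read_guest(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t guest_ram[16];              /* fake guest memory */

static int read_guest(uint64_t gpa, void *buf, int len)
{
        memcpy(buf, guest_ram + gpa, len); /* cannot fail here */
        return 0;
}

/* Mirrors the new logic in kvm_mmu_pte_write() for the PAE case. */
static uint64_t consolidated_gpte(uint64_t gpa, const void *new,
                                  int bytes, int pae)
{
        uint64_t gentry = 0;

        switch (bytes) {
        case 4: gentry = *(const uint32_t *)new; break;
        case 8: gentry = *(const uint64_t *)new; break;
        default: break;
        }
        if (pae && bytes == 4) {
                /* A PAE gpte is 8 bytes wide; the emulator has already
                 * committed the 4 new bytes to guest memory, so reading
                 * the whole aligned entry sees old and new halves. */
                gpa &= ~(uint64_t)7;
                if (read_guest(gpa, &gentry, 8))
                        gentry = 0;
        }
        return gentry;
}

int main(void)
{
        uint32_t lo = 0x00001067, hi = 0x00000003;

        memcpy(guest_ram + 8, &lo, 4);     /* guest writes low half */
        printf("gpte after low half:  %#llx\n",
               (unsigned long long)consolidated_gpte(8, &lo, 4, 1));

        memcpy(guest_ram + 12, &hi, 4);    /* guest writes high half */
        printf("gpte after high half: %#llx\n",
               (unsigned long long)consolidated_gpte(12, &hi, 4, 1));
        return 0;
}

On a little-endian host this prints the gpte first half-assembled and then complete; in the kernel, the single gentry value then feeds both mmu_guess_page_from_pte_write() and the update loop that previously re-read the pte with kvm_read_guest_atomic().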
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--    arch/x86/kvm/mmu.c    69
1 file changed, 31 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4455ddbe36f4..91f8b171c825 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2560,36 +2560,11 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                                          const u8 *new, int bytes)
+                                          u64 gpte)
 {
         gfn_t gfn;
-        int r;
-        u64 gpte = 0;
         pfn_t pfn;
 
-        if (bytes != 4 && bytes != 8)
-                return;
-
-        /*
-         * Assume that the pte write on a page table of the same type
-         * as the current vcpu paging mode.  This is nearly always true
-         * (might be false while changing modes).  Note it is verified later
-         * by update_pte().
-         */
-        if (is_pae(vcpu)) {
-                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-                if ((bytes == 4) && (gpa % 4 == 0)) {
-                        r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
-                        if (r)
-                                return;
-                        memcpy((void *)&gpte + (gpa % 8), new, 4);
-                } else if ((bytes == 8) && (gpa % 8 == 0)) {
-                        memcpy((void *)&gpte, new, 8);
-                }
-        } else {
-                if ((bytes == 4) && (gpa % 4 == 0))
-                        memcpy((void *)&gpte, new, 4);
-        }
         if (!is_present_gpte(gpte))
                 return;
         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -2640,7 +2615,34 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         int r;
 
         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
-        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
+
+        switch (bytes) {
+        case 4:
+                gentry = *(const u32 *)new;
+                break;
+        case 8:
+                gentry = *(const u64 *)new;
+                break;
+        default:
+                gentry = 0;
+                break;
+        }
+
+        /*
+         * Assume that the pte write on a page table of the same type
+         * as the current vcpu paging mode.  This is nearly always true
+         * (might be false while changing modes).  Note it is verified later
+         * by update_pte().
+         */
+        if (is_pae(vcpu) && bytes == 4) {
+                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+                gpa &= ~(gpa_t)7;
+                r = kvm_read_guest(vcpu->kvm, gpa, &gentry, 8);
+                if (r)
+                        gentry = 0;
+        }
+
+        mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_access_page(vcpu, gfn);
         kvm_mmu_free_some_pages(vcpu);
@@ -2705,20 +2707,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         continue;
                 }
                 spte = &sp->spt[page_offset / sizeof(*spte)];
-                if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
-                        gentry = 0;
-                        r = kvm_read_guest_atomic(vcpu->kvm,
-                                                  gpa & ~(u64)(pte_size - 1),
-                                                  &gentry, pte_size);
-                        new = (const void *)&gentry;
-                        if (r < 0)
-                                new = NULL;
-                }
                 while (npte--) {
                         entry = *spte;
                         mmu_pte_write_zap_pte(vcpu, sp, spte);
-                        if (new)
-                                mmu_pte_write_new_pte(vcpu, sp, spte, new);
+                        if (gentry)
+                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
                         ++spte;
                 }