commit 0671a8e75d8aeb33e15c5152147abb0d2fa0c1e6 (patch)
author:    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2010-06-04 09:56:59 -0400
committer: Avi Kivity <avi@redhat.com>                    2010-08-01 03:39:28 -0400
tree:      9ee71a7abb58c3d34c70ae77ac7c1b09351e2787 /arch/x86/kvm/mmu.c
parent:    f41d335a02d5132c14ec0459d3b2790eeb16fb11 (diff)

KVM: MMU: reduce remote tlb flush in kvm_mmu_pte_write()

collect remote tlb flush in kvm_mmu_pte_write() path

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Diffstat (limited to 'arch/x86/kvm/mmu.c'):
 arch/x86/kvm/mmu.c | 20 ++++++++++++++------
 1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3b75689eda95..b285449e82b0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2666,11 +2666,15 @@ static bool need_remote_flush(u64 old, u64 new)
 	return (old & ~new & PT64_PERM_MASK) != 0;
 }
 
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
+				    bool remote_flush, bool local_flush)
 {
-	if (need_remote_flush(old, new))
+	if (zap_page)
+		return;
+
+	if (remote_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
-	else
+	else if (local_flush)
 		kvm_mmu_flush_tlb(vcpu);
 }
 
@@ -2735,6 +2739,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int npte;
 	int r;
 	int invlpg_counter;
+	bool remote_flush, local_flush, zap_page;
+
+	zap_page = remote_flush = local_flush = false;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -2808,7 +2815,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, sp->role.word);
-			kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-						 &invalid_list);
+			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+							       &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
@@ -2833,16 +2840,19 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			if (quadrant != sp->role.quadrant)
 				continue;
 		}
+		local_flush = true;
 		spte = &sp->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
 			entry = *spte;
 			mmu_pte_write_zap_pte(vcpu, sp, spte);
 			if (gentry)
 				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
-			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
+			if (!remote_flush && need_remote_flush(entry, *spte))
+				remote_flush = true;
 			++spte;
 		}
 	}
+	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	kvm_mmu_audit(vcpu, "post pte write");
 	spin_unlock(&vcpu->kvm->mmu_lock);