aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
authorTakuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>2014-02-18 03:22:47 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-02-18 04:07:26 -0500
commit5befdc385ddb2d5ae8995ad89004529a3acf58fc (patch)
treee31e48e6f1f810596b6c5ca9663c02c02ed319b5 /arch/x86/kvm
parentf18eb31f9df52c28ec86d18d72f66ef689878daa (diff)
KVM: Simplify kvm->tlbs_dirty handling
When this was introduced, kvm_flush_remote_tlbs() could be called without holding mmu_lock. It is now acknowledged that the function must be called before releasing mmu_lock, and all callers have already been changed to do so. There is no need to use smp_mb() and cmpxchg() any more. Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--arch/x86/kvm/paging_tmpl.h7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cba218a2f08d..b1e6c1bf68d3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -913,7 +913,8 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
 * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
 * used by guest then tlbs are not flushed, so guest is allowed to access the
 * freed pages.
- * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
+ * We set tlbs_dirty to let the notifier know this change and delay the flush
+ * until such a case actually happens.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
@@ -942,7 +943,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		return -EINVAL;
 
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-		vcpu->kvm->tlbs_dirty++;
+		vcpu->kvm->tlbs_dirty = true;
 		continue;
 	}
 
@@ -957,7 +958,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 	if (gfn != sp->gfns[i]) {
 		drop_spte(vcpu->kvm, &sp->spt[i]);
-		vcpu->kvm->tlbs_dirty++;
+		vcpu->kvm->tlbs_dirty = true;
 		continue;
 	}
 