diff options
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 7 | ||||
-rw-r--r-- | include/linux/kvm_host.h | 4 | ||||
-rw-r--r-- | virt/kvm/kvm_main.c | 5 |
3 files changed, 8 insertions, 8 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index cba218a2f08d..b1e6c1bf68d3 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -913,7 +913,8 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, | |||
913 | * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't | 913 | * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't |
914 | * used by guest then tlbs are not flushed, so guest is allowed to access the | 914 | * used by guest then tlbs are not flushed, so guest is allowed to access the |
915 | * freed pages. | 915 | * freed pages. |
916 | * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. | 916 | * We set tlbs_dirty to let the notifier know about this change and delay the flush |
917 | * until such a case actually happens. | ||
917 | */ | 918 | */ |
918 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 919 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
919 | { | 920 | { |
@@ -942,7 +943,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
942 | return -EINVAL; | 943 | return -EINVAL; |
943 | 944 | ||
944 | if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { | 945 | if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { |
945 | vcpu->kvm->tlbs_dirty++; | 946 | vcpu->kvm->tlbs_dirty = true; |
946 | continue; | 947 | continue; |
947 | } | 948 | } |
948 | 949 | ||
@@ -957,7 +958,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
957 | 958 | ||
958 | if (gfn != sp->gfns[i]) { | 959 | if (gfn != sp->gfns[i]) { |
959 | drop_spte(vcpu->kvm, &sp->spt[i]); | 960 | drop_spte(vcpu->kvm, &sp->spt[i]); |
960 | vcpu->kvm->tlbs_dirty++; | 961 | vcpu->kvm->tlbs_dirty = true; |
961 | continue; | 962 | continue; |
962 | } | 963 | } |
963 | 964 | ||
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f5937b8188b4..9816b68b085f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -401,7 +401,9 @@ struct kvm { | |||
401 | unsigned long mmu_notifier_seq; | 401 | unsigned long mmu_notifier_seq; |
402 | long mmu_notifier_count; | 402 | long mmu_notifier_count; |
403 | #endif | 403 | #endif |
404 | long tlbs_dirty; | 404 | /* Protected by mmu_lock */ |
405 | bool tlbs_dirty; | ||
406 | |||
405 | struct list_head devices; | 407 | struct list_head devices; |
406 | }; | 408 | }; |
407 | 409 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a9e999a48e43..f5668a431d54 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -186,12 +186,9 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) | |||
186 | 186 | ||
187 | void kvm_flush_remote_tlbs(struct kvm *kvm) | 187 | void kvm_flush_remote_tlbs(struct kvm *kvm) |
188 | { | 188 | { |
189 | long dirty_count = kvm->tlbs_dirty; | ||
190 | |||
191 | smp_mb(); | ||
192 | if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) | 189 | if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) |
193 | ++kvm->stat.remote_tlb_flush; | 190 | ++kvm->stat.remote_tlb_flush; |
194 | cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); | 191 | kvm->tlbs_dirty = false; |
195 | } | 192 | } |
196 | EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); | 193 | EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); |
197 | 194 | ||