diff options
-rw-r--r-- | include/linux/kvm_host.h | 14 |
-rw-r--r-- | virt/kvm/kvm_main.c | 6 |
2 files changed, 12 insertions, 8 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d4d4d709211..eada8e69fe5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -702,12 +702,16 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
702 | if (unlikely(vcpu->kvm->mmu_notifier_count)) | 702 | if (unlikely(vcpu->kvm->mmu_notifier_count)) |
703 | return 1; | 703 | return 1; |
704 | /* | 704 | /* |
705 | * Both reads happen under the mmu_lock and both values are | 705 | * Ensure the read of mmu_notifier_count happens before the read |
706 | * modified under mmu_lock, so there's no need of smb_rmb() | 706 | * of mmu_notifier_seq. This interacts with the smp_wmb() in |
707 | * here in between, otherwise mmu_notifier_count should be | 707 | * mmu_notifier_invalidate_range_end to make sure that the caller |
708 | * read before mmu_notifier_seq, see | 708 | * either sees the old (non-zero) value of mmu_notifier_count or |
709 | * mmu_notifier_invalidate_range_end write side. | 709 | * the new (incremented) value of mmu_notifier_seq. |
710 | * PowerPC Book3s HV KVM calls this under a per-page lock | ||
711 | * rather than under kvm->mmu_lock, for scalability, so | ||
712 | * can't rely on kvm->mmu_lock to keep things ordered. | ||
710 | */ | 713 | */ |
714 | smp_rmb(); | ||
711 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) | 715 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) |
712 | return 1; | 716 | return 1; |
713 | return 0; | 717 | return 0; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 64be836f334..9f32bffd37c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -357,11 +357,11 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
357 | * been freed. | 357 | * been freed. |
358 | */ | 358 | */ |
359 | kvm->mmu_notifier_seq++; | 359 | kvm->mmu_notifier_seq++; |
360 | smp_wmb(); | ||
360 | /* | 361 | /* |
361 | * The above sequence increase must be visible before the | 362 | * The above sequence increase must be visible before the |
362 | * below count decrease but both values are read by the kvm | 363 | * below count decrease, which is ensured by the smp_wmb above |
363 | * page fault under mmu_lock spinlock so we don't need to add | 364 | * in conjunction with the smp_rmb in mmu_notifier_retry(). |
364 | * a smb_wmb() here in between the two. | ||
365 | */ | 365 | */ |
366 | kvm->mmu_notifier_count--; | 366 | kvm->mmu_notifier_count--; |
367 | spin_unlock(&kvm->mmu_lock); | 367 | spin_unlock(&kvm->mmu_lock); |