author	Bharat Bhushan <r65777@freescale.com>	2013-08-07 06:03:46 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2013-10-10 05:40:08 -0400
commit	40fde70d0df993d1a652d6cc69f8b4e967656170 (patch)
tree	9d9ec53946c35f6335c87a6ec6ba504ed7690fbc	/arch/powerpc/kvm/e500_mmu_host.c
parent	cfc860253abd73e1681696c08ea268d33285a2c4 (diff)
kvm: ppc: booke: check range page invalidation progress on page setup
When the MM code is invalidating a range of pages, it calls the KVM kvm_mmu_notifier_invalidate_range_start() notifier function, which calls kvm_unmap_hva_range(), which arranges to flush all the TLBs for guest pages. However, the Linux PTEs for the range being flushed are still valid at that point. We are not supposed to establish any new references to pages in the range until the ...range_end() notifier gets called.

The PPC-specific KVM code doesn't get any explicit notification of that; instead, we are supposed to use mmu_notifier_retry() to test whether we are or have been inside a range flush notifier pair while we have been referencing a page.

This patch calls mmu_notifier_retry() while mapping the guest page, to ensure we are not referencing a page while a range invalidation is in progress. The call is made inside a region locked with kvm->mmu_lock, which is the same lock taken by the KVM MMU notifier functions, thus ensuring that no new notification can proceed while we are in the locked region.

Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
Acked-by: Alexander Graf <agraf@suse.de>
[Backported to 3.12 - Paolo]
Reviewed-by: Bharat Bhushan <bharat.bhushan@freescale.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
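For context, the race is detected by mmu_notifier_retry(); the sketch below shows roughly how that check behaves, assuming the ~3.12-era definition in include/linux/kvm_host.h. It is an illustration of the mechanism, not part of this patch:

	/*
	 * Illustrative sketch (assumed ~3.12-era behaviour, not taken from
	 * this patch): abort the mapping if a range invalidation is in
	 * flight, or if one completed since mmu_seq was sampled.
	 */
	static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
	{
		if (unlikely(kvm->mmu_notifier_count))
			return 1;	/* ...range_start() seen, matching ...range_end() not yet */
		smp_rmb();
		if (kvm->mmu_notifier_seq != mmu_seq)
			return 1;	/* a full start/end pair completed after mmu_seq was sampled */
		return 0;
	}

A caller samples kvm->mmu_notifier_seq (followed by smp_rmb()) before translating and referencing the guest page, then performs this check under kvm->mmu_lock before installing the translation; that is exactly the shape of the hunks below.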
Diffstat (limited to 'arch/powerpc/kvm/e500_mmu_host.c')
-rw-r--r--	arch/powerpc/kvm/e500_mmu_host.c	18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..c65593abae8e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	int ret = 0;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/*
 	 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	/* Clear i-cache for new pages */
 	kvmppc_mmu_flush_icache(pfn);
 
+out:
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
 
-	return 0;
+	return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */