path: root/arch/powerpc/kvm/e500_mmu_host.c
Diffstat (limited to 'arch/powerpc/kvm/e500_mmu_host.c')
 arch/powerpc/kvm/e500_mmu_host.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..ecf2247b13be 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,10 +32,11 @@
 #include <asm/kvm_ppc.h>
 
 #include "e500.h"
-#include "trace.h"
 #include "timing.h"
 #include "e500_mmu_host.h"
 
+#include "trace_booke.h"
+
 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
 
 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 	ref->pfn = pfn;
 	ref->flags |= E500_TLB_VALID;
 
+	/* Mark the page accessed */
+	kvm_set_pfn_accessed(pfn);
+
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
 }
@@ -332,6 +336,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	int ret = 0;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/*
 	 * Translate guest physical to true physical, acquiring
@@ -449,6 +460,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +474,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	/* Clear i-cache for new pages */
 	kvmppc_mmu_flush_icache(pfn);
 
+out:
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
 
-	return 0;
+	return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
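
The hunks above appear to close a race between translating the guest frame to a host pfn and installing the shadow TLB entry: kvmppc_e500_shadow_map() samples kvm->mmu_notifier_seq before the translation and re-checks it with mmu_notifier_retry() under kvm->mmu_lock; if an MMU notifier invalidation ran in between, the mapping is abandoned and -EAGAIN is returned so the fault is retried. Below is a minimal sketch of that pattern, condensed from the diff; the function name shadow_map_page(), its arguments, and the plain unsigned long pfn type are hypothetical simplifications, while kvm->mmu_notifier_seq, smp_rmb(), mmu_notifier_retry(), kvm->mmu_lock, and kvm_release_pfn_clean() are the calls visible in the hunks.

	#include <linux/kvm_host.h>

	/* Sketch only: name, signature and pfn type are illustrative. */
	static int shadow_map_page(struct kvm *kvm, unsigned long pfn)
	{
		unsigned long mmu_seq;
		int ret = 0;

		/* Sample the notifier sequence before the gfn -> pfn lookup */
		mmu_seq = kvm->mmu_notifier_seq;
		smp_rmb();

		/* ... gfn_to_pfn() lookup and page-size selection go here ... */

		spin_lock(&kvm->mmu_lock);
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* An invalidation raced with us; let the caller retry */
			ret = -EAGAIN;
			goto out;
		}

		/* Safe to install the shadow TLB entry while holding mmu_lock */

	out:
		spin_unlock(&kvm->mmu_lock);
		/* Drop the refcount so MMU notifiers can reclaim the page */
		kvm_release_pfn_clean(pfn);
		return ret;
	}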