author     Alexander Graf <agraf@suse.de>  2013-01-17 11:54:36 -0500
committer  Alexander Graf <agraf@suse.de>  2013-01-24 13:23:32 -0500
commit     c015c62b13498629809185eb0ff04e3f13d1afb6 (patch)
tree       7f18a3e8eab2c9af305bf16bb03342b064325183
parent     b71c9e2fb72cf538aadbc59ea719639a1e2191fa (diff)
KVM: PPC: e500: Implement TLB1-in-TLB0 mapping
When a host mapping fault happens in a guest TLB1 entry today, we map
the translated guest entry into the host's TLB1.

This isn't particularly clever when the guest is mapped by normal 4k
pages, since these would be a lot better to put into TLB0 instead.

This patch adds the required logic to map 4k TLB1 shadow maps into
the host's TLB0.

Signed-off-by: Alexander Graf <agraf@suse.de>
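The policy the patch implements can be sketched as a minimal stand-alone
model (illustrative only, not kernel code: the enum and the
shadow_tlb_select() helper below are hypothetical stand-ins; the real
decision lives in kvmppc_e500_tlb1_map in the hunk further down):

/*
 * Minimal sketch of the mapping policy this patch introduces.
 * page_size and shadow_tlb_select() are invented for illustration
 * and are not part of the kernel sources touched here.
 */
#include <stdio.h>

enum page_size { PAGESZ_4K, PAGESZ_64K, PAGESZ_1M };

/* Pick the host TLB array for a shadow copy of a guest TLB1 entry. */
static int shadow_tlb_select(enum page_size shadow_size)
{
	/*
	 * A guest TLB1 entry backed by plain 4k host pages fragments
	 * into many 4k shadow entries; those fit the large,
	 * set-associative TLB0 better than the small TLB1.
	 */
	if (shadow_size == PAGESZ_4K)
		return 0;	/* host TLB0 */
	return 1;		/* big page: host TLB1 */
}

int main(void)
{
	printf("4k shadow mapping -> host TLB%d\n", shadow_tlb_select(PAGESZ_4K));
	printf("1M shadow mapping -> host TLB%d\n", shadow_tlb_select(PAGESZ_1M));
	return 0;
}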
-rw-r--r-- arch/powerpc/kvm/e500.h          |  1 +
-rw-r--r-- arch/powerpc/kvm/e500_mmu_host.c | 65 ++++++++++++++++++-------
2 files changed, 47 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c70d37ed770a..41cefd43655f 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -28,6 +28,7 @@
 
 #define E500_TLB_VALID 1
 #define E500_TLB_BITMAP 2
+#define E500_TLB_TLB0 (1 << 2)
 
 struct tlbe_ref {
 	pfn_t pfn;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 4c32d6510133..9a150bced298 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -216,10 +216,21 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 		vcpu_e500->g2h_tlb1_map[esel] = 0;
 		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
 		local_irq_restore(flags);
+	}
 
-		return;
+	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
+		/*
+		 * TLB1 entry is backed by 4k pages. This should happen
+		 * rarely and is not worth optimizing. Invalidate everything.
+		 */
+		kvmppc_e500_tlbil_all(vcpu_e500);
+		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
 	}
 
+	/* Already invalidated in between */
+	if (!(ref->flags & E500_TLB_VALID))
+		return;
+
 	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
 	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 
@@ -487,38 +498,54 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
 	return 0;
 }
 
+static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
+				     struct tlbe_ref *ref,
+				     int esel)
+{
+	unsigned int sesel = vcpu_e500->host_tlb1_nv++;
+
+	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
+		vcpu_e500->host_tlb1_nv = 0;
+
+	vcpu_e500->tlb_refs[1][sesel] = *ref;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
+	}
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	return sesel;
+}
+
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
+/* For both one-one and one-to-many */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref *ref;
-	unsigned int sesel;
+	struct tlbe_ref ref;
+	int sesel;
 	int r;
-	int stlbsel = 1;
-
-	sesel = vcpu_e500->host_tlb1_nv++;
-
-	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
-		vcpu_e500->host_tlb1_nv = 0;
 
-	ref = &vcpu_e500->tlb_refs[1][sesel];
+	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   ref);
+				   &ref);
 	if (r)
 		return r;
 
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
-	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
-		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
-	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+	/* Use TLB0 when we can only map a page with 4k */
+	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
+		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
+		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
+		return 0;
+	}
 
-	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+	/* Otherwise map into TLB1 */
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
 }