Diffstat (limited to 'arch/powerpc/kvm/e500_mmu_host.c')
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c  84
1 file changed, 27 insertions, 57 deletions
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
-	int i;
-
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
+	int tlbsel;
 	int i;
 
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
 	}
-
-	clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }
 
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;
 
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }