-rw-r--r--  arch/arm/include/asm/pgtable-3level.h |  2
-rw-r--r--  arch/arm/kvm/arm.c                    |  1
-rw-r--r--  arch/powerpc/kvm/e500.h               | 24
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c      | 84
-rw-r--r--  arch/powerpc/kvm/e500mc.c             |  7
5 files changed, 43 insertions(+), 75 deletions(-)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
 #define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK	(_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR		(_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a936988eb24..c1fe498983ac 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
+		break;
 	case KVM_CAP_NR_VCPUS:
 		r = num_online_cpus();
 		break;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 41cefd43655f..33db48a8ce24 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
 #define E500_PID_NUM 3
 #define E500_TLB_NUM 2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
-	pfn_t pfn;
-	unsigned int flags; /* E500_TLB_* */
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
+	unsigned int flags;	/* E500_TLB_* */
 };
 
 struct tlbe_priv {
-	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now. If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
 	unsigned int host_tlb1_nv;
 
 	u32 svr;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
-	int i;
-
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
+	int tlbsel;
 	int i;
 
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
 	}
-
-	clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }
 
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;
 
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 1f89d26e65fb..2f4baa074b2e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
-	if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+	    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
+		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
+	}
 
 	kvmppc_load_guest_fp(vcpu);
 }
