-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_hv.c	10
-rw-r--r--	arch/powerpc/kvm/book3s_64_vio.c	23
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	64
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	13
-rw-r--r--	arch/powerpc/kvm/powerpc.c	2
5 files changed, 75 insertions, 37 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6aec8a22aeff..235319c2574e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -651,6 +651,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
 		hnow_r = hpte_new_to_old_r(hnow_r);
 	}
+
+	/*
+	 * If the HPT is being resized, don't update the HPTE,
+	 * instead let the guest retry after the resize operation is complete.
+	 * The synchronization for mmu_ready test vs. set is provided
+	 * by the HPTE lock.
+	 */
+	if (!kvm->arch.mmu_ready)
+		goto out_unlock;
+
 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
 	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
-		return H_PARAMETER;
+			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+		ret = H_PARAMETER;
+		goto unlock_exit;
+	}
 
 	entry = ioba >> stt->page_shift;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-		if (dir == DMA_NONE) {
+		if (dir == DMA_NONE)
 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
 					stit->tbl, entry);
-		} else {
-			idx = srcu_read_lock(&vcpu->kvm->srcu);
+		else
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
 					entry, ua, dir);
-			srcu_read_unlock(&vcpu->kvm->srcu, idx);
-		}
 
 		if (ret == H_SUCCESS)
 			continue;
 
 		if (ret == H_TOO_HARD)
-			return ret;
+			goto unlock_exit;
 
 		WARN_ON_ONCE(1);
 		kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	kvmppc_tce_put(stt, entry, tce);
 
-	return H_SUCCESS;
+unlock_exit:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index fff62fdf1464..18b16c3957fc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2717,11 +2717,13 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
 	 * and return without going into the guest(s).
+	 * If the mmu_ready flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
 	 */
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3118,9 +3120,28 @@ out:
 	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
+static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
+{
+	int r = 0;
+	struct kvm *kvm = vcpu->kvm;
+
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.mmu_ready) {
+		if (!kvm_is_radix(kvm))
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+		if (!r) {
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				kvmppc_setup_partition_table(kvm);
+			kvm->arch.mmu_ready = 1;
+		}
+	}
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v;
 
@@ -3174,6 +3195,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
+		/* See if the MMU is ready to go */
+		if (!vcpu->kvm->arch.mmu_ready) {
+			spin_unlock(&vc->lock);
+			r = kvmhv_setup_mmu(vcpu);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.
+					hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
 			kvmppc_vcore_end_preempt(vc);
 
@@ -3293,24 +3328,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up MMU if necessary */
-	if (!vcpu->kvm->arch.mmu_ready) {
-		mutex_lock(&kvm->lock);
-		r = 0;
-		if (!kvm->arch.mmu_ready) {
-			if (!kvm_is_radix(vcpu->kvm))
-				r = kvmppc_hv_setup_htab_rma(vcpu);
-			if (!r) {
-				if (cpu_has_feature(CPU_FTR_ARCH_300))
-					kvmppc_setup_partition_table(kvm);
-				kvm->arch.mmu_ready = 1;
-			}
-		}
-		mutex_unlock(&kvm->lock);
-		if (r)
-			goto out;
-	}
-
 	flush_all_to_thread(current);
 
 	/* Save userspace EBB and other register values */
@@ -3336,10 +3353,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
-			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			srcu_idx = srcu_read_lock(&kvm->srcu);
 			r = kvmppc_book3s_hv_page_fault(run, vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
-			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+			srcu_read_unlock(&kvm->srcu, srcu_idx);
 		} else if (r == RESUME_PASSTHROUGH) {
 			if (WARN_ON(xive_enabled()))
 				r = H_SUCCESS;
@@ -3358,9 +3375,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
- out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
-	atomic_dec(&vcpu->kvm->arch.vcpus_running);
+	atomic_dec(&kvm->arch.vcpus_running);
 	return r;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7add18930e6d..2659844784b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1025,13 +1025,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	beq	no_xive
 	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
 	li	r9, TM_QW1_OS
-	stdcix	r11,r9,r10
 	eieio
+	stdcix	r11,r9,r10
 	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
 	li	r9, TM_QW1_OS + TM_WORD2
 	stwcix	r11,r9,r10
 	li	r9, 1
 	stw	r9, VCPU_XIVE_PUSHED(r4)
+	eieio
 no_xive:
 #endif /* CONFIG_KVM_XICS */
 
@@ -1346,6 +1347,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	bne	3f
 BEGIN_FTR_SECTION
 	PPC_MSGSYNC
+	lwsync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
@@ -1436,8 +1438,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzx	r11, r7, r10
 	eieio
+	lwzx	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldx	r11, r6, r10
 	b	3f
@@ -1445,8 +1447,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzcix	r11, r7, r10
 	eieio
+	lwzcix	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldcix	r11, r6, r10
 3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1456,6 +1458,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	stw	r10, VCPU_XIVE_PUSHED(r9)
 	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
 	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+	eieio
 1:
 #endif /* CONFIG_KVM_XICS */
 	/* Save more register state */
@@ -2838,6 +2841,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	PPC_MSGCLR(6)
 	/* see if it's a host IPI */
 	li	r3, 1
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+	lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
 	bnelr
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a0b7f094de78..6b6c53c42ac9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -643,7 +643,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 #endif
 	case KVM_CAP_PPC_HTM:
-		r = is_kvmppc_hv_enabled(kvm) &&
+		r = hv_enabled &&
 		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
 		break;
 	default: