-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c | 10
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c        | 29
2 files changed, 29 insertions, 10 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7c62967d672c..59247af5fd45 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -646,6 +646,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
 		hnow_r = hpte_new_to_old_r(hnow_r);
 	}
+
+	/*
+	 * If the HPT is being resized, don't update the HPTE,
+	 * instead let the guest retry after the resize operation is complete.
+	 * The synchronization for hpte_setup_done test vs. set is provided
+	 * by the HPTE lock.
+	 */
+	if (!kvm->arch.hpte_setup_done)
+		goto out_unlock;
+
 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
 	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 73bf1ebfa78f..8d43cf205d34 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
 	 * and return without going into the guest(s).
+	 * If the hpte_setup_done flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
 	 */
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) ||
+	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3078,7 +3081,7 @@ out:
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v;
 
@@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
+		/* See if the HPT and VRMA are ready to go */
+		if (!kvm_is_radix(vcpu->kvm) &&
+		    !vcpu->kvm->arch.hpte_setup_done) {
+			spin_unlock(&vc->lock);
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
 			kvmppc_vcore_end_preempt(vc);
 
@@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA */
-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
-		r = kvmppc_hv_setup_htab_rma(vcpu);
-		if (r)
-			goto out;
-	}
-
	flush_all_to_thread(current);
 
 	/* Save userspace EBB and other register values */
@@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
-out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
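The hunk in kvmppc_vcpu_run_hv keeps the existing comment "Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt" and its smp_mb(). The stand-alone C11 sketch below illustrates that ordering pattern in userspace terms; it is a simplified analogue, not kernel code, and the names resizer_may_proceed, vcpu_may_enter_guest, setup_done and vcpus_running are hypothetical stand-ins for the real flag and counter. The point is that with a full barrier between each side's store and its load of the other side's variable, at least one side is guaranteed to observe the other, so a vcpu cannot slip into the guest while an HPT resize goes unnoticed.

/*
 * Hypothetical userspace analogue of the vcpus_running vs. hpte_setup_done
 * ordering (this is NOT the kernel code).  Each side stores first, issues a
 * full barrier, then loads the other side's variable; the store-buffering
 * result then forbids both sides missing each other's update.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int setup_done = 1;	/* stands in for kvm->arch.hpte_setup_done */
static atomic_int vcpus_running;	/* stands in for kvm->arch.vcpus_running */

/* Resize side: clear the flag, then look for running vcpus. */
static bool resizer_may_proceed(void)
{
	atomic_store_explicit(&setup_done, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* plays the role of smp_mb() */
	return atomic_load_explicit(&vcpus_running, memory_order_relaxed) == 0;
}

/* Entry side: announce the vcpu, then check whether setup is still valid. */
static bool vcpu_may_enter_guest(void)
{
	atomic_fetch_add_explicit(&vcpus_running, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* plays the role of smp_mb() */
	return atomic_load_explicit(&setup_done, memory_order_relaxed) != 0;
}

int main(void)
{
	/* Sequential demo only; the interesting property is the concurrent case. */
	printf("vcpu may enter guest: %d\n", vcpu_may_enter_guest());
	printf("resizer may proceed:  %d\n", resizer_may_proceed());
	return 0;
}

In the patch itself, the same guarantee is what allows the HTAB/VRMA setup to move from kvmppc_vcpu_run_hv into the kvmppc_run_vcpu loop: a vcpu that races with a resize will observe hpte_setup_done cleared on its next pass and call kvmppc_hv_setup_htab_rma again before entering the guest, while the page-fault path simply lets the guest retry.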
