author		Paul Mackerras <paulus@ozlabs.org>	2017-11-08 23:37:10 -0500
committer	Paul Mackerras <paulus@ozlabs.org>	2017-11-08 23:37:10 -0500
commit		432953b4455d8a48af0500a77826c71871671161 (patch)
tree		68993e8606b65ce68291e50175217f3d26e1cb99
parent		072df8130c6b602c8ee219f7b06394680cafad2f (diff)
KVM: PPC: Book3S HV: Cosmetic post-merge cleanups
This rearranges the code in kvmppc_run_vcpu() and kvmppc_vcpu_run_hv() to be neater and clearer. Deeply indented code in kvmppc_run_vcpu() is moved out to a helper function, kvmhv_setup_mmu(). In kvmppc_vcpu_run_hv(), make use of the existing variable 'kvm' in place of 'vcpu->kvm'.

No functional change.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	41
1 file changed, 25 insertions(+), 16 deletions(-)
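The new helper relies on a check/lock/re-check shape: kvmppc_run_vcpu() tests mmu_ready without holding the lock, and kvmhv_setup_mmu() takes kvm->lock and tests again so the MMU setup runs at most once, as shown in the diff below. The following is only a minimal userspace sketch of that pattern in plain C with pthreads; the names here (struct vm, vm_setup_mmu, vm_do_setup) are illustrative and are not part of the kernel source.

/*
 * Sketch only: a userspace analogue of the check/lock/re-check pattern
 * used by kvmhv_setup_mmu().  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t lock;
	int mmu_ready;
};

static int vm_do_setup(struct vm *vm)
{
	/* stands in for kvmppc_hv_setup_htab_rma() and friends */
	return 0;
}

static int vm_setup_mmu(struct vm *vm)
{
	int r = 0;

	pthread_mutex_lock(&vm->lock);
	if (!vm->mmu_ready) {			/* re-check under the lock */
		r = vm_do_setup(vm);
		if (!r)
			vm->mmu_ready = 1;	/* published only on success */
	}
	pthread_mutex_unlock(&vm->lock);
	return r;
}

int main(void)
{
	struct vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER, .mmu_ready = 0 };

	if (!vm.mmu_ready)	/* unlocked fast-path check, as in kvmppc_run_vcpu() */
		if (vm_setup_mmu(&vm))
			return 1;
	printf("mmu_ready = %d\n", vm.mmu_ready);
	return 0;
}

In the kernel version below, the helper additionally rebuilds the partition table when CPU_FTR_ARCH_300 is present before marking mmu_ready.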
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ca0d4d938d6a..18b16c3957fc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3120,6 +3120,25 @@ out:
 	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
+static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
+{
+	int r = 0;
+	struct kvm *kvm = vcpu->kvm;
+
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.mmu_ready) {
+		if (!kvm_is_radix(kvm))
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+		if (!r) {
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				kvmppc_setup_partition_table(kvm);
+			kvm->arch.mmu_ready = 1;
+		}
+	}
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int n_ceded, i, r;
@@ -3179,22 +3198,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		/* See if the MMU is ready to go */
 		if (!vcpu->kvm->arch.mmu_ready) {
 			spin_unlock(&vc->lock);
-			mutex_lock(&vcpu->kvm->lock);
-			r = 0;
-			if (!vcpu->kvm->arch.mmu_ready) {
-				if (!kvm_is_radix(vcpu->kvm))
-					r = kvmppc_hv_setup_htab_rma(vcpu);
-				if (!r) {
-					if (cpu_has_feature(CPU_FTR_ARCH_300))
-						kvmppc_setup_partition_table(vcpu->kvm);
-					vcpu->kvm->arch.mmu_ready = 1;
-				}
-			}
-			mutex_unlock(&vcpu->kvm->lock);
+			r = kvmhv_setup_mmu(vcpu);
 			spin_lock(&vc->lock);
 			if (r) {
 				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				kvm_run->fail_entry.
+					hardware_entry_failure_reason = 0;
 				vcpu->arch.ret = r;
 				break;
 			}
@@ -3344,10 +3353,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
-			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			srcu_idx = srcu_read_lock(&kvm->srcu);
 			r = kvmppc_book3s_hv_page_fault(run, vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
-			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+			srcu_read_unlock(&kvm->srcu, srcu_idx);
 		} else if (r == RESUME_PASSTHROUGH) {
 			if (WARN_ON(xive_enabled()))
 				r = H_SUCCESS;
@@ -3367,7 +3376,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
-	atomic_dec(&vcpu->kvm->arch.vcpus_running);
+	atomic_dec(&kvm->arch.vcpus_running);
 	return r;
 }
 