diff options
author | Paolo Bonzini <pbonzini@redhat.com> | 2015-02-06 06:48:04 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-04-08 04:46:53 -0400 |
commit | 362c698f8220e636edf1c40b1935715fa57f492f (patch) | |
tree | 19fa06cf3af95c66a8f9d222e4042c8b73d16bd1 /arch/x86/kvm | |
parent | 35fd68a38d574188835110cde2937d18fe9b46dd (diff) |
KVM: x86: extract blocking logic from __vcpu_run
Rename the old __vcpu_run to vcpu_run, and extract part of it to a new
function vcpu_block.
The next patch will add a new condition in vcpu_block, avoid extra
indentation.
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/x86.c | 62 |
1 file changed, 34 insertions, 28 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a284c927551e..6256dfa598a1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6186,7 +6186,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | |||
6186 | } | 6186 | } |
6187 | 6187 | ||
6188 | /* | 6188 | /* |
6189 | * Returns 1 to let __vcpu_run() continue the guest execution loop without | 6189 | * Returns 1 to let vcpu_run() continue the guest execution loop without |
6190 | * exiting to the userspace. Otherwise, the value will be returned to the | 6190 | * exiting to the userspace. Otherwise, the value will be returned to the |
6191 | * userspace. | 6191 | * userspace. |
6192 | */ | 6192 | */ |
@@ -6404,42 +6404,46 @@ out: | |||
6404 | return r; | 6404 | return r; |
6405 | } | 6405 | } |
6406 | 6406 | ||
6407 | static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) | ||
6408 | { | ||
6409 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | ||
6410 | kvm_vcpu_block(vcpu); | ||
6411 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | ||
6412 | |||
6413 | if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) | ||
6414 | return 1; | ||
6415 | |||
6416 | kvm_apic_accept_events(vcpu); | ||
6417 | switch(vcpu->arch.mp_state) { | ||
6418 | case KVM_MP_STATE_HALTED: | ||
6419 | vcpu->arch.pv.pv_unhalted = false; | ||
6420 | vcpu->arch.mp_state = | ||
6421 | KVM_MP_STATE_RUNNABLE; | ||
6422 | case KVM_MP_STATE_RUNNABLE: | ||
6423 | vcpu->arch.apf.halted = false; | ||
6424 | break; | ||
6425 | case KVM_MP_STATE_INIT_RECEIVED: | ||
6426 | break; | ||
6427 | default: | ||
6428 | return -EINTR; | ||
6429 | break; | ||
6430 | } | ||
6431 | return 1; | ||
6432 | } | ||
6407 | 6433 | ||
6408 | static int __vcpu_run(struct kvm_vcpu *vcpu) | 6434 | static int vcpu_run(struct kvm_vcpu *vcpu) |
6409 | { | 6435 | { |
6410 | int r; | 6436 | int r; |
6411 | struct kvm *kvm = vcpu->kvm; | 6437 | struct kvm *kvm = vcpu->kvm; |
6412 | 6438 | ||
6413 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | 6439 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); |
6414 | 6440 | ||
6415 | r = 1; | 6441 | for (;;) { |
6416 | while (r > 0) { | ||
6417 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && | 6442 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && |
6418 | !vcpu->arch.apf.halted) | 6443 | !vcpu->arch.apf.halted) |
6419 | r = vcpu_enter_guest(vcpu); | 6444 | r = vcpu_enter_guest(vcpu); |
6420 | else { | 6445 | else |
6421 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | 6446 | r = vcpu_block(kvm, vcpu); |
6422 | kvm_vcpu_block(vcpu); | ||
6423 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | ||
6424 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | ||
6425 | kvm_apic_accept_events(vcpu); | ||
6426 | switch(vcpu->arch.mp_state) { | ||
6427 | case KVM_MP_STATE_HALTED: | ||
6428 | vcpu->arch.pv.pv_unhalted = false; | ||
6429 | vcpu->arch.mp_state = | ||
6430 | KVM_MP_STATE_RUNNABLE; | ||
6431 | case KVM_MP_STATE_RUNNABLE: | ||
6432 | vcpu->arch.apf.halted = false; | ||
6433 | break; | ||
6434 | case KVM_MP_STATE_INIT_RECEIVED: | ||
6435 | break; | ||
6436 | default: | ||
6437 | r = -EINTR; | ||
6438 | break; | ||
6439 | } | ||
6440 | } | ||
6441 | } | ||
6442 | |||
6443 | if (r <= 0) | 6447 | if (r <= 0) |
6444 | break; | 6448 | break; |
6445 | 6449 | ||
@@ -6451,6 +6455,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6451 | r = -EINTR; | 6455 | r = -EINTR; |
6452 | vcpu->run->exit_reason = KVM_EXIT_INTR; | 6456 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
6453 | ++vcpu->stat.request_irq_exits; | 6457 | ++vcpu->stat.request_irq_exits; |
6458 | break; | ||
6454 | } | 6459 | } |
6455 | 6460 | ||
6456 | kvm_check_async_pf_completion(vcpu); | 6461 | kvm_check_async_pf_completion(vcpu); |
@@ -6459,6 +6464,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6459 | r = -EINTR; | 6464 | r = -EINTR; |
6460 | vcpu->run->exit_reason = KVM_EXIT_INTR; | 6465 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
6461 | ++vcpu->stat.signal_exits; | 6466 | ++vcpu->stat.signal_exits; |
6467 | break; | ||
6462 | } | 6468 | } |
6463 | if (need_resched()) { | 6469 | if (need_resched()) { |
6464 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | 6470 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); |
@@ -6590,7 +6596,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
6590 | } else | 6596 | } else |
6591 | WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); | 6597 | WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); |
6592 | 6598 | ||
6593 | r = __vcpu_run(vcpu); | 6599 | r = vcpu_run(vcpu); |
6594 | 6600 | ||
6595 | out: | 6601 | out: |
6596 | post_kvm_run_save(vcpu); | 6602 | post_kvm_run_save(vcpu); |