about summary refs log tree commit diff stats
path: root/arch/powerpc/kvm/booke.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2012-02-16 09:07:37 -0500
committerAvi Kivity <avi@redhat.com>2012-04-08 05:55:05 -0400
commita8e4ef841429d338b8700998afb3dfc18c1f25d9 (patch)
treec5c28f33902b7c7e9de837732e5a97d46c22cc57 /arch/powerpc/kvm/booke.c
parentd1ff54992d3008f4253ab3176913bb85d770e935 (diff)
KVM: PPC: booke: rework rescheduling checks
Instead of checking whether we should reschedule only when we exited due to an interrupt, let's always check before entering the guest back again. This gets the target more in line with the other archs. Also while at it, generalize the whole thing so that eventually we could have a single kvmppc_prepare_to_enter function for all ppc targets that does signal and reschedule checking for us. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--arch/powerpc/kvm/booke.c72
1 files changed, 50 insertions, 22 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 9979be1d7ff2..3da0e4273389 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -439,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
439} 439}
440 440
441/* Check pending exceptions and deliver one, if possible. */ 441/* Check pending exceptions and deliver one, if possible. */
442void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) 442int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
443{ 443{
444 int r = 0;
444 WARN_ON_ONCE(!irqs_disabled()); 445 WARN_ON_ONCE(!irqs_disabled());
445 446
446 kvmppc_core_check_exceptions(vcpu); 447 kvmppc_core_check_exceptions(vcpu);
@@ -451,8 +452,46 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
451 local_irq_disable(); 452 local_irq_disable();
452 453
453 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 454 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
454 kvmppc_core_check_exceptions(vcpu); 455 r = 1;
455 }; 456 };
457
458 return r;
459}
460
461/*
462 * Common checks before entering the guest world. Call with interrupts
463 * disabled.
464 *
465 * returns !0 if a signal is pending and check_signal is true
466 */
467static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool check_signal)
468{
469 int r = 0;
470
471 WARN_ON_ONCE(!irqs_disabled());
472 while (true) {
473 if (need_resched()) {
474 local_irq_enable();
475 cond_resched();
476 local_irq_disable();
477 continue;
478 }
479
480 if (check_signal && signal_pending(current)) {
481 r = 1;
482 break;
483 }
484
485 if (kvmppc_core_prepare_to_enter(vcpu)) {
486 /* interrupts got enabled in between, so we
487 are back at square 1 */
488 continue;
489 }
490
491 break;
492 }
493
494 return r;
456} 495}
457 496
458int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 497int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -470,10 +509,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
470 } 509 }
471 510
472 local_irq_disable(); 511 local_irq_disable();
473 512 if (kvmppc_prepare_to_enter(vcpu, true)) {
474 kvmppc_core_prepare_to_enter(vcpu);
475
476 if (signal_pending(current)) {
477 kvm_run->exit_reason = KVM_EXIT_INTR; 513 kvm_run->exit_reason = KVM_EXIT_INTR;
478 ret = -EINTR; 514 ret = -EINTR;
479 goto out; 515 goto out;
@@ -598,25 +634,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
598 634
599 switch (exit_nr) { 635 switch (exit_nr) {
600 case BOOKE_INTERRUPT_MACHINE_CHECK: 636 case BOOKE_INTERRUPT_MACHINE_CHECK:
601 kvm_resched(vcpu);
602 r = RESUME_GUEST; 637 r = RESUME_GUEST;
603 break; 638 break;
604 639
605 case BOOKE_INTERRUPT_EXTERNAL: 640 case BOOKE_INTERRUPT_EXTERNAL:
606 kvmppc_account_exit(vcpu, EXT_INTR_EXITS); 641 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
607 kvm_resched(vcpu);
608 r = RESUME_GUEST; 642 r = RESUME_GUEST;
609 break; 643 break;
610 644
611 case BOOKE_INTERRUPT_DECREMENTER: 645 case BOOKE_INTERRUPT_DECREMENTER:
612 kvmppc_account_exit(vcpu, DEC_EXITS); 646 kvmppc_account_exit(vcpu, DEC_EXITS);
613 kvm_resched(vcpu);
614 r = RESUME_GUEST; 647 r = RESUME_GUEST;
615 break; 648 break;
616 649
617 case BOOKE_INTERRUPT_DOORBELL: 650 case BOOKE_INTERRUPT_DOORBELL:
618 kvmppc_account_exit(vcpu, DBELL_EXITS); 651 kvmppc_account_exit(vcpu, DBELL_EXITS);
619 kvm_resched(vcpu);
620 r = RESUME_GUEST; 652 r = RESUME_GUEST;
621 break; 653 break;
622 654
@@ -865,19 +897,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
865 BUG(); 897 BUG();
866 } 898 }
867 899
900 /*
901 * To avoid clobbering exit_reason, only check for signals if we
902 * aren't already exiting to userspace for some other reason.
903 */
868 local_irq_disable(); 904 local_irq_disable();
869 905 if (kvmppc_prepare_to_enter(vcpu, !(r & RESUME_HOST))) {
870 kvmppc_core_prepare_to_enter(vcpu); 906 run->exit_reason = KVM_EXIT_INTR;
871 907 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
872 if (!(r & RESUME_HOST)) { 908 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
873 /* To avoid clobbering exit_reason, only check for signals if
874 * we aren't already exiting to userspace for some other
875 * reason. */
876 if (signal_pending(current)) {
877 run->exit_reason = KVM_EXIT_INTR;
878 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
879 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
880 }
881 } 909 }
882 910
883 return r; 911 return r;