author     Alexander Graf <agraf@suse.de>    2011-12-09 09:46:21 -0500
committer  Avi Kivity <avi@redhat.com>       2012-03-05 07:52:27 -0500
commit     7d82714d4d1293edc57439c796750310866624b2
tree       dca025da598927203e640aecc1486470b26d8533
parent     dfd4d47e9a71c5a35eb67a44cd311efbe1846b7e
KVM: PPC: Book3s: PR: Disable preemption in vcpu_run
When entering the guest, we want to make sure we're not getting preempted
away, so let's disable preemption on entry, but enable it again while handling
guest exits.
Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
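The commit message above comes down to a simple bracketing discipline: disable preemption once on entry to kvmppc_vcpu_run(), route every early return through a single out: label, and re-enable preemption there, while the exit handler enables preemption for the duration of exit handling and disables it again before re-entering the guest. What follows is only a rough userspace sketch of that bracketing, not kernel code: preempt_disable()/preempt_enable() here are stub nesting counters, and vcpu_is_sane()/has_signal_pending() are made-up stand-ins for the checks the real function performs.

/* Userspace sketch only: models the preempt bracketing this patch adds
 * to kvmppc_vcpu_run().  The "preempt" stubs below just count nesting;
 * they are not the kernel primitives. */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Made-up stand-ins for the sanity/signal checks in the real function. */
static bool vcpu_is_sane(void)       { return true; }
static bool has_signal_pending(void) { return false; }

static int vcpu_run(void)
{
        int ret = 0;

        preempt_disable();

        /* Early-error paths no longer return directly; they jump to the
         * out: label so the preempt_disable() above is always balanced. */
        if (!vcpu_is_sane()) {
                ret = -1;               /* stands in for -EINVAL */
                goto out;
        }
        if (has_signal_pending()) {
                ret = -2;               /* stands in for -EINTR */
                goto out;
        }

        /* ... enter the guest and run it here ... */
        printf("in guest, preempt_count=%d\n", preempt_count);

out:
        preempt_enable();
        return ret;
}

int main(void)
{
        int ret = vcpu_run();

        printf("ret=%d, preempt_count=%d (0 means balanced)\n",
               ret, preempt_count);
        return 0;
}

The goto-out rewrite in the kvmppc_vcpu_run() hunks below is what keeps the new preempt_disable() at the top of the function balanced on the -EINVAL and -EINTR paths.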
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 203a7b7b58b9..19af2bf2b87d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -517,6 +517,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         run->ready_for_interrupt_injection = 1;
 
         trace_kvm_book3s_exit(exit_nr, vcpu);
+        preempt_enable();
         kvm_resched(vcpu);
         switch (exit_nr) {
         case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -761,6 +762,8 @@ program_interrupt:
                         run->exit_reason = KVM_EXIT_INTR;
                         r = -EINTR;
                 } else {
+                        preempt_disable();
+
                         /* In case an interrupt came in that was triggered
                          * from userspace (like DEC), we need to check what
                          * to inject now! */
@@ -923,10 +926,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
         ulong ext_msr;
 
+        preempt_disable();
+
         /* Check if we can run the vcpu at all */
         if (!vcpu->arch.sane) {
                 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out;
         }
 
         kvmppc_core_prepare_to_enter(vcpu);
@@ -934,7 +940,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* No need to go into the guest when all we do is going out */
         if (signal_pending(current)) {
                 kvm_run->exit_reason = KVM_EXIT_INTR;
-                return -EINTR;
+                ret = -EINTR;
+                goto out;
         }
 
         /* Save FPU state in stack */
@@ -1004,6 +1011,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         current->thread.used_vsr = used_vsr;
 #endif
 
+out:
+        preempt_enable();
         return ret;
 }
 