diff options
author | Alexander Graf <agraf@suse.de> | 2011-12-19 07:36:55 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2012-03-05 07:52:30 -0500 |
commit | e371f713db6523d99d8ffae8f9da564055e6de17 (patch) | |
tree | 87cf4fa16c010640611033c27a8353848bd8cd33 /arch/powerpc/kvm/book3s_pr.c | |
parent | ae21216bece0a623d09980c120b9c98790a860b9 (diff) |
KVM: PPC: Book3S: PR: Fix signal check race
As Scott put it:
> If we get a signal after the check, we want to be sure that we don't
> receive the reschedule IPI until after we're in the guest, so that it
> will cause another signal check.
we need to have interrupts disabled from the point we do signal_check()
all the way until we actually enter the guest.
This patch fixes potential signal loss races.
Reported-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 20 |
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 0c31507be908..2da670405727 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -51,6 +51,8 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
51 | #define MSR_USER32 MSR_USER | 51 | #define MSR_USER32 MSR_USER |
52 | #define MSR_USER64 MSR_USER | 52 | #define MSR_USER64 MSR_USER |
53 | #define HW_PAGE_SIZE PAGE_SIZE | 53 | #define HW_PAGE_SIZE PAGE_SIZE |
54 | #define __hard_irq_disable local_irq_disable | ||
55 | #define __hard_irq_enable local_irq_enable | ||
54 | #endif | 56 | #endif |
55 | 57 | ||
56 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 58 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
@@ -776,7 +778,16 @@ program_interrupt: | |||
776 | /* To avoid clobbering exit_reason, only check for signals if | 778 | /* To avoid clobbering exit_reason, only check for signals if |
777 | * we aren't already exiting to userspace for some other | 779 | * we aren't already exiting to userspace for some other |
778 | * reason. */ | 780 | * reason. */ |
781 | |||
782 | /* | ||
783 | * Interrupts could be timers for the guest which we have to | ||
784 | * inject again, so let's postpone them until we're in the guest | ||
785 | * and if we really did time things so badly, then we just exit | ||
786 | * again due to a host external interrupt. | ||
787 | */ | ||
788 | __hard_irq_disable(); | ||
779 | if (signal_pending(current)) { | 789 | if (signal_pending(current)) { |
790 | __hard_irq_enable(); | ||
780 | #ifdef EXIT_DEBUG | 791 | #ifdef EXIT_DEBUG |
781 | printk(KERN_EMERG "KVM: Going back to host\n"); | 792 | printk(KERN_EMERG "KVM: Going back to host\n"); |
782 | #endif | 793 | #endif |
@@ -959,8 +970,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
959 | 970 | ||
960 | kvmppc_core_prepare_to_enter(vcpu); | 971 | kvmppc_core_prepare_to_enter(vcpu); |
961 | 972 | ||
973 | /* | ||
974 | * Interrupts could be timers for the guest which we have to inject | ||
975 | * again, so let's postpone them until we're in the guest and if we | ||
976 | * really did time things so badly, then we just exit again due to | ||
977 | * a host external interrupt. | ||
978 | */ | ||
979 | __hard_irq_disable(); | ||
980 | |||
962 | /* No need to go into the guest when all we do is going out */ | 981 | /* No need to go into the guest when all we do is going out */ |
963 | if (signal_pending(current)) { | 982 | if (signal_pending(current)) { |
983 | __hard_irq_enable(); | ||
964 | kvm_run->exit_reason = KVM_EXIT_INTR; | 984 | kvm_run->exit_reason = KVM_EXIT_INTR; |
965 | ret = -EINTR; | 985 | ret = -EINTR; |
966 | goto out; | 986 | goto out; |