author		Scott Wood <scottwood@freescale.com>	2011-11-08 19:23:23 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:26 -0500
commit		25051b5a5aff0bb71435421b4b80279b789fa0dc (patch)
tree		2bf51135f6dee991d80e83e4e59fe64b26d94f69 /arch/powerpc
parent		7e28e60ef974d0eeb43112ef264d8c130f7b7bf4 (diff)
KVM: PPC: Move prepare_to_enter call site into subarch code
This function should be called with interrupts disabled, to avoid a race
where an exception is delivered after we check, but the resched kick is
received before we disable interrupts (and thus doesn't actually trigger
the exit code that would recheck exceptions).

booke already does this properly in the lightweight exit case, but not on
initial entry.

For now, move the call of prepare_to_enter into subarch-specific code so
that booke can do the right thing here. Ideally book3s would do the same
thing, but I'm having a hard time seeing where it does any interrupt
disabling of this sort (plus it has several additional call sites), so
I'm deferring the book3s fix to someone more familiar with that code.
book3s behavior should be unchanged by this patch.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
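As a reader's aid, the following is a minimal sketch of the entry ordering this patch establishes on the booke path: interrupts go off first, pending exceptions are delivered, and only then are the signal check and guest entry performed, so a kick arriving in that window is not lost. This is not the kernel's actual code; enter_guest() is a hypothetical stand-in for the low-level guest entry routine, which this diff does not show.

	/* Illustrative sketch only -- not the kernel's actual code. */
	static int vcpu_entry_sketch(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	{
		int ret = 0;

		local_irq_disable();

		/*
		 * Deliver pending exceptions with interrupts disabled (the
		 * invariant asserted by the WARN_ON_ONCE added to booke's
		 * kvmppc_core_prepare_to_enter below).
		 */
		kvmppc_core_prepare_to_enter(vcpu);

		if (signal_pending(current)) {
			/* Bail out before entry; the caller sees KVM_EXIT_INTR. */
			kvm_run->exit_reason = KVM_EXIT_INTR;
			ret = -EINTR;
		} else {
			/* enter_guest() is a hypothetical placeholder. */
			ret = enter_guest(vcpu);
		}

		local_irq_enable();
		return ret;
	}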
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	| 2
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	| 2
-rw-r--r--	arch/powerpc/kvm/booke.c	| 4
-rw-r--r--	arch/powerpc/kvm/powerpc.c	| 2
4 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 536adee59c07..b1e3b9c1326a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -836,6 +836,8 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	/* No need to go into the guest when all we'll do is come back out */
 	if (signal_pending(current)) {
 		run->exit_reason = KVM_EXIT_INTR;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index f3628581fb7c..203a7b7b58b9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -929,6 +929,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	/* No need to go into the guest when all we do is going out */
 	if (signal_pending(current)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index e082e348c882..feaefc433276 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -295,6 +295,8 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	unsigned long old_pending = vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	priority = __ffs(*pending);
 	while (priority <= BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -323,6 +325,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	if (signal_pending(current)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		ret = -EINTR;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6186ec0d939b..7411bdd8ff6f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -559,8 +559,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.hcall_needed = 0;
 	}
 
-	kvmppc_core_prepare_to_enter(vcpu);
-
 	r = kvmppc_vcpu_run(run, vcpu);
 
 	if (vcpu->sigset_active)