author:    Greg Kurz <gkurz@linux.vnet.ibm.com>  2014-02-06 11:36:56 -0500
committer: Paul Mackerras <paulus@samba.org>     2014-03-26 08:33:44 -0400
commit:    e59d24e61269de34d79d2f39d3d581c219ac7a94
tree:      3eb55df101db5e8960b03053b21c712ffa3dde90 /arch
parent:    e724f080f5dd03881bc6d378750c37f7374cae7e
KVM: PPC: Book3S HV: Fix incorrect userspace exit on ioeventfd write
When the guest does an MMIO write which is handled successfully by an
ioeventfd, ioeventfd_write() returns 0 (success) and
kvmppc_handle_store() returns EMULATE_DONE. kvmppc_emulate_mmio() then
converts EMULATE_DONE to RESUME_GUEST_NV, which breaks out of the loop
in kvmppc_vcpu_run_hv() and returns to userspace with a bogus exit
reason code, typically making userspace (e.g. qemu) crash with a
message about an unknown exit code.
This adds handling of RESUME_GUEST_NV in kvmppc_vcpu_run_hv() to fix
that. For generality, we define a helper, is_kvmppc_resume_guest(),
that checks for either of the return-to-guest codes we use,
RESUME_GUEST and RESUME_GUEST_NV, so there is a single place to update
if any other return-to-guest code gets defined in future.
Since it only affects Book3S HV for now, the helper is added to
the kvm_book3s.h header file.
We use the helper in two places in kvmppc_run_core() as well for
future-proofing, though we don't see RESUME_GUEST_NV in either place
at present.
[paulus@samba.org - combined 4 patches into one, rewrote description]
Suggested-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
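
The following is a minimal standalone sketch (not kernel source) of the
return-code flow described in the commit message. The numeric constant
values and run_once() are illustrative stand-ins, not the definitions
from kvm_ppc.h; only is_kvmppc_resume_guest() mirrors the helper added
by the patch below.

/*
 * Standalone sketch of the spurious-exit problem: RESUME_GUEST_NV fails
 * the old "r == RESUME_GUEST" test, while the new helper keeps both
 * return-to-guest codes inside the run loop.
 */
#include <stdbool.h>
#include <stdio.h>

#define RESUME_GUEST     0x0	/* illustrative value, not from kvm_ppc.h */
#define RESUME_GUEST_NV  0x1	/* illustrative value, not from kvm_ppc.h */
#define RESUME_HOST      0x2	/* illustrative value, not from kvm_ppc.h */

/* One guest-exit cycle: an MMIO store completed by an ioeventfd comes
 * back from the emulation path as RESUME_GUEST_NV; after a few rounds
 * we simulate a genuine return-to-host exit. */
static int run_once(int iteration)
{
	return iteration < 3 ? RESUME_GUEST_NV : RESUME_HOST;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

int main(void)
{
	int r, i = 0;

	/* Old loop shape: the first RESUME_GUEST_NV already fails the
	 * "r == RESUME_GUEST" test, so we bounce out to userspace. */
	do {
		r = run_once(i++);
	} while (r == RESUME_GUEST);
	printf("old condition exits after %d iteration(s), r=%#x\n", i, r);

	/* Patched loop shape: both return-to-guest codes keep the vcpu in
	 * the run loop; we leave only on a genuine return-to-host code. */
	i = 0;
	do {
		r = run_once(i++);
	} while (is_kvmppc_resume_guest(r));
	printf("new condition exits after %d iteration(s), r=%#x\n", i, r);

	return 0;
}

Compiled on its own, the first loop exits on the very first
RESUME_GUEST_NV (the spurious userspace exit), while the second keeps
"running the guest" until a genuine return-to-host code shows up.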
Diffstat (limited to 'arch')
 arch/powerpc/include/asm/kvm_book3s.h | 5 +++++
 arch/powerpc/kvm/book3s_hv.c          | 6 +++---
 2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 83851aabfdc8..bb1e38a23ac7 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -304,6 +304,11 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
+static inline bool is_kvmppc_resume_guest(int r)
+{
+	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
+}
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3		0x113724FA
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 17fc9496b6ac..3b498d942a22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1530,7 +1530,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 		vcpu->arch.trap = 0;
 
 		if (vcpu->arch.ceded) {
-			if (ret != RESUME_GUEST)
+			if (!is_kvmppc_resume_guest(ret))
 				kvmppc_end_cede(vcpu);
 			else
 				kvmppc_set_timer(vcpu);
@@ -1541,7 +1541,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->vcore_state = VCORE_INACTIVE;
 	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
 				 arch.run_list) {
-		if (vcpu->arch.ret != RESUME_GUEST) {
+		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
 		}
@@ -1731,7 +1731,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		}
-	} while (r == RESUME_GUEST);
+	} while (is_kvmppc_resume_guest(r));
 
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;