author     Christian Borntraeger <borntraeger@de.ibm.com>  2008-05-07 03:22:53 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>     2008-05-07 03:23:01 -0400
commit     0eaeafa10f3b2bd027e95859a6785d4c7fcc174c (patch)
tree       97676107c28393326944bd3d922e7eeaa5caf942 /arch/s390/kvm/kvm-s390.c
parent     2688905e6a9b3647bf7b452cb0ff2bdb166bd8fe (diff)
[S390] s390-kvm: leave sie context on work. Removes preemption requirement
From: Martin Schwidefsky <schwidefsky@de.ibm.com>
This patch fixes a bug with CPU-bound guests on kvm-s390. Sometimes it
was impossible to deliver a signal to a spinning guest. We used
preemption as a workaround: the preemption notifiers called vcpu_load,
which checked for pending signals and triggered a host intercept. But
even with preemption, a SIGKILL was not delivered immediately.
This patch changes the low-level host interrupt handler to check, when
TIF_WORK is set, whether the interrupt arrived while the CPU was executing
the SIE instruction. In that case the instruction pointer of the return PSW
is rewound so that the vcpu_run loop is rerun. The KVM code sees an
intercept reason of 0 when that happens; this patch adds accounting for
this type of intercept as well.
The advantages:
- works with and without preemption
- signals are delivered immediately
- much better host latencies without preemption
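For context, a minimal sketch of how an intercept reason of 0 might be
accounted on the KVM side. Only the exit_null counter name comes from the
diff below; the function name and its place in the intercept dispatcher are
assumptions for illustration, not part of this patch:

/*
 * Illustrative sketch only, not taken from this patch: an intercept
 * code of 0 means the low-level interrupt handler rewound the PSW out
 * of SIE because TIF work was pending, so there is nothing to emulate.
 * The function name and dispatch placement are assumed; only the
 * exit_null counter appears in the diff below.
 */
static int handle_noop(struct kvm_vcpu *vcpu)
{
        vcpu->stat.exit_null++;         /* shows up as "exit_null" in debugfs */
        return 0;                       /* nothing to emulate */
}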
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98d1e73e01f1..0ac36a649eba 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,6 +31,7 @@
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
+	{ "exit_null", VCPU_STAT(exit_null) },
 	{ "exit_validity", VCPU_STAT(exit_validity) },
 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
@@ -221,10 +222,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
-
-	if (signal_pending(current))
-		atomic_set_mask(CPUSTAT_STOP_INT,
-				&vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
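With the low-level handler forcing a prompt exit from SIE whenever TIF work
is pending, the pending-signal injection removed from kvm_arch_vcpu_load
above is no longer needed. A hedged sketch of the kind of check that the
vcpu run loop can rely on instead; the helper name and control flow here
are assumptions for illustration, not taken from this diff:

/*
 * Sketch, not part of this patch: because SIE is now left as soon as
 * TIF work is pending, a signal check in the run loop is reached
 * promptly, making the CPUSTAT_STOP_INT injection in
 * kvm_arch_vcpu_load redundant.  run_guest_once() is a hypothetical
 * helper standing in for the code that enters SIE.
 */
static int vcpu_run_loop_sketch(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        while (!rc) {
                rc = run_guest_once(vcpu);      /* hypothetical: enter SIE once */
                if (signal_pending(current)) {
                        rc = -EINTR;            /* let userspace deliver the signal */
                        break;
                }
        }
        return rc;
}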