path: root/arch/s390/kvm
author	Christian Borntraeger <borntraeger@de.ibm.com>	2008-05-07 03:22:53 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-05-07 03:23:01 -0400
commit	0eaeafa10f3b2bd027e95859a6785d4c7fcc174c (patch)
tree	97676107c28393326944bd3d922e7eeaa5caf942 /arch/s390/kvm
parent	2688905e6a9b3647bf7b452cb0ff2bdb166bd8fe (diff)
[S390] s390-kvm: leave sie context on work. Removes preemption requirement
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

This patch fixes a bug with cpu bound guest on kvm-s390. Sometimes it was
impossible to deliver a signal to a spinning guest. We used preemption as a
circumvention. The preemption notifiers called vcpu_load, which checked for
pending signals and triggered a host intercept. But even with preemption, a
sigkill was not delivered immediately.

This patch changes the low level host interrupt handler to check for the
SIE instruction, if TIF_WORK is set. In that case we change the instruction
pointer of the return PSW to rerun the vcpu_run loop. The kvm code sees an
intercept reason 0 if that happens. This patch adds accounting for these
types of intercept as well.

The advantages:
- works with and without preemption
- signals are delivered immediately
- much better host latencies without preemption

Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
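The change to the low level host interrupt handler itself lives outside arch/s390/kvm, so it is not part of the diff below. As a rough user-space sketch of the rewind idea only, not the kernel code: every name here (fake_psw, sie_insn_start, sie_insn_end, sie_loop_entry, tif_work_pending, handle_irq_exit) is a hypothetical stand-in, not a kernel symbol.

/*
 * Minimal model: if work is pending and the interrupted address lies
 * inside the (pretend) SIE instruction, rewind the return address so
 * execution resumes in the host loop instead of re-entering the guest.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_psw {
	uint64_t addr;                        /* return address of the interrupted context */
};

static const uint64_t sie_insn_start = 0x1000;  /* pretend address of the SIE instruction */
static const uint64_t sie_insn_end   = 0x1006;
static const uint64_t sie_loop_entry = 0x0ff0;  /* pretend re-entry point of the vcpu_run loop */

static bool tif_work_pending = true;            /* e.g. a signal arrived for the host thread */

static void handle_irq_exit(struct fake_psw *psw)
{
	if (tif_work_pending &&
	    psw->addr >= sie_insn_start && psw->addr < sie_insn_end)
		psw->addr = sie_loop_entry;     /* leave SIE, resume in the host loop */
}

int main(void)
{
	struct fake_psw psw = { .addr = 0x1002 };   /* interrupted while inside SIE */

	handle_irq_exit(&psw);
	printf("return address after interrupt: 0x%llx\n",
	       (unsigned long long)psw.addr);
	return 0;
}

In the kernel, the loop re-entered this way sees an intercept code of 0, which the hunks below account for as exit_null.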
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/Kconfig	1
-rw-r--r--	arch/s390/kvm/intercept.c	3
-rw-r--r--	arch/s390/kvm/kvm-s390.c	5
3 files changed, 4 insertions, 5 deletions
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 1761b74d639b..e051cad1f1e0 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select S390_SWITCH_AMODE
-	select PREEMPT
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 349581a26103..47a0b642174c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,9 @@ static intercept_handler_t instruction_handlers[256] = {
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x0:
+		vcpu->stat.exit_null++;
+		break;
 	case 0x10:
 		vcpu->stat.exit_external_request++;
 		break;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98d1e73e01f1..0ac36a649eba 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,6 +31,7 @@
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
+	{ "exit_null", VCPU_STAT(exit_null) },
 	{ "exit_validity", VCPU_STAT(exit_validity) },
 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
@@ -221,10 +222,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
-
-	if (signal_pending(current))
-		atomic_set_mask(CPUSTAT_STOP_INT,
-				&vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)