author     Christian Borntraeger <borntraeger@de.ibm.com>   2008-05-07 03:22:53 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>      2008-05-07 03:23:01 -0400
commit     0eaeafa10f3b2bd027e95859a6785d4c7fcc174c (patch)
tree       97676107c28393326944bd3d922e7eeaa5caf942
parent     2688905e6a9b3647bf7b452cb0ff2bdb166bd8fe (diff)
[S390] s390-kvm: leave sie context on work. Removes preemption requirement
From: Martin Schwidefsky <schwidefsky@de.ibm.com>
This patch fixes a bug with CPU-bound guests on kvm-s390: sometimes it
was impossible to deliver a signal to a spinning guest. We used
preemption as a circumvention: the preemption notifiers called
vcpu_load, which checked for pending signals and triggered a host
intercept. But even with preemption, a SIGKILL was not delivered
immediately.
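
For reference, a minimal sketch of that circumvention, reconstructed from
the lines this patch removes from kvm_arch_vcpu_load() (see the kvm-s390.c
hunk below; the register-restore body is elided here):

    /* Old workaround (removed by this patch): whenever the preempt
     * notifier reloaded the vcpu, a pending signal was converted into
     * a STOP interrupt request so that SIE would eventually exit. */
    void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    {
            /* ... restore guest fp/access registers ... */
            if (signal_pending(current))
                    atomic_set_mask(CPUSTAT_STOP_INT,
                                    &vcpu->arch.sie_block->cpuflags);
    }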
This patch changes the low-level host interrupt handler to check for the
SIE instruction when TIF_WORK is set. In that case we advance the
instruction address of the return PSW past SIE, which reruns the
vcpu_run loop. The kvm code sees an intercept reason of 0 when that
happens. This patch adds accounting for this type of intercept as well.
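
A hedged C rendering of what the new entry64.S code below does
(leave_sie_if_interrupted and SIE_OPCODE are illustrative names, not
part of the patch):

    #define SIE_OPCODE 0xb214  /* first halfword of the SIE instruction */

    /* If the I/O interrupt arrived while the CPU was inside SIE, the
     * saved return PSW points back at the SIE instruction, so a plain
     * return would re-enter the guest. Advancing the address by the
     * 4-byte instruction length resumes the host after SIE instead,
     * and kvm observes an intercept code of 0. */
    static void leave_sie_if_interrupted(struct pt_regs *regs)
    {
            u16 opcode = *(u16 *) regs->psw.addr;

            if (opcode == SIE_OPCODE)
                    regs->psw.addr += 4;
    }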
The advantages:
- works with and without preemption
- signals are delivered immediately
- much better host latencies without preemption
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 arch/s390/kernel/entry64.S  | 30 +++++++++++++++++++++++++++++-
 arch/s390/kvm/Kconfig       |  1 -
 arch/s390/kvm/intercept.c   |  3 +++
 arch/s390/kvm/kvm-s390.c    |  5 +----
 include/asm-s390/kvm_host.h |  1 +
 5 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a57909d63149..fee10177dbfc 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -607,14 +607,37 @@ io_restore_trace_psw:
 #endif

 #
-# switch to kernel stack, then check TIF bits
+# There is work todo, we need to check if we return to userspace, then
+# check, if we are in SIE, if yes leave it
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 #ifndef CONFIG_PREEMPT
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	jnz	io_work_user		# yes -> no need to check for SIE
+	la	%r1, BASED(sie_opcode)	# we return to kernel here
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	io_restore		# no-> return to kernel
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+	j	io_restore		# return to kernel
+#else
 	jno	io_restore		# no-> skip resched & signal
+#endif
 #else
 	jnz	io_work_user		# yes -> do resched & signal
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	la	%r1, BASED(sie_opcode)
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	0f			# no -> leave PSW alone
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+0:
+#endif
 # check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	jnz	io_restore		# preemption is disabled
@@ -652,6 +675,11 @@ io_work_loop:
 	j	io_restore
 io_work_done:

+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+sie_opcode:
+	.long 0xb2140000
+#endif
+
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 1761b74d639b..e051cad1f1e0 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select S390_SWITCH_AMODE
-	select PREEMPT
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 349581a26103..47a0b642174c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,9 @@ static intercept_handler_t instruction_handlers[256] = {
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x0:
+		vcpu->stat.exit_null++;
+		break;
 	case 0x10:
 		vcpu->stat.exit_external_request++;
 		break;
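
Why counting the event and returning is sufficient: a simplified sketch
of the assumed host run loop (the loop shape and the sie_run() name are
illustrative, not quoted from this patch):

    /* Each SIE exit -- including the null intercept produced by the
     * PSW fix-up in entry64.S -- comes back through this loop, where
     * pending signals are checked before the guest is re-entered. A
     * SIGKILL therefore takes effect on the next iteration rather
     * than waiting for a "real" intercept. */
    rc = 0;
    while (rc == 0) {
            sie_run(vcpu);                          /* enter guest via SIE */
            rc = kvm_handle_sie_intercept(vcpu);    /* 0 => keep running */
            if (rc == 0 && signal_pending(current))
                    rc = -EINTR;                    /* exit to deliver signal */
    }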
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98d1e73e01f1..0ac36a649eba 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,6 +31,7 @@

 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
+	{ "exit_null", VCPU_STAT(exit_null) },
 	{ "exit_validity", VCPU_STAT(exit_validity) },
 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
@@ -221,10 +222,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
-
-	if (signal_pending(current))
-		atomic_set_mask(CPUSTAT_STOP_INT,
-				&vcpu->arch.sie_block->cpuflags);
 }

 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
index f8204a4f2e02..18cbd8a39796 100644
--- a/include/asm-s390/kvm_host.h
+++ b/include/asm-s390/kvm_host.h
@@ -104,6 +104,7 @@ struct sie_block {

 struct kvm_vcpu_stat {
 	u32 exit_userspace;
+	u32 exit_null;
 	u32 exit_external_request;
 	u32 exit_external_interrupt;
 	u32 exit_stop_request;