author     Christian Borntraeger <borntraeger@de.ibm.com>    2008-05-07 03:22:53 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>       2008-05-07 03:23:01 -0400
commit     0eaeafa10f3b2bd027e95859a6785d4c7fcc174c
tree       97676107c28393326944bd3d922e7eeaa5caf942 /arch/s390/kernel/entry64.S
parent     2688905e6a9b3647bf7b452cb0ff2bdb166bd8fe
[S390] s390-kvm: leave sie context on work. Removes preemption requirement
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

This patch fixes a bug with cpu-bound guests on kvm-s390: sometimes it
was impossible to deliver a signal to a spinning guest. We used
preemption as a workaround. The preemption notifiers called vcpu_load,
which checked for pending signals and triggered a host intercept. But
even with preemption, a sigkill was not delivered immediately.

This patch changes the low level host interrupt handler to check for
the SIE instruction if TIF_WORK is set. In that case the instruction
address of the return PSW is changed so that the vcpu_run loop is
rerun. The kvm code sees an intercept reason of 0 when that happens;
this patch adds accounting for this type of intercept as well.

The advantages:
- works with and without preemption
- signals are delivered immediately
- much better host latencies without preemption

Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
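To make the mechanism concrete, here is a minimal, self-contained C
sketch of the host-side effect described above. It is illustrative
only: enter_guest, icptcode, icpt0_count and vcpu_run_sketch are
stand-ins for the real kvm-s390 entry path (sie64a and its SIE control
block), not the kernel's API. The point it shows is why an intercept
reason of 0 lets the run loop notice a pending signal immediately
instead of spinning in SIE.

    #include <stdbool.h>
    #include <stdio.h>

    #define ICPT_TIF_WORK 0  /* "intercept reason 0" from the commit message */

    struct vcpu {
            int icptcode;        /* stand-in for the SIE block intercept code */
            bool signal_pending; /* stand-in for signal_pending(current) */
            long icpt0_count;    /* accounting for intercept-0 exits */
    };

    /* Stub for the real guest entry; it pretends the low level handler
     * bumped the return PSW past SIE, yielding intercept code 0. */
    static int enter_guest(struct vcpu *vcpu)
    {
            vcpu->signal_pending = true; /* e.g. a sigkill arrived meanwhile */
            return ICPT_TIF_WORK;
    }

    static int vcpu_run_sketch(struct vcpu *vcpu)
    {
            for (;;) {
                    if (vcpu->signal_pending)
                            return -1; /* leave the loop, deliver the signal */
                    vcpu->icptcode = enter_guest(vcpu);
                    if (vcpu->icptcode == ICPT_TIF_WORK)
                            vcpu->icpt0_count++; /* the added accounting */
                    /* non-zero intercept reasons would be handled here */
            }
    }

    int main(void)
    {
            struct vcpu vcpu = { 0 };
            printf("run loop returned %d after %ld intercept-0 exits\n",
                   vcpu_run_sketch(&vcpu), vcpu.icpt0_count);
            return 0;
    }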
Diffstat (limited to 'arch/s390/kernel/entry64.S')
-rw-r--r--    arch/s390/kernel/entry64.S    30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a57909d63149..fee10177dbfc 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -607,14 +607,37 @@ io_restore_trace_psw:
 #endif
 
 #
-# switch to kernel stack, then check TIF bits
+# There is work todo, we need to check if we return to userspace, then
+# check, if we are in SIE, if yes leave it
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 #ifndef CONFIG_PREEMPT
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	jnz	io_work_user		# yes -> no need to check for SIE
+	la	%r1, BASED(sie_opcode)	# we return to kernel here
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	io_restore		# no-> return to kernel
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+	j	io_restore		# return to kernel
+#else
 	jno	io_restore		# no-> skip resched & signal
+#endif
 #else
 	jnz	io_work_user		# yes -> do resched & signal
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	la	%r1, BASED(sie_opcode)
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	0f			# no -> leave PSW alone
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+0:
+#endif
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	jnz	io_restore		# preemption is disabled
@@ -652,6 +675,11 @@ io_work_loop:
 	j	io_restore
io_work_done:
 
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+sie_opcode:
+	.long	0xb2140000
+#endif
+
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
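A note on the check itself: SIE is a 4-byte instruction whose opcode
is 0xb214, which is why sie_opcode stores 0xb2140000, why clc compares
only its first two bytes against the bytes at the return PSW address,
and why aghi adds 4 to step past the instruction. Below is a minimal C
sketch of that logic; psw_addr and code are illustrative stand-ins for
SP_PSW+8(%r15) and the mapped kernel text, not kernel interfaces.

    #include <stdint.h>
    #include <stdio.h>

    /* First two bytes of the SIE instruction (opcode 0xb214), matching
     * the sie_opcode constant in the diff above. */
    static const uint8_t sie_opcode[2] = { 0xb2, 0x14 };

    /* Sketch of the assembler sequence: compare the two opcode bytes at
     * the interrupted PSW address with SIE and, on a match, advance the
     * PSW by the 4-byte instruction length so execution resumes after
     * SIE, which the run loop then sees as intercept reason 0. */
    static void leave_sie_if_needed(uint64_t *psw_addr, const uint8_t *code)
    {
            if (code[*psw_addr] == sie_opcode[0] &&
                code[*psw_addr + 1] == sie_opcode[1])
                    *psw_addr += 4; /* aghi %r1,4: skip the SIE instruction */
    }

    int main(void)
    {
            /* A SIE instruction at offset 0 of an illustrative buffer. */
            const uint8_t text[8] = { 0xb2, 0x14, 0x00, 0x00 };
            uint64_t psw = 0; /* return PSW points back at the SIE instruction */

            leave_sie_if_needed(&psw, text);
            printf("return address moved to offset %llu\n",
                   (unsigned long long)psw);
            return 0;
    }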