author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-05-07 03:22:52 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-05-07 03:23:00 -0400
commit		2688905e6a9b3647bf7b452cb0ff2bdb166bd8fe (patch)
tree		389b9c29eaaa7eb49a9e7d3bb601959fb9d27af8 /arch/s390/kernel/entry64.S
parent		c0a18111e571138747a98af18b3a2124df56a0d1 (diff)
[S390] s390: Optimize user and work TIF check
On return from a syscall or interrupt we have to check whether we return to
userspace (likely) and whether there is work to do (less likely) in order to
decide if the work must be handled. We can optimize this check: first test
for the less likely work case and only then test for userspace.

This patch is also a preparation for an additional patch that fixes a bug in
KVM dealing with cpu bound guests.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
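To make the reordering easier to follow, here is a rough C sketch of the return-path logic before and after the patch. It is illustrative only: every identifier in it (TIF_WORK_MASK, handle_work, restore_registers, return_path_old, return_path_new) is a placeholder invented for the sketch, not a kernel symbol; the authoritative change is the s390 assembly in the diff below.

/*
 * Illustrative sketch only -- all names here are made up for the example.
 */
#include <stdio.h>

#define TIF_WORK_MASK	0xffUL	/* stand-in for _TIF_WORK_SVC / _TIF_WORK_INT */

static void handle_work(void)       { puts("handle signals, resched, ..."); }
static void restore_registers(void) { puts("restore registers and return"); }

/* Old order: test "returning to user?" first, then the TIF work bits. */
static void return_path_old(int to_user, unsigned long tif_flags)
{
	if (to_user && (tif_flags & TIF_WORK_MASK))
		handle_work();
	restore_registers();
}

/*
 * New order: test the (rarely set) TIF work bits first; the user-mode
 * check moves into the slow path, so the likely case -- returning to
 * user with no work pending -- needs one test instead of two.
 */
static void return_path_new(int to_user, unsigned long tif_flags)
{
	if (tif_flags & TIF_WORK_MASK) {
		if (!to_user) {
			restore_registers();
			return;
		}
		handle_work();
	}
	restore_registers();
}

int main(void)
{
	return_path_old(1, 0);	/* likely case: return to user, no work */
	return_path_new(1, 0);
	return 0;
}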
Diffstat (limited to 'arch/s390/kernel/entry64.S')
-rw-r--r--	arch/s390/kernel/entry64.S	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5a4a7bcd2bba..a57909d63149 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -271,8 +271,6 @@ sysc_noemu:
 	stg	%r2,SP_R2(%r15)	# store return value (change R2 on stack)
 
 sysc_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	jno	sysc_restore
 	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
 	jnz	sysc_work	# there is work to do (signals etc.)
 sysc_restore:
@@ -304,6 +302,8 @@ sysc_work_loop:
 # One of the work bits is on. Find out which one.
 #
 sysc_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	jno	sysc_restore
 	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
@@ -585,12 +585,6 @@ io_no_vtime:
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	brasl	%r14,do_IRQ	# call standard irq handler
 io_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifdef CONFIG_PREEMPT
-	jno	io_preempt	# no -> check for preemptive scheduling
-#else
-	jno	io_restore	# no-> skip resched & signal
-#endif
 	tm	__TI_flags+7(%r9),_TIF_WORK_INT
 	jnz	io_work	# there is work to do (signals etc.)
 io_restore:
@@ -612,10 +606,18 @@ io_restore_trace_psw:
 	.quad	0, io_restore_trace
 #endif
 
-#ifdef CONFIG_PREEMPT
-io_preempt:
+#
+# switch to kernel stack, then check TIF bits
+#
+io_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+#ifndef CONFIG_PREEMPT
+	jno	io_restore	# no-> skip resched & signal
+#else
+	jnz	io_work_user	# yes -> do resched & signal
+	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
-	jnz	io_restore
+	jnz	io_restore	# preemption is disabled
 	# switch to kernel stack
 	lg	%r1,SP_R15(%r15)
 	aghi	%r1,-SP_SIZE
@@ -629,10 +631,7 @@ io_resume_loop:
 	jg	preempt_schedule_irq
 #endif
 
-#
-# switch to kernel stack, then check TIF bits
-#
-io_work:
+io_work_user:
 	lg	%r1,__LC_KERNEL_STACK
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)