author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2010-05-17 04:00:01 -0400
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2010-05-17 04:00:15 -0400
commit		43d399d2ab7e96cb8d952d0ba4e9131587b7c8b9 (patch)
tree		3b5c651e8cc1cdbde50a846ace4500aebcfe5ea2 /arch/s390
parent		94038a99119c171aea27608f81c7ba359de98c4e (diff)
[S390] cleanup sysc_work and io_work code
Clean up the #ifdef mess at io_work in entry[64].S and streamline the TIF work code of the system call and io exit paths.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kernel/entry.S	 75
-rw-r--r--	arch/s390/kernel/entry64.S	108
2 files changed, 64 insertions(+), 119 deletions(-)
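Editorial note (not part of the patch): the reworked exit paths boil down to the control flow sketched below in plain C, added here only as an orientation aid before the assembly diffs. The flag values, the tif_flags variable and the stub handlers are hypothetical stand-ins for the real kernel symbols; the actual logic lives in entry.S/entry64.S. The shape is: decide first whether we return to user space, then loop over the TIF work bits (re-entering the loop after each handler so newly raised work is noticed), and finally leave through sysc_return/io_return so the critical section cleanup only needs a single exit point.

	/*
	 * Illustrative sketch only -- NOT kernel code. Models the streamlined
	 * io_work/sysc_work flow: user-space check first, then a TIF work loop.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define TIF_MCCK_PENDING  (1u << 0)   /* hypothetical bit values */
	#define TIF_NEED_RESCHED  (1u << 1)
	#define TIF_SIGPENDING    (1u << 2)
	#define TIF_NOTIFY_RESUME (1u << 3)

	static unsigned int tif_flags = TIF_NEED_RESCHED | TIF_SIGPENDING;

	/* Stub "handlers": each clears its bit, as the real handlers eventually do. */
	static void handle_mcck(void)   { puts("mcck");     tif_flags &= ~TIF_MCCK_PENDING; }
	static void do_schedule(void)   { puts("schedule"); tif_flags &= ~TIF_NEED_RESCHED; }
	static void do_signal(void)     { puts("signal");   tif_flags &= ~TIF_SIGPENDING; }
	static void notify_resume(void) { puts("notify");   tif_flags &= ~TIF_NOTIFY_RESUME; }

	static void io_work(bool returning_to_user)
	{
		if (!returning_to_user)
			return;   /* kernel return: only preemption is considered */
		/*
		 * io_work_loop: each handler returns here so newly raised work
		 * is seen; when no bit is left we fall through to io_return.
		 */
		for (;;) {
			if (tif_flags & TIF_MCCK_PENDING)       handle_mcck();
			else if (tif_flags & TIF_NEED_RESCHED)  do_schedule();
			else if (tif_flags & TIF_SIGPENDING)    do_signal();
			else if (tif_flags & TIF_NOTIFY_RESUME) notify_resume();
			else break;                             /* j io_return */
		}
	}

	int main(void)
	{
		io_work(true);
		return 0;
	}

Because the loop itself terminates when no bit is left, the old explicit _TIF_WORK_SVC/_TIF_WORK_INT pre-checks and the sysc_work_done/io_work_done labels can go away, which is what the diffs below do.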
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a..ffebfb64b91 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -301,31 +301,29 @@ sysc_restore_trace_psw:
 #endif

 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
-	bz	BASED(sysc_restore)	# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	bno	BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(sysc_sigpending)
+	bo	BASED(sysc_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(sysc_notify_resume)
+	bo	BASED(sysc_notify_resume)
 	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_restore)
-sysc_work_done:
+	b	BASED(sysc_return)	# beware of critical section cleanup

 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -386,7 +384,7 @@ sysc_singlestep:
 	mvi	SP_SVCNR+1(%r15),0xff
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_return)	# load adr. of system return
+	la	%r14,BASED(sysc_work_loop)	# load adr. of system return
 	br	%r1			# branch to do_single_step

 #
@@ -636,30 +634,36 @@ io_restore_trace_psw:
 #endif

 #
-# switch to kernel stack, then check the TIF bits
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifndef CONFIG_PREEMPT
-	bno	BASED(io_restore)	# no-> skip resched & signal
-#else
-	bnz	BASED(io_work_user)	# no -> check for preemptive scheduling
+	bo	BASED(io_work_user)	# yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	bnz	BASED(io_restore)	# preemption disabled
+	# switch to kernel stack
 	l	%r1,SP_R15(%r15)
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 io_resume_loop:
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
-	bno	BASED(io_restore)
 	l	%r1,BASED(.Lpreempt_schedule_irq)
 	la	%r14,BASED(io_resume_loop)
-	br	%r1			# call schedule
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bor	%r1			# call preempt_schedule_irq
 #endif
+	b	BASED(io_restore)

+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
 io_work_user:
 	l	%r1,__LC_KERNEL_STACK
 	s	%r1,BASED(.Lc_spsize)
@@ -668,7 +672,7 @@ io_work_user:
 	lr	%r15,%r1
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 # and _TIF_MCCK_PENDING
 #
 io_work_loop:
@@ -677,11 +681,10 @@ io_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(io_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(io_sigpending)
+	bo	BASED(io_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(io_notify_resume)
-	b	BASED(io_restore)
-io_work_done:
+	bo	BASED(io_notify_resume)
+	b	BASED(io_return)	# beware of critical section cleanup

 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -701,8 +704,6 @@ io_reschedule:
 	basr	%r14,%r1		# call scheduler
 	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	tm	__TI_flags+3(%r9),_TIF_WORK_INT
-	bz	BASED(io_restore)	# there is no work to do
 	b	BASED(io_work_loop)

 #
@@ -921,14 +922,10 @@ cleanup_table_sysc_return:
 	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
 cleanup_table_sysc_leave:
 	.long	sysc_leave + 0x80000000, sysc_done + 0x80000000
-cleanup_table_sysc_work_loop:
-	.long	sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
 cleanup_table_io_return:
 	.long	io_return + 0x80000000, io_leave + 0x80000000
 cleanup_table_io_leave:
 	.long	io_leave + 0x80000000, io_done + 0x80000000
-cleanup_table_io_work_loop:
-	.long	io_work_loop + 0x80000000, io_work_done + 0x80000000

 cleanup_critical:
 	clc	4(4,%r12),BASED(cleanup_table_system_call)
@@ -946,11 +943,6 @@ cleanup_critical:
 	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
 	bl	BASED(cleanup_sysc_leave)
 0:
-	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
-	bl	BASED(cleanup_sysc_return)
-0:
 	clc	4(4,%r12),BASED(cleanup_table_io_return)
 	bl	BASED(0f)
 	clc	4(4,%r12),BASED(cleanup_table_io_return+4)
@@ -961,11 +953,6 @@ cleanup_critical:
 	clc	4(4,%r12),BASED(cleanup_table_io_leave+4)
 	bl	BASED(cleanup_io_leave)
 0:
-	clc	4(4,%r12),BASED(cleanup_table_io_work_loop)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_io_work_loop+4)
-	bl	BASED(cleanup_io_work_loop)
-0:
 	br	%r14

 cleanup_system_call:
@@ -1043,12 +1030,6 @@ cleanup_io_return:
 	la	%r12,__LC_RETURN_PSW
 	br	%r14

-cleanup_io_work_loop:
-	mvc	__LC_RETURN_PSW(4),0(%r12)
-	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
-	la	%r12,__LC_RETURN_PSW
-	br	%r14
-
 cleanup_io_leave:
 	clc	4(4,%r12),BASED(cleanup_io_leave_insn)
 	be	BASED(2f)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 52106d53271..ca02b10a2c3 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -291,38 +291,36 @@ sysc_restore_trace_psw:
 #endif

 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
-	jz	sysc_restore		# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	jno	sysc_restore
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
 	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo	sysc_reschedule
 	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz	sysc_sigpending
+	jo	sysc_sigpending
 	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz	sysc_notify_resume
+	jo	sysc_notify_resume
 	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
 	jo	sysc_restart
 	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
 	jo	sysc_singlestep
-	j	sysc_restore
-sysc_work_done:
+	j	sysc_return		# beware of critical section cleanup

 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 sysc_reschedule:
 	larl	%r14,sysc_work_loop
-	jg	schedule		# return point is sysc_return
+	jg	schedule		# return point is sysc_work_loop

 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -369,7 +367,7 @@ sysc_singlestep:
 	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP	# clear TIF_SINGLE_STEP
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	larl	%r14,sysc_return	# load adr. of system return
+	larl	%r14,sysc_work_loop	# load adr. of system return
 	jg	do_single_step		# branch to do_sigtrap

 #
@@ -605,37 +603,27 @@ io_restore_trace_psw:
 #endif

 #
-# There is work todo, we need to check if we return to userspace, then
-# check, if we are in SIE, if yes leave it
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+#    modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifndef CONFIG_PREEMPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	jnz	io_work_user		# yes -> no need to check for SIE
-	la	%r1, BASED(sie_opcode)	# we return to kernel here
-	lg	%r2, SP_PSW+8(%r15)
-	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
-	jne	io_restore		# no-> return to kernel
-	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
-	aghi	%r1, 4
-	stg	%r1, SP_PSW+8(%r15)
-	j	io_restore		# return to kernel
-#else
-	jno	io_restore		# no-> skip resched & signal
-#endif
-#else
-	jnz	io_work_user		# yes -> do resched & signal
+	jo	io_work_user		# yes -> do resched & signal
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	la	%r1, BASED(sie_opcode)
-	lg	%r2, SP_PSW+8(%r15)
-	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	lg	%r2,SP_PSW+8(%r15)	# check if current instruction is SIE
+	lh	%r1,0(%r2)
+	chi	%r1,-19948		# signed 16 bit compare with 0xb214
 	jne	0f			# no -> leave PSW alone
-	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
-	aghi	%r1, 4
-	stg	%r1, SP_PSW+8(%r15)
+	aghi	%r2,4			# yes-> add 4 bytes to leave SIE
+	stg	%r2,SP_PSW+8(%r15)
 0:
 #endif
+#ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	jnz	io_restore		# preemption is disabled
@@ -646,21 +634,25 @@ io_work:
 	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
 io_resume_loop:
-	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
-	jno	io_restore
 	larl	%r14,io_resume_loop
-	jg	preempt_schedule_irq
+	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
+	jgo	preempt_schedule_irq
 #endif
+	j	io_restore

+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
 io_work_user:
 	lg	%r1,__LC_KERNEL_STACK
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
+
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 # and _TIF_MCCK_PENDING
 #
 io_work_loop:
@@ -669,16 +661,10 @@ io_work_loop:
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo	io_reschedule
 	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz	io_sigpending
+	jo	io_sigpending
 	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz	io_notify_resume
-	j	io_restore
-io_work_done:
-
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-sie_opcode:
-	.long	0xb2140000
-#endif
+	jo	io_notify_resume
+	j	io_return		# beware of critical section cleanup

 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -696,8 +682,6 @@ io_reschedule:
 	brasl	%r14,schedule		# call scheduler
 	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	tm	__TI_flags+7(%r9),_TIF_WORK_INT
-	jz	io_restore		# there is no work to do
 	j	io_work_loop

 #
@@ -903,14 +887,10 @@ cleanup_table_sysc_return:
 	.quad	sysc_return, sysc_leave
 cleanup_table_sysc_leave:
 	.quad	sysc_leave, sysc_done
-cleanup_table_sysc_work_loop:
-	.quad	sysc_work_loop, sysc_work_done
 cleanup_table_io_return:
 	.quad	io_return, io_leave
 cleanup_table_io_leave:
 	.quad	io_leave, io_done
-cleanup_table_io_work_loop:
-	.quad	io_work_loop, io_work_done

 cleanup_critical:
 	clc	8(8,%r12),BASED(cleanup_table_system_call)
@@ -928,11 +908,6 @@ cleanup_critical:
 	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
 	jl	cleanup_sysc_leave
 0:
-	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
-	jl	cleanup_sysc_return
-0:
 	clc	8(8,%r12),BASED(cleanup_table_io_return)
 	jl	0f
 	clc	8(8,%r12),BASED(cleanup_table_io_return+8)
@@ -943,11 +918,6 @@ cleanup_critical:
 	clc	8(8,%r12),BASED(cleanup_table_io_leave+8)
 	jl	cleanup_io_leave
 0:
-	clc	8(8,%r12),BASED(cleanup_table_io_work_loop)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_io_work_loop+8)
-	jl	cleanup_io_work_loop
-0:
 	br	%r14

 cleanup_system_call:
@@ -1025,12 +995,6 @@ cleanup_io_return:
 	la	%r12,__LC_RETURN_PSW
 	br	%r14

-cleanup_io_work_loop:
-	mvc	__LC_RETURN_PSW(8),0(%r12)
-	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
-	la	%r12,__LC_RETURN_PSW
-	br	%r14
-
 cleanup_io_leave:
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn)
 	je	3f
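Editorial note on the SIE check in the entry64.S io_work hunk above: the patch drops the sie_opcode literal and instead loads the first halfword of the interrupted instruction with lh, which sign-extends, and compares it against the signed 16-bit immediate -19948. That constant is simply the SIE opcode 0xb214 reinterpreted as a signed halfword (0xb214 = 45588, and 45588 - 65536 = -19948). The small host-side C check below, which is not kernel code and only assumes the usual two's-complement narrowing, demonstrates that arithmetic.

	/* Demonstrates why "chi %r1,-19948" matches the SIE opcode 0xb214. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t sie_opcode = 0xb214;            /* first halfword of SIE */
		int16_t as_signed = (int16_t)sie_opcode; /* what lh leaves in the register */

		assert(as_signed == -19948);             /* 0xb214 - 0x10000 */
		printf("0x%04x as signed 16 bit = %d\n", sie_opcode, as_signed);
		return 0;
	}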