diff options
author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2010-05-17 04:00:01 -0400 |
---|---|---|
committer | Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com> | 2010-05-17 04:00:15 -0400 |
commit | 43d399d2ab7e96cb8d952d0ba4e9131587b7c8b9 (patch) | |
tree | 3b5c651e8cc1cdbde50a846ace4500aebcfe5ea2 /arch/s390/kernel/entry64.S | |
parent | 94038a99119c171aea27608f81c7ba359de98c4e (diff) |
[S390] cleanup sysc_work and io_work code
Clean up the #ifdef mess at io_work in entry[64].S and streamline the
TIF work code of the system call and io exit path.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/entry64.S')
-rw-r--r-- | arch/s390/kernel/entry64.S | 108 |
1 file changed, 36 insertions(+), 72 deletions(-)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 52106d53271c..ca02b10a2c32 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -291,38 +291,36 @@ sysc_restore_trace_psw: | |||
291 | #endif | 291 | #endif |
292 | 292 | ||
293 | # | 293 | # |
294 | # recheck if there is more work to do | 294 | # There is work to do, but first we need to check if we return to userspace. |
295 | # | ||
296 | sysc_work_loop: | ||
297 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | ||
298 | jz sysc_restore # there is no work to do | ||
299 | # | ||
300 | # One of the work bits is on. Find out which one. | ||
301 | # | 295 | # |
302 | sysc_work: | 296 | sysc_work: |
303 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 297 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
304 | jno sysc_restore | 298 | jno sysc_restore |
299 | |||
300 | # | ||
301 | # One of the work bits is on. Find out which one. | ||
302 | # | ||
303 | sysc_work_loop: | ||
305 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING | 304 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING |
306 | jo sysc_mcck_pending | 305 | jo sysc_mcck_pending |
307 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | 306 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED |
308 | jo sysc_reschedule | 307 | jo sysc_reschedule |
309 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | 308 | tm __TI_flags+7(%r9),_TIF_SIGPENDING |
310 | jnz sysc_sigpending | 309 | jo sysc_sigpending |
311 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME | 310 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME |
312 | jnz sysc_notify_resume | 311 | jo sysc_notify_resume |
313 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC | 312 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC |
314 | jo sysc_restart | 313 | jo sysc_restart |
315 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP | 314 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP |
316 | jo sysc_singlestep | 315 | jo sysc_singlestep |
317 | j sysc_restore | 316 | j sysc_return # beware of critical section cleanup |
318 | sysc_work_done: | ||
319 | 317 | ||
320 | # | 318 | # |
321 | # _TIF_NEED_RESCHED is set, call schedule | 319 | # _TIF_NEED_RESCHED is set, call schedule |
322 | # | 320 | # |
323 | sysc_reschedule: | 321 | sysc_reschedule: |
324 | larl %r14,sysc_work_loop | 322 | larl %r14,sysc_work_loop |
325 | jg schedule # return point is sysc_return | 323 | jg schedule # return point is sysc_work_loop |
326 | 324 | ||
327 | # | 325 | # |
328 | # _TIF_MCCK_PENDING is set, call handler | 326 | # _TIF_MCCK_PENDING is set, call handler |
@@ -369,7 +367,7 @@ sysc_singlestep: | |||
369 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | 367 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP |
370 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | 368 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number |
371 | la %r2,SP_PTREGS(%r15) # address of register-save area | 369 | la %r2,SP_PTREGS(%r15) # address of register-save area |
372 | larl %r14,sysc_return # load adr. of system return | 370 | larl %r14,sysc_work_loop # load adr. of system return |
373 | jg do_single_step # branch to do_sigtrap | 371 | jg do_single_step # branch to do_sigtrap |
374 | 372 | ||
375 | # | 373 | # |
@@ -605,37 +603,27 @@ io_restore_trace_psw: | |||
605 | #endif | 603 | #endif |
606 | 604 | ||
607 | # | 605 | # |
608 | # There is work todo, we need to check if we return to userspace, then | 606 | # There is work todo, find out in which context we have been interrupted: |
609 | # check, if we are in SIE, if yes leave it | 607 | # 1) if we return to user space we can do all _TIF_WORK_INT work |
608 | # 2) if we return to kernel code and kvm is enabled check if we need to | ||
609 | # modify the psw to leave SIE | ||
610 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
611 | # the preemption counter and if it is zero call preempt_schedule_irq | ||
612 | # Before any work can be done, a switch to the kernel stack is required. | ||
610 | # | 613 | # |
611 | io_work: | 614 | io_work: |
612 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 615 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
613 | #ifndef CONFIG_PREEMPT | 616 | jo io_work_user # yes -> do resched & signal |
614 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
615 | jnz io_work_user # yes -> no need to check for SIE | ||
616 | la %r1, BASED(sie_opcode) # we return to kernel here | ||
617 | lg %r2, SP_PSW+8(%r15) | ||
618 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | ||
619 | jne io_restore # no-> return to kernel | ||
620 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | ||
621 | aghi %r1, 4 | ||
622 | stg %r1, SP_PSW+8(%r15) | ||
623 | j io_restore # return to kernel | ||
624 | #else | ||
625 | jno io_restore # no-> skip resched & signal | ||
626 | #endif | ||
627 | #else | ||
628 | jnz io_work_user # yes -> do resched & signal | ||
629 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 617 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
630 | la %r1, BASED(sie_opcode) | 618 | lg %r2,SP_PSW+8(%r15) # check if current instruction is SIE |
631 | lg %r2, SP_PSW+8(%r15) | 619 | lh %r1,0(%r2) |
632 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | 620 | chi %r1,-19948 # signed 16 bit compare with 0xb214 |
633 | jne 0f # no -> leave PSW alone | 621 | jne 0f # no -> leave PSW alone |
634 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | 622 | aghi %r2,4 # yes-> add 4 bytes to leave SIE |
635 | aghi %r1, 4 | 623 | stg %r2,SP_PSW+8(%r15) |
636 | stg %r1, SP_PSW+8(%r15) | ||
637 | 0: | 624 | 0: |
638 | #endif | 625 | #endif |
626 | #ifdef CONFIG_PREEMPT | ||
639 | # check for preemptive scheduling | 627 | # check for preemptive scheduling |
640 | icm %r0,15,__TI_precount(%r9) | 628 | icm %r0,15,__TI_precount(%r9) |
641 | jnz io_restore # preemption is disabled | 629 | jnz io_restore # preemption is disabled |
@@ -646,21 +634,25 @@ io_work: | |||
646 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 634 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
647 | lgr %r15,%r1 | 635 | lgr %r15,%r1 |
648 | io_resume_loop: | 636 | io_resume_loop: |
649 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | ||
650 | jno io_restore | ||
651 | larl %r14,io_resume_loop | 637 | larl %r14,io_resume_loop |
652 | jg preempt_schedule_irq | 638 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
639 | jgo preempt_schedule_irq | ||
653 | #endif | 640 | #endif |
641 | j io_restore | ||
654 | 642 | ||
643 | # | ||
644 | # Need to do work before returning to userspace, switch to kernel stack | ||
645 | # | ||
655 | io_work_user: | 646 | io_work_user: |
656 | lg %r1,__LC_KERNEL_STACK | 647 | lg %r1,__LC_KERNEL_STACK |
657 | aghi %r1,-SP_SIZE | 648 | aghi %r1,-SP_SIZE |
658 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 649 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) |
659 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 650 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
660 | lgr %r15,%r1 | 651 | lgr %r15,%r1 |
652 | |||
661 | # | 653 | # |
662 | # One of the work bits is on. Find out which one. | 654 | # One of the work bits is on. Find out which one. |
663 | # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED | 655 | # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED |
664 | # and _TIF_MCCK_PENDING | 656 | # and _TIF_MCCK_PENDING |
665 | # | 657 | # |
666 | io_work_loop: | 658 | io_work_loop: |
@@ -669,16 +661,10 @@ io_work_loop: | |||
669 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | 661 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED |
670 | jo io_reschedule | 662 | jo io_reschedule |
671 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | 663 | tm __TI_flags+7(%r9),_TIF_SIGPENDING |
672 | jnz io_sigpending | 664 | jo io_sigpending |
673 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME | 665 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME |
674 | jnz io_notify_resume | 666 | jo io_notify_resume |
675 | j io_restore | 667 | j io_return # beware of critical section cleanup |
676 | io_work_done: | ||
677 | |||
678 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
679 | sie_opcode: | ||
680 | .long 0xb2140000 | ||
681 | #endif | ||
682 | 668 | ||
683 | # | 669 | # |
684 | # _TIF_MCCK_PENDING is set, call handler | 670 | # _TIF_MCCK_PENDING is set, call handler |
@@ -696,8 +682,6 @@ io_reschedule: | |||
696 | brasl %r14,schedule # call scheduler | 682 | brasl %r14,schedule # call scheduler |
697 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 683 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
698 | TRACE_IRQS_OFF | 684 | TRACE_IRQS_OFF |
699 | tm __TI_flags+7(%r9),_TIF_WORK_INT | ||
700 | jz io_restore # there is no work to do | ||
701 | j io_work_loop | 685 | j io_work_loop |
702 | 686 | ||
703 | # | 687 | # |
@@ -903,14 +887,10 @@ cleanup_table_sysc_return: | |||
903 | .quad sysc_return, sysc_leave | 887 | .quad sysc_return, sysc_leave |
904 | cleanup_table_sysc_leave: | 888 | cleanup_table_sysc_leave: |
905 | .quad sysc_leave, sysc_done | 889 | .quad sysc_leave, sysc_done |
906 | cleanup_table_sysc_work_loop: | ||
907 | .quad sysc_work_loop, sysc_work_done | ||
908 | cleanup_table_io_return: | 890 | cleanup_table_io_return: |
909 | .quad io_return, io_leave | 891 | .quad io_return, io_leave |
910 | cleanup_table_io_leave: | 892 | cleanup_table_io_leave: |
911 | .quad io_leave, io_done | 893 | .quad io_leave, io_done |
912 | cleanup_table_io_work_loop: | ||
913 | .quad io_work_loop, io_work_done | ||
914 | 894 | ||
915 | cleanup_critical: | 895 | cleanup_critical: |
916 | clc 8(8,%r12),BASED(cleanup_table_system_call) | 896 | clc 8(8,%r12),BASED(cleanup_table_system_call) |
@@ -928,11 +908,6 @@ cleanup_critical: | |||
928 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) | 908 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) |
929 | jl cleanup_sysc_leave | 909 | jl cleanup_sysc_leave |
930 | 0: | 910 | 0: |
931 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) | ||
932 | jl 0f | ||
933 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) | ||
934 | jl cleanup_sysc_return | ||
935 | 0: | ||
936 | clc 8(8,%r12),BASED(cleanup_table_io_return) | 911 | clc 8(8,%r12),BASED(cleanup_table_io_return) |
937 | jl 0f | 912 | jl 0f |
938 | clc 8(8,%r12),BASED(cleanup_table_io_return+8) | 913 | clc 8(8,%r12),BASED(cleanup_table_io_return+8) |
@@ -943,11 +918,6 @@ cleanup_critical: | |||
943 | clc 8(8,%r12),BASED(cleanup_table_io_leave+8) | 918 | clc 8(8,%r12),BASED(cleanup_table_io_leave+8) |
944 | jl cleanup_io_leave | 919 | jl cleanup_io_leave |
945 | 0: | 920 | 0: |
946 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop) | ||
947 | jl 0f | ||
948 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) | ||
949 | jl cleanup_io_work_loop | ||
950 | 0: | ||
951 | br %r14 | 921 | br %r14 |
952 | 922 | ||
953 | cleanup_system_call: | 923 | cleanup_system_call: |
@@ -1025,12 +995,6 @@ cleanup_io_return: | |||
1025 | la %r12,__LC_RETURN_PSW | 995 | la %r12,__LC_RETURN_PSW |
1026 | br %r14 | 996 | br %r14 |
1027 | 997 | ||
1028 | cleanup_io_work_loop: | ||
1029 | mvc __LC_RETURN_PSW(8),0(%r12) | ||
1030 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) | ||
1031 | la %r12,__LC_RETURN_PSW | ||
1032 | br %r14 | ||
1033 | |||
1034 | cleanup_io_leave: | 998 | cleanup_io_leave: |
1035 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) | 999 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) |
1036 | je 3f | 1000 | je 3f |