Diffstat (limited to 'arch/s390/kernel/entry64.S')
-rw-r--r--	arch/s390/kernel/entry64.S	65
1 file changed, 62 insertions(+), 3 deletions(-)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e33789a45752..4e1c292fa7e3 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -489,7 +489,6 @@ io_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd		# clear wait state bit
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -631,6 +630,24 @@ ext_skip:
 	brasl	%r14,do_extint
 	j	io_return
 
+/*
+ * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ */
+ENTRY(psw_idle)
+	stg	%r4,__SF_EMPTY(%r15)
+	larl	%r1,psw_idle_lpsw+4
+	stg	%r1,__SF_EMPTY+8(%r15)
+	larl	%r1,.Lvtimer_max
+	stck	__IDLE_ENTER(%r2)
+	ltr	%r5,%r5
+	stpt	__VQ_IDLE_ENTER(%r3)
+	jz	psw_idle_lpsw
+	spt	0(%r1)
+psw_idle_lpsw:
+	lpswe	__SF_EMPTY(%r15)
+	br	%r14
+psw_idle_end:
+
 __critical_end:
 
 /*
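Note: the new psw_idle entry point takes its arguments in %r2-%r5 per the s390 calling convention: a pointer to the per-cpu idle data (%r2, written through the __IDLE_* offsets), a pointer to the vtimer queue (%r3, __VQ_* offsets), the wait PSW mask to load (%r4), and a flag in %r5 that decides whether the CPU timer is reprogrammed to .Lvtimer_max before the wait PSW is loaded. The C prototype and call site are not part of this diff; the sketch below only shows an inferred shape of the interface, with illustrative type and parameter names.

/*
 * Inferred caller shape for psw_idle -- illustrative only.  The struct
 * names and the wrapper are assumptions; only the register usage
 * (%r2..%r5) is taken from the assembly above.
 */
struct s390_idle_data;		/* accessed via __IDLE_ENTER/__IDLE_EXIT */
struct vtimer_queue;		/* accessed via __VQ_IDLE_ENTER/__VQ_IDLE_EXIT */

void psw_idle(struct s390_idle_data *idle, struct vtimer_queue *vq,
	      unsigned long psw_mask, int stop_timer);

static inline void enter_enabled_wait(struct s390_idle_data *idle,
				      struct vtimer_queue *vq,
				      unsigned long psw_mask)
{
	/* psw_mask must already carry the enabled-wait bits; psw_idle
	 * supplies the continuation address (psw_idle_lpsw+4) itself and
	 * returns via br %r14 after an interrupt ends the wait state. */
	psw_idle(idle, vq, psw_mask, 1);
}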
@@ -696,7 +713,6 @@ mcck_return:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11)	# move return PSW
-	ni	__LC_RETURN_MCCK_PSW+1,0xfd	# clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
 	jno	0f
 	stpt	__LC_EXIT_TIMER
@@ -770,6 +786,8 @@ cleanup_table:
 	.quad	io_tif
 	.quad	io_restore
 	.quad	io_done
+	.quad	psw_idle
+	.quad	psw_idle_end
 
 cleanup_critical:
 	clg	%r9,BASED(cleanup_table)	# system_call
@@ -788,6 +806,10 @@ cleanup_critical:
 	jl	cleanup_io_tif
 	clg	%r9,BASED(cleanup_table+56)	# io_done
 	jl	cleanup_io_restore
+	clg	%r9,BASED(cleanup_table+64)	# psw_idle
+	jl	0f
+	clg	%r9,BASED(cleanup_table+72)	# psw_idle_end
+	jl	cleanup_idle
 0:	br	%r14
 
 
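Note: the two added compares extend the critical-section fixup dispatch. %r9 holds the address at which the critical section was interrupted, and the two new cleanup_table entries bound the psw_idle body. A C-like sketch of the check (names are illustrative):

/*
 * Sketch of the new range check in cleanup_critical.  "addr" stands for
 * the interrupted address held in %r9; the bounds are the cleanup_table
 * entries at offsets 64 and 72.
 */
static int interrupted_in_psw_idle(unsigned long addr,
				   unsigned long psw_idle_start,
				   unsigned long psw_idle_end)
{
	if (addr < psw_idle_start)
		return 0;	/* jl 0f: below the range, nothing to do */
	if (addr < psw_idle_end)
		return 1;	/* jl cleanup_idle: run the idle fixup */
	return 0;		/* falls through to 0: br %r14 */
}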
@@ -877,7 +899,6 @@ cleanup_io_restore:
 	je	0f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 0:	lmg	%r8,%r9,__LC_RETURN_PSW
@@ -885,6 +906,42 @@ cleanup_io_restore:
 cleanup_io_restore_insn:
 	.quad	io_done - 4
 
+cleanup_idle:
+	# copy interrupt clock & cpu timer
+	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+0:	# check if stck & stpt have been executed
+	clg	%r9,BASED(cleanup_idle_insn)
+	jhe	1f
+	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
+	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
+	j	2f
+1:	# check if the cpu timer has been reprogrammed
+	ltr	%r5,%r5
+	jz	2f
+	spt	__VQ_IDLE_ENTER(%r3)
+2:	# account system time going idle
+	lg	%r9,__LC_STEAL_TIMER
+	alg	%r9,__IDLE_ENTER(%r2)
+	slg	%r9,__LC_LAST_UPDATE_CLOCK
+	stg	%r9,__LC_STEAL_TIMER
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	lg	%r9,__LC_SYSTEM_TIMER
+	alg	%r9,__LC_LAST_UPDATE_TIMER
+	slg	%r9,__VQ_IDLE_ENTER(%r3)
+	stg	%r9,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	# prepare return psw
+	nihh	%r8,0xfffd		# clear wait state bit
+	lg	%r9,48(%r11)		# return from psw_idle
+	br	%r14
+cleanup_idle_insn:
+	.quad	psw_idle_lpsw
+
 /*
  * Integer constants
  */
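Note: cleanup_idle is the fixup run when an interrupt hits inside psw_idle. It captures the exit clock and exit CPU timer from the lowcore (using the machine-check clock and timer when the cleanup was not entered through the asynchronous save area), copies the exit values over the enter values if the interrupt arrived before stck/stpt had executed, optionally restores the CPU timer, and then accounts the pre-idle interval before clearing the wait bit in the return PSW. The block labelled "2:" reduces to the arithmetic below; the C model is illustrative, with made-up struct and parameter names standing in for the __LC_*, __IDLE_* and __VQ_* offsets.

/*
 * Minimal C model of the accounting block labelled "2:" above.  The
 * struct and field names are illustrative; the real values live in the
 * lowcore and in the idle/vtimer structures addressed through offsets.
 * The arithmetic mirrors the lg/alg/slg/stg and mvc sequence one to one.
 */
#include <stdint.h>

struct idle_accounting {
	uint64_t steal_timer;		/* __LC_STEAL_TIMER */
	uint64_t system_timer;		/* __LC_SYSTEM_TIMER */
	uint64_t last_update_clock;	/* __LC_LAST_UPDATE_CLOCK */
	uint64_t last_update_timer;	/* __LC_LAST_UPDATE_TIMER */
};

static void account_idle_enter(struct idle_accounting *lc,
			       uint64_t idle_enter_clock,	/* __IDLE_ENTER */
			       uint64_t idle_exit_clock,	/* __IDLE_EXIT */
			       uint64_t vq_idle_enter_timer,	/* __VQ_IDLE_ENTER */
			       uint64_t vq_idle_exit_timer)	/* __VQ_IDLE_EXIT */
{
	/* TOD-clock time between the last update and idle entry is added
	 * to the steal timer; the update mark moves to the idle-exit clock. */
	lc->steal_timer += idle_enter_clock - lc->last_update_clock;
	lc->last_update_clock = idle_exit_clock;

	/* The CPU timer counts down, so last_update_timer minus the value
	 * captured at idle entry is the CPU time consumed before going
	 * idle; it is added to the system timer. */
	lc->system_timer += lc->last_update_timer - vq_idle_enter_timer;
	lc->last_update_timer = vq_idle_exit_timer;
}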
@@ -893,6 +950,8 @@ cleanup_io_restore_insn:
 	.quad	__critical_start
 .Lcritical_length:
 	.quad	__critical_end - __critical_start
+.Lvtimer_max:
+	.quad	0x7fffffffffffffff
 
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)