author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-03-11 11:59:27 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-03-11 11:59:28 -0400
commit     4c1051e37a0e2a941115c6fb7ba08c318f25a0f9 (patch)
tree       f228f1a90c0d7abb8308f275d0906dd7d1588ba3 /arch/s390/kernel/entry.S
parent     8b646bd759086f6090fe27acf414c0b5faa737f4 (diff)
[S390] rework idle code
Whenever the cpu loads an enabled wait PSW it will appear as idle to the
underlying host system. The code in default_idle calls vtime_stop_cpu
which does the necessary voodoo to get the cpu time accounting right.
The udelay code just loads an enabled wait PSW. To correct this, rework
the vtime_stop_cpu/vtime_start_cpu logic and move the difficult parts
to entry[64].S; vtime_stop_cpu can now be called from anywhere and
vtime_start_cpu is gone. The correction of the cpu time during wakeup
from an enabled wait PSW is done with a critical section in entry[64].S.
As vtime_start_cpu is gone, s390_idle_check can be removed as well.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
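
Since vtime_stop_cpu can now be called from anywhere, the C side of going
idle collapses to a single call into the new assembler entry point. A
minimal sketch of the reworked vtime_stop_cpu, with field names inferred
from the __IDLE_*/__VQ_IDLE_* offsets used in the diff below (the real
code lives in arch/s390/kernel/vtime.c, not quoted here):

    /* Sketch only: the struct fields and the meaning of the fourth
     * argument are inferred from entry.S, not copied from vtime.c. */
    void vtime_stop_cpu(void)
    {
            struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
            struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
            unsigned long psw_mask;

            /* An enabled wait PSW: I/O, external and machine check
             * interrupts open, wait bit set. */
            psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
                       PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;

            /* psw_idle stamps idle_enter, loads the wait PSW and,
             * together with cleanup_idle, stamps idle_exit on wakeup. */
            psw_idle(idle, vq, psw_mask, list_empty(&vq->list));

            /* The window between the two stamps is idle time
             * (modulo cputime conversion). */
            account_idle_time(idle->idle_exit - idle->idle_enter);
            idle->idle_enter = idle->idle_exit = 0ULL;
    }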
Diffstat (limited to 'arch/s390/kernel/entry.S')
 -rw-r--r--  arch/s390/kernel/entry.S | 76
 1 file changed, 70 insertions(+), 6 deletions(-)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6143521a4fff..74ee563fe62b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -105,14 +105,14 @@ STACK_SIZE = 1 << STACK_SHIFT

 	.macro	ADD64 high,low,timer
 	al	\high,\timer
-	al	\low,\timer+4
+	al	\low,4+\timer
 	brc	12,.+8
 	ahi	\high,1
 	.endm

 	.macro	SUB64 high,low,timer
 	sl	\high,\timer
-	sl	\low,\timer+4
+	sl	\low,4+\timer
 	brc	3,.+8
 	ahi	\high,-1
 	.endm
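
The only change to the two macros is the spelling of the low-word
operand: 4+\timer instead of \timer+4. That matters once the macros are
passed base-register operands, as cleanup_idle does below with
ADD64 %r9,%r10,__IDLE_ENTER(%r2): the expansion 4+__IDLE_ENTER(%r2) is a
valid displacement(base) expression, while __IDLE_ENTER(%r2)+4 would not
assemble. Functionally the macros build a 64-bit add/subtract out of
32-bit halves on the 31-bit kernel; a C sketch of the same carry/borrow
logic:

    #include <stdint.h>

    /* ADD64: *high:*low += t[0]:t[1] in 32-bit halves. The
     * 'brc 12,.+8' skips the 'ahi \high,1' unless the low-word
     * add set the carry (condition code 2 or 3). */
    static void add64(uint32_t *high, uint32_t *low, const uint32_t t[2])
    {
            *high += t[0];          /* al  \high,\timer  */
            *low += t[1];           /* al  \low,4+\timer */
            if (*low < t[1])        /* carry out of the low word? */
                    *high += 1;     /* ahi \high,1       */
    }

    /* SUB64: the same with a borrow; 'brc 3,.+8' skips the
     * 'ahi \high,-1' unless the low-word subtract borrowed. */
    static void sub64(uint32_t *high, uint32_t *low, const uint32_t t[2])
    {
            int borrow = *low < t[1];

            *high -= t[0];          /* sl  \high,\timer  */
            *low -= t[1];           /* sl  \low,4+\timer */
            if (borrow)
                    *high -= 1;     /* ahi \high,-1      */
    }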
@@ -471,7 +471,6 @@ io_tif:
 	jnz	io_work			# there is work to do (signals etc.)
 io_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
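
The ni __LC_RETURN_PSW+1,0xfd that used to scrub the wait bit out of
every return PSW is gone here (the same deletion appears in mcck_return
and cleanup_io_restore below): with the rework, an interrupt that hits
the psw_idle critical section is repaired by cleanup_idle, which clears
the wait bit exactly once with the 0xfffdffff word mask. The byte mask
and the word mask hit the same bit; a sketch of the arithmetic, with
the bit position written out as an assumption rather than taken from a
kernel header:

    /* Wait bit = 0x02 in PSW byte 1, i.e. 0x00020000 in the first
     * 32-bit word of a 31-bit PSW. */
    #define PSW_WAIT_BIT    0x00020000UL

    /* Old path: ni __LC_RETURN_PSW+1,0xfd on byte 1 of the PSW.
     * New path: n %r8,cleanup_idle_wait on the whole mask word. */
    static unsigned long clear_wait_bit(unsigned long psw_word0)
    {
            return psw_word0 & 0xfffdffffUL;
    }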
@@ -612,6 +611,26 @@ ext_skip:
 	basr	%r14,%r1	# call do_extint
 	j	io_return

+/*
+ * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ */
+ENTRY(psw_idle)
+	st	%r4,__SF_EMPTY(%r15)
+	basr	%r1,0
+	la	%r1,psw_idle_lpsw+4-.(%r1)
+	st	%r1,__SF_EMPTY+4(%r15)
+	oi	__SF_EMPTY+4(%r15),0x80
+	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
+	stck	__IDLE_ENTER(%r2)
+	ltr	%r5,%r5
+	stpt	__VQ_IDLE_ENTER(%r3)
+	jz	psw_idle_lpsw
+	spt	0(%r1)
+psw_idle_lpsw:
+	lpsw	__SF_EMPTY(%r15)
+	br	%r14
+psw_idle_end:
+
 __critical_end:

 /*
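
psw_idle builds the wait PSW in the stack scratch area (__SF_EMPTY):
%r4 supplies the mask, basr %r1,0 materializes the current address so
the label arithmetic needs no literal pool, and the PSW address is set
to psw_idle_lpsw+4 (just past the lpsw, with the 0x80 bit selecting
31-bit addressing mode). A waking interrupt therefore presents an old
PSW inside [psw_idle, psw_idle_end), which is exactly what the new
cleanup_critical checks below key on; cleanup_idle then substitutes the
saved %r14 as the return address. Viewed from C, the register interface
amounts to the following (a hypothetical prototype; the real
declaration lives in the headers this commit touches):

    struct s390_idle_data;  /* %r2: stamped at __IDLE_ENTER/__IDLE_EXIT */
    struct vtimer_queue;    /* %r3: stamped at __VQ_IDLE_ENTER/__VQ_IDLE_EXIT */

    void psw_idle(struct s390_idle_data *idle,  /* %r2 */
                  struct vtimer_queue *vq,      /* %r3 */
                  unsigned long psw_mask,       /* %r4: mask of the wait PSW */
                  int timer_unused);            /* %r5: nonzero = park the cpu
                                                 * timer at .Lvtimer_max while
                                                 * waiting */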
@@ -673,7 +692,6 @@ mcck_skip:
 	TRACE_IRQS_ON
 mcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
-	ni	__LC_RETURN_MCCK_PSW+1,0xfd	# clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
 	jno	0f
 	lm	%r0,%r15,__PT_R0(%r11)
@@ -748,6 +766,8 @@ cleanup_table:
 	.long	io_tif + 0x80000000
 	.long	io_restore + 0x80000000
 	.long	io_done + 0x80000000
+	.long	psw_idle + 0x80000000
+	.long	psw_idle_end + 0x80000000

 cleanup_critical:
 	cl	%r9,BASED(cleanup_table)	# system_call
@@ -766,6 +786,10 @@ cleanup_critical:
 	jl	cleanup_io_tif
 	cl	%r9,BASED(cleanup_table+28)	# io_done
 	jl	cleanup_io_restore
+	cl	%r9,BASED(cleanup_table+32)	# psw_idle
+	jl	0f
+	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
+	jl	cleanup_idle
 0:	br	%r14

 cleanup_system_call:
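
cleanup_critical is a cascade of unsigned compares of %r9 — the address
the interrupt hit — against the table entries; the two new lines make
any hit inside [psw_idle, psw_idle_end) branch to cleanup_idle. The
equivalent range check in C (a sketch; addr stands in for %r9):

    extern char psw_idle[], psw_idle_end[];

    static int needs_idle_cleanup(unsigned long addr)
    {
            return addr >= (unsigned long)psw_idle &&
                   addr < (unsigned long)psw_idle_end;
    }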
@@ -849,7 +873,6 @@ cleanup_io_restore:
 	jhe	0f
 	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	mvc	0(32,%r11),__PT_R8(%r9)
 	lm	%r0,%r7,__PT_R0(%r9)
 0:	lm	%r8,%r9,__LC_RETURN_PSW
@@ -857,11 +880,52 @@ cleanup_io_restore:
 cleanup_io_restore_insn:
 	.long	io_done - 4 + 0x80000000

+cleanup_idle:
+	# copy interrupt clock & cpu timer
+	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	chi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+0:	# check if stck has been executed
+	cl	%r9,BASED(cleanup_idle_insn)
+	jhe	1f
+	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
+	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
+	j	2f
+1:	# check if the cpu timer has been reprogrammed
+	ltr	%r5,%r5
+	jz	2f
+	spt	__VQ_IDLE_ENTER(%r3)
+2:	# account system time going idle
+	lm	%r9,%r10,__LC_STEAL_TIMER
+	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
+	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
+	stm	%r9,%r10,__LC_STEAL_TIMER
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	lm	%r9,%r10,__LC_SYSTEM_TIMER
+	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
+	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
+	stm	%r9,%r10,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	# prepare return psw
+	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
+	l	%r9,24(%r11)		# return from psw_idle
+	br	%r14
+cleanup_idle_insn:
+	.long	psw_idle_lpsw + 0x80000000
+cleanup_idle_wait:
+	.long	0xfffdffff
+
 /*
  * Integer constants
  */
 	.align	4
-.Lnr_syscalls:	.long	NR_syscalls
+.Lnr_syscalls:
+	.long	NR_syscalls
+.Lvtimer_max:
+	.quad	0x7fffffffffffffff

 /*
  * Symbol constants
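
Before the accounting, cleanup_idle completes whatever psw_idle did not
get to: it stamps __IDLE_EXIT/__VQ_IDLE_EXIT from the interrupt clock
and timer (the machine-check variants if %r11 is not the async save
area), backfills the enter stamps if the interrupt struck before the
stck executed, and re-arms the cpu timer if psw_idle had parked it. The
ADD64/SUB64 block then books wall-clock time up to idle entry as steal
time and cpu-timer time as system time, moving both last-update stamps
forward to idle exit. The same arithmetic in plain 64-bit C (a sketch;
the struct merely mirrors the __LC_* lowcore fields used above):

    #include <stdint.h>

    struct lowcore_sketch {                 /* stand-in for the s390 lowcore */
            uint64_t steal_timer;           /* __LC_STEAL_TIMER       */
            uint64_t last_update_clock;     /* __LC_LAST_UPDATE_CLOCK */
            uint64_t system_timer;          /* __LC_SYSTEM_TIMER      */
            uint64_t last_update_timer;     /* __LC_LAST_UPDATE_TIMER */
    };

    static void account_idle_entry(struct lowcore_sketch *lc,
                                   uint64_t idle_enter, uint64_t idle_exit,
                                   uint64_t vq_idle_enter, uint64_t vq_idle_exit)
    {
            /* TOD clock time up to idle entry counts as steal time. */
            lc->steal_timer += idle_enter - lc->last_update_clock;
            lc->last_update_clock = idle_exit;
            /* The cpu timer counts down, so the system time consumed
             * is last_update_timer - vq_idle_enter. */
            lc->system_timer += lc->last_update_timer - vq_idle_enter;
            lc->last_update_timer = vq_idle_exit;
    }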