author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2006-06-29 08:58:05 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2006-06-29 08:58:05 -0400
commit     63b122466484e44d09af12bba33b34019757a3c2
tree       d35ecad386c69ddb930adcce352d0d0a8d5d95da   /arch/s390/kernel
parent     9faf06547efe11ccb51678c6805037c7377b85ee

[S390] virtual cpu accounting vs. machine checks.
If a machine check interrupts the external or the i/o interrupt
handler before it has completed the cpu time calculations, the
accounting goes wrong. After the cpu returns from the machine check
handler to the interrupted interrupt handler, a negative cpu time delta
can occur. If the accumulated cpu time in lowcore is small enough
this value can get negative as well. The next jiffy interrupt will pick
up that negative value, shift it by 12 and add the now huge positive
value to the cpu time of the process.
To solve this, the machine check handler is modified not to change any
of the timestamps in the lowcore if the machine check interrupted kernel
context.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
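
Why a negative delta blows up the accounting instead of just losing a tick: the accumulated cpu time in the lowcore is effectively an unsigned 64-bit quantity, so adding a negative delta wraps it around, and even after the shift by 12 an enormous value remains to be charged to the process. A minimal user-space sketch of that arithmetic (the variable names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t accumulated = 5000;	/* small accumulated cpu time, as in lowcore */
	int64_t delta = -12000;		/* negative delta left behind by the machine check */

	accumulated += (uint64_t)delta;		/* wraps around: the accumulator "goes negative" */
	uint64_t charged = accumulated >> 12;	/* what the next jiffy interrupt picks up */

	printf("accumulated = %llu\n", (unsigned long long)accumulated);
	printf("charged     = %llu\n", (unsigned long long)charged);
	return 0;
}
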
Diffstat (limited to 'arch/s390/kernel')
 -rw-r--r--  arch/s390/kernel/entry.S    |  79
 -rw-r--r--  arch/s390/kernel/entry64.S  |  78
 2 files changed, 116 insertions(+), 41 deletions(-)
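
Both hunks below apply the same idea to mcck_int_handler: under CONFIG_VIRT_CPU_ACCOUNTING the handler first saves the interrupted __LC_ASYNC_ENTER_TIMER into the save area and, when the machine check did not store a valid cpu timer value, falls back to the smallest of the four saved lowcore timestamps to reload the timer and __LC_ASYNC_ENTER_TIMER (the s390 cpu timer counts down, so the smallest saved value is the most recent snapshot). On the way out, mcck_return puts the saved timestamp back and issues stpt __LC_EXIT_TIMER only when returning to user context. A rough C rendering of the entry-side logic follows; the struct, field names and helper are illustrative stand-ins for the lowcore fields and the spt instruction, not kernel code.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model of the lowcore timestamp fields touched by the patch. */
struct lowcore_timers {
	uint64_t sync_enter_timer;	/* __LC_SYNC_ENTER_TIMER */
	uint64_t async_enter_timer;	/* __LC_ASYNC_ENTER_TIMER */
	uint64_t exit_timer;		/* __LC_EXIT_TIMER */
	uint64_t last_update_timer;	/* __LC_LAST_UPDATE_TIMER */
	uint64_t cpu_timer_save_area;	/* __LC_CPU_TIMER_SAVE_AREA */
	uint64_t saved_async_enter;	/* __LC_SAVE_AREA+52 / +104 in the patch */
};

static void set_cpu_timer(uint64_t value)
{
	(void)value;			/* stands in for the spt instruction */
}

/* What the new CONFIG_VIRT_CPU_ACCOUNTING block in mcck_int_handler does. */
static void mcck_revalidate_timers(struct lowcore_timers *lc, bool stored_timer_valid)
{
	/* Remember the interrupted context's enter timestamp; mcck_return
	 * puts it back before leaving the machine check handler. */
	lc->saved_async_enter = lc->async_enter_timer;
	lc->async_enter_timer = lc->cpu_timer_save_area;

	if (stored_timer_valid)
		return;

	/* The cpu timer counts down, so the smallest saved timestamp is
	 * the latest snapshot; use it to revalidate the timer. */
	uint64_t best = lc->sync_enter_timer;
	if (lc->async_enter_timer < best)
		best = lc->async_enter_timer;
	if (lc->exit_timer < best)
		best = lc->exit_timer;
	if (lc->last_update_timer < best)
		best = lc->last_update_timer;

	set_cpu_timer(best);		/* spt 0(%r14) */
	lc->async_enter_timer = best;	/* mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) */
}
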
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b2448487854c..74d80c3bba80 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -93,13 +93,22 @@ STACK_SIZE = 1 << STACK_SHIFT
 	l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
 	.endm
 
-	.macro SAVE_ALL psworg,savearea,sync
+	.macro SAVE_ALL_SYNC psworg,savearea
 	la %r12,\psworg
-	.if \sync
 	tm \psworg+1,0x01 # test problem state bit
 	bz BASED(2f) # skip stack setup save
 	l %r15,__LC_KERNEL_STACK # problem state -> load ksp
-	.else
+#ifdef CONFIG_CHECK_STACK
+	b BASED(3f)
+2:	tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bz BASED(stack_overflow)
+3:
+#endif
+2:
+	.endm
+
+	.macro SAVE_ALL_ASYNC psworg,savearea
+	la %r12,\psworg
 	tm \psworg+1,0x01 # test problem state bit
 	bnz BASED(1f) # from user -> load async stack
 	clc \psworg+4(4),BASED(.Lcritical_end)
@@ -115,7 +124,6 @@ STACK_SIZE = 1 << STACK_SHIFT
 	sra %r14,STACK_SHIFT
 	be BASED(2f)
 1:	l %r15,__LC_ASYNC_STACK
-	.endif
 #ifdef CONFIG_CHECK_STACK
 	b BASED(3f)
 2:	tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -196,7 +204,7 @@ system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	lh %r7,0x8a # get svc number from lowcore
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -425,7 +433,7 @@ pgm_check_handler:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	bnz BASED(pgm_per) # got per exception -> special case
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -464,7 +472,7 @@ pgm_per:
 # Normal per exception
 #
 pgm_per_std:
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -490,7 +498,7 @@ pgm_no_vtime2:
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -519,7 +527,7 @@ io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck __LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
-	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -631,7 +639,7 @@ ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck __LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
-	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -657,21 +665,31 @@ __critical_end:
 	.globl mcck_int_handler
 mcck_int_handler:
 	spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
-	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
 	lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	la %r12,__LC_MCK_OLD_PSW
 	tm __LC_MCCK_CODE,0x80 # system damage?
 	bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
-	tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
-	bo BASED(0f)
-	spt __LC_LAST_UPDATE_TIMER # revalidate cpu timer
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+	mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
+	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+	tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
+	bo BASED(1f)
+	la %r14,__LC_SYNC_ENTER_TIMER
+	clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
+	bl BASED(0f)
+	la %r14,__LC_ASYNC_ENTER_TIMER
+0:	clc 0(8,%r14),__LC_EXIT_TIMER
+	bl BASED(0f)
+	la %r14,__LC_EXIT_TIMER
+0:	clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+	bl BASED(0f)
+	la %r14,__LC_LAST_UPDATE_TIMER
+0:	spt 0(%r14)
+	mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
 #endif
-0:	tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+	tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
 	bno BASED(mcck_int_main) # no -> skip cleanup critical
 	tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
 	bnz BASED(mcck_int_main) # from user -> load async stack
@@ -691,7 +709,7 @@ mcck_int_main:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
 	bno BASED(mcck_no_vtime) # no -> skip cleanup critical
-	tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
+	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
 	bz BASED(mcck_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -715,6 +733,20 @@ mcck_no_vtime:
 	l %r1,BASED(.Ls390_handle_mcck)
 	basr %r14,%r1 # call machine check handler
 mcck_return:
+	mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+	ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
+	tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+	bno BASED(0f)
+	lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+	stpt __LC_EXIT_TIMER
+	lpsw __LC_RETURN_MCCK_PSW # back to caller
+0:
+#endif
+	lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+	lpsw __LC_RETURN_MCCK_PSW # back to caller
+
 	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
 
 #ifdef CONFIG_SMP
@@ -781,6 +813,8 @@ cleanup_table_sysc_leave:
 	.long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
 cleanup_table_sysc_work_loop:
 	.long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+cleanup_table_io_return:
+	.long io_return + 0x80000000, io_leave + 0x80000000
 cleanup_table_io_leave:
 	.long io_leave + 0x80000000, io_done + 0x80000000
 cleanup_table_io_work_loop:
@@ -807,6 +841,11 @@ cleanup_critical:
 	clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
 	bl BASED(cleanup_sysc_return)
 0:
+	clc 4(4,%r12),BASED(cleanup_table_io_return)
+	bl BASED(0f)
+	clc 4(4,%r12),BASED(cleanup_table_io_return+4)
+	bl BASED(cleanup_io_return)
+0:
 	clc 4(4,%r12),BASED(cleanup_table_io_leave)
 	bl BASED(0f)
 	clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
@@ -839,7 +878,7 @@ cleanup_system_call:
 	mvc __LC_SAVE_AREA(16),0(%r12)
 0:	st %r13,4(%r12)
 	st %r12,__LC_SAVE_AREA+48 # argh
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	l %r12,__LC_SAVE_AREA+48 # argh
 	st %r15,12(%r12)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2ac095bc0e25..21e601d56139 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -87,13 +87,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	larl %r13,system_call
 	.endm
 
-	.macro SAVE_ALL psworg,savearea,sync
+	.macro SAVE_ALL_SYNC psworg,savearea
 	la %r12,\psworg
-	.if \sync
 	tm \psworg+1,0x01 # test problem state bit
 	jz 2f # skip stack setup save
 	lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
-	.else
+#ifdef CONFIG_CHECK_STACK
+	j 3f
+2:	tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+	jz stack_overflow
+3:
+#endif
+2:
+	.endm
+
+	.macro SAVE_ALL_ASYNC psworg,savearea
+	la %r12,\psworg
 	tm \psworg+1,0x01 # test problem state bit
 	jnz 1f # from user -> load kernel stack
 	clc \psworg+8(8),BASED(.Lcritical_end)
@@ -108,7 +117,6 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	srag %r14,%r14,STACK_SHIFT
 	jz 2f
 1:	lg %r15,__LC_ASYNC_STACK # load async stack
-	.endif
 #ifdef CONFIG_CHECK_STACK
 	j 3f
 2:	tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -187,7 +195,7 @@ system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +454,7 @@ pgm_check_handler:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	jnz pgm_per # got per exception -> special case
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -485,7 +493,7 @@ pgm_per:
 # Normal per exception
 #
 pgm_per_std:
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -511,7 +519,7 @@ pgm_no_vtime2:
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -539,7 +547,7 @@ io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck __LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -647,7 +655,7 @@ ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck __LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -672,21 +680,32 @@ __critical_end:
 mcck_int_handler:
 	la %r1,4095 # revalidate r1
 	spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
-	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r1)
 	lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+64
 	la %r12,__LC_MCK_OLD_PSW
 	tm __LC_MCCK_CODE,0x80 # system damage?
 	jo mcck_int_main # yes -> rest of mcck code invalid
-	tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
-	jo 0f
-	spt __LC_LAST_UPDATE_TIMER
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+	la %r14,4095
+	mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
+	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+	tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
+	jo 1f
+	la %r14,__LC_SYNC_ENTER_TIMER
+	clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
+	jl 0f
+	la %r14,__LC_ASYNC_ENTER_TIMER
+0:	clc 0(8,%r14),__LC_EXIT_TIMER
+	jl 0f
+	la %r14,__LC_EXIT_TIMER
+0:	clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+	jl 0f
+	la %r14,__LC_LAST_UPDATE_TIMER
+0:	spt 0(%r14)
+	mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
 #endif
-0:	tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+	tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
 	jno mcck_int_main # no -> skip cleanup critical
 	tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
 	jnz mcck_int_main # from user -> load kernel stack
@@ -705,7 +724,7 @@ mcck_int_main:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
 	jno mcck_no_vtime # no -> no timer update
-	tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
+	tm SP_PSW+1(%r15),0x01 # interrupting from user ?
 	jz mcck_no_vtime
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -727,7 +746,17 @@ mcck_no_vtime:
 	jno mcck_return
 	brasl %r14,s390_handle_mcck
 mcck_return:
-	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+	mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+	ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+	lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
+	tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+	jno 0f
+	stpt __LC_EXIT_TIMER
+0:
+#endif
+	lpswe __LC_RETURN_MCCK_PSW # back to caller
 
 #ifdef CONFIG_SMP
 /*
@@ -789,6 +818,8 @@ cleanup_table_sysc_leave:
 	.quad sysc_leave, sysc_work_loop
 cleanup_table_sysc_work_loop:
 	.quad sysc_work_loop, sysc_reschedule
+cleanup_table_io_return:
+	.quad io_return, io_leave
 cleanup_table_io_leave:
 	.quad io_leave, io_done
 cleanup_table_io_work_loop:
@@ -815,6 +846,11 @@ cleanup_critical:
 	clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
 	jl cleanup_sysc_return
 0:
+	clc 8(8,%r12),BASED(cleanup_table_io_return)
+	jl 0f
+	clc 8(8,%r12),BASED(cleanup_table_io_return+8)
+	jl cleanup_io_return
+0:
 	clc 8(8,%r12),BASED(cleanup_table_io_leave)
 	jl 0f
 	clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
@@ -847,7 +883,7 @@ cleanup_system_call:
 	mvc __LC_SAVE_AREA(32),0(%r12)
 0:	stg %r13,8(%r12)
 	stg %r12,__LC_SAVE_AREA+96 # argh
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	lg %r12,__LC_SAVE_AREA+96 # argh
 	stg %r15,24(%r12)