Diffstat (limited to 'arch/s390/kernel/entry.S')
-rw-r--r-- | arch/s390/kernel/entry.S | 1103 |
1 file changed, 483 insertions, 620 deletions
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b13157057e02..3705700ed374 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -19,32 +19,22 @@ | |||
19 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | 21 | ||
22 | /* | 22 | __PT_R0 = __PT_GPRS |
23 | * Stack layout for the system_call stack entry. | 23 | __PT_R1 = __PT_GPRS + 4 |
24 | * The first few entries are identical to the user_regs_struct. | 24 | __PT_R2 = __PT_GPRS + 8 |
25 | */ | 25 | __PT_R3 = __PT_GPRS + 12 |
26 | SP_PTREGS = STACK_FRAME_OVERHEAD | 26 | __PT_R4 = __PT_GPRS + 16 |
27 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | 27 | __PT_R5 = __PT_GPRS + 20 |
28 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | 28 | __PT_R6 = __PT_GPRS + 24 |
29 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | 29 | __PT_R7 = __PT_GPRS + 28 |
30 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4 | 30 | __PT_R8 = __PT_GPRS + 32 |
31 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | 31 | __PT_R9 = __PT_GPRS + 36 |
32 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12 | 32 | __PT_R10 = __PT_GPRS + 40 |
33 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | 33 | __PT_R11 = __PT_GPRS + 44 |
34 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20 | 34 | __PT_R12 = __PT_GPRS + 48 |
35 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | 35 | __PT_R13 = __PT_GPRS + 52 |
36 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28 | 36 | __PT_R14 = __PT_GPRS + 56 |
37 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | 37 | __PT_R15 = __PT_GPRS + 60 |
38 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36 | ||
39 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | ||
40 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44 | ||
41 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | ||
42 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | ||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | ||
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | ||
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | ||
46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE | ||
47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | ||
48 | 38 | ||
49 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 39 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
50 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) | 40 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
58 | 48 | ||
59 | #define BASED(name) name-system_call(%r13) | 49 | #define BASED(name) name-system_call(%r13) |
60 | 50 | ||
61 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
62 | .macro TRACE_IRQS_ON | 51 | .macro TRACE_IRQS_ON |
52 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
63 | basr %r2,%r0 | 53 | basr %r2,%r0 |
64 | l %r1,BASED(.Ltrace_irq_on_caller) | 54 | l %r1,BASED(.Lhardirqs_on) |
65 | basr %r14,%r1 | 55 | basr %r14,%r1 # call trace_hardirqs_on_caller |
56 | #endif | ||
66 | .endm | 57 | .endm |
67 | 58 | ||
68 | .macro TRACE_IRQS_OFF | 59 | .macro TRACE_IRQS_OFF |
60 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
69 | basr %r2,%r0 | 61 | basr %r2,%r0 |
70 | l %r1,BASED(.Ltrace_irq_off_caller) | 62 | l %r1,BASED(.Lhardirqs_off) |
71 | basr %r14,%r1 | 63 | basr %r14,%r1 # call trace_hardirqs_off_caller |
72 | .endm | ||
73 | #else | ||
74 | #define TRACE_IRQS_ON | ||
75 | #define TRACE_IRQS_OFF | ||
76 | #endif | 64 | #endif |
65 | .endm | ||
77 | 66 | ||
78 | #ifdef CONFIG_LOCKDEP | ||
79 | .macro LOCKDEP_SYS_EXIT | 67 | .macro LOCKDEP_SYS_EXIT |
80 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 68 | #ifdef CONFIG_LOCKDEP |
81 | jz 0f | 69 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
70 | jz .+10 | ||
82 | l %r1,BASED(.Llockdep_sys_exit) | 71 | l %r1,BASED(.Llockdep_sys_exit) |
83 | basr %r14,%r1 | 72 | basr %r14,%r1 # call lockdep_sys_exit |
84 | 0: | ||
85 | .endm | ||
86 | #else | ||
87 | #define LOCKDEP_SYS_EXIT | ||
88 | #endif | 73 | #endif |
89 | |||
90 | /* | ||
91 | * Register usage in interrupt handlers: | ||
92 | * R9 - pointer to current task structure | ||
93 | * R13 - pointer to literal pool | ||
94 | * R14 - return register for function calls | ||
95 | * R15 - kernel stack pointer | ||
96 | */ | ||
97 | |||
98 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | ||
99 | lm %r10,%r11,\lc_from | ||
100 | sl %r10,\lc_to | ||
101 | sl %r11,\lc_to+4 | ||
102 | bc 3,BASED(0f) | ||
103 | sl %r10,BASED(.Lc_1) | ||
104 | 0: al %r10,\lc_sum | ||
105 | al %r11,\lc_sum+4 | ||
106 | bc 12,BASED(1f) | ||
107 | al %r10,BASED(.Lc_1) | ||
108 | 1: stm %r10,%r11,\lc_sum | ||
109 | .endm | ||
110 | |||
111 | .macro SAVE_ALL_SVC psworg,savearea | ||
112 | stm %r12,%r15,\savearea | ||
113 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | ||
114 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
115 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
116 | .endm | ||
117 | |||
118 | .macro SAVE_ALL_BASE savearea | ||
119 | stm %r12,%r15,\savearea | ||
120 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | ||
121 | .endm | 74 | .endm |
122 | 75 | ||
123 | .macro SAVE_ALL_PGM psworg,savearea | 76 | .macro CHECK_STACK stacksize,savearea |
124 | tm \psworg+1,0x01 # test problem state bit | ||
125 | #ifdef CONFIG_CHECK_STACK | 77 | #ifdef CONFIG_CHECK_STACK |
126 | bnz BASED(1f) | 78 | tml %r15,\stacksize - CONFIG_STACK_GUARD |
127 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 79 | la %r14,\savearea |
128 | bnz BASED(2f) | 80 | jz stack_overflow |
129 | la %r12,\psworg | ||
130 | b BASED(stack_overflow) | ||
131 | #else | ||
132 | bz BASED(2f) | ||
133 | #endif | 81 | #endif |
134 | 1: l %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
135 | 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
136 | .endm | 82 | .endm |
137 | 83 | ||
138 | .macro SAVE_ALL_ASYNC psworg,savearea | 84 | .macro SWITCH_ASYNC savearea,stack,shift |
139 | stm %r12,%r15,\savearea | 85 | tmh %r8,0x0001 # interrupting from user ? |
140 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | 86 | jnz 1f |
141 | la %r12,\psworg | 87 | lr %r14,%r9 |
142 | tm \psworg+1,0x01 # test problem state bit | 88 | sl %r14,BASED(.Lcritical_start) |
143 | bnz BASED(1f) # from user -> load async stack | 89 | cl %r14,BASED(.Lcritical_length) |
144 | clc \psworg+4(4),BASED(.Lcritical_end) | 90 | jhe 0f |
145 | bhe BASED(0f) | 91 | la %r11,\savearea # inside critical section, do cleanup |
146 | clc \psworg+4(4),BASED(.Lcritical_start) | 92 | bras %r14,cleanup_critical |
147 | bl BASED(0f) | 93 | tmh %r8,0x0001 # retest problem state after cleanup |
148 | l %r14,BASED(.Lcleanup_critical) | 94 | jnz 1f |
149 | basr %r14,%r14 | 95 | 0: l %r14,\stack # are we already on the target stack? |
150 | tm 1(%r12),0x01 # retest problem state after cleanup | ||
151 | bnz BASED(1f) | ||
152 | 0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? | ||
153 | slr %r14,%r15 | 96 | slr %r14,%r15 |
154 | sra %r14,STACK_SHIFT | 97 | sra %r14,\shift |
155 | #ifdef CONFIG_CHECK_STACK | 98 | jnz 1f |
156 | bnz BASED(1f) | 99 | CHECK_STACK 1<<\shift,\savearea |
157 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 100 | j 2f |
158 | bnz BASED(2f) | 101 | 1: l %r15,\stack # load target stack |
159 | b BASED(stack_overflow) | 102 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
160 | #else | 103 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
161 | bz BASED(2f) | ||
162 | #endif | ||
163 | 1: l %r15,__LC_ASYNC_STACK | ||
164 | 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
165 | .endm | 104 | .endm |
166 | 105 | ||
167 | .macro CREATE_STACK_FRAME savearea | 106 | .macro ADD64 high,low,timer |
168 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 107 | al \high,\timer |
169 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | 108 | al \low,\timer+4 |
170 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack | 109 | brc 12,.+8 |
171 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 110 | ahi \high,1 |
172 | .endm | 111 | .endm |
173 | 112 | ||
174 | .macro RESTORE_ALL psworg,sync | 113 | .macro SUB64 high,low,timer |
175 | mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore | 114 | sl \high,\timer |
176 | .if !\sync | 115 | sl \low,\timer+4 |
177 | ni \psworg+1,0xfd # clear wait state bit | 116 | brc 3,.+8 |
178 | .endif | 117 | ahi \high,-1 |
179 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 118 | .endm |
180 | stpt __LC_EXIT_TIMER | 119 | |
181 | lpsw \psworg # back to caller | 120 | .macro UPDATE_VTIME high,low,enter_timer |
121 | lm \high,\low,__LC_EXIT_TIMER | ||
122 | SUB64 \high,\low,\enter_timer | ||
123 | ADD64 \high,\low,__LC_USER_TIMER | ||
124 | stm \high,\low,__LC_USER_TIMER | ||
125 | lm \high,\low,__LC_LAST_UPDATE_TIMER | ||
126 | SUB64 \high,\low,__LC_EXIT_TIMER | ||
127 | ADD64 \high,\low,__LC_SYSTEM_TIMER | ||
128 | stm \high,\low,__LC_SYSTEM_TIMER | ||
129 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
182 | .endm | 130 | .endm |
183 | 131 | ||
184 | .macro REENABLE_IRQS | 132 | .macro REENABLE_IRQS |
185 | mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) | 133 | st %r8,__LC_RETURN_PSW |
186 | ni __SF_EMPTY(%r15),0xbf | 134 | ni __LC_RETURN_PSW,0xbf |
187 | ssm __SF_EMPTY(%r15) | 135 | ssm __LC_RETURN_PSW |
188 | .endm | 136 | .endm |
189 | 137 | ||
190 | .section .kprobes.text, "ax" | 138 | .section .kprobes.text, "ax" |
@@ -197,14 +145,13 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
197 | * gpr2 = prev | 145 | * gpr2 = prev |
198 | */ | 146 | */ |
199 | ENTRY(__switch_to) | 147 | ENTRY(__switch_to) |
200 | basr %r1,0 | 148 | l %r4,__THREAD_info(%r2) # get thread_info of prev |
201 | 0: l %r4,__THREAD_info(%r2) # get thread_info of prev | ||
202 | l %r5,__THREAD_info(%r3) # get thread_info of next | 149 | l %r5,__THREAD_info(%r3) # get thread_info of next |
203 | tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? | 150 | tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? |
204 | bz 1f-0b(%r1) | 151 | jz 0f |
205 | ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev | 152 | ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev |
206 | oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next | 153 | oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next |
207 | 1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | 154 | 0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
208 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev | 155 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev |
209 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next | 156 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next |
210 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 157 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
@@ -224,48 +171,55 @@ __critical_start: | |||
224 | 171 | ||
225 | ENTRY(system_call) | 172 | ENTRY(system_call) |
226 | stpt __LC_SYNC_ENTER_TIMER | 173 | stpt __LC_SYNC_ENTER_TIMER |
227 | sysc_saveall: | 174 | sysc_stm: |
228 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 175 | stm %r8,%r15,__LC_SAVE_AREA_SYNC |
229 | CREATE_STACK_FRAME __LC_SAVE_AREA | 176 | l %r12,__LC_THREAD_INFO |
230 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 177 | l %r13,__LC_SVC_NEW_PSW+4 |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 178 | sysc_per: |
232 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 179 | l %r15,__LC_KERNEL_STACK |
233 | oi __TI_flags+3(%r12),_TIF_SYSCALL | 180 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
181 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | ||
234 | sysc_vtime: | 182 | sysc_vtime: |
235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 183 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER |
236 | sysc_stime: | 184 | stm %r0,%r7,__PT_R0(%r11) |
237 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 185 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
238 | sysc_update: | 186 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW |
239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 187 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
240 | sysc_do_svc: | 188 | sysc_do_svc: |
241 | xr %r7,%r7 | 189 | oi __TI_flags+3(%r12),_TIF_SYSCALL |
242 | icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 | 190 | lh %r8,__PT_INT_CODE+2(%r11) |
243 | bnz BASED(sysc_nr_ok) # svc number > 0 | 191 | sla %r8,2 # shift and test for svc0 |
192 | jnz sysc_nr_ok | ||
244 | # svc 0: system call number in %r1 | 193 | # svc 0: system call number in %r1 |
245 | cl %r1,BASED(.Lnr_syscalls) | 194 | cl %r1,BASED(.Lnr_syscalls) |
246 | bnl BASED(sysc_nr_ok) | 195 | jnl sysc_nr_ok |
247 | sth %r1,SP_SVC_CODE+2(%r15) | 196 | sth %r1,__PT_INT_CODE+2(%r11) |
248 | lr %r7,%r1 # copy svc number to %r7 | 197 | lr %r8,%r1 |
198 | sla %r8,2 | ||
249 | sysc_nr_ok: | 199 | sysc_nr_ok: |
250 | sll %r7,2 # svc number *4 | 200 | l %r10,BASED(.Lsys_call_table) # 31 bit system call table |
251 | l %r10,BASED(.Lsysc_table) | 201 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
202 | st %r2,__PT_ORIG_GPR2(%r11) | ||
203 | st %r7,STACK_FRAME_OVERHEAD(%r15) | ||
204 | l %r9,0(%r8,%r10) # get system call addr. | ||
252 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 205 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 206 | jnz sysc_tracesys |
254 | l %r8,0(%r7,%r10) # get system call addr. | 207 | basr %r14,%r9 # call sys_xxxx |
255 | bnz BASED(sysc_tracesys) | 208 | st %r2,__PT_R2(%r11) # store return value |
256 | basr %r14,%r8 # call sys_xxxx | ||
257 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) | ||
258 | 209 | ||
259 | sysc_return: | 210 | sysc_return: |
260 | LOCKDEP_SYS_EXIT | 211 | LOCKDEP_SYS_EXIT |
261 | sysc_tif: | 212 | sysc_tif: |
262 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 213 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
263 | bno BASED(sysc_restore) | 214 | jno sysc_restore |
264 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 215 | tm __TI_flags+3(%r12),_TIF_WORK_SVC |
265 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 216 | jnz sysc_work # check for work |
266 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | 217 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL |
267 | sysc_restore: | 218 | sysc_restore: |
268 | RESTORE_ALL __LC_RETURN_PSW,1 | 219 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
220 | stpt __LC_EXIT_TIMER | ||
221 | lm %r0,%r15,__PT_R0(%r11) | ||
222 | lpsw __LC_RETURN_PSW | ||
269 | sysc_done: | 223 | sysc_done: |
270 | 224 | ||
271 | # | 225 | # |
@@ -273,16 +227,16 @@ sysc_done: | |||
273 | # | 227 | # |
274 | sysc_work: | 228 | sysc_work: |
275 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 229 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
276 | bo BASED(sysc_mcck_pending) | 230 | jo sysc_mcck_pending |
277 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 231 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
278 | bo BASED(sysc_reschedule) | 232 | jo sysc_reschedule |
279 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 233 | tm __TI_flags+3(%r12),_TIF_SIGPENDING |
280 | bo BASED(sysc_sigpending) | 234 | jo sysc_sigpending |
281 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 235 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
282 | bo BASED(sysc_notify_resume) | 236 | jo sysc_notify_resume |
283 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 237 | tm __TI_flags+3(%r12),_TIF_PER_TRAP |
284 | bo BASED(sysc_singlestep) | 238 | jo sysc_singlestep |
285 | b BASED(sysc_return) # beware of critical section cleanup | 239 | j sysc_return # beware of critical section cleanup |
286 | 240 | ||
287 | # | 241 | # |
288 | # _TIF_NEED_RESCHED is set, call schedule | 242 | # _TIF_NEED_RESCHED is set, call schedule |
@@ -290,13 +244,13 @@ sysc_work: | |||
290 | sysc_reschedule: | 244 | sysc_reschedule: |
291 | l %r1,BASED(.Lschedule) | 245 | l %r1,BASED(.Lschedule) |
292 | la %r14,BASED(sysc_return) | 246 | la %r14,BASED(sysc_return) |
293 | br %r1 # call scheduler | 247 | br %r1 # call schedule |
294 | 248 | ||
295 | # | 249 | # |
296 | # _TIF_MCCK_PENDING is set, call handler | 250 | # _TIF_MCCK_PENDING is set, call handler |
297 | # | 251 | # |
298 | sysc_mcck_pending: | 252 | sysc_mcck_pending: |
299 | l %r1,BASED(.Ls390_handle_mcck) | 253 | l %r1,BASED(.Lhandle_mcck) |
300 | la %r14,BASED(sysc_return) | 254 | la %r14,BASED(sysc_return) |
301 | br %r1 # TIF bit will be cleared by handler | 255 | br %r1 # TIF bit will be cleared by handler |
302 | 256 | ||
@@ -305,23 +259,24 @@ sysc_mcck_pending: | |||
305 | # | 259 | # |
306 | sysc_sigpending: | 260 | sysc_sigpending: |
307 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 261 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
308 | la %r2,SP_PTREGS(%r15) # load pt_regs | 262 | lr %r2,%r11 # pass pointer to pt_regs |
309 | l %r1,BASED(.Ldo_signal) | 263 | l %r1,BASED(.Ldo_signal) |
310 | basr %r14,%r1 # call do_signal | 264 | basr %r14,%r1 # call do_signal |
311 | tm __TI_flags+3(%r12),_TIF_SYSCALL | 265 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
312 | bno BASED(sysc_return) | 266 | jno sysc_return |
313 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 267 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments |
314 | xr %r7,%r7 # svc 0 returns -ENOSYS | 268 | xr %r8,%r8 # svc 0 returns -ENOSYS |
315 | clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) | 269 | clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) |
316 | bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 | 270 | jnl sysc_nr_ok # invalid svc number -> do svc 0 |
317 | icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number | 271 | lh %r8,__PT_INT_CODE+2(%r11) # load new svc number |
318 | b BASED(sysc_nr_ok) # restart svc | 272 | sla %r8,2 |
273 | j sysc_nr_ok # restart svc | ||
319 | 274 | ||
320 | # | 275 | # |
321 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 276 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
322 | # | 277 | # |
323 | sysc_notify_resume: | 278 | sysc_notify_resume: |
324 | la %r2,SP_PTREGS(%r15) # load pt_regs | 279 | lr %r2,%r11 # pass pointer to pt_regs |
325 | l %r1,BASED(.Ldo_notify_resume) | 280 | l %r1,BASED(.Ldo_notify_resume) |
326 | la %r14,BASED(sysc_return) | 281 | la %r14,BASED(sysc_return) |
327 | br %r1 # call do_notify_resume | 282 | br %r1 # call do_notify_resume |
@@ -331,56 +286,57 @@ sysc_notify_resume: | |||
331 | # | 286 | # |
332 | sysc_singlestep: | 287 | sysc_singlestep: |
333 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) | 288 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
334 | la %r2,SP_PTREGS(%r15) # address of register-save area | 289 | lr %r2,%r11 # pass pointer to pt_regs |
335 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 290 | l %r1,BASED(.Ldo_per_trap) |
336 | la %r14,BASED(sysc_return) # load adr. of system return | 291 | la %r14,BASED(sysc_return) |
337 | br %r1 # branch to do_per_trap | 292 | br %r1 # call do_per_trap |
338 | 293 | ||
339 | # | 294 | # |
340 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | 295 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
341 | # and after the system call | 296 | # and after the system call |
342 | # | 297 | # |
343 | sysc_tracesys: | 298 | sysc_tracesys: |
344 | l %r1,BASED(.Ltrace_entry) | 299 | l %r1,BASED(.Ltrace_enter) |
345 | la %r2,SP_PTREGS(%r15) # load pt_regs | 300 | lr %r2,%r11 # pass pointer to pt_regs |
346 | la %r3,0 | 301 | la %r3,0 |
347 | xr %r0,%r0 | 302 | xr %r0,%r0 |
348 | icm %r0,3,SP_SVC_CODE(%r15) | 303 | icm %r0,3,__PT_INT_CODE+2(%r11) |
349 | st %r0,SP_R2(%r15) | 304 | st %r0,__PT_R2(%r11) |
350 | basr %r14,%r1 | 305 | basr %r14,%r1 # call do_syscall_trace_enter |
351 | cl %r2,BASED(.Lnr_syscalls) | 306 | cl %r2,BASED(.Lnr_syscalls) |
352 | bnl BASED(sysc_tracenogo) | 307 | jnl sysc_tracenogo |
353 | lr %r7,%r2 | 308 | lr %r8,%r2 |
354 | sll %r7,2 # svc number *4 | 309 | sll %r8,2 |
355 | l %r8,0(%r7,%r10) | 310 | l %r9,0(%r8,%r10) |
356 | sysc_tracego: | 311 | sysc_tracego: |
357 | lm %r3,%r6,SP_R3(%r15) | 312 | lm %r3,%r7,__PT_R3(%r11) |
358 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 313 | st %r7,STACK_FRAME_OVERHEAD(%r15) |
359 | l %r2,SP_ORIG_R2(%r15) | 314 | l %r2,__PT_ORIG_GPR2(%r11) |
360 | basr %r14,%r8 # call sys_xxx | 315 | basr %r14,%r9 # call sys_xxx |
361 | st %r2,SP_R2(%r15) # store return value | 316 | st %r2,__PT_R2(%r11) # store return value |
362 | sysc_tracenogo: | 317 | sysc_tracenogo: |
363 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 318 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
364 | bz BASED(sysc_return) | 319 | jz sysc_return |
365 | l %r1,BASED(.Ltrace_exit) | 320 | l %r1,BASED(.Ltrace_exit) |
366 | la %r2,SP_PTREGS(%r15) # load pt_regs | 321 | lr %r2,%r11 # pass pointer to pt_regs |
367 | la %r14,BASED(sysc_return) | 322 | la %r14,BASED(sysc_return) |
368 | br %r1 | 323 | br %r1 # call do_syscall_trace_exit |
369 | 324 | ||
370 | # | 325 | # |
371 | # a new process exits the kernel with ret_from_fork | 326 | # a new process exits the kernel with ret_from_fork |
372 | # | 327 | # |
373 | ENTRY(ret_from_fork) | 328 | ENTRY(ret_from_fork) |
329 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
330 | l %r12,__LC_THREAD_INFO | ||
374 | l %r13,__LC_SVC_NEW_PSW+4 | 331 | l %r13,__LC_SVC_NEW_PSW+4 |
375 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 332 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
376 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | 333 | jo 0f |
377 | bo BASED(0f) | 334 | st %r15,__PT_R15(%r11) # store stack pointer for new kthread |
378 | st %r15,SP_R15(%r15) # store stack pointer for new kthread | 335 | 0: l %r1,BASED(.Lschedule_tail) |
379 | 0: l %r1,BASED(.Lschedtail) | 336 | basr %r14,%r1 # call schedule_tail |
380 | basr %r14,%r1 | ||
381 | TRACE_IRQS_ON | 337 | TRACE_IRQS_ON |
382 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 338 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
383 | b BASED(sysc_tracenogo) | 339 | j sysc_tracenogo |
384 | 340 | ||
385 | # | 341 | # |
386 | # kernel_execve function needs to deal with pt_regs that is not | 342 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -390,153 +346,98 @@ ENTRY(kernel_execve) | |||
390 | stm %r12,%r15,48(%r15) | 346 | stm %r12,%r15,48(%r15) |
391 | lr %r14,%r15 | 347 | lr %r14,%r15 |
392 | l %r13,__LC_SVC_NEW_PSW+4 | 348 | l %r13,__LC_SVC_NEW_PSW+4 |
393 | s %r15,BASED(.Lc_spsize) | 349 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
394 | st %r14,__SF_BACKCHAIN(%r15) | 350 | st %r14,__SF_BACKCHAIN(%r15) |
395 | la %r12,SP_PTREGS(%r15) | 351 | la %r12,STACK_FRAME_OVERHEAD(%r15) |
396 | xc 0(__PT_SIZE,%r12),0(%r12) | 352 | xc 0(__PT_SIZE,%r12),0(%r12) |
397 | l %r1,BASED(.Ldo_execve) | 353 | l %r1,BASED(.Ldo_execve) |
398 | lr %r5,%r12 | 354 | lr %r5,%r12 |
399 | basr %r14,%r1 | 355 | basr %r14,%r1 # call do_execve |
400 | ltr %r2,%r2 | 356 | ltr %r2,%r2 |
401 | be BASED(0f) | 357 | je 0f |
402 | a %r15,BASED(.Lc_spsize) | 358 | ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) |
403 | lm %r12,%r15,48(%r15) | 359 | lm %r12,%r15,48(%r15) |
404 | br %r14 | 360 | br %r14 |
405 | # execve succeeded. | 361 | # execve succeeded. |
406 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 362 | 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
407 | l %r15,__LC_KERNEL_STACK # load ksp | 363 | l %r15,__LC_KERNEL_STACK # load ksp |
408 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 364 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
409 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 365 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
366 | mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs | ||
410 | l %r12,__LC_THREAD_INFO | 367 | l %r12,__LC_THREAD_INFO |
411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 368 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
412 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 369 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
413 | l %r1,BASED(.Lexecve_tail) | 370 | l %r1,BASED(.Lexecve_tail) |
414 | basr %r14,%r1 | 371 | basr %r14,%r1 # call execve_tail |
415 | b BASED(sysc_return) | 372 | j sysc_return |
416 | 373 | ||
417 | /* | 374 | /* |
418 | * Program check handler routine | 375 | * Program check handler routine |
419 | */ | 376 | */ |
420 | 377 | ||
421 | ENTRY(pgm_check_handler) | 378 | ENTRY(pgm_check_handler) |
422 | /* | ||
423 | * First we need to check for a special case: | ||
424 | * Single stepping an instruction that disables the PER event mask will | ||
425 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | ||
426 | * For a single stepped SVC the program check handler gets control after | ||
427 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | ||
428 | * then handle the PER event. Therefore we update the SVC old PSW to point | ||
429 | * to the pgm_check_handler and branch to the SVC handler after we checked | ||
430 | * if we have to load the kernel stack register. | ||
431 | * For every other possible cause for PER event without the PER mask set | ||
432 | * we just ignore the PER event (FIXME: is there anything we have to do | ||
433 | * for LPSW?). | ||
434 | */ | ||
435 | stpt __LC_SYNC_ENTER_TIMER | 379 | stpt __LC_SYNC_ENTER_TIMER |
436 | SAVE_ALL_BASE __LC_SAVE_AREA | 380 | stm %r8,%r15,__LC_SAVE_AREA_SYNC |
437 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 381 | l %r12,__LC_THREAD_INFO |
438 | bnz BASED(pgm_per) # got per exception -> special case | 382 | l %r13,__LC_SVC_NEW_PSW+4 |
439 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 383 | lm %r8,%r9,__LC_PGM_OLD_PSW |
440 | CREATE_STACK_FRAME __LC_SAVE_AREA | 384 | tmh %r8,0x0001 # test problem state bit |
441 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | 385 | jnz 1f # -> fault in user space |
442 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 386 | tmh %r8,0x4000 # PER bit set in old PSW ? |
443 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 387 | jnz 0f # -> enabled, can't be a double fault |
444 | bz BASED(pgm_no_vtime) | 388 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
445 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 389 | jnz pgm_svcper # -> single stepped svc |
446 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 390 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
447 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 391 | j 2f |
448 | pgm_no_vtime: | 392 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER |
449 | l %r3,__LC_PGM_ILC # load program interruption code | 393 | l %r15,__LC_KERNEL_STACK |
450 | l %r4,__LC_TRANS_EXC_CODE | 394 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
451 | REENABLE_IRQS | 395 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
452 | la %r8,0x7f | 396 | stm %r0,%r7,__PT_R0(%r11) |
453 | nr %r8,%r3 | 397 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
454 | sll %r8,2 | 398 | stm %r8,%r9,__PT_PSW(%r11) |
455 | l %r1,BASED(.Ljump_table) | 399 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC |
456 | l %r1,0(%r8,%r1) # load address of handler routine | 400 | mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE |
457 | la %r2,SP_PTREGS(%r15) # address of register-save area | 401 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
458 | basr %r14,%r1 # branch to interrupt-handler | 402 | jz 0f |
459 | pgm_exit: | ||
460 | b BASED(sysc_return) | ||
461 | |||
462 | # | ||
463 | # handle per exception | ||
464 | # | ||
465 | pgm_per: | ||
466 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on | ||
467 | bnz BASED(pgm_per_std) # ok, normal per event from user space | ||
468 | # ok its one of the special cases, now we need to find out which one | ||
469 | clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW | ||
470 | be BASED(pgm_svcper) | ||
471 | # no interesting special case, ignore PER event | ||
472 | lm %r12,%r15,__LC_SAVE_AREA | ||
473 | lpsw 0x28 | ||
474 | |||
475 | # | ||
476 | # Normal per exception | ||
477 | # | ||
478 | pgm_per_std: | ||
479 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | ||
480 | CREATE_STACK_FRAME __LC_SAVE_AREA | ||
481 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | ||
482 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
483 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
484 | bz BASED(pgm_no_vtime2) | ||
485 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
486 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
487 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
488 | pgm_no_vtime2: | ||
489 | l %r1,__TI_task(%r12) | 403 | l %r1,__TI_task(%r12) |
490 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 404 | tmh %r8,0x0001 # kernel per event ? |
491 | bz BASED(kernel_per) | 405 | jz pgm_kprobe |
492 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | 406 | oi __TI_flags+3(%r12),_TIF_PER_TRAP |
493 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 407 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS |
408 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | ||
494 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID | 409 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID |
495 | oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | 410 | 0: REENABLE_IRQS |
496 | l %r3,__LC_PGM_ILC # load program interruption code | 411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
497 | l %r4,__LC_TRANS_EXC_CODE | ||
498 | REENABLE_IRQS | ||
499 | la %r8,0x7f | ||
500 | nr %r8,%r3 # clear per-event-bit and ilc | ||
501 | be BASED(pgm_exit2) # only per or per+check ? | ||
502 | sll %r8,2 | ||
503 | l %r1,BASED(.Ljump_table) | 412 | l %r1,BASED(.Ljump_table) |
504 | l %r1,0(%r8,%r1) # load address of handler routine | 413 | la %r10,0x7f |
505 | la %r2,SP_PTREGS(%r15) # address of register-save area | 414 | n %r10,__PT_INT_CODE(%r11) |
415 | je sysc_return | ||
416 | sll %r10,2 | ||
417 | l %r1,0(%r10,%r1) # load address of handler routine | ||
418 | lr %r2,%r11 # pass pointer to pt_regs | ||
506 | basr %r14,%r1 # branch to interrupt-handler | 419 | basr %r14,%r1 # branch to interrupt-handler |
507 | pgm_exit2: | 420 | j sysc_return |
508 | b BASED(sysc_return) | ||
509 | 421 | ||
510 | # | 422 | # |
511 | # it was a single stepped SVC that is causing all the trouble | 423 | # PER event in supervisor state, must be kprobes |
512 | # | 424 | # |
513 | pgm_svcper: | 425 | pgm_kprobe: |
514 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 426 | REENABLE_IRQS |
515 | CREATE_STACK_FRAME __LC_SAVE_AREA | 427 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
516 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 428 | l %r1,BASED(.Ldo_per_trap) |
517 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 429 | lr %r2,%r11 # pass pointer to pt_regs |
518 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 430 | basr %r14,%r1 # call do_per_trap |
519 | oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | 431 | j sysc_return |
520 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
521 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
522 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
523 | l %r8,__TI_task(%r12) | ||
524 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | ||
525 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS | ||
526 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | ||
527 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
528 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
529 | b BASED(sysc_do_svc) | ||
530 | 432 | ||
531 | # | 433 | # |
532 | # per was called from kernel, must be kprobes | 434 | # single stepped system call |
533 | # | 435 | # |
534 | kernel_per: | 436 | pgm_svcper: |
535 | REENABLE_IRQS | 437 | oi __TI_flags+3(%r12),_TIF_PER_TRAP |
536 | la %r2,SP_PTREGS(%r15) # address of register-save area | 438 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW |
537 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 439 | mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) |
538 | basr %r14,%r1 # branch to do_single_step | 440 | lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs |
539 | b BASED(pgm_exit) | ||
540 | 441 | ||
541 | /* | 442 | /* |
542 | * IO interrupt handler routine | 443 | * IO interrupt handler routine |
@@ -545,28 +446,35 @@ kernel_per: | |||
545 | ENTRY(io_int_handler) | 446 | ENTRY(io_int_handler) |
546 | stck __LC_INT_CLOCK | 447 | stck __LC_INT_CLOCK |
547 | stpt __LC_ASYNC_ENTER_TIMER | 448 | stpt __LC_ASYNC_ENTER_TIMER |
548 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 449 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC |
549 | CREATE_STACK_FRAME __LC_SAVE_AREA+16 | 450 | l %r12,__LC_THREAD_INFO |
550 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 451 | l %r13,__LC_SVC_NEW_PSW+4 |
551 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 452 | lm %r8,%r9,__LC_IO_OLD_PSW |
552 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 453 | tmh %r8,0x0001 # interrupting from user ? |
553 | bz BASED(io_no_vtime) | 454 | jz io_skip |
554 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 455 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER |
555 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 456 | io_skip: |
556 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 457 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
557 | io_no_vtime: | 458 | stm %r0,%r7,__PT_R0(%r11) |
459 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | ||
460 | stm %r8,%r9,__PT_PSW(%r11) | ||
558 | TRACE_IRQS_OFF | 461 | TRACE_IRQS_OFF |
559 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | 462 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
560 | la %r2,SP_PTREGS(%r15) # address of register-save area | 463 | l %r1,BASED(.Ldo_IRQ) |
561 | basr %r14,%r1 # branch to standard irq handler | 464 | lr %r2,%r11 # pass pointer to pt_regs |
465 | basr %r14,%r1 # call do_IRQ | ||
562 | io_return: | 466 | io_return: |
563 | LOCKDEP_SYS_EXIT | 467 | LOCKDEP_SYS_EXIT |
564 | TRACE_IRQS_ON | 468 | TRACE_IRQS_ON |
565 | io_tif: | 469 | io_tif: |
566 | tm __TI_flags+3(%r12),_TIF_WORK_INT | 470 | tm __TI_flags+3(%r12),_TIF_WORK_INT |
567 | bnz BASED(io_work) # there is work to do (signals etc.) | 471 | jnz io_work # there is work to do (signals etc.) |
568 | io_restore: | 472 | io_restore: |
569 | RESTORE_ALL __LC_RETURN_PSW,0 | 473 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
474 | ni __LC_RETURN_PSW+1,0xfd # clean wait state bit | ||
475 | stpt __LC_EXIT_TIMER | ||
476 | lm %r0,%r15,__PT_R0(%r11) | ||
477 | lpsw __LC_RETURN_PSW | ||
570 | io_done: | 478 | io_done: |
571 | 479 | ||
572 | # | 480 | # |
@@ -577,28 +485,29 @@ io_done: | |||
577 | # Before any work can be done, a switch to the kernel stack is required. | 485 | # Before any work can be done, a switch to the kernel stack is required. |
578 | # | 486 | # |
579 | io_work: | 487 | io_work: |
580 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 488 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
581 | bo BASED(io_work_user) # yes -> do resched & signal | 489 | jo io_work_user # yes -> do resched & signal |
582 | #ifdef CONFIG_PREEMPT | 490 | #ifdef CONFIG_PREEMPT |
583 | # check for preemptive scheduling | 491 | # check for preemptive scheduling |
584 | icm %r0,15,__TI_precount(%r12) | 492 | icm %r0,15,__TI_precount(%r12) |
585 | bnz BASED(io_restore) # preemption disabled | 493 | jnz io_restore # preemption disabled |
586 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 494 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
587 | bno BASED(io_restore) | 495 | jno io_restore |
588 | # switch to kernel stack | 496 | # switch to kernel stack |
589 | l %r1,SP_R15(%r15) | 497 | l %r1,__PT_R15(%r11) |
590 | s %r1,BASED(.Lc_spsize) | 498 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
591 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 499 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
592 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 500 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
501 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
593 | lr %r15,%r1 | 502 | lr %r15,%r1 |
594 | # TRACE_IRQS_ON already done at io_return, call | 503 | # TRACE_IRQS_ON already done at io_return, call |
595 | # TRACE_IRQS_OFF to keep things symmetrical | 504 | # TRACE_IRQS_OFF to keep things symmetrical |
596 | TRACE_IRQS_OFF | 505 | TRACE_IRQS_OFF |
597 | l %r1,BASED(.Lpreempt_schedule_irq) | 506 | l %r1,BASED(.Lpreempt_irq) |
598 | basr %r14,%r1 # call preempt_schedule_irq | 507 | basr %r14,%r1 # call preempt_schedule_irq |
599 | b BASED(io_return) | 508 | j io_return |
600 | #else | 509 | #else |
601 | b BASED(io_restore) | 510 | j io_restore |
602 | #endif | 511 | #endif |
603 | 512 | ||
604 | # | 513 | # |
@@ -606,9 +515,10 @@ io_work: | |||
606 | # | 515 | # |
607 | io_work_user: | 516 | io_work_user: |
608 | l %r1,__LC_KERNEL_STACK | 517 | l %r1,__LC_KERNEL_STACK |
609 | s %r1,BASED(.Lc_spsize) | 518 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
610 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 519 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
611 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 520 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
521 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
612 | lr %r15,%r1 | 522 | lr %r15,%r1 |
613 | 523 | ||
614 | # | 524 | # |
@@ -618,24 +528,24 @@ io_work_user: | |||
618 | # | 528 | # |
619 | io_work_tif: | 529 | io_work_tif: |
620 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 530 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
621 | bo BASED(io_mcck_pending) | 531 | jo io_mcck_pending |
622 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 532 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
623 | bo BASED(io_reschedule) | 533 | jo io_reschedule |
624 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 534 | tm __TI_flags+3(%r12),_TIF_SIGPENDING |
625 | bo BASED(io_sigpending) | 535 | jo io_sigpending |
626 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 536 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
627 | bo BASED(io_notify_resume) | 537 | jo io_notify_resume |
628 | b BASED(io_return) # beware of critical section cleanup | 538 | j io_return # beware of critical section cleanup |
629 | 539 | ||
630 | # | 540 | # |
631 | # _TIF_MCCK_PENDING is set, call handler | 541 | # _TIF_MCCK_PENDING is set, call handler |
632 | # | 542 | # |
633 | io_mcck_pending: | 543 | io_mcck_pending: |
634 | # TRACE_IRQS_ON already done at io_return | 544 | # TRACE_IRQS_ON already done at io_return |
635 | l %r1,BASED(.Ls390_handle_mcck) | 545 | l %r1,BASED(.Lhandle_mcck) |
636 | basr %r14,%r1 # TIF bit will be cleared by handler | 546 | basr %r14,%r1 # TIF bit will be cleared by handler |
637 | TRACE_IRQS_OFF | 547 | TRACE_IRQS_OFF |
638 | b BASED(io_return) | 548 | j io_return |
639 | 549 | ||
640 | # | 550 | # |
641 | # _TIF_NEED_RESCHED is set, call schedule | 551 | # _TIF_NEED_RESCHED is set, call schedule |
@@ -643,37 +553,37 @@ io_mcck_pending: | |||
643 | io_reschedule: | 553 | io_reschedule: |
644 | # TRACE_IRQS_ON already done at io_return | 554 | # TRACE_IRQS_ON already done at io_return |
645 | l %r1,BASED(.Lschedule) | 555 | l %r1,BASED(.Lschedule) |
646 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 556 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
647 | basr %r14,%r1 # call scheduler | 557 | basr %r14,%r1 # call scheduler |
648 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 558 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
649 | TRACE_IRQS_OFF | 559 | TRACE_IRQS_OFF |
650 | b BASED(io_return) | 560 | j io_return |
651 | 561 | ||
652 | # | 562 | # |
653 | # _TIF_SIGPENDING is set, call do_signal | 563 | # _TIF_SIGPENDING is set, call do_signal |
654 | # | 564 | # |
655 | io_sigpending: | 565 | io_sigpending: |
656 | # TRACE_IRQS_ON already done at io_return | 566 | # TRACE_IRQS_ON already done at io_return |
657 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
658 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
659 | l %r1,BASED(.Ldo_signal) | 567 | l %r1,BASED(.Ldo_signal) |
568 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
569 | lr %r2,%r11 # pass pointer to pt_regs | ||
660 | basr %r14,%r1 # call do_signal | 570 | basr %r14,%r1 # call do_signal |
661 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 571 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
662 | TRACE_IRQS_OFF | 572 | TRACE_IRQS_OFF |
663 | b BASED(io_return) | 573 | j io_return |
664 | 574 | ||
665 | # | 575 | # |
666 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 576 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
667 | # | 577 | # |
668 | io_notify_resume: | 578 | io_notify_resume: |
669 | # TRACE_IRQS_ON already done at io_return | 579 | # TRACE_IRQS_ON already done at io_return |
670 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
671 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
672 | l %r1,BASED(.Ldo_notify_resume) | 580 | l %r1,BASED(.Ldo_notify_resume) |
673 | basr %r14,%r1 # call do_signal | 581 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
674 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 582 | lr %r2,%r11 # pass pointer to pt_regs |
583 | basr %r14,%r1 # call do_notify_resume | ||
584 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
675 | TRACE_IRQS_OFF | 585 | TRACE_IRQS_OFF |
676 | b BASED(io_return) | 586 | j io_return |
677 | 587 | ||
678 | /* | 588 | /* |
679 | * External interrupt handler routine | 589 | * External interrupt handler routine |
@@ -682,23 +592,25 @@ io_notify_resume: | |||
682 | ENTRY(ext_int_handler) | 592 | ENTRY(ext_int_handler) |
683 | stck __LC_INT_CLOCK | 593 | stck __LC_INT_CLOCK |
684 | stpt __LC_ASYNC_ENTER_TIMER | 594 | stpt __LC_ASYNC_ENTER_TIMER |
685 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 595 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC |
686 | CREATE_STACK_FRAME __LC_SAVE_AREA+16 | 596 | l %r12,__LC_THREAD_INFO |
687 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 597 | l %r13,__LC_SVC_NEW_PSW+4 |
688 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 598 | lm %r8,%r9,__LC_EXT_OLD_PSW |
689 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 599 | tmh %r8,0x0001 # interrupting from user ? |
690 | bz BASED(ext_no_vtime) | 600 | jz ext_skip |
691 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 601 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER |
692 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 602 | ext_skip: |
693 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 603 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
694 | ext_no_vtime: | 604 | stm %r0,%r7,__PT_R0(%r11) |
605 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | ||
606 | stm %r8,%r9,__PT_PSW(%r11) | ||
695 | TRACE_IRQS_OFF | 607 | TRACE_IRQS_OFF |
696 | la %r2,SP_PTREGS(%r15) # address of register-save area | 608 | lr %r2,%r11 # pass pointer to pt_regs |
697 | l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code | 609 | l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code |
698 | l %r4,__LC_EXT_PARAMS # get external parameters | 610 | l %r4,__LC_EXT_PARAMS # get external parameters |
699 | l %r1,BASED(.Ldo_extint) | 611 | l %r1,BASED(.Ldo_extint) |
700 | basr %r14,%r1 | 612 | basr %r14,%r1 # call do_extint |
701 | b BASED(io_return) | 613 | j io_return |
702 | 614 | ||
703 | __critical_end: | 615 | __critical_end: |
704 | 616 | ||
@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler) | |||
710 | stck __LC_MCCK_CLOCK | 622 | stck __LC_MCCK_CLOCK |
711 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer | 623 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer |
712 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | 624 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs |
713 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 625 | l %r12,__LC_THREAD_INFO |
714 | la %r12,__LC_MCK_OLD_PSW | 626 | l %r13,__LC_SVC_NEW_PSW+4 |
627 | lm %r8,%r9,__LC_MCK_OLD_PSW | ||
715 | tm __LC_MCCK_CODE,0x80 # system damage? | 628 | tm __LC_MCCK_CODE,0x80 # system damage? |
716 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid | 629 | jo mcck_panic # yes -> rest of mcck code invalid |
717 | mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | 630 | la %r14,__LC_CPU_TIMER_SAVE_AREA |
631 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
718 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 632 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
719 | bo BASED(1f) | 633 | jo 3f |
720 | la %r14,__LC_SYNC_ENTER_TIMER | 634 | la %r14,__LC_SYNC_ENTER_TIMER |
721 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | 635 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER |
722 | bl BASED(0f) | 636 | jl 0f |
723 | la %r14,__LC_ASYNC_ENTER_TIMER | 637 | la %r14,__LC_ASYNC_ENTER_TIMER |
724 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | 638 | 0: clc 0(8,%r14),__LC_EXIT_TIMER |
725 | bl BASED(0f) | 639 | jl 1f |
726 | la %r14,__LC_EXIT_TIMER | 640 | la %r14,__LC_EXIT_TIMER |
727 | 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | 641 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER |
728 | bl BASED(0f) | 642 | jl 2f |
729 | la %r14,__LC_LAST_UPDATE_TIMER | 643 | la %r14,__LC_LAST_UPDATE_TIMER |
730 | 0: spt 0(%r14) | 644 | 2: spt 0(%r14) |
731 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 645 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
732 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 646 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
733 | bno BASED(mcck_int_main) # no -> skip cleanup critical | 647 | jno mcck_panic # no -> skip cleanup critical |
734 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 648 | tmh %r8,0x0001 # interrupting from user ? |
735 | bnz BASED(mcck_int_main) # from user -> load async stack | 649 | jz mcck_skip |
736 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end) | 650 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER |
737 | bhe BASED(mcck_int_main) | 651 | mcck_skip: |
738 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start) | 652 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT |
739 | bl BASED(mcck_int_main) | 653 | mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA |
740 | l %r14,BASED(.Lcleanup_critical) | 654 | stm %r8,%r9,__PT_PSW(%r11) |
741 | basr %r14,%r14 | 655 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
742 | mcck_int_main: | 656 | l %r1,BASED(.Ldo_machine_check) |
743 | l %r14,__LC_PANIC_STACK # are we already on the panic stack? | 657 | lr %r2,%r11 # pass pointer to pt_regs |
744 | slr %r14,%r15 | 658 | basr %r14,%r1 # call s390_do_machine_check |
745 | sra %r14,PAGE_SHIFT | 659 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
746 | be BASED(0f) | 660 | jno mcck_return |
747 | l %r15,__LC_PANIC_STACK # load panic stack | ||
748 | 0: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
749 | CREATE_STACK_FRAME __LC_SAVE_AREA+32 | ||
750 | mvc SP_PSW(8,%r15),0(%r12) | ||
751 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
752 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | ||
753 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical | ||
754 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
755 | bz BASED(mcck_no_vtime) | ||
756 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER | ||
757 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
758 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
759 | mcck_no_vtime: | ||
760 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
761 | l %r1,BASED(.Ls390_mcck) | ||
762 | basr %r14,%r1 # call machine check handler | ||
763 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
764 | bno BASED(mcck_return) | ||
765 | l %r1,__LC_KERNEL_STACK # switch to kernel stack | 661 | l %r1,__LC_KERNEL_STACK # switch to kernel stack |
766 | s %r1,BASED(.Lc_spsize) | 662 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
767 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 663 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
768 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 664 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
665 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
769 | lr %r15,%r1 | 666 | lr %r15,%r1 |
770 | stosm __SF_EMPTY(%r15),0x04 # turn dat on | 667 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
771 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 668 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
772 | bno BASED(mcck_return) | 669 | jno mcck_return |
773 | TRACE_IRQS_OFF | 670 | TRACE_IRQS_OFF |
774 | l %r1,BASED(.Ls390_handle_mcck) | 671 | l %r1,BASED(.Lhandle_mcck) |
775 | basr %r14,%r1 # call machine check handler | 672 | basr %r14,%r1 # call s390_handle_mcck |
776 | TRACE_IRQS_ON | 673 | TRACE_IRQS_ON |
777 | mcck_return: | 674 | mcck_return: |
778 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW | 675 | mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW |
779 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 676 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
780 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 677 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
781 | bno BASED(0f) | 678 | jno 0f |
782 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 679 | lm %r0,%r15,__PT_R0(%r11) |
783 | stpt __LC_EXIT_TIMER | 680 | stpt __LC_EXIT_TIMER |
784 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 681 | lpsw __LC_RETURN_MCCK_PSW |
785 | 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 682 | 0: lm %r0,%r15,__PT_R0(%r11) |
786 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 683 | lpsw __LC_RETURN_MCCK_PSW |
787 | 684 | ||
788 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 | 685 | mcck_panic: |
686 | l %r14,__LC_PANIC_STACK | ||
687 | slr %r14,%r15 | ||
688 | sra %r14,PAGE_SHIFT | ||
689 | jz 0f | ||
690 | l %r15,__LC_PANIC_STACK | ||
691 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
692 | j mcck_skip | ||
789 | 693 | ||
790 | /* | 694 | /* |
791 | * Restart interruption handler, kick starter for additional CPUs | 695 | * Restart interruption handler, kick starter for additional CPUs |
@@ -799,18 +703,18 @@ restart_base: | |||
799 | stck __LC_LAST_UPDATE_CLOCK | 703 | stck __LC_LAST_UPDATE_CLOCK |
800 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) | 704 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) |
801 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) | 705 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) |
802 | l %r15,__LC_SAVE_AREA+60 # load ksp | 706 | l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp |
803 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs | 707 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs |
804 | lam %a0,%a15,__LC_AREGS_SAVE_AREA | 708 | lam %a0,%a15,__LC_AREGS_SAVE_AREA |
805 | lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone | 709 | lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone |
806 | l %r1,__LC_THREAD_INFO | 710 | l %r1,__LC_THREAD_INFO |
807 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) | 711 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) |
808 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) | 712 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) |
809 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER | 713 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER |
810 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 714 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
811 | basr %r14,0 | 715 | basr %r14,0 |
812 | l %r14,restart_addr-.(%r14) | 716 | l %r14,restart_addr-.(%r14) |
813 | basr %r14,%r14 # branch to start_secondary | 717 | basr %r14,%r14 # call start_secondary |
814 | restart_addr: | 718 | restart_addr: |
815 | .long start_secondary | 719 | .long start_secondary |
816 | .align 8 | 720 | .align 8 |
@@ -835,19 +739,19 @@ restart_go: | |||
835 | # PSW restart interrupt handler | 739 | # PSW restart interrupt handler |
836 | # | 740 | # |
837 | ENTRY(psw_restart_int_handler) | 741 | ENTRY(psw_restart_int_handler) |
838 | st %r15,__LC_SAVE_AREA+48(%r0) # save r15 | 742 | st %r15,__LC_SAVE_AREA_RESTART |
839 | basr %r15,0 | 743 | basr %r15,0 |
840 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack | 744 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack |
841 | l %r15,0(%r15) | 745 | l %r15,0(%r15) |
842 | ahi %r15,-SP_SIZE # make room for pt_regs | 746 | ahi %r15,-__PT_SIZE # create pt_regs on stack |
843 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 747 | stm %r0,%r14,__PT_R0(%r15) |
844 | mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack | 748 | mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART |
845 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw | 749 | mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw |
846 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 750 | ahi %r15,-STACK_FRAME_OVERHEAD |
751 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | ||
847 | basr %r14,0 | 752 | basr %r14,0 |
848 | 1: l %r14,.Ldo_restart-1b(%r14) | 753 | 1: l %r14,.Ldo_restart-1b(%r14) |
849 | basr %r14,%r14 | 754 | basr %r14,%r14 |
850 | |||
851 | basr %r14,0 # load disabled wait PSW if | 755 | basr %r14,0 # load disabled wait PSW if |
852 | 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns | 756 | 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns |
853 | .align 4 | 757 | .align 4 |
@@ -869,215 +773,174 @@ restart_psw_crash: | |||
869 | */ | 773 | */ |
870 | stack_overflow: | 774 | stack_overflow: |
871 | l %r15,__LC_PANIC_STACK # change to panic stack | 775 | l %r15,__LC_PANIC_STACK # change to panic stack |
872 | sl %r15,BASED(.Lc_spsize) | 776 | ahi %r15,-__PT_SIZE # create pt_regs |
873 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 777 | stm %r0,%r7,__PT_R0(%r15) |
874 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 778 | stm %r8,%r9,__PT_PSW(%r15) |
875 | la %r1,__LC_SAVE_AREA | 779 | mvc __PT_R8(32,%r11),0(%r14) |
876 | ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? | 780 | lr %r15,%r11 |
877 | be BASED(0f) | 781 | ahi %r15,-STACK_FRAME_OVERHEAD |
878 | ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? | 782 | l %r1,BASED(1f) |
879 | be BASED(0f) | 783 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
880 | la %r1,__LC_SAVE_AREA+16 | 784 | lr %r2,%r11 # pass pointer to pt_regs |
881 | 0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack | 785 | br %r1 # branch to kernel_stack_overflow |
882 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain | ||
883 | l %r1,BASED(1f) # branch to kernel_stack_overflow | ||
884 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
885 | br %r1 | ||
886 | 1: .long kernel_stack_overflow | 786 | 1: .long kernel_stack_overflow |
887 | #endif | 787 | #endif |
888 | 788 | ||
889 | cleanup_table_system_call: | 789 | cleanup_table: |
890 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 | 790 | .long system_call + 0x80000000 |
891 | cleanup_table_sysc_tif: | 791 | .long sysc_do_svc + 0x80000000 |
892 | .long sysc_tif + 0x80000000, sysc_restore + 0x80000000 | 792 | .long sysc_tif + 0x80000000 |
893 | cleanup_table_sysc_restore: | 793 | .long sysc_restore + 0x80000000 |
894 | .long sysc_restore + 0x80000000, sysc_done + 0x80000000 | 794 | .long sysc_done + 0x80000000 |
895 | cleanup_table_io_tif: | 795 | .long io_tif + 0x80000000 |
896 | .long io_tif + 0x80000000, io_restore + 0x80000000 | 796 | .long io_restore + 0x80000000 |
897 | cleanup_table_io_restore: | 797 | .long io_done + 0x80000000 |
898 | .long io_restore + 0x80000000, io_done + 0x80000000 | ||
899 | 798 | ||
900 | cleanup_critical: | 799 | cleanup_critical: |
901 | clc 4(4,%r12),BASED(cleanup_table_system_call) | 800 | cl %r9,BASED(cleanup_table) # system_call |
902 | bl BASED(0f) | 801 | jl 0f |
903 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) | 802 | cl %r9,BASED(cleanup_table+4) # sysc_do_svc |
904 | bl BASED(cleanup_system_call) | 803 | jl cleanup_system_call |
905 | 0: | 804 | cl %r9,BASED(cleanup_table+8) # sysc_tif |
906 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif) | 805 | jl 0f |
907 | bl BASED(0f) | 806 | cl %r9,BASED(cleanup_table+12) # sysc_restore |
908 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4) | 807 | jl cleanup_sysc_tif |
909 | bl BASED(cleanup_sysc_tif) | 808 | cl %r9,BASED(cleanup_table+16) # sysc_done |
910 | 0: | 809 | jl cleanup_sysc_restore |
911 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore) | 810 | cl %r9,BASED(cleanup_table+20) # io_tif |
912 | bl BASED(0f) | 811 | jl 0f |
913 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4) | 812 | cl %r9,BASED(cleanup_table+24) # io_restore |
914 | bl BASED(cleanup_sysc_restore) | 813 | jl cleanup_io_tif |
915 | 0: | 814 | cl %r9,BASED(cleanup_table+28) # io_done |
916 | clc 4(4,%r12),BASED(cleanup_table_io_tif) | 815 | jl cleanup_io_restore |
917 | bl BASED(0f) | 816 | 0: br %r14 |
918 | clc 4(4,%r12),BASED(cleanup_table_io_tif+4) | ||
919 | bl BASED(cleanup_io_tif) | ||
920 | 0: | ||
921 | clc 4(4,%r12),BASED(cleanup_table_io_restore) | ||
922 | bl BASED(0f) | ||
923 | clc 4(4,%r12),BASED(cleanup_table_io_restore+4) | ||
924 | bl BASED(cleanup_io_restore) | ||
925 | 0: | ||
926 | br %r14 | ||
927 | 817 | ||
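The cleanup_critical dispatcher above is a range lookup: the interrupted address (held in %r9 in the new code, read from the old PSW in the old code) is compared against the ordered label addresses in cleanup_table, and landing inside one of the critical ranges selects the matching fix-up routine. A rough C sketch of that idea, assuming a sorted table; the type and function names below are illustrative, not from the kernel source:

#include <stddef.h>

typedef void (*cleanup_fn)(void);

struct cleanup_range {
	unsigned long start;	/* e.g. address of system_call  */
	unsigned long end;	/* e.g. address of sysc_do_svc  */
	cleanup_fn fix;		/* e.g. cleanup_system_call     */
};

static void dispatch_cleanup(unsigned long addr,
			     const struct cleanup_range *tab, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (addr >= tab[i].start && addr < tab[i].end) {
			tab[i].fix();	/* interrupted inside this critical range */
			return;
		}
	}
	/* outside every critical range: nothing needs to be repaired */
}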
928 | cleanup_system_call: | 818 | cleanup_system_call: |
929 | mvc __LC_RETURN_PSW(8),0(%r12) | 819 | # check if stpt has been executed |
930 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | 820 | cl %r9,BASED(cleanup_system_call_insn) |
931 | bh BASED(0f) | 821 | jh 0f |
932 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
933 | c %r12,BASED(.Lmck_old_psw) | ||
934 | be BASED(0f) | ||
935 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 822 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
936 | 0: c %r12,BASED(.Lmck_old_psw) | 823 | chi %r11,__LC_SAVE_AREA_ASYNC |
937 | la %r12,__LC_SAVE_AREA+32 | 824 | je 0f |
938 | be BASED(0f) | 825 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
939 | la %r12,__LC_SAVE_AREA+16 | 826 | 0: # check if stm has been executed |
940 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | 827 | cl %r9,BASED(cleanup_system_call_insn+4) |
941 | bhe BASED(cleanup_vtime) | 828 | jh 0f |
942 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | 829 | mvc __LC_SAVE_AREA_SYNC(32),0(%r11) |
943 | bh BASED(0f) | 830 | 0: # set up saved registers r12, and r13 |
944 | mvc __LC_SAVE_AREA(16),0(%r12) | 831 | st %r12,16(%r11) # r12 thread-info pointer |
945 | 0: st %r13,4(%r12) | 832 | st %r13,20(%r11) # r13 literal-pool pointer |
946 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | 833 | # check if the user time calculation has been done |
947 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 834 | cl %r9,BASED(cleanup_system_call_insn+8) |
948 | st %r15,12(%r12) | 835 | jh 0f |
949 | CREATE_STACK_FRAME __LC_SAVE_AREA | 836 | l %r10,__LC_EXIT_TIMER |
950 | mvc 0(4,%r12),__LC_THREAD_INFO | 837 | l %r15,__LC_EXIT_TIMER+4 |
951 | l %r12,__LC_THREAD_INFO | 838 | SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER |
952 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 839 | ADD64 %r10,%r15,__LC_USER_TIMER |
953 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 840 | st %r10,__LC_USER_TIMER |
954 | oi __TI_flags+3(%r12),_TIF_SYSCALL | 841 | st %r15,__LC_USER_TIMER+4 |
955 | cleanup_vtime: | 842 | 0: # check if the system time calculation has been done |
956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 843 | cl %r9,BASED(cleanup_system_call_insn+12) |
957 | bhe BASED(cleanup_stime) | 844 | jh 0f |
958 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 845 | l %r10,__LC_LAST_UPDATE_TIMER |
959 | cleanup_stime: | 846 | l %r15,__LC_LAST_UPDATE_TIMER+4 |
960 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) | 847 | SUB64 %r10,%r15,__LC_EXIT_TIMER |
961 | bh BASED(cleanup_update) | 848 | ADD64 %r10,%r15,__LC_SYSTEM_TIMER |
962 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 849 | st %r10,__LC_SYSTEM_TIMER |
963 | cleanup_update: | 850 | st %r15,__LC_SYSTEM_TIMER+4 |
851 | 0: # update accounting time stamp | ||
964 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 852 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
965 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | 853 | # set up saved register 11 |
966 | la %r12,__LC_RETURN_PSW | 854 | l %r15,__LC_KERNEL_STACK |
855 | ahi %r15,-__PT_SIZE | ||
856 | st %r15,12(%r11) # r11 pt_regs pointer | ||
857 | # fill pt_regs | ||
858 | mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC | ||
859 | stm %r0,%r7,__PT_R0(%r15) | ||
860 | mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
861 | mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC | ||
862 | # setup saved register 15 | ||
863 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
864 | st %r15,28(%r11) # r15 stack pointer | ||
865 | # set new psw address and exit | ||
866 | l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 | ||
967 | br %r14 | 867 | br %r14 |
968 | cleanup_system_call_insn: | 868 | cleanup_system_call_insn: |
969 | .long sysc_saveall + 0x80000000 | ||
970 | .long system_call + 0x80000000 | 869 | .long system_call + 0x80000000 |
971 | .long sysc_vtime + 0x80000000 | 870 | .long sysc_stm + 0x80000000 |
972 | .long sysc_stime + 0x80000000 | 871 | .long sysc_vtime + 0x80000000 + 36 |
973 | .long sysc_update + 0x80000000 | 872 | .long sysc_vtime + 0x80000000 + 76 |
974 | 873 | ||
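The SUB64/ADD64 sequences in cleanup_system_call above re-apply the virtual time accounting that the interrupted entry path had not finished: user time grows by the interval between the last kernel exit and the system-call entry, system time by the interval between the last update and that kernel exit, and the accounting stamp is reset. Because the s390 CPU timer counts down, subtracting the newer sample from the older one yields the elapsed time. A minimal C sketch of the arithmetic; the struct and function names are illustrative, only the __LC_* roles come from the code above:

#include <stdint.h>

struct vtime_fields {			/* 64-bit CPU-timer snapshots */
	uint64_t sync_enter_timer;	/* __LC_SYNC_ENTER_TIMER  */
	uint64_t exit_timer;		/* __LC_EXIT_TIMER        */
	uint64_t last_update_timer;	/* __LC_LAST_UPDATE_TIMER */
	uint64_t user_timer;		/* __LC_USER_TIMER        */
	uint64_t system_timer;		/* __LC_SYSTEM_TIMER      */
};

static void redo_vtime(struct vtime_fields *lc)
{
	/* time spent in user space since the last kernel exit */
	lc->user_timer   += lc->exit_timer - lc->sync_enter_timer;
	/* time spent in the kernel up to that exit */
	lc->system_timer += lc->last_update_timer - lc->exit_timer;
	/* new accounting time stamp */
	lc->last_update_timer = lc->sync_enter_timer;
}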
975 | cleanup_sysc_tif: | 874 | cleanup_sysc_tif: |
976 | mvc __LC_RETURN_PSW(4),0(%r12) | 875 | l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 |
977 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif) | ||
978 | la %r12,__LC_RETURN_PSW | ||
979 | br %r14 | 876 | br %r14 |
980 | 877 | ||
981 | cleanup_sysc_restore: | 878 | cleanup_sysc_restore: |
982 | clc 4(4,%r12),BASED(cleanup_sysc_restore_insn) | 879 | cl %r9,BASED(cleanup_sysc_restore_insn) |
983 | be BASED(2f) | 880 | jhe 0f |
984 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | 881 | l %r9,12(%r11) # get saved pointer to pt_regs |
985 | c %r12,BASED(.Lmck_old_psw) | 882 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) |
986 | be BASED(0f) | 883 | mvc 0(32,%r11),__PT_R8(%r9) |
987 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 884 | lm %r0,%r7,__PT_R0(%r9) |
988 | 0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4) | 885 | 0: lm %r8,%r9,__LC_RETURN_PSW |
989 | be BASED(2f) | ||
990 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | ||
991 | c %r12,BASED(.Lmck_old_psw) | ||
992 | la %r12,__LC_SAVE_AREA+32 | ||
993 | be BASED(1f) | ||
994 | la %r12,__LC_SAVE_AREA+16 | ||
995 | 1: mvc 0(16,%r12),SP_R12(%r15) | ||
996 | lm %r0,%r11,SP_R0(%r15) | ||
997 | l %r15,SP_R15(%r15) | ||
998 | 2: la %r12,__LC_RETURN_PSW | ||
999 | br %r14 | 886 | br %r14 |
1000 | cleanup_sysc_restore_insn: | 887 | cleanup_sysc_restore_insn: |
1001 | .long sysc_done - 4 + 0x80000000 | 888 | .long sysc_done - 4 + 0x80000000 |
1002 | .long sysc_done - 8 + 0x80000000 | ||
1003 | 889 | ||
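The "l %r9,12(%r11)" and "mvc 0(32,%r11),__PT_R8(%r9)" lines above rely on the layout of the 32-byte lowcore save area that %r11 points at: registers %r8-%r15 are stored in order, four bytes each, so offset 12 holds the saved %r11 (which cleanup_system_call set to the pt_regs pointer), offset 16 the thread-info pointer kept in %r12, offset 20 the literal-pool pointer kept in %r13, and offset 28 the stack pointer kept in %r15. A C view of that layout for reference; the struct name is illustrative, the field roles follow the comments in the code above:

#include <stdint.h>

struct lc_save_area {		/* __LC_SAVE_AREA_SYNC / __LC_SAVE_AREA_ASYNC */
	uint32_t r8;		/* offset  0 */
	uint32_t r9;		/* offset  4 */
	uint32_t r10;		/* offset  8 */
	uint32_t r11;		/* offset 12: pointer to pt_regs      */
	uint32_t r12;		/* offset 16: thread_info pointer     */
	uint32_t r13;		/* offset 20: literal-pool pointer    */
	uint32_t r14;		/* offset 24 */
	uint32_t r15;		/* offset 28: kernel stack pointer    */
};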
1004 | cleanup_io_tif: | 890 | cleanup_io_tif: |
1005 | mvc __LC_RETURN_PSW(4),0(%r12) | 891 | l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 |
1006 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif) | ||
1007 | la %r12,__LC_RETURN_PSW | ||
1008 | br %r14 | 892 | br %r14 |
1009 | 893 | ||
1010 | cleanup_io_restore: | 894 | cleanup_io_restore: |
1011 | clc 4(4,%r12),BASED(cleanup_io_restore_insn) | 895 | cl %r9,BASED(cleanup_io_restore_insn) |
1012 | be BASED(1f) | 896 | jhe 0f |
1013 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | 897 | l %r9,12(%r11) # get saved r11 pointer to pt_regs |
1014 | clc 4(4,%r12),BASED(cleanup_io_restore_insn+4) | 898 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) |
1015 | be BASED(1f) | 899 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit |
1016 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 900 | mvc 0(32,%r11),__PT_R8(%r9) |
1017 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | 901 | lm %r0,%r7,__PT_R0(%r9) |
1018 | lm %r0,%r11,SP_R0(%r15) | 902 | 0: lm %r8,%r9,__LC_RETURN_PSW |
1019 | l %r15,SP_R15(%r15) | ||
1020 | 1: la %r12,__LC_RETURN_PSW | ||
1021 | br %r14 | 903 | br %r14 |
1022 | cleanup_io_restore_insn: | 904 | cleanup_io_restore_insn: |
1023 | .long io_done - 4 + 0x80000000 | 905 | .long io_done - 4 + 0x80000000 |
1024 | .long io_done - 8 + 0x80000000 | ||
1025 | 906 | ||
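cleanup_io_restore differs from the sysc variant by one instruction: "ni __LC_RETURN_PSW+1,0xfd" masks byte 1 of the rebuilt return PSW, clearing bit 14, the wait-state bit, so that the repeated interrupt return does not drop back into a wait PSW. A one-line C equivalent of the bit operation, with an illustrative helper name:

#include <stdint.h>

static inline void psw_clear_wait(uint8_t psw[8])
{
	psw[1] &= 0xfd;		/* clear PSW bit 14 (wait state) */
}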
1026 | /* | 907 | /* |
1027 | * Integer constants | 908 | * Integer constants |
1028 | */ | 909 | */ |
1029 | .align 4 | 910 | .align 4 |
1030 | .Lc_spsize: .long SP_SIZE | 911 | .Lnr_syscalls: .long NR_syscalls |
1031 | .Lc_overhead: .long STACK_FRAME_OVERHEAD | ||
1032 | .Lnr_syscalls: .long NR_syscalls | ||
1033 | .L0x018: .short 0x018 | ||
1034 | .L0x020: .short 0x020 | ||
1035 | .L0x028: .short 0x028 | ||
1036 | .L0x030: .short 0x030 | ||
1037 | .L0x038: .short 0x038 | ||
1038 | .Lc_1: .long 1 | ||
1039 | 912 | ||
1040 | /* | 913 | /* |
1041 | * Symbol constants | 914 | * Symbol constants |
1042 | */ | 915 | */ |
1043 | .Ls390_mcck: .long s390_do_machine_check | 916 | .Ldo_machine_check: .long s390_do_machine_check |
1044 | .Ls390_handle_mcck: | 917 | .Lhandle_mcck: .long s390_handle_mcck |
1045 | .long s390_handle_mcck | 918 | .Ldo_IRQ: .long do_IRQ |
1046 | .Lmck_old_psw: .long __LC_MCK_OLD_PSW | 919 | .Ldo_extint: .long do_extint |
1047 | .Ldo_IRQ: .long do_IRQ | 920 | .Ldo_signal: .long do_signal |
1048 | .Ldo_extint: .long do_extint | 921 | .Ldo_notify_resume: .long do_notify_resume |
1049 | .Ldo_signal: .long do_signal | 922 | .Ldo_per_trap: .long do_per_trap |
1050 | .Ldo_notify_resume: | 923 | .Ldo_execve: .long do_execve |
1051 | .long do_notify_resume | 924 | .Lexecve_tail: .long execve_tail |
1052 | .Lhandle_per: .long do_per_trap | 925 | .Ljump_table: .long pgm_check_table |
1053 | .Ldo_execve: .long do_execve | 926 | .Lschedule: .long schedule |
1054 | .Lexecve_tail: .long execve_tail | ||
1055 | .Ljump_table: .long pgm_check_table | ||
1056 | .Lschedule: .long schedule | ||
1057 | #ifdef CONFIG_PREEMPT | 927 | #ifdef CONFIG_PREEMPT |
1058 | .Lpreempt_schedule_irq: | 928 | .Lpreempt_irq: .long preempt_schedule_irq |
1059 | .long preempt_schedule_irq | ||
1060 | #endif | 929 | #endif |
1061 | .Ltrace_entry: .long do_syscall_trace_enter | 930 | .Ltrace_enter: .long do_syscall_trace_enter |
1062 | .Ltrace_exit: .long do_syscall_trace_exit | 931 | .Ltrace_exit: .long do_syscall_trace_exit |
1063 | .Lschedtail: .long schedule_tail | 932 | .Lschedule_tail: .long schedule_tail |
1064 | .Lsysc_table: .long sys_call_table | 933 | .Lsys_call_table: .long sys_call_table |
934 | .Lsysc_per: .long sysc_per + 0x80000000 | ||
1065 | #ifdef CONFIG_TRACE_IRQFLAGS | 935 | #ifdef CONFIG_TRACE_IRQFLAGS |
1066 | .Ltrace_irq_on_caller: | 936 | .Lhardirqs_on: .long trace_hardirqs_on_caller |
1067 | .long trace_hardirqs_on_caller | 937 | .Lhardirqs_off: .long trace_hardirqs_off_caller |
1068 | .Ltrace_irq_off_caller: | ||
1069 | .long trace_hardirqs_off_caller | ||
1070 | #endif | 938 | #endif |
1071 | #ifdef CONFIG_LOCKDEP | 939 | #ifdef CONFIG_LOCKDEP |
1072 | .Llockdep_sys_exit: | 940 | .Llockdep_sys_exit: .long lockdep_sys_exit |
1073 | .long lockdep_sys_exit | ||
1074 | #endif | 941 | #endif |
1075 | .Lcritical_start: | 942 | .Lcritical_start: .long __critical_start + 0x80000000 |
1076 | .long __critical_start + 0x80000000 | 943 | .Lcritical_length: .long __critical_end - __critical_start |
1077 | .Lcritical_end: | ||
1078 | .long __critical_end + 0x80000000 | ||
1079 | .Lcleanup_critical: | ||
1080 | .long cleanup_critical | ||
1081 | 944 | ||
1082 | .section .rodata, "a" | 945 | .section .rodata, "a" |
1083 | #define SYSCALL(esa,esame,emu) .long esa | 946 | #define SYSCALL(esa,esame,emu) .long esa |