Diffstat (limited to 'arch/s390/kernel/entry64.S')

-rw-r--r--   arch/s390/kernel/entry64.S   443

1 file changed, 221 insertions, 222 deletions

diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 29bbfbab7332..0f758c329a5d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -4,8 +4,8 @@
 *
 * Copyright (C) IBM Corp. 1999,2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Hartmut Penner (hp@de.ibm.com),
 *            Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 */

@@ -24,29 +24,29 @@
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    = STACK_FRAME_OVERHEAD
SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW       = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0        = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1        = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2        = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3        = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4        = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5        = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6        = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7        = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8        = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9        = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10       = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11       = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12       = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13       = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14       = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15       = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC       = STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      = STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
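For orientation, the SP_* symbols above are byte offsets into the pt_regs image that the entry macros build STACK_FRAME_OVERHEAD bytes above the stack pointer, and STACK_SIZE is plain shift arithmetic. A minimal C sketch of the same arithmetic, with illustrative values standing in for the asm-offsets constants (the real STACK_FRAME_OVERHEAD, __PT_GPRS, PAGE_SHIFT and THREAD_ORDER come from the s390 headers):

#include <stdio.h>

/* Illustrative values only; the kernel generates the real ones. */
#define STACK_FRAME_OVERHEAD 160      /* assumed size of a bare stack frame */
#define PT_GPRS               24      /* assumed offset of gprs[16] inside pt_regs */

#define SP_R(n)  (STACK_FRAME_OVERHEAD + PT_GPRS + 8 * (n))    /* SP_R0 .. SP_R15 */

#define PAGE_SHIFT   12               /* 4 KB pages */
#define THREAD_ORDER  2               /* assumed order of the kernel stack */
#define STACK_SHIFT  (PAGE_SHIFT + THREAD_ORDER)
#define STACK_SIZE   (1UL << STACK_SHIFT)

int main(void)
{
        printf("SP_R2 = %d\n", SP_R(2));                /* 160 + 24 + 16 = 200 here */
        printf("STACK_SIZE = %lu bytes\n", STACK_SIZE); /* 1 << 14 = 16384 here */
        return 0;
}

With these assumed values each task gets a 16 KB kernel stack; the point is only that every SP_* offset is a fixed distance from %r15.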
@@ -71,14 +71,14 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
#define TRACE_IRQS_OFF
#endif

        .macro STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        stpt \lc_offset
#endif
        .endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .macro UPDATE_VTIME lc_from,lc_to,lc_sum
        lg %r10,\lc_from
        slg %r10,\lc_to
        alg %r10,\lc_sum
@@ -94,7 +94,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 * R15 - kernel stack pointer
 */

        .macro SAVE_ALL_BASE savearea
        stmg %r12,%r15,\savearea
        larl %r13,system_call
        .endm
@@ -139,8 +139,8 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
        .endm

        .macro CREATE_STACK_FRAME psworg,savearea
        aghi %r15,-SP_SIZE # make room for registers & psw
        mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
        la %r12,\psworg
        stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
        icm %r12,12,__LC_SVC_ILC
@@ -149,7 +149,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
        mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
        la %r12,0
        stg %r12,__SF_BACKCHAIN(%r15)
        .endm

        .macro RESTORE_ALL psworg,sync
        mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
@@ -168,29 +168,29 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 * Returns:
 *  gpr2 = prev
 */
        .globl __switch_to
__switch_to:
        tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
        jz __switch_to_noper # if not we're fine
        stctg %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
        clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
        je __switch_to_noper # we got away without bashing TLB's
        lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
        lg %r4,__THREAD_info(%r2) # get thread_info of prev
        tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
        jz __switch_to_no_mcck
        ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
        lg %r4,__THREAD_info(%r3) # get thread_info of next
        oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
        stmg %r6,%r15,__SF_GPRS(%r15) # store __switch_to registers of prev task
        stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
        lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
        lmg %r6,%r15,__SF_GPRS(%r15) # load __switch_to registers of next task
        stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct
        lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
        lg %r3,__THREAD_info(%r3) # load thread_info from task struct
        stg %r3,__LC_THREAD_INFO
        aghi %r3,STACK_SIZE
        stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
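At C level the task switch above is mostly bookkeeping on task_struct, thread_info and the lowcore. A rough sketch with toy types, leaving out the PER control-register reload and the pid-into-cr4 update (field names here are stand-ins for the __THREAD_*, __TI_* and __LC_* offsets, not the kernel's real declarations):

#define TIF_MCCK_PENDING (1u << 1)     /* illustrative bit value */
#define STACK_SIZE       (1UL << 14)   /* assumed 16 KB, see STACK_SHIFT above */

struct thread_info { unsigned long flags; };

struct task {                          /* toy stand-in for task_struct */
        unsigned long ksp;             /* __THREAD_ksp */
        struct thread_info *ti;        /* __THREAD_info */
};

struct lowcore {                       /* toy stand-in for the lowcore fields used above */
        struct task *current_task;         /* __LC_CURRENT */
        struct thread_info *thread_info;   /* __LC_THREAD_INFO */
        unsigned long kernel_stack;        /* __LC_KERNEL_STACK */
};

static unsigned long switch_to_sketch(struct lowcore *lc, struct task *prev,
                                      struct task *next, unsigned long sp)
{
        /* pending machine-check work follows the CPU to the next task */
        if (prev->ti->flags & TIF_MCCK_PENDING) {
                prev->ti->flags &= ~TIF_MCCK_PENDING;
                next->ti->flags |= TIF_MCCK_PENDING;
        }
        prev->ksp = sp;                         /* stg %r15,__THREAD_ksp(%r2) */
        sp = next->ksp;                         /* lg %r15,__THREAD_ksp(%r3) */
        lc->current_task = next;                /* stg %r3,__LC_CURRENT */
        lc->thread_info  = next->ti;            /* stg %r3,__LC_THREAD_INFO */
        lc->kernel_stack = (unsigned long)next->ti + STACK_SIZE; /* top of next's stack */
        return sp;                              /* new kernel stack pointer in %r15 */
}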
@@ -202,14 +202,14 @@ __critical_start:
 * are executed with interrupts enabled.
 */

        .globl system_call
system_call:
        STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
        SAVE_ALL_BASE __LC_SAVE_AREA
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -222,45 +222,45 @@ sysc_update:
#endif
sysc_do_svc:
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        slag %r7,%r7,2 # *4 and test for svc 0
        jnz sysc_nr_ok
# svc 0: system call number in %r1
        cl %r1,BASED(.Lnr_syscalls)
        jnl sysc_nr_ok
        lgfr %r7,%r1 # clear high word in r1
        slag %r7,%r7,2 # svc 0: system call number in %r1
sysc_nr_ok:
        mvc SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
        larl %r10,sys_call_table
#ifdef CONFIG_COMPAT
        tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
        jno sysc_noemu
        larl %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
        tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        lgf %r8,0(%r7,%r10) # load address of system call routine
        jnz sysc_tracesys
        basr %r14,%r8 # call sys_xxxx
        stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
# ATTENTION: check sys_execve_glue before
# changing anything here !!

sysc_return:
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        jno sysc_leave
        tm __TI_flags+7(%r9),_TIF_WORK_SVC
        jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
        RESTORE_ALL __LC_RETURN_PSW,1

#
# recheck if there is more work to do
#
sysc_work_loop:
        tm __TI_flags+7(%r9),_TIF_WORK_SVC
        jz sysc_leave # there is no work to do
#
# One of the work bits is on. Find out which one.
#
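The dispatch in sysc_do_svc is a plain table lookup: the syscall number is scaled by four because sys_call_table is built from 32-bit .long entries (see the SYSCALL macro at the end of the file), lgf sign-extends the entry into a 64-bit address, and for "svc 0" the real number is passed in %r1. A self-contained toy model in C that keeps only that shape, with made-up handlers and without the trace/audit and compat paths:

#include <stdio.h>

#define NR_SYSCALLS 4                  /* toy table size */

static long sys_ni_syscall(void) { return -38; }   /* -ENOSYS */
static long sys_demo(void)       { return 42; }

typedef long (*sys_fn)(void);
static const sys_fn sys_call_table_demo[NR_SYSCALLS] = {
        sys_ni_syscall, sys_demo, sys_ni_syscall, sys_ni_syscall,
};

static long do_svc(unsigned int svc_code, unsigned long r1)
{
        unsigned int nr = svc_code;            /* llgh %r7,__LC_SVC_INT_CODE */
        if (nr == 0 && r1 < NR_SYSCALLS)       /* svc 0: number passed in %r1 */
                nr = (unsigned int)r1;         /* lgfr %r7,%r1 */
        /* slag %r7,%r7,2 scales nr to a byte offset; lgf + basr do the call */
        return sys_call_table_demo[nr]();
}

int main(void)
{
        printf("%ld\n", do_svc(1, 0));         /* ordinary "svc 1" */
        printf("%ld\n", do_svc(0, 1));         /* "svc 0" with the number in r1 */
        return 0;
}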
@@ -279,25 +279,25 @@ sysc_work:

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
        larl %r14,sysc_work_loop
        jg schedule # return point is sysc_return

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
        larl %r14,sysc_work_loop
        jg s390_handle_mcck # TIF bit will be cleared by handler

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
sysc_sigpending:
        ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        la %r2,SP_PTREGS(%r15) # load pt_regs
        brasl %r14,do_signal # call do_signal
        tm __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo sysc_restart
        tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
@@ -309,11 +309,11 @@ sysc_sigpending:
#
sysc_restart:
        ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        lg %r7,SP_R2(%r15) # load new svc number
        slag %r7,%r7,2 # *4
        mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
        lmg %r2,%r6,SP_R2(%r15) # load svc arguments
        j sysc_do_restart # restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
@@ -326,49 +326,48 @@ sysc_singlestep:
        larl %r14,sysc_return # load adr. of system return
        jg do_single_step # branch to do_sigtrap

#
# call syscall_trace before and after system call
# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        la %r3,0
        srl %r7,2
        stg %r7,SP_R2(%r15)
        brasl %r14,syscall_trace
        lghi %r0,NR_syscalls
        clg %r0,SP_R2(%r15)
        jnh sysc_tracenogo
        lg %r7,SP_R2(%r15) # strace might have changed the
        sll %r7,2 # system call
        lgf %r8,0(%r7,%r10)
sysc_tracego:
        lmg %r3,%r6,SP_R3(%r15)
        lg %r2,SP_ORIG_R2(%r15)
        basr %r14,%r8 # call sys_xxx
        stg %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
        tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        jz sysc_return
        la %r2,SP_PTREGS(%r15) # load pt_regs
        la %r3,1
        larl %r14,sysc_return # return point is sysc_return
        jg syscall_trace

#
# a new process exits the kernel with ret_from_fork
#
        .globl ret_from_fork
ret_from_fork:
        lg %r13,__LC_SVC_NEW_PSW+8
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
        jo 0f
        stg %r15,SP_R15(%r15) # store stack pointer for new kthread
0:      brasl %r14,schedule_tail
        TRACE_IRQS_ON
        stosm 24(%r15),0x03 # reenable interrupts
        j sysc_return

#
@@ -377,78 +376,78 @@ ret_from_fork:
# but are called with different parameter.
# return-address is set up above
#
sys_clone_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        jg sys_clone # branch to sys_clone

#ifdef CONFIG_COMPAT
sys32_clone_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        jg sys32_clone # branch to sys32_clone
#endif

sys_fork_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        jg sys_fork # branch to sys_fork

sys_vfork_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        jg sys_vfork # branch to sys_vfork

sys_execve_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        lgr %r12,%r14 # save return address
        brasl %r14,sys_execve # call sys_execve
        ltgr %r2,%r2 # check if execve failed
        bnz 0(%r12) # it did fail -> store result in gpr2
        b 6(%r12) # SKIP STG 2,SP_R2(15) in
                  # system_call/sysc_tracesys
#ifdef CONFIG_COMPAT
sys32_execve_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        lgr %r12,%r14 # save return address
        brasl %r14,sys32_execve # call sys32_execve
        ltgr %r2,%r2 # check if execve failed
        bnz 0(%r12) # it did fail -> store result in gpr2
        b 6(%r12) # SKIP STG 2,SP_R2(15) in
                  # system_call/sysc_tracesys
#endif

sys_sigreturn_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys_sigreturn # branch to sys_sigreturn

#ifdef CONFIG_COMPAT
sys32_sigreturn_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys32_sigreturn # branch to sys32_sigreturn
#endif

sys_rt_sigreturn_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys_rt_sigreturn # branch to sys_sigreturn

#ifdef CONFIG_COMPAT
sys32_rt_sigreturn_glue:
        la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys32_rt_sigreturn # branch to sys32_sigreturn
#endif

sys_sigaltstack_glue:
        la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys_sigaltstack # branch to sys_sigreturn

#ifdef CONFIG_COMPAT
sys32_sigaltstack_glue:
        la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
        jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
#endif

/*
 * Program check handler routine
 */

        .globl pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
@@ -465,8 +464,8 @@ pgm_check_handler:
 */
        STORE_TIMER __LC_SYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA
        tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
        jnz pgm_per # got per exception -> special case
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -478,29 +477,29 @@ pgm_check_handler:
pgm_no_vtime:
#endif
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        lgf %r3,__LC_PGM_ILC # load program interruption code
        lghi %r8,0x7f
        ngr %r8,%r3
pgm_do_call:
        sll %r8,3
        larl %r1,pgm_check_table
        lg %r1,0(%r8,%r1) # load address of handler routine
        la %r2,SP_PTREGS(%r15) # address of register-save area
        larl %r14,sysc_return
        br %r1 # branch to interrupt-handler

#
# handle per exception
#
pgm_per:
        tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
        jnz pgm_per_std # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
        clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
        je pgm_svcper
# no interesting special case, ignore PER event
        lmg %r12,%r15,__LC_SAVE_AREA
        lpswe __LC_PGM_OLD_PSW

#
# Normal per exception
@@ -524,9 +523,9 @@ pgm_no_vtime2:
        mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
        mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        lgf %r3,__LC_PGM_ILC # load program interruption code
        lghi %r8,0x7f
        ngr %r8,%r3 # clear per-event-bit and ilc
        je sysc_return
        j pgm_do_call

@@ -544,7 +543,7 @@ pgm_svcper:
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
        llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        lg %r1,__TI_task(%r9)
        mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
@@ -568,7 +567,7 @@ kernel_per:
/*
 * IO interrupt handler routine
 */
        .globl io_int_handler
io_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck __LC_INT_CLOCK
@@ -585,42 +584,42 @@ io_no_vtime:
#endif
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        TRACE_IRQS_OFF
        la %r2,SP_PTREGS(%r15) # address of register-save area
        brasl %r14,do_IRQ # call standard irq handler
        TRACE_IRQS_ON

io_return:
        tm SP_PSW+1(%r15),0x01 # returning to user ?
#ifdef CONFIG_PREEMPT
        jno io_preempt # no -> check for preemptive scheduling
#else
        jno io_leave # no-> skip resched & signal
#endif
        tm __TI_flags+7(%r9),_TIF_WORK_INT
        jnz io_work # there is work to do (signals etc.)
io_leave:
        RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#ifdef CONFIG_PREEMPT
io_preempt:
        icm %r0,15,__TI_precount(%r9)
        jnz io_leave
# switch to kernel stack
        lg %r1,SP_R15(%r15)
        aghi %r1,-SP_SIZE
        mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr %r15,%r1
io_resume_loop:
        tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jno io_leave
        larl %r1,.Lc_pactive
        mvc __TI_precount(4,%r9),0(%r1)
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        brasl %r14,schedule # call schedule
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        xc __TI_precount(4,%r9),__TI_precount(%r9)
        j io_resume_loop
#endif

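The CONFIG_PREEMPT block above (io_preempt / io_resume_loop) re-enters the scheduler as long as the interrupted kernel code is preemptible and still needs a reschedule, marking the thread with PREEMPT_ACTIVE while it does so. A C sketch of that loop only, with illustrative constants and hypothetical helpers standing in for stosm/stnsm; the stack switch that precedes the loop is omitted:

#define PREEMPT_ACTIVE   0x40000000u   /* illustrative value (.Lc_pactive) */
#define TIF_NEED_RESCHED (1u << 2)     /* illustrative bit value */

struct thread_info {
        unsigned int preempt_count;    /* __TI_precount */
        unsigned long flags;           /* __TI_flags */
};

/* hypothetical stand-ins for "stosm ...,0x03", "stnsm ...,0xfc" and the scheduler */
void local_irq_enable(void);
void local_irq_disable(void);
void schedule(void);

static void io_preempt_sketch(struct thread_info *ti)
{
        if (ti->preempt_count)                     /* icm %r0,15,__TI_precount */
                return;                            /* not preemptible -> io_leave */
        while (ti->flags & TIF_NEED_RESCHED) {     /* io_resume_loop */
                ti->preempt_count = PREEMPT_ACTIVE;   /* mvc __TI_precount(4,%r9),... */
                local_irq_enable();
                schedule();
                local_irq_disable();
                ti->preempt_count = 0;             /* xc __TI_precount(4,%r9),... */
        }
}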
@@ -631,7 +630,7 @@ io_work:
        lg %r1,__LC_KERNEL_STACK
        aghi %r1,-SP_SIZE
        mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr %r15,%r1
#
# One of the work bits is on. Find out which one.
@@ -656,11 +655,11 @@ io_mcck_pending:

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        brasl %r14,schedule # call scheduler
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        tm __TI_flags+7(%r9),_TIF_WORK_INT
        jz io_leave # there is no work to do
        j io_work_loop
@@ -668,17 +667,17 @@ io_reschedule:
#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
io_sigpending:
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        la %r2,SP_PTREGS(%r15) # load pt_regs
        brasl %r14,do_signal # call do_signal
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        j io_work_loop

/*
 * External interrupt handler routine
 */
        .globl ext_int_handler
ext_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck __LC_INT_CLOCK
@@ -695,9 +694,9 @@ ext_no_vtime:
#endif
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        TRACE_IRQS_OFF
        la %r2,SP_PTREGS(%r15) # address of register-save area
        llgh %r3,__LC_EXT_INT_CODE # get interruption code
        brasl %r14,do_extint
        TRACE_IRQS_ON
        j io_return

@@ -706,14 +705,14 @@ __critical_end:
/*
 * Machine check handler routines
 */
        .globl mcck_int_handler
mcck_int_handler:
        la %r1,4095 # revalidate r1
        spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
        lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # revalidate gprs
        SAVE_ALL_BASE __LC_SAVE_AREA+64
        la %r12,__LC_MCK_OLD_PSW
        tm __LC_MCCK_CODE,0x80 # system damage?
        jo mcck_int_main # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        la %r14,4095
@@ -737,19 +736,19 @@ mcck_int_handler:
#endif
        tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
        jno mcck_int_main # no -> skip cleanup critical
        tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
        jnz mcck_int_main # from user -> load kernel stack
        clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
        jhe mcck_int_main
        clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
        jl mcck_int_main
        brasl %r14,cleanup_critical
mcck_int_main:
        lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
        slgr %r14,%r15
        srag %r14,%r14,PAGE_SHIFT
        jz 0f
        lg %r15,__LC_PANIC_STACK # load panic stack
0:      CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
@@ -764,7 +763,7 @@ mcck_no_vtime:
        lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
        la %r2,SP_PTREGS(%r15) # load pt_regs
        brasl %r14,s390_do_machine_check
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        jno mcck_return
        lg %r1,__LC_KERNEL_STACK # switch to kernel stack
        aghi %r1,-SP_SIZE
@@ -794,28 +793,28 @@ mcck_return:
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
        .globl restart_int_handler
restart_int_handler:
        lg %r15,__LC_SAVE_AREA+120 # load ksp
        lghi %r10,__LC_CREGS_SAVE_AREA
        lctlg %c0,%c15,0(%r10) # get new ctl regs
        lghi %r10,__LC_AREGS_SAVE_AREA
        lam %a0,%a15,0(%r10)
        lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
        stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
        jg start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl restart_int_handler
restart_int_handler:
        basr %r1,0
restart_base:
        lpswe restart_crash-restart_base(%r1)
        .align 8
restart_crash:
        .long 0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif

@@ -836,9 +835,9 @@ stack_overflow:
        chi %r12,__LC_PGM_OLD_PSW
        je 0f
        la %r1,__LC_SAVE_AREA+32
0:      mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
        xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
        la %r2,SP_PTREGS(%r15) # load pt_regs
        jg kernel_stack_overflow
#endif

@@ -941,10 +940,10 @@ cleanup_novtime:
cleanup_system_call_insn:
        .quad sysc_saveall
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .quad system_call
        .quad sysc_vtime
        .quad sysc_stime
        .quad sysc_update
#endif

cleanup_sysc_return:
@@ -1010,21 +1009,21 @@ cleanup_io_leave_insn:
/*
 * Integer constants
 */
        .align 4
.Lconst:
.Lc_pactive:   .long PREEMPT_ACTIVE
.Lnr_syscalls: .long NR_syscalls
.L0x0130:      .short 0x130
.L0x0140:      .short 0x140
.L0x0150:      .short 0x150
.L0x0160:      .short 0x160
.L0x0170:      .short 0x170
.Lcritical_start:
        .quad __critical_start
.Lcritical_end:
        .quad __critical_end

        .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
sys_call_table:
#include "syscalls.S"
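Both syscall tables come from the single listing in syscalls.S, which is run through the C preprocessor: here SYSCALL() keeps only its 64-bit (esame) column, while sys_call_table_emu, used for 31-bit tasks in sysc_do_restart above, is presumably built further down with the macro redefined to keep the emulation column. The same trick in plain C, with made-up entry names standing in for the handler addresses:

/* Stand-ins for the addresses that the .long directives would emit. */
enum { sys_foo_esa, sys_foo_esame, sys_foo_emu };

#define SYSCALL(esa, esame, emu) (esame)       /* native 64-bit table keeps column 2 */
static const int sys_call_table_demo[] = {
        SYSCALL(sys_foo_esa, sys_foo_esame, sys_foo_emu),   /* -> sys_foo_esame */
};

#undef SYSCALL
#define SYSCALL(esa, esame, emu) (emu)         /* assumed: the emulation table keeps column 3 */
static const int sys_call_table_emu_demo[] = {
        SYSCALL(sys_foo_esa, sys_foo_esame, sys_foo_emu),   /* -> sys_foo_emu */
};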