diff options
Diffstat (limited to 'arch/s390/kernel/entry64.S')
-rw-r--r-- | arch/s390/kernel/entry64.S | 881 |
1 files changed, 881 insertions, 0 deletions
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S new file mode 100644 index 000000000000..51527ab8c8f9 --- /dev/null +++ b/arch/s390/kernel/entry64.S | |||
@@ -0,0 +1,881 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/entry.S | ||
3 | * S390 low-level entry points. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Hartmut Penner (hp@de.ibm.com), | ||
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
10 | */ | ||
11 | |||
12 | #include <linux/sys.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <linux/config.h> | ||
15 | #include <asm/cache.h> | ||
16 | #include <asm/lowcore.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/offsets.h> | ||
21 | #include <asm/unistd.h> | ||
22 | #include <asm/page.h> | ||
23 | |||
/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 * All offsets are relative to %r15 and skip the standard call frame
 * (STACK_FRAME_OVERHEAD) that sits below the pt_regs area.
 */
SP_PTREGS    =	STACK_FRAME_OVERHEAD
SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW       =	STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0        =	STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9        =	STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15       =	STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC       =	STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      =	STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE

/* Kernel stack size is derived from the page size and THREAD_ORDER. */
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

/* TIF bits that force extra work on the system-call exit path. */
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
/* TIF bits that force extra work on the interrupt exit path. */
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

/* %r13 always holds the address of system_call as literal pool base. */
#define BASED(name) name-system_call(%r13)
/* Store the CPU timer to the given lowcore offset; compiles away unless
 * virtual cpu time accounting is configured. */
	.macro	STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	stpt	\lc_offset
#endif
	.endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* Accumulate the delta between two lowcore timestamps:
 *   \lc_sum += \lc_from - \lc_to.   Clobbers %r10. */
	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lg	%r10,\lc_from
	slg	%r10,\lc_to
	alg	%r10,\lc_sum
	stg	%r10,\lc_sum
	.endm
#endif
75 | |||
/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

/* Stash %r12-%r15 in a lowcore save area and set up the literal pool
 * base register %r13.  Must run before SAVE_ALL. */
	.macro	SAVE_ALL_BASE savearea
	stmg	%r12,%r15,\savearea
	larl	%r13,system_call
	.endm

/* Select the proper kernel stack and build a pt_regs frame on it.
 *   \psworg   - lowcore old-PSW address of this interruption class
 *   \savearea - area filled by SAVE_ALL_BASE (%r12-%r15)
 *   \sync     - 1: synchronous entry (svc/pgm) -> kernel stack;
 *               0: asynchronous entry -> async stack, with critical
 *                  section fixup via cleanup_critical if the interrupt
 *                  hit between __critical_start and __critical_end. */
	.macro	SAVE_ALL psworg,savearea,sync
	la	%r12,\psworg
	.if	\sync
	tm	\psworg+1,0x01		# test problem state bit
	jz	2f			# skip stack setup save
	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	.else
	tm	\psworg+1,0x01		# test problem state bit
	jnz	1f			# from user -> load kernel stack
	clc	\psworg+8(8),BASED(.Lcritical_end)
	jhe	0f
	clc	\psworg+8(8),BASED(.Lcritical_start)
	jl	0f
	brasl	%r14,cleanup_critical	# roll interrupted code to safe point
	tm	0(%r12),0x01		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jz	2f
1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
	.endif
#ifdef CONFIG_CHECK_STACK
	j	3f
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD	# sp in guard area ?
	jz	stack_overflow
3:
#endif
2:	aghi	%r15,-SP_SIZE		# make room for registers & psw
	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
	la	%r12,\psworg
	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	icm	%r12,12,__LC_SVC_ILC	# insert ilc (only meaningful for svc)
	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	st	%r12,SP_ILC(%r15)
	mvc	SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
	la	%r12,0
	stg	%r12,__SF_BACKCHAIN(%r15) # terminate the back chain
	.endm
128 | |||
/* Restore the user registers and PSW from the pt_regs frame and leave
 * the kernel.  \sync = 0 additionally clears the wait-state bit an
 * asynchronous interrupt may have captured in the old PSW. */
	.macro	RESTORE_ALL sync
	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
	.if !\sync
	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
	.endif
	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
	STORE_TIMER __LC_EXIT_TIMER
	lpswe	__LC_RETURN_PSW		# back to caller
	.endm
138 | |||
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
	.globl	__switch_to
__switch_to:
	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
	jz	__switch_to_noper	# if not we're fine
	stctg	%c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
	clc	__THREAD_per(24,%r3),__SF_EMPTY(%r15)
	je	__switch_to_noper	# we got away without bashing TLB's
	lctlg	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
__switch_to_noper:
	stmg	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
	lmg	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
	lg	%r3,__THREAD_info(%r3)	# load thread_info from task struct
	stg	%r3,__LC_THREAD_INFO
	aghi	%r3,STACK_SIZE		# %r3 = top of next's kernel stack
	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
	br	%r14
166 | |||
__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

	.globl	system_call
system_call:
	STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_BASE __LC_SAVE_AREA
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	sysc_do_svc
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	slag	%r7,%r7,2		# *4 and test for svc 0
	jnz	sysc_nr_ok
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	jnl	sysc_nr_ok		# out of range -> let the handler fail
	lgfr	%r7,%r1			# clear high word in r1
	slag	%r7,%r7,2		# svc 0: system call number in %r1
sysc_nr_ok:
	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
	larl	%r10,sys_call_table
#ifdef CONFIG_S390_SUPPORT
	tm	SP_PSW+3(%r15),0x01	# are we running in 31 bit mode ?
	jo	sysc_noemu
	larl	%r10,sys_call_table_emu	# use 31 bit emulation system calls
sysc_noemu:
#endif
	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	lgf	%r8,0(%r7,%r10)		# load address of system call routine
	jnz	sysc_tracesys
	basr	%r14,%r8		# call sys_xxxx
	stg	%r2,SP_R2(%r15)		# store return value (change R2 on stack)
					# ATTENTION: check sys_execve_glue before
					# changing anything here !!

sysc_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jno	sysc_leave
	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
	jnz	sysc_work		# there is work to do (signals etc.)
sysc_leave:
	RESTORE_ALL 1
#
# recheck if there is more work to do
#
sysc_work_loop:
	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
	jz	sysc_leave		# there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
	jo	sysc_reschedule
	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
	jo	sysc_sigpending
	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
	jo	sysc_restart
	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
	jo	sysc_singlestep
	j	sysc_leave

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	larl	%r14,sysc_work_loop
	jg	schedule		# return point is sysc_work_loop

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	sgr	%r3,%r3			# clear *oldset
	brasl	%r14,do_signal		# call do_signal
	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
	jo	sysc_restart
	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
	jo	sysc_singlestep
	j	sysc_leave		# out of here, do NOT recheck

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
	ni	__TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
	lg	%r7,SP_R2(%r15)		# load new svc number
	slag	%r7,%r7,2		# *4
	mvc	SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
	j	sysc_do_restart		# restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	lhi	%r0,__LC_PGM_OLD_PSW
	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	larl	%r14,sysc_return	# load adr. of system return
	jg	do_single_step		# branch to do_sigtrap


__critical_end:
#
# call syscall_trace before and after system call
# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0			# second arg 0 = syscall entry
	srl	%r7,2			# table offset back to svc number
	stg	%r7,SP_R2(%r15)
	brasl	%r14,syscall_trace
	lghi	%r0,NR_syscalls
	clg	%r0,SP_R2(%r15)		# tracer may have changed the number
	jnh	sysc_tracenogo		# out of range -> skip the call
	lg	%r7,SP_R2(%r15)		# strace might have changed the
	sll	%r7,2			# system call
	lgf	%r8,0(%r7,%r10)
sysc_tracego:
	lmg	%r3,%r6,SP_R3(%r15)
	lg	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	stg	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	jz	sysc_return
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,1			# second arg 1 = syscall exit
	larl	%r14,sysc_return	# return point is sysc_return
	jg	syscall_trace
#
# a new process exits the kernel with ret_from_fork
#
	.globl	ret_from_fork
ret_from_fork:
	lg	%r13,__LC_SVC_NEW_PSW+8	# reload literal pool base
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
	jo	0f
	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	brasl	%r14,schedule_tail
	stosm	24(%r15),0x03		# reenable interrupts
	j	sysc_return
#
# clone, fork, vfork, exec and sigreturn need glue,
# because they all expect pt_regs as parameter,
# but are called with different parameter.
# return-address is set up above
#
sys_clone_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	sys_clone		# branch to sys_clone

#ifdef CONFIG_S390_SUPPORT
sys32_clone_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	sys32_clone		# branch to sys32_clone
#endif

sys_fork_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	sys_fork		# branch to sys_fork

sys_vfork_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	sys_vfork		# branch to sys_vfork

sys_execve_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	lgr	%r12,%r14		# save return address
	brasl	%r14,sys_execve		# call sys_execve
	ltgr	%r2,%r2			# check if execve failed
	bnz	0(%r12)			# it did fail -> store result in gpr2
	b	6(%r12)			# SKIP STG 2,SP_R2(15) in
					# system_call/sysc_tracesys
#ifdef CONFIG_S390_SUPPORT
sys32_execve_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	lgr	%r12,%r14		# save return address
	brasl	%r14,sys32_execve	# call sys32_execve
	ltgr	%r2,%r2			# check if execve failed
	bnz	0(%r12)			# it did fail -> store result in gpr2
	b	6(%r12)			# SKIP STG 2,SP_R2(15) in
					# system_call/sysc_tracesys
#endif
376 | |||
sys_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys_sigreturn		# branch to sys_sigreturn

#ifdef CONFIG_S390_SUPPORT
sys32_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys32_sigreturn		# branch to sys32_sigreturn
#endif

sys_rt_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys_rt_sigreturn	# branch to sys_rt_sigreturn

#ifdef CONFIG_S390_SUPPORT
sys32_rt_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys32_rt_sigreturn	# branch to sys32_rt_sigreturn
#endif
396 | |||
#
# sigsuspend and rt_sigsuspend need pt_regs as an additional
# parameter and they have to skip the store of %r2 into the
# user register %r2 because the return value was set in
# sigsuspend and rt_sigsuspend already and must not be overwritten!
#

sys_sigsuspend_glue:
	lgr	%r5,%r4			# move mask back
	lgr	%r4,%r3			# move history1 parameter
	lgr	%r3,%r2			# move history0 parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	la	%r14,6(%r14)		# skip store of return value
	jg	sys_sigsuspend		# branch to sys_sigsuspend

#ifdef CONFIG_S390_SUPPORT
sys32_sigsuspend_glue:
	llgfr	%r4,%r4			# unsigned long
	lgr	%r5,%r4			# move mask back
	lgfr	%r3,%r3			# int
	lgr	%r4,%r3			# move history1 parameter
	lgfr	%r2,%r2			# int
	lgr	%r3,%r2			# move history0 parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	la	%r14,6(%r14)		# skip store of return value
	jg	sys32_sigsuspend	# branch to sys32_sigsuspend
#endif

sys_rt_sigsuspend_glue:
	lgr	%r4,%r3			# move sigsetsize parameter
	lgr	%r3,%r2			# move unewset parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	la	%r14,6(%r14)		# skip store of return value
	jg	sys_rt_sigsuspend	# branch to sys_rt_sigsuspend

#ifdef CONFIG_S390_SUPPORT
sys32_rt_sigsuspend_glue:
	llgfr	%r3,%r3			# size_t
	lgr	%r4,%r3			# move sigsetsize parameter
	llgtr	%r2,%r2			# sigset_emu31_t *
	lgr	%r3,%r2			# move unewset parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	la	%r14,6(%r14)		# skip store of return value
	jg	sys32_rt_sigsuspend	# branch to sys32_rt_sigsuspend
#endif

sys_sigaltstack_glue:
	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys_sigaltstack		# branch to sys_sigaltstack

#ifdef CONFIG_S390_SUPPORT
sys32_sigaltstack_glue:
	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
	jg	sys32_sigaltstack_wrapper # branch to the 31-bit wrapper
#endif
452 | |||
/*
 * Program check handler routine
 */

	.globl	pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
	STORE_TIMER __LC_SYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	jnz	pgm_per			# got per exception -> special case
	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	pgm_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	lgf	%r3,__LC_PGM_ILC	# load program interruption code
	lghi	%r8,0x7f
	ngr	%r8,%r3			# isolate interruption code bits
pgm_do_call:
	sll	%r8,3			# *8: pgm_check_table entries are 8 bytes
	larl	%r1,pgm_check_table
	lg	%r1,0(%r8,%r1)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	larl	%r14,sysc_return	# return point is sysc_return
	br	%r1			# branch to interrupt-handler
496 | |||
#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	jnz	pgm_per_std		# ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
	je	pgm_svcper
# no interesting special case, ignore PER event
	lmg	%r12,%r15,__LC_SAVE_AREA
	lpswe	__LC_PGM_OLD_PSW

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	pgm_no_vtime2
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	lg	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	lgf	%r3,__LC_PGM_ILC	# load program interruption code
	lghi	%r8,0x7f
	ngr	%r8,%r3			# clear per-event-bit and ilc
	je	sysc_return		# pure per event -> nothing else to do
	j	pgm_do_call
534 | |||
#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	pgm_no_vtime3
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	lg	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	j	sysc_do_svc		# run the svc, single step afterwards
557 | |||
/*
 * IO interrupt handler routine
 */
	.globl io_int_handler
io_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	io_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	brasl	%r14,do_IRQ		# call standard irq handler

io_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
#ifdef CONFIG_PREEMPT
	jno	io_preempt		# no -> check for preemptive scheduling
#else
	jno	io_leave		# no-> skip resched & signal
#endif
	tm	__TI_flags+7(%r9),_TIF_WORK_INT
	jnz	io_work			# there is work to do (signals etc.)
io_leave:
	RESTORE_ALL 0
590 | |||
#ifdef CONFIG_PREEMPT
io_preempt:
	icm	%r0,15,__TI_precount(%r9)	# preempt_count != 0 ?
	jnz	io_leave			# yes -> no preemption
	# switch to kernel stack
	lg	%r1,SP_R15(%r15)
	aghi	%r1,-SP_SIZE
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lgr	%r15,%r1
io_resume_loop:
	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
	jno	io_leave
	larl	%r1,.Lc_pactive
	mvc	__TI_precount(4,%r9),0(%r1)	# preempt_count = PREEMPT_ACTIVE
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	brasl	%r14,schedule		# call schedule
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	xc	__TI_precount(4,%r9),__TI_precount(%r9)	# preempt_count = 0
	j	io_resume_loop
#endif
612 | |||
#
# switch to kernel stack, then check TIF bits
#
io_work:
	lg	%r1,__LC_KERNEL_STACK
	aghi	%r1,-SP_SIZE
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lgr	%r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
#
io_work_loop:
	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
	jo	io_reschedule
	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
	jo	io_sigpending
	j	io_leave
632 | |||
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	tm	__TI_flags+7(%r9),_TIF_WORK_INT
	jz	io_leave		# there is no work to do
	j	io_work_loop

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	slgr	%r3,%r3			# clear *oldset
	brasl	%r14,do_signal		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	j	sysc_leave		# out of here, do NOT recheck
					# NOTE(review): exits through the sync
					# path (RESTORE_ALL 1) - confirm the
					# wait-state bit can never be set here
654 | |||
/*
 * External interrupt handler routine
 */
	.globl	ext_int_handler
ext_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	ext_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	llgh	%r3,__LC_EXT_INT_CODE	# get interruption code
	brasl	%r14,do_extint
	j	io_return		# share exit path with I/O interrupts
677 | |||
/*
 * Machine check handler routines
 */
	.globl	mcck_int_handler
mcck_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+64
	SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	mcck_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
	brasl	%r14,s390_do_machine_check
mcck_return:
	RESTORE_ALL 0
697 | |||
#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
	.globl restart_int_handler
restart_int_handler:
	lg	%r15,__LC_SAVE_AREA+120 # load ksp
	lghi	%r10,__LC_CREGS_SAVE_AREA
	lctlg	%c0,%c15,0(%r10)	# get new ctl regs
	lghi	%r10,__LC_AREGS_SAVE_AREA
	lam	%a0,%a15,0(%r10)
	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	jg	start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
	.globl restart_int_handler
restart_int_handler:
	basr	%r1,0
restart_base:
	lpswe	restart_crash-restart_base(%r1)	# load a disabled wait PSW
	.align 8
restart_crash:
	.long  0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif
726 | |||
#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 * On entry: %r12 = address of the old PSW of the interruption,
 *           %r12-%r15 of the interrupted context are in a lowcore
 *           save area (SVC/PGM: __LC_SAVE_AREA, async: +32).
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	aghi	%r15,-SP_SIZE		# make room for the pt_regs frame
					# (fix: was "aghi %r1,-SP_SIZE" which
					# never allocated the frame - %r15 is
					# the stack pointer used below and %r1
					# is clobbered two insns later)
	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	chi	%r12,__LC_SVC_OLD_PSW	# svc and pgm check saved %r12-%r15
	je	0f			# in the first save area ...
	chi	%r12,__LC_PGM_OLD_PSW
	je	0f
	la	%r1,__LC_SAVE_AREA+32	# ... async interrupts in the second
					# (fix: was +16; SAVE_ALL_BASE stores
					# the 32-byte %r12-%r15 block at
					# __LC_SAVE_AREA+32 for io/ext)
0:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	kernel_stack_overflow
#endif
749 | |||
/*
 * Each cleanup table entry is a pair of addresses delimiting one
 * sub-range of the critical section.  cleanup_critical compares the
 * interrupted instruction address (second quadword of the old PSW at
 * 0(%r12)) against these ranges and repairs the interrupted state so
 * the code can be restarted at a safe point after the interrupt.
 */
cleanup_table_system_call:
	.quad	system_call, sysc_do_svc
cleanup_table_sysc_return:
	.quad	sysc_return, sysc_leave
cleanup_table_sysc_leave:
	.quad	sysc_leave, sysc_work_loop
cleanup_table_sysc_work_loop:
	.quad	sysc_work_loop, sysc_reschedule

cleanup_critical:
	clc	8(8,%r12),BASED(cleanup_table_system_call)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
	jl	cleanup_system_call
0:
	clc	8(8,%r12),BASED(cleanup_table_sysc_return)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_sysc_return+8)
	jl	cleanup_sysc_return
0:
	clc	8(8,%r12),BASED(cleanup_table_sysc_leave)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
	jl	cleanup_sysc_leave
0:
	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
	jl	cleanup_sysc_return	# fix: was cleanup_sysc_leave, which
					# replays the user-register restore and
					# returns to user space - an interrupt
					# inside the work loop would then skip
					# pending signal/reschedule work; the
					# work loop must resume at sysc_return
					# so the TIF bits are rechecked
0:
	br	%r14
781 | |||
/*
 * Interrupt hit between system_call and sysc_do_svc: depending on how
 * far the entry sequence got (compared against the address table below)
 * redo the register save and vtime accounting, then resume at
 * sysc_do_svc.  On return %r12 points to the fixed-up return PSW.
 */
cleanup_system_call:
	mvc	__LC_RETURN_PSW(16),0(%r12)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
	jhe	cleanup_vtime
#endif
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
	jh	0f
	mvc	__LC_SAVE_AREA(32),__LC_SAVE_AREA+32
0:	stg	%r13,__LC_SAVE_AREA+40
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
	stg	%r15,__LC_SAVE_AREA+56
	llgh	%r7,__LC_SVC_INT_CODE
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
	jhe	cleanup_stime
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	cleanup_novtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
	jh	cleanup_update
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.quad	sysc_saveall
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.quad	system_call
	.quad	sysc_vtime
	.quad	sysc_stime
	.quad	sysc_update
#endif
824 | |||
/*
 * Interrupt hit between sysc_return and sysc_leave: redirect the return
 * PSW to sysc_return so the TIF work-bit checks are redone.
 */
cleanup_sysc_return:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
	la	%r12,__LC_RETURN_PSW
	br	%r14

/*
 * Interrupt hit inside the RESTORE_ALL sequence of sysc_leave.  Unless
 * the lmg/lpswe already ran (addresses in the table below), finish the
 * register restore by hand so the return goes straight back to user.
 */
cleanup_sysc_leave:
	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)
	je	0f
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
	je	0f
#endif
	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
	lmg	%r0,%r11,SP_R0(%r15)
	lg	%r15,SP_R15(%r15)
0:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.quad	sysc_leave + 16
#endif
	.quad	sysc_leave + 12
850 | |||
/*
 * Integer constants (addressed via BASED, i.e. relative to %r13)
 */
	       .align	4
.Lconst:
.Lc_pactive:   .long	PREEMPT_ACTIVE
.Lnr_syscalls: .long	NR_syscalls
.L0x0130:      .short	0x130
.L0x0140:      .short	0x140
.L0x0150:      .short	0x150
.L0x0160:      .short	0x160
.L0x0170:      .short	0x170
.Lcritical_start:
	       .quad	__critical_start
.Lcritical_end:
	       .quad	__critical_end
867 | |||
/* 64-bit syscall table: 4-byte entries, loaded with lgf and indexed
 * with the *4 shift in sysc_do_svc. */
#define SYSCALL(esa,esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#ifdef CONFIG_S390_SUPPORT
/* Table used for tasks running in 31-bit compatibility mode. */
#define SYSCALL(esa,esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif