Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/lowcore.h | 142
-rw-r--r-- | arch/s390/kernel/asm-offsets.c | 4
-rw-r--r-- | arch/s390/kernel/base.S | 16
-rw-r--r-- | arch/s390/kernel/entry.S | 1099
-rw-r--r-- | arch/s390/kernel/entry64.S | 968
-rw-r--r-- | arch/s390/kernel/head.S | 4
-rw-r--r-- | arch/s390/kernel/reipl64.S | 4
-rw-r--r-- | arch/s390/kernel/smp.c | 2
8 files changed, 1002 insertions, 1237 deletions
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 3b97964e0e96..707f2306725b 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -97,47 +97,52 @@ struct _lowcore { | |||
97 | __u32 gpregs_save_area[16]; /* 0x0180 */ | 97 | __u32 gpregs_save_area[16]; /* 0x0180 */ |
98 | __u32 cregs_save_area[16]; /* 0x01c0 */ | 98 | __u32 cregs_save_area[16]; /* 0x01c0 */ |
99 | 99 | ||
100 | /* Save areas. */ | ||
101 | __u32 save_area_sync[8]; /* 0x0200 */ | ||
102 | __u32 save_area_async[8]; /* 0x0220 */ | ||
103 | __u32 save_area_restart[1]; /* 0x0240 */ | ||
104 | __u8 pad_0x0244[0x0248-0x0244]; /* 0x0244 */ | ||
105 | |||
100 | /* Return psws. */ | 106 | /* Return psws. */ |
101 | __u32 save_area[16]; /* 0x0200 */ | 107 | psw_t return_psw; /* 0x0248 */ |
102 | psw_t return_psw; /* 0x0240 */ | 108 | psw_t return_mcck_psw; /* 0x0250 */ |
103 | psw_t return_mcck_psw; /* 0x0248 */ | ||
104 | 109 | ||
105 | /* CPU time accounting values */ | 110 | /* CPU time accounting values */ |
106 | __u64 sync_enter_timer; /* 0x0250 */ | 111 | __u64 sync_enter_timer; /* 0x0258 */ |
107 | __u64 async_enter_timer; /* 0x0258 */ | 112 | __u64 async_enter_timer; /* 0x0260 */ |
108 | __u64 mcck_enter_timer; /* 0x0260 */ | 113 | __u64 mcck_enter_timer; /* 0x0268 */ |
109 | __u64 exit_timer; /* 0x0268 */ | 114 | __u64 exit_timer; /* 0x0270 */ |
110 | __u64 user_timer; /* 0x0270 */ | 115 | __u64 user_timer; /* 0x0278 */ |
111 | __u64 system_timer; /* 0x0278 */ | 116 | __u64 system_timer; /* 0x0280 */ |
112 | __u64 steal_timer; /* 0x0280 */ | 117 | __u64 steal_timer; /* 0x0288 */ |
113 | __u64 last_update_timer; /* 0x0288 */ | 118 | __u64 last_update_timer; /* 0x0290 */ |
114 | __u64 last_update_clock; /* 0x0290 */ | 119 | __u64 last_update_clock; /* 0x0298 */ |
115 | 120 | ||
116 | /* Current process. */ | 121 | /* Current process. */ |
117 | __u32 current_task; /* 0x0298 */ | 122 | __u32 current_task; /* 0x02a0 */ |
118 | __u32 thread_info; /* 0x029c */ | 123 | __u32 thread_info; /* 0x02a4 */ |
119 | __u32 kernel_stack; /* 0x02a0 */ | 124 | __u32 kernel_stack; /* 0x02a8 */ |
120 | 125 | ||
121 | /* Interrupt and panic stack. */ | 126 | /* Interrupt and panic stack. */ |
122 | __u32 async_stack; /* 0x02a4 */ | 127 | __u32 async_stack; /* 0x02ac */ |
123 | __u32 panic_stack; /* 0x02a8 */ | 128 | __u32 panic_stack; /* 0x02b0 */ |
124 | 129 | ||
125 | /* Address space pointer. */ | 130 | /* Address space pointer. */ |
126 | __u32 kernel_asce; /* 0x02ac */ | 131 | __u32 kernel_asce; /* 0x02b4 */ |
127 | __u32 user_asce; /* 0x02b0 */ | 132 | __u32 user_asce; /* 0x02b8 */ |
128 | __u32 current_pid; /* 0x02b4 */ | 133 | __u32 current_pid; /* 0x02bc */ |
129 | 134 | ||
130 | /* SMP info area */ | 135 | /* SMP info area */ |
131 | __u32 cpu_nr; /* 0x02b8 */ | 136 | __u32 cpu_nr; /* 0x02c0 */ |
132 | __u32 softirq_pending; /* 0x02bc */ | 137 | __u32 softirq_pending; /* 0x02c4 */ |
133 | __u32 percpu_offset; /* 0x02c0 */ | 138 | __u32 percpu_offset; /* 0x02c8 */ |
134 | __u32 ext_call_fast; /* 0x02c4 */ | 139 | __u32 ext_call_fast; /* 0x02cc */ |
135 | __u64 int_clock; /* 0x02c8 */ | 140 | __u64 int_clock; /* 0x02d0 */ |
136 | __u64 mcck_clock; /* 0x02d0 */ | 141 | __u64 mcck_clock; /* 0x02d8 */ |
137 | __u64 clock_comparator; /* 0x02d8 */ | 142 | __u64 clock_comparator; /* 0x02e0 */ |
138 | __u32 machine_flags; /* 0x02e0 */ | 143 | __u32 machine_flags; /* 0x02e8 */ |
139 | __u32 ftrace_func; /* 0x02e4 */ | 144 | __u32 ftrace_func; /* 0x02ec */ |
140 | __u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */ | 145 | __u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */ |
141 | 146 | ||
142 | /* Interrupt response block */ | 147 | /* Interrupt response block */ |
143 | __u8 irb[64]; /* 0x0300 */ | 148 | __u8 irb[64]; /* 0x0300 */ |
@@ -229,57 +234,62 @@ struct _lowcore { | |||
229 | psw_t mcck_new_psw; /* 0x01e0 */ | 234 | psw_t mcck_new_psw; /* 0x01e0 */ |
230 | psw_t io_new_psw; /* 0x01f0 */ | 235 | psw_t io_new_psw; /* 0x01f0 */ |
231 | 236 | ||
232 | /* Entry/exit save area & return psws. */ | 237 | /* Save areas. */ |
233 | __u64 save_area[16]; /* 0x0200 */ | 238 | __u64 save_area_sync[8]; /* 0x0200 */ |
234 | psw_t return_psw; /* 0x0280 */ | 239 | __u64 save_area_async[8]; /* 0x0240 */ |
235 | psw_t return_mcck_psw; /* 0x0290 */ | 240 | __u64 save_area_restart[1]; /* 0x0280 */ |
241 | __u8 pad_0x0288[0x0290-0x0288]; /* 0x0288 */ | ||
242 | |||
243 | /* Return psws. */ | ||
244 | psw_t return_psw; /* 0x0290 */ | ||
245 | psw_t return_mcck_psw; /* 0x02a0 */ | ||
236 | 246 | ||
237 | /* CPU accounting and timing values. */ | 247 | /* CPU accounting and timing values. */ |
238 | __u64 sync_enter_timer; /* 0x02a0 */ | 248 | __u64 sync_enter_timer; /* 0x02b0 */ |
239 | __u64 async_enter_timer; /* 0x02a8 */ | 249 | __u64 async_enter_timer; /* 0x02b8 */ |
240 | __u64 mcck_enter_timer; /* 0x02b0 */ | 250 | __u64 mcck_enter_timer; /* 0x02c0 */ |
241 | __u64 exit_timer; /* 0x02b8 */ | 251 | __u64 exit_timer; /* 0x02c8 */ |
242 | __u64 user_timer; /* 0x02c0 */ | 252 | __u64 user_timer; /* 0x02d0 */ |
243 | __u64 system_timer; /* 0x02c8 */ | 253 | __u64 system_timer; /* 0x02d8 */ |
244 | __u64 steal_timer; /* 0x02d0 */ | 254 | __u64 steal_timer; /* 0x02e0 */ |
245 | __u64 last_update_timer; /* 0x02d8 */ | 255 | __u64 last_update_timer; /* 0x02e8 */ |
246 | __u64 last_update_clock; /* 0x02e0 */ | 256 | __u64 last_update_clock; /* 0x02f0 */ |
247 | 257 | ||
248 | /* Current process. */ | 258 | /* Current process. */ |
249 | __u64 current_task; /* 0x02e8 */ | 259 | __u64 current_task; /* 0x02f8 */ |
250 | __u64 thread_info; /* 0x02f0 */ | 260 | __u64 thread_info; /* 0x0300 */ |
251 | __u64 kernel_stack; /* 0x02f8 */ | 261 | __u64 kernel_stack; /* 0x0308 */ |
252 | 262 | ||
253 | /* Interrupt and panic stack. */ | 263 | /* Interrupt and panic stack. */ |
254 | __u64 async_stack; /* 0x0300 */ | 264 | __u64 async_stack; /* 0x0310 */ |
255 | __u64 panic_stack; /* 0x0308 */ | 265 | __u64 panic_stack; /* 0x0318 */ |
256 | 266 | ||
257 | /* Address space pointer. */ | 267 | /* Address space pointer. */ |
258 | __u64 kernel_asce; /* 0x0310 */ | 268 | __u64 kernel_asce; /* 0x0320 */ |
259 | __u64 user_asce; /* 0x0318 */ | 269 | __u64 user_asce; /* 0x0328 */ |
260 | __u64 current_pid; /* 0x0320 */ | 270 | __u64 current_pid; /* 0x0330 */ |
261 | 271 | ||
262 | /* SMP info area */ | 272 | /* SMP info area */ |
263 | __u32 cpu_nr; /* 0x0328 */ | 273 | __u32 cpu_nr; /* 0x0338 */ |
264 | __u32 softirq_pending; /* 0x032c */ | 274 | __u32 softirq_pending; /* 0x033c */ |
265 | __u64 percpu_offset; /* 0x0330 */ | 275 | __u64 percpu_offset; /* 0x0340 */ |
266 | __u64 ext_call_fast; /* 0x0338 */ | 276 | __u64 ext_call_fast; /* 0x0348 */ |
267 | __u64 int_clock; /* 0x0340 */ | 277 | __u64 int_clock; /* 0x0350 */ |
268 | __u64 mcck_clock; /* 0x0348 */ | 278 | __u64 mcck_clock; /* 0x0358 */ |
269 | __u64 clock_comparator; /* 0x0350 */ | 279 | __u64 clock_comparator; /* 0x0360 */ |
270 | __u64 vdso_per_cpu_data; /* 0x0358 */ | 280 | __u64 vdso_per_cpu_data; /* 0x0368 */ |
271 | __u64 machine_flags; /* 0x0360 */ | 281 | __u64 machine_flags; /* 0x0370 */ |
272 | __u64 ftrace_func; /* 0x0368 */ | 282 | __u64 ftrace_func; /* 0x0378 */ |
273 | __u64 gmap; /* 0x0370 */ | 283 | __u64 gmap; /* 0x0380 */ |
274 | __u8 pad_0x0378[0x0380-0x0378]; /* 0x0378 */ | 284 | __u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */ |
275 | 285 | ||
276 | /* Interrupt response block. */ | 286 | /* Interrupt response block. */ |
277 | __u8 irb[64]; /* 0x0380 */ | 287 | __u8 irb[64]; /* 0x0400 */ |
278 | 288 | ||
279 | /* Per cpu primary space access list */ | 289 | /* Per cpu primary space access list */ |
280 | __u32 paste[16]; /* 0x03c0 */ | 290 | __u32 paste[16]; /* 0x0440 */ |
281 | 291 | ||
282 | __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ | 292 | __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */ |
283 | 293 | ||
284 | /* | 294 | /* |
285 | * 0xe00 contains the address of the IPL Parameter Information | 295 | * 0xe00 contains the address of the IPL Parameter Information |
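The lowcore hunks above replace the single save_area with dedicated save_area_sync, save_area_async and save_area_restart areas and shift every later field accordingly. A minimal C sketch of the new 64-bit layout, with the offsets from the diff's comments checked at compile time, could look like this (an illustrative subset only, with psw_t replaced by a 16-byte placeholder, not the real header):

```c
#include <stddef.h>

/* Illustrative subset of the new 64-bit lowcore layout shown above. */
struct lowcore_sketch {
	unsigned char      pad_0x0000[0x0200];           /* PSWs, hw save areas */
	unsigned long long save_area_sync[8];            /* 0x0200: svc/pgm interrupts */
	unsigned long long save_area_async[8];           /* 0x0240: ext/io/mcck interrupts */
	unsigned long long save_area_restart[1];         /* 0x0280: restart interrupt */
	unsigned char      pad_0x0288[0x0290 - 0x0288];  /* 0x0288 */
	unsigned char      return_psw[16];               /* 0x0290: placeholder for psw_t */
};

/* Compile-time checks against the offset comments in the diff. */
_Static_assert(offsetof(struct lowcore_sketch, save_area_sync)    == 0x0200, "sync");
_Static_assert(offsetof(struct lowcore_sketch, save_area_async)   == 0x0240, "async");
_Static_assert(offsetof(struct lowcore_sketch, save_area_restart) == 0x0280, "restart");
_Static_assert(offsetof(struct lowcore_sketch, return_psw)        == 0x0290, "psw");
```

The 31-bit layout in the first hunk follows the same scheme with 4-byte registers, which is why its save areas end at 0x0243 and a 4-byte pad brings return_psw to 0x0248.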
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 0717363033eb..c1a56ba5f848 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -108,7 +108,9 @@ int main(void) | |||
108 | DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); | 108 | DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); |
109 | DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); | 109 | DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); |
110 | DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); | 110 | DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); |
111 | DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area)); | 111 | DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); |
112 | DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); | ||
113 | DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); | ||
112 | DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); | 114 | DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); |
113 | DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); | 115 | DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); |
114 | DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); | 116 | DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); |
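The two new DEFINE lines export the split save-area offsets to assembly. As a reminder of how the asm-offsets mechanism works, here is a simplified sketch of the kbuild pattern (the struct is a stand-in, the real offsets come from struct _lowcore, and such a file is only ever compiled to assembly, never assembled):

```c
#include <stddef.h>

/* Simplified version of the kbuild DEFINE() pattern used by asm-offsets.c:
 * each invocation emits a "->SYMBOL <value>" marker into the generated
 * assembly output (gcc -S); the build turns those markers into
 * "#define SYMBOL <value>" lines in asm-offsets.h, which entry.S then uses
 * as lowcore displacements. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct lowcore_stub {			/* stand-in fields only */
	char pad[0x200];
	unsigned long long save_area_sync[8];
	unsigned long long save_area_async[8];
	unsigned long long save_area_restart[1];
};

int main(void)
{
	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct lowcore_stub, save_area_sync));
	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct lowcore_stub, save_area_async));
	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct lowcore_stub, save_area_restart));
	return 0;
}
```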
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f8828d38fa6e..3aa4d00aaf50 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S | |||
@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn: | |||
33 | .previous | 33 | .previous |
34 | 34 | ||
35 | ENTRY(s390_base_ext_handler) | 35 | ENTRY(s390_base_ext_handler) |
36 | stmg %r0,%r15,__LC_SAVE_AREA | 36 | stmg %r0,%r15,__LC_SAVE_AREA_ASYNC |
37 | basr %r13,0 | 37 | basr %r13,0 |
38 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | 38 | 0: aghi %r15,-STACK_FRAME_OVERHEAD |
39 | larl %r1,s390_base_ext_handler_fn | 39 | larl %r1,s390_base_ext_handler_fn |
@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler) | |||
41 | ltgr %r1,%r1 | 41 | ltgr %r1,%r1 |
42 | jz 1f | 42 | jz 1f |
43 | basr %r14,%r1 | 43 | basr %r14,%r1 |
44 | 1: lmg %r0,%r15,__LC_SAVE_AREA | 44 | 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC |
45 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | 45 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit |
46 | lpswe __LC_EXT_OLD_PSW | 46 | lpswe __LC_EXT_OLD_PSW |
47 | 47 | ||
@@ -53,7 +53,7 @@ s390_base_ext_handler_fn: | |||
53 | .previous | 53 | .previous |
54 | 54 | ||
55 | ENTRY(s390_base_pgm_handler) | 55 | ENTRY(s390_base_pgm_handler) |
56 | stmg %r0,%r15,__LC_SAVE_AREA | 56 | stmg %r0,%r15,__LC_SAVE_AREA_SYNC |
57 | basr %r13,0 | 57 | basr %r13,0 |
58 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | 58 | 0: aghi %r15,-STACK_FRAME_OVERHEAD |
59 | larl %r1,s390_base_pgm_handler_fn | 59 | larl %r1,s390_base_pgm_handler_fn |
@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler) | |||
61 | ltgr %r1,%r1 | 61 | ltgr %r1,%r1 |
62 | jz 1f | 62 | jz 1f |
63 | basr %r14,%r1 | 63 | basr %r14,%r1 |
64 | lmg %r0,%r15,__LC_SAVE_AREA | 64 | lmg %r0,%r15,__LC_SAVE_AREA_SYNC |
65 | lpswe __LC_PGM_OLD_PSW | 65 | lpswe __LC_PGM_OLD_PSW |
66 | 1: lpswe disabled_wait_psw-0b(%r13) | 66 | 1: lpswe disabled_wait_psw-0b(%r13) |
67 | 67 | ||
@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn: | |||
142 | .previous | 142 | .previous |
143 | 143 | ||
144 | ENTRY(s390_base_ext_handler) | 144 | ENTRY(s390_base_ext_handler) |
145 | stm %r0,%r15,__LC_SAVE_AREA | 145 | stm %r0,%r15,__LC_SAVE_AREA_ASYNC |
146 | basr %r13,0 | 146 | basr %r13,0 |
147 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | 147 | 0: ahi %r15,-STACK_FRAME_OVERHEAD |
148 | l %r1,2f-0b(%r13) | 148 | l %r1,2f-0b(%r13) |
@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler) | |||
150 | ltr %r1,%r1 | 150 | ltr %r1,%r1 |
151 | jz 1f | 151 | jz 1f |
152 | basr %r14,%r1 | 152 | basr %r14,%r1 |
153 | 1: lm %r0,%r15,__LC_SAVE_AREA | 153 | 1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC |
154 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | 154 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit |
155 | lpsw __LC_EXT_OLD_PSW | 155 | lpsw __LC_EXT_OLD_PSW |
156 | 156 | ||
@@ -164,7 +164,7 @@ s390_base_ext_handler_fn: | |||
164 | .previous | 164 | .previous |
165 | 165 | ||
166 | ENTRY(s390_base_pgm_handler) | 166 | ENTRY(s390_base_pgm_handler) |
167 | stm %r0,%r15,__LC_SAVE_AREA | 167 | stm %r0,%r15,__LC_SAVE_AREA_SYNC |
168 | basr %r13,0 | 168 | basr %r13,0 |
169 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | 169 | 0: ahi %r15,-STACK_FRAME_OVERHEAD |
170 | l %r1,2f-0b(%r13) | 170 | l %r1,2f-0b(%r13) |
@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler) | |||
172 | ltr %r1,%r1 | 172 | ltr %r1,%r1 |
173 | jz 1f | 173 | jz 1f |
174 | basr %r14,%r1 | 174 | basr %r14,%r1 |
175 | lm %r0,%r15,__LC_SAVE_AREA | 175 | lm %r0,%r15,__LC_SAVE_AREA_SYNC |
176 | lpsw __LC_PGM_OLD_PSW | 176 | lpsw __LC_PGM_OLD_PSW |
177 | 177 | ||
178 | 1: lpsw disabled_wait_psw-0b(%r13) | 178 | 1: lpsw disabled_wait_psw-0b(%r13) |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index b13157057e02..c2773cff89c3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -19,32 +19,22 @@ | |||
19 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | 21 | ||
22 | /* | 22 | __PT_R0 = __PT_GPRS |
23 | * Stack layout for the system_call stack entry. | 23 | __PT_R1 = __PT_GPRS + 4 |
24 | * The first few entries are identical to the user_regs_struct. | 24 | __PT_R2 = __PT_GPRS + 8 |
25 | */ | 25 | __PT_R3 = __PT_GPRS + 12 |
26 | SP_PTREGS = STACK_FRAME_OVERHEAD | 26 | __PT_R4 = __PT_GPRS + 16 |
27 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | 27 | __PT_R5 = __PT_GPRS + 20 |
28 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | 28 | __PT_R6 = __PT_GPRS + 24 |
29 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | 29 | __PT_R7 = __PT_GPRS + 28 |
30 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4 | 30 | __PT_R8 = __PT_GPRS + 32 |
31 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | 31 | __PT_R9 = __PT_GPRS + 36 |
32 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12 | 32 | __PT_R10 = __PT_GPRS + 40 |
33 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | 33 | __PT_R11 = __PT_GPRS + 44 |
34 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20 | 34 | __PT_R12 = __PT_GPRS + 48 |
35 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | 35 | __PT_R13 = __PT_GPRS + 524 |
36 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28 | 36 | __PT_R14 = __PT_GPRS + 56 |
37 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | 37 | __PT_R15 = __PT_GPRS + 60 |
38 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36 | ||
39 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | ||
40 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44 | ||
41 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | ||
42 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | ||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | ||
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | ||
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | ||
46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE | ||
47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | ||
48 | 38 | ||
49 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 39 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
50 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) | 40 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
58 | 48 | ||
59 | #define BASED(name) name-system_call(%r13) | 49 | #define BASED(name) name-system_call(%r13) |
60 | 50 | ||
61 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
62 | .macro TRACE_IRQS_ON | 51 | .macro TRACE_IRQS_ON |
52 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
63 | basr %r2,%r0 | 53 | basr %r2,%r0 |
64 | l %r1,BASED(.Ltrace_irq_on_caller) | 54 | l %r1,BASED(.Lhardirqs_on) |
65 | basr %r14,%r1 | 55 | basr %r14,%r1 # call trace_hardirqs_on_caller |
56 | #endif | ||
66 | .endm | 57 | .endm |
67 | 58 | ||
68 | .macro TRACE_IRQS_OFF | 59 | .macro TRACE_IRQS_OFF |
60 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
69 | basr %r2,%r0 | 61 | basr %r2,%r0 |
70 | l %r1,BASED(.Ltrace_irq_off_caller) | 62 | l %r1,BASED(.Lhardirqs_off) |
71 | basr %r14,%r1 | 63 | basr %r14,%r1 # call trace_hardirqs_off_caller |
72 | .endm | ||
73 | #else | ||
74 | #define TRACE_IRQS_ON | ||
75 | #define TRACE_IRQS_OFF | ||
76 | #endif | 64 | #endif |
65 | .endm | ||
77 | 66 | ||
78 | #ifdef CONFIG_LOCKDEP | ||
79 | .macro LOCKDEP_SYS_EXIT | 67 | .macro LOCKDEP_SYS_EXIT |
80 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 68 | #ifdef CONFIG_LOCKDEP |
81 | jz 0f | 69 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
70 | jz .+10 | ||
82 | l %r1,BASED(.Llockdep_sys_exit) | 71 | l %r1,BASED(.Llockdep_sys_exit) |
83 | basr %r14,%r1 | 72 | basr %r14,%r1 # call lockdep_sys_exit |
84 | 0: | ||
85 | .endm | ||
86 | #else | ||
87 | #define LOCKDEP_SYS_EXIT | ||
88 | #endif | 73 | #endif |
89 | |||
90 | /* | ||
91 | * Register usage in interrupt handlers: | ||
92 | * R9 - pointer to current task structure | ||
93 | * R13 - pointer to literal pool | ||
94 | * R14 - return register for function calls | ||
95 | * R15 - kernel stack pointer | ||
96 | */ | ||
97 | |||
98 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | ||
99 | lm %r10,%r11,\lc_from | ||
100 | sl %r10,\lc_to | ||
101 | sl %r11,\lc_to+4 | ||
102 | bc 3,BASED(0f) | ||
103 | sl %r10,BASED(.Lc_1) | ||
104 | 0: al %r10,\lc_sum | ||
105 | al %r11,\lc_sum+4 | ||
106 | bc 12,BASED(1f) | ||
107 | al %r10,BASED(.Lc_1) | ||
108 | 1: stm %r10,%r11,\lc_sum | ||
109 | .endm | ||
110 | |||
111 | .macro SAVE_ALL_SVC psworg,savearea | ||
112 | stm %r12,%r15,\savearea | ||
113 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | ||
114 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
115 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
116 | .endm | ||
117 | |||
118 | .macro SAVE_ALL_BASE savearea | ||
119 | stm %r12,%r15,\savearea | ||
120 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | ||
121 | .endm | 74 | .endm |
122 | 75 | ||
123 | .macro SAVE_ALL_PGM psworg,savearea | 76 | .macro CHECK_STACK stacksize,savearea |
124 | tm \psworg+1,0x01 # test problem state bit | ||
125 | #ifdef CONFIG_CHECK_STACK | 77 | #ifdef CONFIG_CHECK_STACK |
126 | bnz BASED(1f) | 78 | tml %r15,\stacksize - CONFIG_STACK_GUARD |
127 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 79 | la %r14,\savearea |
128 | bnz BASED(2f) | 80 | jz stack_overflow |
129 | la %r12,\psworg | ||
130 | b BASED(stack_overflow) | ||
131 | #else | ||
132 | bz BASED(2f) | ||
133 | #endif | 81 | #endif |
134 | 1: l %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
135 | 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
136 | .endm | 82 | .endm |
137 | 83 | ||
138 | .macro SAVE_ALL_ASYNC psworg,savearea | 84 | .macro SWITCH_ASYNC savearea,stack,shift |
139 | stm %r12,%r15,\savearea | 85 | tmh %r8,0x0001 # interrupting from user ? |
140 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | 86 | jnz 1f |
141 | la %r12,\psworg | 87 | lr %r14,%r9 |
142 | tm \psworg+1,0x01 # test problem state bit | 88 | sl %r14,BASED(.Lcritical_start) |
143 | bnz BASED(1f) # from user -> load async stack | 89 | cl %r14,BASED(.Lcritical_length) |
144 | clc \psworg+4(4),BASED(.Lcritical_end) | 90 | jhe 0f |
145 | bhe BASED(0f) | 91 | la %r11,\savearea # inside critical section, do cleanup |
146 | clc \psworg+4(4),BASED(.Lcritical_start) | 92 | bras %r14,cleanup_critical |
147 | bl BASED(0f) | 93 | tmh %r8,0x0001 # retest problem state after cleanup |
148 | l %r14,BASED(.Lcleanup_critical) | 94 | jnz 1f |
149 | basr %r14,%r14 | 95 | 0: l %r14,\stack # are we already on the target stack? |
150 | tm 1(%r12),0x01 # retest problem state after cleanup | ||
151 | bnz BASED(1f) | ||
152 | 0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? | ||
153 | slr %r14,%r15 | 96 | slr %r14,%r15 |
154 | sra %r14,STACK_SHIFT | 97 | sra %r14,\shift |
155 | #ifdef CONFIG_CHECK_STACK | 98 | jnz 1f |
156 | bnz BASED(1f) | 99 | CHECK_STACK 1<<\shift,\savearea |
157 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 100 | j 2f |
158 | bnz BASED(2f) | 101 | 1: l %r15,\stack # load target stack |
159 | b BASED(stack_overflow) | 102 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
160 | #else | 103 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
161 | bz BASED(2f) | ||
162 | #endif | ||
163 | 1: l %r15,__LC_ASYNC_STACK | ||
164 | 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
165 | .endm | 104 | .endm |
166 | 105 | ||
167 | .macro CREATE_STACK_FRAME savearea | 106 | .macro ADD64 high,low,timer |
168 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 107 | al \high,\timer |
169 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | 108 | al \low,\timer+4 |
170 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack | 109 | brc 12,.+8 |
171 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 110 | ahi \high,1 |
172 | .endm | 111 | .endm |
173 | 112 | ||
174 | .macro RESTORE_ALL psworg,sync | 113 | .macro SUB64 high,low,timer |
175 | mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore | 114 | sl \high,\timer |
176 | .if !\sync | 115 | sl \low,\timer+4 |
177 | ni \psworg+1,0xfd # clear wait state bit | 116 | brc 3,.+8 |
178 | .endif | 117 | ahi \high,-1 |
179 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 118 | .endm |
180 | stpt __LC_EXIT_TIMER | 119 | |
181 | lpsw \psworg # back to caller | 120 | .macro UPDATE_VTIME high,low,enter_timer |
121 | lm \high,\low,__LC_EXIT_TIMER | ||
122 | SUB64 \high,\low,\enter_timer | ||
123 | ADD64 \high,\low,__LC_USER_TIMER | ||
124 | stm \high,\low,__LC_USER_TIMER | ||
125 | lm \high,\low,__LC_LAST_UPDATE_TIMER | ||
126 | SUB64 \high,\low,__LC_EXIT_TIMER | ||
127 | ADD64 \high,\low,__LC_SYSTEM_TIMER | ||
128 | stm \high,\low,__LC_SYSTEM_TIMER | ||
129 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
182 | .endm | 130 | .endm |
183 | 131 | ||
184 | .macro REENABLE_IRQS | 132 | .macro REENABLE_IRQS |
185 | mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) | 133 | st %r8,__LC_RETURN_PSW |
186 | ni __SF_EMPTY(%r15),0xbf | 134 | ni __LC_RETURN_PSW,0xbf |
187 | ssm __SF_EMPTY(%r15) | 135 | ssm __LC_RETURN_PSW |
188 | .endm | 136 | .endm |
189 | 137 | ||
190 | .section .kprobes.text, "ax" | 138 | .section .kprobes.text, "ax" |
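The old SAVE_ALL_*/CREATE_STACK_FRAME/RESTORE_ALL macros give way to the smaller CHECK_STACK, SWITCH_ASYNC, ADD64/SUB64 and UPDATE_VTIME helpers above. The arithmetic they implement is roughly the following C, assuming a 31-bit kernel with power-of-two, stack-size-aligned stacks (names and types are illustrative):

```c
#include <stdint.h>

/* ADD64: 64-bit add performed on two 32-bit halves, with the carry
 * propagated by hand (the "brc 12,.+8; ahi high,1" pair); SUB64 is the
 * mirror image with a borrow. */
static void add64(uint32_t *high, uint32_t *low, uint64_t timer)
{
	uint32_t before = *low;

	*high += (uint32_t)(timer >> 32);
	*low  += (uint32_t)timer;
	if (*low < before)		/* carry out of the low word */
		(*high)++;
}

/* CHECK_STACK: the stack is stack_size bytes and stack_size-aligned, so the
 * low bits of the stack pointer are the offset into it; if no bit above the
 * guard area is set, the pointer sits inside the guard, i.e. overflow. */
static int stack_overflowed(uint32_t sp, uint32_t stack_size, uint32_t guard)
{
	return (sp & (stack_size - guard)) == 0;
}

/* SWITCH_ASYNC, "are we already on the target stack?": the distance from the
 * stack's top to the current pointer, shifted right by the stack shift, is
 * zero only if %r15 already lies within that stack. */
static int already_on_stack(uint32_t stack_top, uint32_t sp, unsigned int shift)
{
	return ((uint32_t)(stack_top - sp) >> shift) == 0;
}
```

The already_on_stack test is what lets io_int_handler and ext_int_handler further down reuse an interrupted async-stack frame instead of switching stacks a second time.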
@@ -197,14 +145,13 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
197 | * gpr2 = prev | 145 | * gpr2 = prev |
198 | */ | 146 | */ |
199 | ENTRY(__switch_to) | 147 | ENTRY(__switch_to) |
200 | basr %r1,0 | 148 | l %r4,__THREAD_info(%r2) # get thread_info of prev |
201 | 0: l %r4,__THREAD_info(%r2) # get thread_info of prev | ||
202 | l %r5,__THREAD_info(%r3) # get thread_info of next | 149 | l %r5,__THREAD_info(%r3) # get thread_info of next |
203 | tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? | 150 | tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? |
204 | bz 1f-0b(%r1) | 151 | jz 0f |
205 | ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev | 152 | ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev |
206 | oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next | 153 | oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next |
207 | 1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | 154 | 0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
208 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev | 155 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev |
209 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next | 156 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next |
210 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 157 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
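With the literal-pool base register gone from __switch_to, what remains besides the stack switch is the machine-check flag hand-over, which in C is simply (a sketch; the stub type and bit value are illustrative, not the kernel's bitops API):

```c
/* TIF_MCCK_PENDING hand-over at the top of __switch_to (sketch). */
#define TIF_MCCK_PENDING_MASK	0x01	/* illustrative bit value */

struct thread_info_stub { unsigned int flags; };

static void pass_mcck_pending(struct thread_info_stub *prev,
			      struct thread_info_stub *next)
{
	if (prev->flags & TIF_MCCK_PENDING_MASK) {
		prev->flags &= ~TIF_MCCK_PENDING_MASK;	/* ni ...,255-_TIF_MCCK_PENDING */
		next->flags |= TIF_MCCK_PENDING_MASK;	/* oi ...,_TIF_MCCK_PENDING */
	}
}
```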
@@ -224,48 +171,55 @@ __critical_start: | |||
224 | 171 | ||
225 | ENTRY(system_call) | 172 | ENTRY(system_call) |
226 | stpt __LC_SYNC_ENTER_TIMER | 173 | stpt __LC_SYNC_ENTER_TIMER |
227 | sysc_saveall: | 174 | sysc_stm: |
228 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 175 | stm %r8,%r15,__LC_SAVE_AREA_SYNC |
229 | CREATE_STACK_FRAME __LC_SAVE_AREA | 176 | l %r12,__LC_THREAD_INFO |
230 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 177 | l %r13,__LC_SVC_NEW_PSW+4 |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 178 | sysc_per: |
232 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 179 | l %r15,__LC_KERNEL_STACK |
233 | oi __TI_flags+3(%r12),_TIF_SYSCALL | 180 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
181 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | ||
234 | sysc_vtime: | 182 | sysc_vtime: |
235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 183 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER |
236 | sysc_stime: | 184 | stm %r0,%r7,__PT_R0(%r11) |
237 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 185 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
238 | sysc_update: | 186 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW |
239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 187 | mvc __PT_SVC_CODE(4,%r11),__LC_SVC_ILC |
240 | sysc_do_svc: | 188 | sysc_do_svc: |
241 | xr %r7,%r7 | 189 | oi __TI_flags+3(%r12),_TIF_SYSCALL |
242 | icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 | 190 | lh %r8,__PT_SVC_CODE+2(%r11) |
243 | bnz BASED(sysc_nr_ok) # svc number > 0 | 191 | sla %r8,2 # shift and test for svc0 |
192 | jnz sysc_nr_ok | ||
244 | # svc 0: system call number in %r1 | 193 | # svc 0: system call number in %r1 |
245 | cl %r1,BASED(.Lnr_syscalls) | 194 | cl %r1,BASED(.Lnr_syscalls) |
246 | bnl BASED(sysc_nr_ok) | 195 | jnl sysc_nr_ok |
247 | sth %r1,SP_SVC_CODE+2(%r15) | 196 | sth %r1,__PT_SVC_CODE+2(%r11) |
248 | lr %r7,%r1 # copy svc number to %r7 | 197 | lr %r8,%r1 |
198 | sla %r8,2 | ||
249 | sysc_nr_ok: | 199 | sysc_nr_ok: |
250 | sll %r7,2 # svc number *4 | 200 | l %r10,BASED(.Lsys_call_table) # 31 bit system call table |
251 | l %r10,BASED(.Lsysc_table) | 201 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
202 | st %r2,__PT_ORIG_GPR2(%r11) | ||
203 | st %r7,STACK_FRAME_OVERHEAD(%r15) | ||
204 | l %r9,0(%r8,%r10) # get system call addr. | ||
252 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 205 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 206 | jnz sysc_tracesys |
254 | l %r8,0(%r7,%r10) # get system call addr. | 207 | basr %r14,%r9 # call sys_xxxx |
255 | bnz BASED(sysc_tracesys) | 208 | st %r2,__PT_R2(%r11) # store return value |
256 | basr %r14,%r8 # call sys_xxxx | ||
257 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) | ||
258 | 209 | ||
259 | sysc_return: | 210 | sysc_return: |
260 | LOCKDEP_SYS_EXIT | 211 | LOCKDEP_SYS_EXIT |
261 | sysc_tif: | 212 | sysc_tif: |
262 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 213 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
263 | bno BASED(sysc_restore) | 214 | jno sysc_restore |
264 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 215 | tm __TI_flags+3(%r12),_TIF_WORK_SVC |
265 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 216 | jnz sysc_work # check for work |
266 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | 217 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL |
267 | sysc_restore: | 218 | sysc_restore: |
268 | RESTORE_ALL __LC_RETURN_PSW,1 | 219 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
220 | stpt __LC_EXIT_TIMER | ||
221 | lm %r0,%r15,__PT_R0(%r11) | ||
222 | lpsw __LC_RETURN_PSW | ||
269 | sysc_done: | 223 | sysc_done: |
270 | 224 | ||
271 | # | 225 | # |
@@ -273,16 +227,16 @@ sysc_done: | |||
273 | # | 227 | # |
274 | sysc_work: | 228 | sysc_work: |
275 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 229 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
276 | bo BASED(sysc_mcck_pending) | 230 | jo sysc_mcck_pending |
277 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 231 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
278 | bo BASED(sysc_reschedule) | 232 | jo sysc_reschedule |
279 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 233 | tm __TI_flags+3(%r12),_TIF_SIGPENDING |
280 | bo BASED(sysc_sigpending) | 234 | jo sysc_sigpending |
281 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 235 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
282 | bo BASED(sysc_notify_resume) | 236 | jo sysc_notify_resume |
283 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 237 | tm __TI_flags+3(%r12),_TIF_PER_TRAP |
284 | bo BASED(sysc_singlestep) | 238 | jo sysc_singlestep |
285 | b BASED(sysc_return) # beware of critical section cleanup | 239 | j sysc_return # beware of critical section cleanup |
286 | 240 | ||
287 | # | 241 | # |
288 | # _TIF_NEED_RESCHED is set, call schedule | 242 | # _TIF_NEED_RESCHED is set, call schedule |
@@ -290,13 +244,13 @@ sysc_work: | |||
290 | sysc_reschedule: | 244 | sysc_reschedule: |
291 | l %r1,BASED(.Lschedule) | 245 | l %r1,BASED(.Lschedule) |
292 | la %r14,BASED(sysc_return) | 246 | la %r14,BASED(sysc_return) |
293 | br %r1 # call scheduler | 247 | br %r1 # call schedule |
294 | 248 | ||
295 | # | 249 | # |
296 | # _TIF_MCCK_PENDING is set, call handler | 250 | # _TIF_MCCK_PENDING is set, call handler |
297 | # | 251 | # |
298 | sysc_mcck_pending: | 252 | sysc_mcck_pending: |
299 | l %r1,BASED(.Ls390_handle_mcck) | 253 | l %r1,BASED(.Lhandle_mcck) |
300 | la %r14,BASED(sysc_return) | 254 | la %r14,BASED(sysc_return) |
301 | br %r1 # TIF bit will be cleared by handler | 255 | br %r1 # TIF bit will be cleared by handler |
302 | 256 | ||
@@ -305,23 +259,24 @@ sysc_mcck_pending: | |||
305 | # | 259 | # |
306 | sysc_sigpending: | 260 | sysc_sigpending: |
307 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 261 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
308 | la %r2,SP_PTREGS(%r15) # load pt_regs | 262 | lr %r2,%r11 # pass pointer to pt_regs |
309 | l %r1,BASED(.Ldo_signal) | 263 | l %r1,BASED(.Ldo_signal) |
310 | basr %r14,%r1 # call do_signal | 264 | basr %r14,%r1 # call do_signal |
311 | tm __TI_flags+3(%r12),_TIF_SYSCALL | 265 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
312 | bno BASED(sysc_return) | 266 | jno sysc_return |
313 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 267 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments |
314 | xr %r7,%r7 # svc 0 returns -ENOSYS | 268 | xr %r8,%r8 # svc 0 returns -ENOSYS |
315 | clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) | 269 | clc __PT_SVC_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) |
316 | bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 | 270 | jnl sysc_nr_ok # invalid svc number -> do svc 0 |
317 | icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number | 271 | lh %r8,__PT_SVC_CODE+2(%r11) # load new svc number |
318 | b BASED(sysc_nr_ok) # restart svc | 272 | sla %r8,2 |
273 | j sysc_nr_ok # restart svc | ||
319 | 274 | ||
320 | # | 275 | # |
321 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 276 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
322 | # | 277 | # |
323 | sysc_notify_resume: | 278 | sysc_notify_resume: |
324 | la %r2,SP_PTREGS(%r15) # load pt_regs | 279 | lr %r2,%r11 # pass pointer to pt_regs |
325 | l %r1,BASED(.Ldo_notify_resume) | 280 | l %r1,BASED(.Ldo_notify_resume) |
326 | la %r14,BASED(sysc_return) | 281 | la %r14,BASED(sysc_return) |
327 | br %r1 # call do_notify_resume | 282 | br %r1 # call do_notify_resume |
@@ -331,56 +286,57 @@ sysc_notify_resume: | |||
331 | # | 286 | # |
332 | sysc_singlestep: | 287 | sysc_singlestep: |
333 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) | 288 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
334 | la %r2,SP_PTREGS(%r15) # address of register-save area | 289 | lr %r2,%r11 # pass pointer to pt_regs |
335 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 290 | l %r1,BASED(.Ldo_per_trap) |
336 | la %r14,BASED(sysc_return) # load adr. of system return | 291 | la %r14,BASED(sysc_return) |
337 | br %r1 # branch to do_per_trap | 292 | br %r1 # call do_per_trap |
338 | 293 | ||
339 | # | 294 | # |
340 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | 295 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
341 | # and after the system call | 296 | # and after the system call |
342 | # | 297 | # |
343 | sysc_tracesys: | 298 | sysc_tracesys: |
344 | l %r1,BASED(.Ltrace_entry) | 299 | l %r1,BASED(.Ltrace_enter) |
345 | la %r2,SP_PTREGS(%r15) # load pt_regs | 300 | lr %r2,%r11 # pass pointer to pt_regs |
346 | la %r3,0 | 301 | la %r3,0 |
347 | xr %r0,%r0 | 302 | xr %r0,%r0 |
348 | icm %r0,3,SP_SVC_CODE(%r15) | 303 | icm %r0,3,__PT_SVC_CODE+2(%r11) |
349 | st %r0,SP_R2(%r15) | 304 | st %r0,__PT_R2(%r11) |
350 | basr %r14,%r1 | 305 | basr %r14,%r1 # call do_syscall_trace_enter |
351 | cl %r2,BASED(.Lnr_syscalls) | 306 | cl %r2,BASED(.Lnr_syscalls) |
352 | bnl BASED(sysc_tracenogo) | 307 | jnl sysc_tracenogo |
353 | lr %r7,%r2 | 308 | lr %r8,%r2 |
354 | sll %r7,2 # svc number *4 | 309 | sll %r8,2 |
355 | l %r8,0(%r7,%r10) | 310 | l %r9,0(%r8,%r10) |
356 | sysc_tracego: | 311 | sysc_tracego: |
357 | lm %r3,%r6,SP_R3(%r15) | 312 | lm %r3,%r7,__PT_R3(%r11) |
358 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 313 | st %r7,STACK_FRAME_OVERHEAD(%r15) |
359 | l %r2,SP_ORIG_R2(%r15) | 314 | l %r2,__PT_ORIG_GPR2(%r11) |
360 | basr %r14,%r8 # call sys_xxx | 315 | basr %r14,%r9 # call sys_xxx |
361 | st %r2,SP_R2(%r15) # store return value | 316 | st %r2,__PT_R2(%r11) # store return value |
362 | sysc_tracenogo: | 317 | sysc_tracenogo: |
363 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 318 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
364 | bz BASED(sysc_return) | 319 | jz sysc_return |
365 | l %r1,BASED(.Ltrace_exit) | 320 | l %r1,BASED(.Ltrace_exit) |
366 | la %r2,SP_PTREGS(%r15) # load pt_regs | 321 | lr %r2,%r11 # pass pointer to pt_regs |
367 | la %r14,BASED(sysc_return) | 322 | la %r14,BASED(sysc_return) |
368 | br %r1 | 323 | br %r1 # call do_syscall_trace_exit |
369 | 324 | ||
370 | # | 325 | # |
371 | # a new process exits the kernel with ret_from_fork | 326 | # a new process exits the kernel with ret_from_fork |
372 | # | 327 | # |
373 | ENTRY(ret_from_fork) | 328 | ENTRY(ret_from_fork) |
329 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
330 | l %r12,__LC_THREAD_INFO | ||
374 | l %r13,__LC_SVC_NEW_PSW+4 | 331 | l %r13,__LC_SVC_NEW_PSW+4 |
375 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 332 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
376 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | 333 | jo 0f |
377 | bo BASED(0f) | 334 | st %r15,__PT_R15(%r11) # store stack pointer for new kthread |
378 | st %r15,SP_R15(%r15) # store stack pointer for new kthread | 335 | 0: l %r1,BASED(.Lschedule_tail) |
379 | 0: l %r1,BASED(.Lschedtail) | 336 | basr %r14,%r1 # call schedule_tail |
380 | basr %r14,%r1 | ||
381 | TRACE_IRQS_ON | 337 | TRACE_IRQS_ON |
382 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 338 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
383 | b BASED(sysc_tracenogo) | 339 | j sysc_tracenogo |
384 | 340 | ||
385 | # | 341 | # |
386 | # kernel_execve function needs to deal with pt_regs that is not | 342 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -390,153 +346,98 @@ ENTRY(kernel_execve) | |||
390 | stm %r12,%r15,48(%r15) | 346 | stm %r12,%r15,48(%r15) |
391 | lr %r14,%r15 | 347 | lr %r14,%r15 |
392 | l %r13,__LC_SVC_NEW_PSW+4 | 348 | l %r13,__LC_SVC_NEW_PSW+4 |
393 | s %r15,BASED(.Lc_spsize) | 349 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
394 | st %r14,__SF_BACKCHAIN(%r15) | 350 | st %r14,__SF_BACKCHAIN(%r15) |
395 | la %r12,SP_PTREGS(%r15) | 351 | la %r12,STACK_FRAME_OVERHEAD(%r15) |
396 | xc 0(__PT_SIZE,%r12),0(%r12) | 352 | xc 0(__PT_SIZE,%r12),0(%r12) |
397 | l %r1,BASED(.Ldo_execve) | 353 | l %r1,BASED(.Ldo_execve) |
398 | lr %r5,%r12 | 354 | lr %r5,%r12 |
399 | basr %r14,%r1 | 355 | basr %r14,%r1 # call do_execve |
400 | ltr %r2,%r2 | 356 | ltr %r2,%r2 |
401 | be BASED(0f) | 357 | je 0f |
402 | a %r15,BASED(.Lc_spsize) | 358 | ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) |
403 | lm %r12,%r15,48(%r15) | 359 | lm %r12,%r15,48(%r15) |
404 | br %r14 | 360 | br %r14 |
405 | # execve succeeded. | 361 | # execve succeeded. |
406 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 362 | 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
407 | l %r15,__LC_KERNEL_STACK # load ksp | 363 | l %r15,__LC_KERNEL_STACK # load ksp |
408 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 364 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
409 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 365 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
366 | mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs | ||
410 | l %r12,__LC_THREAD_INFO | 367 | l %r12,__LC_THREAD_INFO |
411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 368 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
412 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 369 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
413 | l %r1,BASED(.Lexecve_tail) | 370 | l %r1,BASED(.Lexecve_tail) |
414 | basr %r14,%r1 | 371 | basr %r14,%r1 # call execve_tail |
415 | b BASED(sysc_return) | 372 | j sysc_return |
416 | 373 | ||
417 | /* | 374 | /* |
418 | * Program check handler routine | 375 | * Program check handler routine |
419 | */ | 376 | */ |
420 | 377 | ||
421 | ENTRY(pgm_check_handler) | 378 | ENTRY(pgm_check_handler) |
422 | /* | ||
423 | * First we need to check for a special case: | ||
424 | * Single stepping an instruction that disables the PER event mask will | ||
425 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | ||
426 | * For a single stepped SVC the program check handler gets control after | ||
427 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | ||
428 | * then handle the PER event. Therefore we update the SVC old PSW to point | ||
429 | * to the pgm_check_handler and branch to the SVC handler after we checked | ||
430 | * if we have to load the kernel stack register. | ||
431 | * For every other possible cause for PER event without the PER mask set | ||
432 | * we just ignore the PER event (FIXME: is there anything we have to do | ||
433 | * for LPSW?). | ||
434 | */ | ||
435 | stpt __LC_SYNC_ENTER_TIMER | 379 | stpt __LC_SYNC_ENTER_TIMER |
436 | SAVE_ALL_BASE __LC_SAVE_AREA | 380 | stm %r8,%r15,__LC_SAVE_AREA_SYNC |
437 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 381 | l %r12,__LC_THREAD_INFO |
438 | bnz BASED(pgm_per) # got per exception -> special case | 382 | l %r13,__LC_SVC_NEW_PSW+4 |
439 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 383 | lm %r8,%r9,__LC_PGM_OLD_PSW |
440 | CREATE_STACK_FRAME __LC_SAVE_AREA | 384 | tmh %r8,0x0001 # test problem state bit |
441 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | 385 | jnz 1f # -> fault in user space |
442 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 386 | tmh %r8,0x4000 # PER bit set in old PSW ? |
443 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 387 | jnz 0f # -> enabled, can't be a double fault |
444 | bz BASED(pgm_no_vtime) | 388 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
445 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 389 | jnz pgm_svcper # -> single stepped svc |
446 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 390 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
447 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 391 | j 2f |
448 | pgm_no_vtime: | 392 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER |
449 | l %r3,__LC_PGM_ILC # load program interruption code | 393 | l %r15,__LC_KERNEL_STACK |
450 | l %r4,__LC_TRANS_EXC_CODE | 394 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
451 | REENABLE_IRQS | 395 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
452 | la %r8,0x7f | 396 | stm %r0,%r7,__PT_R0(%r11) |
453 | nr %r8,%r3 | 397 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
454 | sll %r8,2 | 398 | stm %r8,%r9,__PT_PSW(%r11) |
455 | l %r1,BASED(.Ljump_table) | 399 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
456 | l %r1,0(%r8,%r1) # load address of handler routine | 400 | jz 0f |
457 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
458 | basr %r14,%r1 # branch to interrupt-handler | ||
459 | pgm_exit: | ||
460 | b BASED(sysc_return) | ||
461 | |||
462 | # | ||
463 | # handle per exception | ||
464 | # | ||
465 | pgm_per: | ||
466 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on | ||
467 | bnz BASED(pgm_per_std) # ok, normal per event from user space | ||
468 | # ok its one of the special cases, now we need to find out which one | ||
469 | clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW | ||
470 | be BASED(pgm_svcper) | ||
471 | # no interesting special case, ignore PER event | ||
472 | lm %r12,%r15,__LC_SAVE_AREA | ||
473 | lpsw 0x28 | ||
474 | |||
475 | # | ||
476 | # Normal per exception | ||
477 | # | ||
478 | pgm_per_std: | ||
479 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | ||
480 | CREATE_STACK_FRAME __LC_SAVE_AREA | ||
481 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | ||
482 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
483 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
484 | bz BASED(pgm_no_vtime2) | ||
485 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
486 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
487 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
488 | pgm_no_vtime2: | ||
489 | l %r1,__TI_task(%r12) | 401 | l %r1,__TI_task(%r12) |
490 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 402 | tmh %r8,0x0001 # kernel per event ? |
491 | bz BASED(kernel_per) | 403 | jz pgm_kprobe |
492 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | 404 | oi __TI_flags+3(%r12),_TIF_PER_TRAP |
493 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 405 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS |
406 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | ||
494 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID | 407 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID |
495 | oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | 408 | 0: l %r3,__LC_PGM_ILC # load program interruption code |
496 | l %r3,__LC_PGM_ILC # load program interruption code | ||
497 | l %r4,__LC_TRANS_EXC_CODE | 409 | l %r4,__LC_TRANS_EXC_CODE |
498 | REENABLE_IRQS | 410 | REENABLE_IRQS |
499 | la %r8,0x7f | 411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
500 | nr %r8,%r3 # clear per-event-bit and ilc | ||
501 | be BASED(pgm_exit2) # only per or per+check ? | ||
502 | sll %r8,2 | ||
503 | l %r1,BASED(.Ljump_table) | 412 | l %r1,BASED(.Ljump_table) |
504 | l %r1,0(%r8,%r1) # load address of handler routine | 413 | la %r10,0x7f |
505 | la %r2,SP_PTREGS(%r15) # address of register-save area | 414 | nr %r10,%r3 |
415 | je sysc_return | ||
416 | sll %r10,2 | ||
417 | l %r1,0(%r10,%r1) # load address of handler routine | ||
418 | lr %r2,%r11 # pass pointer to pt_regs | ||
506 | basr %r14,%r1 # branch to interrupt-handler | 419 | basr %r14,%r1 # branch to interrupt-handler |
507 | pgm_exit2: | 420 | j sysc_return |
508 | b BASED(sysc_return) | ||
509 | 421 | ||
510 | # | 422 | # |
511 | # it was a single stepped SVC that is causing all the trouble | 423 | # PER event in supervisor state, must be kprobes |
512 | # | 424 | # |
513 | pgm_svcper: | 425 | pgm_kprobe: |
514 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 426 | REENABLE_IRQS |
515 | CREATE_STACK_FRAME __LC_SAVE_AREA | 427 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
516 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 428 | l %r1,BASED(.Ldo_per_trap) |
517 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 429 | lr %r2,%r11 # pass pointer to pt_regs |
518 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 430 | basr %r14,%r1 # call do_per_trap |
519 | oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | 431 | j sysc_return |
520 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
521 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
522 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
523 | l %r8,__TI_task(%r12) | ||
524 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | ||
525 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS | ||
526 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | ||
527 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
528 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
529 | b BASED(sysc_do_svc) | ||
530 | 432 | ||
531 | # | 433 | # |
532 | # per was called from kernel, must be kprobes | 434 | # single stepped system call |
533 | # | 435 | # |
534 | kernel_per: | 436 | pgm_svcper: |
535 | REENABLE_IRQS | 437 | oi __TI_flags+3(%r12),_TIF_PER_TRAP |
536 | la %r2,SP_PTREGS(%r15) # address of register-save area | 438 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW |
537 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 439 | mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) |
538 | basr %r14,%r1 # branch to do_single_step | 440 | lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs |
539 | b BASED(pgm_exit) | ||
540 | 441 | ||
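The reworked program-check handler now sorts out user faults, kernel faults and PER events up front and dispatches through the pgm jump table at the end. That dispatch amounts to roughly the following C (a sketch; struct pt_regs is left opaque and the table type is illustrative):

```c
struct pt_regs;				/* opaque here */
typedef void (*pgm_handler_fn)(struct pt_regs *);

/* Dispatch at label 0: of pgm_check_handler above (sketch). */
static void pgm_dispatch_sketch(struct pt_regs *regs, unsigned int pgm_ilc,
				pgm_handler_fn *jump_table)
{
	unsigned int index = pgm_ilc & 0x7f;	/* la %r10,0x7f; nr %r10,%r3 */

	if (index == 0)
		return;			/* PER-only event, nothing left to call */
	jump_table[index](regs);	/* l %r1,0(%r10,%r1); basr %r14,%r1 */
	/* both paths continue at sysc_return */
}
```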
541 | /* | 442 | /* |
542 | * IO interrupt handler routine | 443 | * IO interrupt handler routine |
@@ -545,28 +446,35 @@ kernel_per: | |||
545 | ENTRY(io_int_handler) | 446 | ENTRY(io_int_handler) |
546 | stck __LC_INT_CLOCK | 447 | stck __LC_INT_CLOCK |
547 | stpt __LC_ASYNC_ENTER_TIMER | 448 | stpt __LC_ASYNC_ENTER_TIMER |
548 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 449 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC |
549 | CREATE_STACK_FRAME __LC_SAVE_AREA+16 | 450 | l %r12,__LC_THREAD_INFO |
550 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 451 | l %r13,__LC_SVC_NEW_PSW+4 |
551 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 452 | lm %r8,%r9,__LC_IO_OLD_PSW |
552 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 453 | tmh %r8,0x0001 # interrupting from user ? |
553 | bz BASED(io_no_vtime) | 454 | jz io_skip |
554 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 455 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER |
555 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 456 | io_skip: |
556 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 457 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
557 | io_no_vtime: | 458 | stm %r0,%r7,__PT_R0(%r11) |
459 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | ||
460 | stm %r8,%r9,__PT_PSW(%r11) | ||
558 | TRACE_IRQS_OFF | 461 | TRACE_IRQS_OFF |
559 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | 462 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
560 | la %r2,SP_PTREGS(%r15) # address of register-save area | 463 | l %r1,BASED(.Ldo_IRQ) |
561 | basr %r14,%r1 # branch to standard irq handler | 464 | lr %r2,%r11 # pass pointer to pt_regs |
465 | basr %r14,%r1 # call do_IRQ | ||
562 | io_return: | 466 | io_return: |
563 | LOCKDEP_SYS_EXIT | 467 | LOCKDEP_SYS_EXIT |
564 | TRACE_IRQS_ON | 468 | TRACE_IRQS_ON |
565 | io_tif: | 469 | io_tif: |
566 | tm __TI_flags+3(%r12),_TIF_WORK_INT | 470 | tm __TI_flags+3(%r12),_TIF_WORK_INT |
567 | bnz BASED(io_work) # there is work to do (signals etc.) | 471 | jnz io_work # there is work to do (signals etc.) |
568 | io_restore: | 472 | io_restore: |
569 | RESTORE_ALL __LC_RETURN_PSW,0 | 473 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
474 | ni __LC_RETURN_PSW+1,0xfd # clean wait state bit | ||
475 | stpt __LC_EXIT_TIMER | ||
476 | lm %r0,%r15,__PT_R0(%r11) | ||
477 | lpsw __LC_RETURN_PSW | ||
570 | io_done: | 478 | io_done: |
571 | 479 | ||
572 | # | 480 | # |
@@ -577,28 +485,29 @@ io_done: | |||
577 | # Before any work can be done, a switch to the kernel stack is required. | 485 | # Before any work can be done, a switch to the kernel stack is required. |
578 | # | 486 | # |
579 | io_work: | 487 | io_work: |
580 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 488 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
581 | bo BASED(io_work_user) # yes -> do resched & signal | 489 | jo io_work_user # yes -> do resched & signal |
582 | #ifdef CONFIG_PREEMPT | 490 | #ifdef CONFIG_PREEMPT |
583 | # check for preemptive scheduling | 491 | # check for preemptive scheduling |
584 | icm %r0,15,__TI_precount(%r12) | 492 | icm %r0,15,__TI_precount(%r12) |
585 | bnz BASED(io_restore) # preemption disabled | 493 | jnz io_restore # preemption disabled |
586 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 494 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
587 | bno BASED(io_restore) | 495 | jno io_restore |
588 | # switch to kernel stack | 496 | # switch to kernel stack |
589 | l %r1,SP_R15(%r15) | 497 | l %r1,__PT_R15(%r11) |
590 | s %r1,BASED(.Lc_spsize) | 498 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
591 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 499 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
592 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 500 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
501 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
593 | lr %r15,%r1 | 502 | lr %r15,%r1 |
594 | # TRACE_IRQS_ON already done at io_return, call | 503 | # TRACE_IRQS_ON already done at io_return, call |
595 | # TRACE_IRQS_OFF to keep things symmetrical | 504 | # TRACE_IRQS_OFF to keep things symmetrical |
596 | TRACE_IRQS_OFF | 505 | TRACE_IRQS_OFF |
597 | l %r1,BASED(.Lpreempt_schedule_irq) | 506 | l %r1,BASED(.Lpreempt_irq) |
598 | basr %r14,%r1 # call preempt_schedule_irq | 507 | basr %r14,%r1 # call preempt_schedule_irq |
599 | b BASED(io_return) | 508 | j io_return |
600 | #else | 509 | #else |
601 | b BASED(io_restore) | 510 | j io_restore |
602 | #endif | 511 | #endif |
603 | 512 | ||
604 | # | 513 | # |
@@ -606,9 +515,10 @@ io_work: | |||
606 | # | 515 | # |
607 | io_work_user: | 516 | io_work_user: |
608 | l %r1,__LC_KERNEL_STACK | 517 | l %r1,__LC_KERNEL_STACK |
609 | s %r1,BASED(.Lc_spsize) | 518 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
610 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 519 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
611 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 520 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
521 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
612 | lr %r15,%r1 | 522 | lr %r15,%r1 |
613 | 523 | ||
614 | # | 524 | # |
@@ -618,24 +528,24 @@ io_work_user: | |||
618 | # | 528 | # |
619 | io_work_tif: | 529 | io_work_tif: |
620 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 530 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
621 | bo BASED(io_mcck_pending) | 531 | jo io_mcck_pending |
622 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 532 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
623 | bo BASED(io_reschedule) | 533 | jo io_reschedule |
624 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 534 | tm __TI_flags+3(%r12),_TIF_SIGPENDING |
625 | bo BASED(io_sigpending) | 535 | jo io_sigpending |
626 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 536 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
627 | bo BASED(io_notify_resume) | 537 | jo io_notify_resume |
628 | b BASED(io_return) # beware of critical section cleanup | 538 | j io_return # beware of critical section cleanup |
629 | 539 | ||
630 | # | 540 | # |
631 | # _TIF_MCCK_PENDING is set, call handler | 541 | # _TIF_MCCK_PENDING is set, call handler |
632 | # | 542 | # |
633 | io_mcck_pending: | 543 | io_mcck_pending: |
634 | # TRACE_IRQS_ON already done at io_return | 544 | # TRACE_IRQS_ON already done at io_return |
635 | l %r1,BASED(.Ls390_handle_mcck) | 545 | l %r1,BASED(.Lhandle_mcck) |
636 | basr %r14,%r1 # TIF bit will be cleared by handler | 546 | basr %r14,%r1 # TIF bit will be cleared by handler |
637 | TRACE_IRQS_OFF | 547 | TRACE_IRQS_OFF |
638 | b BASED(io_return) | 548 | j io_return |
639 | 549 | ||
640 | # | 550 | # |
641 | # _TIF_NEED_RESCHED is set, call schedule | 551 | # _TIF_NEED_RESCHED is set, call schedule |
@@ -643,37 +553,37 @@ io_mcck_pending: | |||
643 | io_reschedule: | 553 | io_reschedule: |
644 | # TRACE_IRQS_ON already done at io_return | 554 | # TRACE_IRQS_ON already done at io_return |
645 | l %r1,BASED(.Lschedule) | 555 | l %r1,BASED(.Lschedule) |
646 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 556 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
647 | basr %r14,%r1 # call scheduler | 557 | basr %r14,%r1 # call scheduler |
648 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 558 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
649 | TRACE_IRQS_OFF | 559 | TRACE_IRQS_OFF |
650 | b BASED(io_return) | 560 | j io_return |
651 | 561 | ||
652 | # | 562 | # |
653 | # _TIF_SIGPENDING is set, call do_signal | 563 | # _TIF_SIGPENDING is set, call do_signal |
654 | # | 564 | # |
655 | io_sigpending: | 565 | io_sigpending: |
656 | # TRACE_IRQS_ON already done at io_return | 566 | # TRACE_IRQS_ON already done at io_return |
657 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
658 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
659 | l %r1,BASED(.Ldo_signal) | 567 | l %r1,BASED(.Ldo_signal) |
568 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
569 | lr %r2,%r11 # pass pointer to pt_regs | ||
660 | basr %r14,%r1 # call do_signal | 570 | basr %r14,%r1 # call do_signal |
661 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 571 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
662 | TRACE_IRQS_OFF | 572 | TRACE_IRQS_OFF |
663 | b BASED(io_return) | 573 | j io_return |
664 | 574 | ||
665 | # | 575 | # |
666 | # _TIF_SIGPENDING is set, call do_signal | 576 | # _TIF_SIGPENDING is set, call do_signal |
667 | # | 577 | # |
668 | io_notify_resume: | 578 | io_notify_resume: |
669 | # TRACE_IRQS_ON already done at io_return | 579 | # TRACE_IRQS_ON already done at io_return |
670 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
671 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
672 | l %r1,BASED(.Ldo_notify_resume) | 580 | l %r1,BASED(.Ldo_notify_resume) |
673 | basr %r14,%r1 # call do_signal | 581 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
674 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 582 | lr %r2,%r11 # pass pointer to pt_regs |
583 | basr %r14,%r1 # call do_notify_resume | ||
584 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
675 | TRACE_IRQS_OFF | 585 | TRACE_IRQS_OFF |
676 | b BASED(io_return) | 586 | j io_return |
677 | 587 | ||
678 | /* | 588 | /* |
679 | * External interrupt handler routine | 589 | * External interrupt handler routine |
@@ -682,23 +592,25 @@ io_notify_resume: | |||
682 | ENTRY(ext_int_handler) | 592 | ENTRY(ext_int_handler) |
683 | stck __LC_INT_CLOCK | 593 | stck __LC_INT_CLOCK |
684 | stpt __LC_ASYNC_ENTER_TIMER | 594 | stpt __LC_ASYNC_ENTER_TIMER |
685 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 595 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC |
686 | CREATE_STACK_FRAME __LC_SAVE_AREA+16 | 596 | l %r12,__LC_THREAD_INFO |
687 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 597 | l %r13,__LC_SVC_NEW_PSW+4 |
688 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 598 | lm %r8,%r9,__LC_EXT_OLD_PSW |
689 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 599 | tmh %r8,0x0001 # interrupting from user ? |
690 | bz BASED(ext_no_vtime) | 600 | jz ext_skip |
691 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 601 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER |
692 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 602 | ext_skip: |
693 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 603 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
694 | ext_no_vtime: | 604 | stm %r0,%r7,__PT_R0(%r11) |
605 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | ||
606 | stm %r8,%r9,__PT_PSW(%r11) | ||
695 | TRACE_IRQS_OFF | 607 | TRACE_IRQS_OFF |
696 | la %r2,SP_PTREGS(%r15) # address of register-save area | 608 | lr %r2,%r11 # pass pointer to pt_regs |
697 | l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code | 609 | l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code |
698 | l %r4,__LC_EXT_PARAMS # get external parameters | 610 | l %r4,__LC_EXT_PARAMS # get external parameters |
699 | l %r1,BASED(.Ldo_extint) | 611 | l %r1,BASED(.Ldo_extint) |
700 | basr %r14,%r1 | 612 | basr %r14,%r1 # call do_extint |
701 | b BASED(io_return) | 613 | j io_return |
702 | 614 | ||
703 | __critical_end: | 615 | __critical_end: |
704 | 616 | ||
@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler) | |||
710 | stck __LC_MCCK_CLOCK | 622 | stck __LC_MCCK_CLOCK |
711 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer | 623 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer |
712 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | 624 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs |
713 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 625 | l %r12,__LC_THREAD_INFO |
714 | la %r12,__LC_MCK_OLD_PSW | 626 | l %r13,__LC_SVC_NEW_PSW+4 |
627 | lm %r8,%r9,__LC_MCK_OLD_PSW | ||
715 | tm __LC_MCCK_CODE,0x80 # system damage? | 628 | tm __LC_MCCK_CODE,0x80 # system damage? |
716 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid | 629 | jo mcck_panic # yes -> rest of mcck code invalid |
717 | mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | 630 | la %r14,__LC_CPU_TIMER_SAVE_AREA |
631 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
718 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 632 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
719 | bo BASED(1f) | 633 | jo 3f |
720 | la %r14,__LC_SYNC_ENTER_TIMER | 634 | la %r14,__LC_SYNC_ENTER_TIMER |
721 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | 635 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER |
722 | bl BASED(0f) | 636 | jl 0f |
723 | la %r14,__LC_ASYNC_ENTER_TIMER | 637 | la %r14,__LC_ASYNC_ENTER_TIMER |
724 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | 638 | 0: clc 0(8,%r14),__LC_EXIT_TIMER |
725 | bl BASED(0f) | 639 | jl 1f |
726 | la %r14,__LC_EXIT_TIMER | 640 | la %r14,__LC_EXIT_TIMER |
727 | 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | 641 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER |
728 | bl BASED(0f) | 642 | jl 2f |
729 | la %r14,__LC_LAST_UPDATE_TIMER | 643 | la %r14,__LC_LAST_UPDATE_TIMER |
730 | 0: spt 0(%r14) | 644 | 2: spt 0(%r14) |
731 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 645 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
732 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 646 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
733 | bno BASED(mcck_int_main) # no -> skip cleanup critical | 647 | jno mcck_panic # no -> skip cleanup critical |
734 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 648 | tm %r8,0x0001 # interrupting from user ? |
735 | bnz BASED(mcck_int_main) # from user -> load async stack | 649 | jz mcck_skip |
736 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end) | 650 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER |
737 | bhe BASED(mcck_int_main) | 651 | mcck_skip: |
738 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start) | 652 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT |
739 | bl BASED(mcck_int_main) | 653 | mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA |
740 | l %r14,BASED(.Lcleanup_critical) | 654 | stm %r8,%r9,__PT_PSW(%r11) |
741 | basr %r14,%r14 | 655 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
742 | mcck_int_main: | 656 | l %r1,BASED(.Ldo_machine_check) |
743 | l %r14,__LC_PANIC_STACK # are we already on the panic stack? | 657 | lr %r2,%r11 # pass pointer to pt_regs |
744 | slr %r14,%r15 | 658 | basr %r14,%r1 # call s390_do_machine_check |
745 | sra %r14,PAGE_SHIFT | 659 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
746 | be BASED(0f) | 660 | jno mcck_return |
747 | l %r15,__LC_PANIC_STACK # load panic stack | ||
748 | 0: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
749 | CREATE_STACK_FRAME __LC_SAVE_AREA+32 | ||
750 | mvc SP_PSW(8,%r15),0(%r12) | ||
751 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
752 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | ||
753 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical | ||
754 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
755 | bz BASED(mcck_no_vtime) | ||
756 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER | ||
757 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
758 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
759 | mcck_no_vtime: | ||
760 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
761 | l %r1,BASED(.Ls390_mcck) | ||
762 | basr %r14,%r1 # call machine check handler | ||
763 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
764 | bno BASED(mcck_return) | ||
765 | l %r1,__LC_KERNEL_STACK # switch to kernel stack | 661 | l %r1,__LC_KERNEL_STACK # switch to kernel stack |
766 | s %r1,BASED(.Lc_spsize) | 662 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
767 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 663 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
768 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 664 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
665 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
769 | lr %r15,%r1 | 666 | lr %r15,%r1 |
770 | stosm __SF_EMPTY(%r15),0x04 # turn dat on | 667 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
771 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 668 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
772 | bno BASED(mcck_return) | 669 | jno mcck_return |
773 | TRACE_IRQS_OFF | 670 | TRACE_IRQS_OFF |
774 | l %r1,BASED(.Ls390_handle_mcck) | 671 | l %r1,BASED(.Lhandle_mcck) |
775 | basr %r14,%r1 # call machine check handler | 672 | basr %r14,%r1 # call s390_handle_mcck |
776 | TRACE_IRQS_ON | 673 | TRACE_IRQS_ON |
777 | mcck_return: | 674 | mcck_return: |
778 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW | 675 | mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW |
779 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 676 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
780 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 677 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
781 | bno BASED(0f) | 678 | jno 0f |
782 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 679 | lm %r0,%r15,__PT_R0(%r11) |
783 | stpt __LC_EXIT_TIMER | 680 | stpt __LC_EXIT_TIMER |
784 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 681 | lpsw __LC_RETURN_MCCK_PSW |
785 | 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 682 | 0: lm %r0,%r15,__PT_R0(%r11) |
786 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 683 | lpsw __LC_RETURN_MCCK_PSW |
787 | 684 | ||
788 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 | 685 | mcck_panic: |
686 | l %r14,__LC_PANIC_STACK | ||
687 | slr %r14,%r15 | ||
688 | sra %r14,PAGE_SHIFT | ||
689 | jz 0f | ||
690 | l %r15,__LC_PANIC_STACK | ||
691 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
692 | j mcck_skip | ||
789 | 693 | ||
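The chain of clc/jl compares at the top of mcck_int_handler revalidates the CPU timer when the machine-check code says the stored timer value cannot be trusted: the handler walks the saved enter/exit stamps in the lowcore, keeps the smallest one (the CPU timer is a down-counter, so the smallest snapshot is the most recently taken), reloads the timer from it with spt, and copies it to the mcck enter timer. A minimal C sketch of that selection, with the four lowcore fields modelled as plain variables:

    /* Hypothetical view of the lowcore stamps used by the mcck handler. */
    struct mcck_stamps {
            unsigned long long sync_enter_timer;
            unsigned long long async_enter_timer;
            unsigned long long exit_timer;
            unsigned long long last_update_timer;
    };

    /*
     * The CPU timer counts down, so the smallest stored snapshot is the
     * newest one; that is the value the handler reloads with spt.
     */
    static unsigned long long *newest_stamp(struct mcck_stamps *s)
    {
            unsigned long long *best = &s->sync_enter_timer;

            if (s->async_enter_timer < *best)
                    best = &s->async_enter_timer;
            if (s->exit_timer < *best)
                    best = &s->exit_timer;
            if (s->last_update_timer < *best)
                    best = &s->last_update_timer;
            return best;
    }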
790 | /* | 694 | /* |
791 | * Restart interruption handler, kick starter for additional CPUs | 695 | * Restart interruption handler, kick starter for additional CPUs |
@@ -799,18 +703,18 @@ restart_base: | |||
799 | stck __LC_LAST_UPDATE_CLOCK | 703 | stck __LC_LAST_UPDATE_CLOCK |
800 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) | 704 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) |
801 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) | 705 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) |
802 | l %r15,__LC_SAVE_AREA+60 # load ksp | 706 | l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp |
803 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs | 707 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs |
804 | lam %a0,%a15,__LC_AREGS_SAVE_AREA | 708 | lam %a0,%a15,__LC_AREGS_SAVE_AREA |
805 | lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone | 709 | lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone |
806 | l %r1,__LC_THREAD_INFO | 710 | l %r1,__LC_THREAD_INFO |
807 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) | 711 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) |
808 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) | 712 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) |
809 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER | 713 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER |
810 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 714 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
811 | basr %r14,0 | 715 | basr %r14,0 |
812 | l %r14,restart_addr-.(%r14) | 716 | l %r14,restart_addr-.(%r14) |
813 | basr %r14,%r14 # branch to start_secondary | 717 | basr %r14,%r14 # call start_secondary |
814 | restart_addr: | 718 | restart_addr: |
815 | .long start_secondary | 719 | .long start_secondary |
816 | .align 8 | 720 | .align 8 |
@@ -835,19 +739,19 @@ restart_go: | |||
835 | # PSW restart interrupt handler | 739 | # PSW restart interrupt handler |
836 | # | 740 | # |
837 | ENTRY(psw_restart_int_handler) | 741 | ENTRY(psw_restart_int_handler) |
838 | st %r15,__LC_SAVE_AREA+48(%r0) # save r15 | 742 | st %r15,__LC_SAVE_AREA_RESTART |
839 | basr %r15,0 | 743 | basr %r15,0 |
840 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack | 744 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack |
841 | l %r15,0(%r15) | 745 | l %r15,0(%r15) |
842 | ahi %r15,-SP_SIZE # make room for pt_regs | 746 | ahi %r15,-__PT_SIZE # create pt_regs on stack |
843 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 747 | stm %r0,%r14,__PT_R0(%r15) |
844 | mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack | 748 | mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART |
845 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw | 749 | mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw |
846 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 750 | ahi %r15,-STACK_FRAME_OVERHEAD |
751 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | ||
847 | basr %r14,0 | 752 | basr %r14,0 |
848 | 1: l %r14,.Ldo_restart-1b(%r14) | 753 | 1: l %r14,.Ldo_restart-1b(%r14) |
849 | basr %r14,%r14 | 754 | basr %r14,%r14 |
850 | |||
851 | basr %r14,0 # load disabled wait PSW if | 755 | basr %r14,0 # load disabled wait PSW if |
852 | 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns | 756 | 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns |
853 | .align 4 | 757 | .align 4 |
@@ -869,215 +773,174 @@ restart_psw_crash: | |||
869 | */ | 773 | */ |
870 | stack_overflow: | 774 | stack_overflow: |
871 | l %r15,__LC_PANIC_STACK # change to panic stack | 775 | l %r15,__LC_PANIC_STACK # change to panic stack |
872 | sl %r15,BASED(.Lc_spsize) | 776 | ahi %r15,-__PT_SIZE # create pt_regs |
873 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 777 | stm %r0,%r7,__PT_R0(%r15) |
874 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 778 | stm %r8,%r9,__PT_PSW(%r15) |
875 | la %r1,__LC_SAVE_AREA | 779 | mvc __PT_R8(32,%r11),0(%r14) |
876 | ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? | 780 | lr %r15,%r11 |
877 | be BASED(0f) | 781 | ahi %r15,-STACK_FRAME_OVERHEAD |
878 | ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? | 782 | l %r1,BASED(1f) |
879 | be BASED(0f) | 783 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
880 | la %r1,__LC_SAVE_AREA+16 | 784 | lr %r2,%r11 # pass pointer to pt_regs |
881 | 0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack | 785 | br %r1 # branch to kernel_stack_overflow |
882 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain | ||
883 | l %r1,BASED(1f) # branch to kernel_stack_overflow | ||
884 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
885 | br %r1 | ||
886 | 1: .long kernel_stack_overflow | 786 | 1: .long kernel_stack_overflow |
887 | #endif | 787 | #endif |
888 | 788 | ||
889 | cleanup_table_system_call: | 789 | cleanup_table: |
890 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 | 790 | .long system_call + 0x80000000 |
891 | cleanup_table_sysc_tif: | 791 | .long sysc_do_svc + 0x80000000 |
892 | .long sysc_tif + 0x80000000, sysc_restore + 0x80000000 | 792 | .long sysc_tif + 0x80000000 |
893 | cleanup_table_sysc_restore: | 793 | .long sysc_restore + 0x80000000 |
894 | .long sysc_restore + 0x80000000, sysc_done + 0x80000000 | 794 | .long sysc_done + 0x80000000 |
895 | cleanup_table_io_tif: | 795 | .long io_tif + 0x80000000 |
896 | .long io_tif + 0x80000000, io_restore + 0x80000000 | 796 | .long io_restore + 0x80000000 |
897 | cleanup_table_io_restore: | 797 | .long io_done + 0x80000000 |
898 | .long io_restore + 0x80000000, io_done + 0x80000000 | ||
899 | 798 | ||
900 | cleanup_critical: | 799 | cleanup_critical: |
901 | clc 4(4,%r12),BASED(cleanup_table_system_call) | 800 | cl %r9,BASED(cleanup_table) # system_call |
902 | bl BASED(0f) | 801 | jl 0f |
903 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) | 802 | cl %r9,BASED(cleanup_table+4) # sysc_do_svc |
904 | bl BASED(cleanup_system_call) | 803 | jl cleanup_system_call |
905 | 0: | 804 | cl %r9,BASED(cleanup_table+8) # sysc_tif |
906 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif) | 805 | jl 0f |
907 | bl BASED(0f) | 806 | cl %r9,BASED(cleanup_table+12) # sysc_restore |
908 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4) | 807 | jl cleanup_sysc_tif |
909 | bl BASED(cleanup_sysc_tif) | 808 | cl %r9,BASED(cleanup_table+16) # sysc_done |
910 | 0: | 809 | jl cleanup_sysc_restore |
911 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore) | 810 | cl %r9,BASED(cleanup_table+20) # io_tif |
912 | bl BASED(0f) | 811 | jl 0f |
913 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4) | 812 | cl %r9,BASED(cleanup_table+24) # io_restore |
914 | bl BASED(cleanup_sysc_restore) | 813 | jl cleanup_io_tif |
915 | 0: | 814 | cl %r9,BASED(cleanup_table+28) # io_done |
916 | clc 4(4,%r12),BASED(cleanup_table_io_tif) | 815 | jl cleanup_io_restore |
917 | bl BASED(0f) | 816 | 0: br %r14 |
918 | clc 4(4,%r12),BASED(cleanup_table_io_tif+4) | ||
919 | bl BASED(cleanup_io_tif) | ||
920 | 0: | ||
921 | clc 4(4,%r12),BASED(cleanup_table_io_restore) | ||
922 | bl BASED(0f) | ||
923 | clc 4(4,%r12),BASED(cleanup_table_io_restore+4) | ||
924 | bl BASED(cleanup_io_restore) | ||
925 | 0: | ||
926 | br %r14 | ||
927 | 817 | ||
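The rewritten cleanup_critical replaces five two-entry tables with a single eight-entry cleanup_table sorted by address: the interrupted instruction address (now in %r9 rather than fetched through %r12) is compared against successive boundaries, and only addresses inside a range that left state half-built branch to a fix-up routine. A rough C equivalent of the dispatch; the array mirrors the .long entries of cleanup_table:

    enum cleanup {
            CLEAN_NONE, CLEAN_SYSTEM_CALL, CLEAN_SYSC_TIF,
            CLEAN_SYSC_RESTORE, CLEAN_IO_TIF, CLEAN_IO_RESTORE,
    };

    /*
     * t[0..7] = system_call, sysc_do_svc, sysc_tif, sysc_restore,
     *           sysc_done, io_tif, io_restore, io_done (ascending).
     */
    static enum cleanup pick_cleanup(unsigned long addr, const unsigned long t[8])
    {
            if (addr < t[0]) return CLEAN_NONE;         /* before system_call */
            if (addr < t[1]) return CLEAN_SYSTEM_CALL;  /* entry not finished */
            if (addr < t[2]) return CLEAN_NONE;
            if (addr < t[3]) return CLEAN_SYSC_TIF;
            if (addr < t[4]) return CLEAN_SYSC_RESTORE; /* exit half done */
            if (addr < t[5]) return CLEAN_NONE;
            if (addr < t[6]) return CLEAN_IO_TIF;
            if (addr < t[7]) return CLEAN_IO_RESTORE;
            return CLEAN_NONE;
    }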
928 | cleanup_system_call: | 818 | cleanup_system_call: |
929 | mvc __LC_RETURN_PSW(8),0(%r12) | 819 | # check if stpt has been executed |
930 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | 820 | cl %r9,BASED(cleanup_system_call_insn) |
931 | bh BASED(0f) | 821 | jh 0f |
932 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
933 | c %r12,BASED(.Lmck_old_psw) | ||
934 | be BASED(0f) | ||
935 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 822 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
936 | 0: c %r12,BASED(.Lmck_old_psw) | 823 | chi %r11,__LC_SAVE_AREA_ASYNC |
937 | la %r12,__LC_SAVE_AREA+32 | 824 | je 0f |
938 | be BASED(0f) | 825 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
939 | la %r12,__LC_SAVE_AREA+16 | 826 | 0: # check if stm has been executed |
940 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | 827 | cl %r9,BASED(cleanup_system_call_insn+4) |
941 | bhe BASED(cleanup_vtime) | 828 | jh 0f |
942 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | 829 | mvc __LC_SAVE_AREA_SYNC(32),0(%r11) |
943 | bh BASED(0f) | 830 | 0: # set up saved registers r12, and r13 |
944 | mvc __LC_SAVE_AREA(16),0(%r12) | 831 | st %r12,16(%r11) # r12 thread-info pointer |
945 | 0: st %r13,4(%r12) | 832 | st %r13,20(%r11) # r13 literal-pool pointer |
946 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | 833 | # check if the user time calculation has been done |
947 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 834 | cl %r9,BASED(cleanup_system_call_insn+8) |
948 | st %r15,12(%r12) | 835 | jh 0f |
949 | CREATE_STACK_FRAME __LC_SAVE_AREA | 836 | l %r10,__LC_EXIT_TIMER |
950 | mvc 0(4,%r12),__LC_THREAD_INFO | 837 | l %r15,__LC_EXIT_TIMER+4 |
951 | l %r12,__LC_THREAD_INFO | 838 | SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER |
952 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | 839 | ADD64 %r10,%r15,__LC_USER_TIMER |
953 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 840 | st %r10,__LC_USER_TIMER |
954 | oi __TI_flags+3(%r12),_TIF_SYSCALL | 841 | st %r15,__LC_USER_TIMER+4 |
955 | cleanup_vtime: | 842 | 0: # check if the system time calculation has been done |
956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 843 | cl %r9,BASED(cleanup_system_call_insn+12) |
957 | bhe BASED(cleanup_stime) | 844 | jh 0f |
958 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 845 | l %r10,__LC_LAST_UPDATE_TIMER |
959 | cleanup_stime: | 846 | l %r15,__LC_LAST_UPDATE_TIMER+4 |
960 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) | 847 | SUB64 %r10,%r15,__LC_EXIT_TIMER |
961 | bh BASED(cleanup_update) | 848 | ADD64 %r10,%r15,__LC_SYSTEM_TIMER |
962 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 849 | st %r10,__LC_SYSTEM_TIMER |
963 | cleanup_update: | 850 | st %r15,__LC_SYSTEM_TIMER+4 |
851 | 0: # update accounting time stamp | ||
964 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 852 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
965 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | 853 | # set up saved register 11 |
966 | la %r12,__LC_RETURN_PSW | 854 | l %r15,__LC_KERNEL_STACK |
855 | ahi %r15,-__PT_SIZE | ||
856 | st %r15,12(%r11) # r11 pt_regs pointer | ||
857 | # fill pt_regs | ||
858 | mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC | ||
859 | stm %r0,%r7,__PT_R0(%r15) | ||
860 | mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
861 | mvc __PT_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
862 | # setup saved register 15 | ||
863 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
864 | st %r15,28(%r11) # r15 stack pointer | ||
865 | # set new psw address and exit | ||
866 | l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 | ||
967 | br %r14 | 867 | br %r14 |
968 | cleanup_system_call_insn: | 868 | cleanup_system_call_insn: |
969 | .long sysc_saveall + 0x80000000 | ||
970 | .long system_call + 0x80000000 | 869 | .long system_call + 0x80000000 |
971 | .long sysc_vtime + 0x80000000 | 870 | .long sysc_stm + 0x80000000 |
972 | .long sysc_stime + 0x80000000 | 871 | .long sysc_vtime + 0x80000000 + 36 |
973 | .long sysc_update + 0x80000000 | 872 | .long sysc_vtime + 0x80000000 + 76 |
974 | 873 | ||
975 | cleanup_sysc_tif: | 874 | cleanup_sysc_tif: |
976 | mvc __LC_RETURN_PSW(4),0(%r12) | 875 | l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 |
977 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif) | ||
978 | la %r12,__LC_RETURN_PSW | ||
979 | br %r14 | 876 | br %r14 |
980 | 877 | ||
981 | cleanup_sysc_restore: | 878 | cleanup_sysc_restore: |
982 | clc 4(4,%r12),BASED(cleanup_sysc_restore_insn) | 879 | cl %r9,BASED(cleanup_sysc_restore_insn) |
983 | be BASED(2f) | 880 | jhe 0f |
984 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | 881 | l %r9,12(%r11) # get saved pointer to pt_regs |
985 | c %r12,BASED(.Lmck_old_psw) | 882 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) |
986 | be BASED(0f) | 883 | mvc 0(32,%r11),__PT_R8(%r9) |
987 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 884 | lm %r0,%r7,__PT_R0(%r9) |
988 | 0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4) | 885 | 0: lm %r8,%r9,__LC_RETURN_PSW |
989 | be BASED(2f) | ||
990 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | ||
991 | c %r12,BASED(.Lmck_old_psw) | ||
992 | la %r12,__LC_SAVE_AREA+32 | ||
993 | be BASED(1f) | ||
994 | la %r12,__LC_SAVE_AREA+16 | ||
995 | 1: mvc 0(16,%r12),SP_R12(%r15) | ||
996 | lm %r0,%r11,SP_R0(%r15) | ||
997 | l %r15,SP_R15(%r15) | ||
998 | 2: la %r12,__LC_RETURN_PSW | ||
999 | br %r14 | 886 | br %r14 |
1000 | cleanup_sysc_restore_insn: | 887 | cleanup_sysc_restore_insn: |
1001 | .long sysc_done - 4 + 0x80000000 | 888 | .long sysc_done - 4 + 0x80000000 |
1002 | .long sysc_done - 8 + 0x80000000 | ||
1003 | 889 | ||
1004 | cleanup_io_tif: | 890 | cleanup_io_tif: |
1005 | mvc __LC_RETURN_PSW(4),0(%r12) | 891 | l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 |
1006 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif) | ||
1007 | la %r12,__LC_RETURN_PSW | ||
1008 | br %r14 | 892 | br %r14 |
1009 | 893 | ||
1010 | cleanup_io_restore: | 894 | cleanup_io_restore: |
1011 | clc 4(4,%r12),BASED(cleanup_io_restore_insn) | 895 | cl %r9,BASED(cleanup_io_restore_insn) |
1012 | be BASED(1f) | 896 | jhe 0f |
1013 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | 897 | l %r9,12(%r11) # get saved r11 pointer to pt_regs |
1014 | clc 4(4,%r12),BASED(cleanup_io_restore_insn+4) | 898 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) |
1015 | be BASED(1f) | 899 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit |
1016 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 900 | mvc 0(32,%r11),__PT_R8(%r9) |
1017 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | 901 | lm %r0,%r7,__PT_R0(%r9) |
1018 | lm %r0,%r11,SP_R0(%r15) | 902 | 0: lm %r8,%r9,__LC_RETURN_PSW |
1019 | l %r15,SP_R15(%r15) | ||
1020 | 1: la %r12,__LC_RETURN_PSW | ||
1021 | br %r14 | 903 | br %r14 |
1022 | cleanup_io_restore_insn: | 904 | cleanup_io_restore_insn: |
1023 | .long io_done - 4 + 0x80000000 | 905 | .long io_done - 4 + 0x80000000 |
1024 | .long io_done - 8 + 0x80000000 | ||
1025 | 906 | ||
1026 | /* | 907 | /* |
1027 | * Integer constants | 908 | * Integer constants |
1028 | */ | 909 | */ |
1029 | .align 4 | 910 | .align 4 |
1030 | .Lc_spsize: .long SP_SIZE | 911 | .Lnr_syscalls: .long NR_syscalls |
1031 | .Lc_overhead: .long STACK_FRAME_OVERHEAD | ||
1032 | .Lnr_syscalls: .long NR_syscalls | ||
1033 | .L0x018: .short 0x018 | ||
1034 | .L0x020: .short 0x020 | ||
1035 | .L0x028: .short 0x028 | ||
1036 | .L0x030: .short 0x030 | ||
1037 | .L0x038: .short 0x038 | ||
1038 | .Lc_1: .long 1 | ||
1039 | 912 | ||
1040 | /* | 913 | /* |
1041 | * Symbol constants | 914 | * Symbol constants |
1042 | */ | 915 | */ |
1043 | .Ls390_mcck: .long s390_do_machine_check | 916 | .Ldo_machine_check: .long s390_do_machine_check |
1044 | .Ls390_handle_mcck: | 917 | .Lhandle_mcck: .long s390_handle_mcck |
1045 | .long s390_handle_mcck | 918 | .Ldo_IRQ: .long do_IRQ |
1046 | .Lmck_old_psw: .long __LC_MCK_OLD_PSW | 919 | .Ldo_extint: .long do_extint |
1047 | .Ldo_IRQ: .long do_IRQ | 920 | .Ldo_signal: .long do_signal |
1048 | .Ldo_extint: .long do_extint | 921 | .Ldo_notify_resume: .long do_notify_resume |
1049 | .Ldo_signal: .long do_signal | 922 | .Ldo_per_trap: .long do_per_trap |
1050 | .Ldo_notify_resume: | 923 | .Ldo_execve: .long do_execve |
1051 | .long do_notify_resume | 924 | .Lexecve_tail: .long execve_tail |
1052 | .Lhandle_per: .long do_per_trap | 925 | .Ljump_table: .long pgm_check_table |
1053 | .Ldo_execve: .long do_execve | 926 | .Lschedule: .long schedule |
1054 | .Lexecve_tail: .long execve_tail | ||
1055 | .Ljump_table: .long pgm_check_table | ||
1056 | .Lschedule: .long schedule | ||
1057 | #ifdef CONFIG_PREEMPT | 927 | #ifdef CONFIG_PREEMPT |
1058 | .Lpreempt_schedule_irq: | 928 | .Lpreempt_irq: .long preempt_schedule_irq |
1059 | .long preempt_schedule_irq | ||
1060 | #endif | 929 | #endif |
1061 | .Ltrace_entry: .long do_syscall_trace_enter | 930 | .Ltrace_enter: .long do_syscall_trace_enter |
1062 | .Ltrace_exit: .long do_syscall_trace_exit | 931 | .Ltrace_exit: .long do_syscall_trace_exit |
1063 | .Lschedtail: .long schedule_tail | 932 | .Lschedule_tail: .long schedule_tail |
1064 | .Lsysc_table: .long sys_call_table | 933 | .Lsys_call_table: .long sys_call_table |
934 | .Lsysc_per: .long sysc_per + 0x80000000 | ||
1065 | #ifdef CONFIG_TRACE_IRQFLAGS | 935 | #ifdef CONFIG_TRACE_IRQFLAGS |
1066 | .Ltrace_irq_on_caller: | 936 | .Lhardirqs_on: .long trace_hardirqs_on_caller |
1067 | .long trace_hardirqs_on_caller | 937 | .Lhardirqs_off: .long trace_hardirqs_off_caller |
1068 | .Ltrace_irq_off_caller: | ||
1069 | .long trace_hardirqs_off_caller | ||
1070 | #endif | 938 | #endif |
1071 | #ifdef CONFIG_LOCKDEP | 939 | #ifdef CONFIG_LOCKDEP |
1072 | .Llockdep_sys_exit: | 940 | .Llockdep_sys_exit: .long lockdep_sys_exit |
1073 | .long lockdep_sys_exit | ||
1074 | #endif | 941 | #endif |
1075 | .Lcritical_start: | 942 | .Lcritical_start: .long __critical_start + 0x80000000 |
1076 | .long __critical_start + 0x80000000 | 943 | .Lcritical_length: .long __critical_end - __critical_start |
1077 | .Lcritical_end: | ||
1078 | .long __critical_end + 0x80000000 | ||
1079 | .Lcleanup_critical: | ||
1080 | .long cleanup_critical | ||
1081 | 944 | ||
1082 | .section .rodata, "a" | 945 | .section .rodata, "a" |
1083 | #define SYSCALL(esa,esame,emu) .long esa | 946 | #define SYSCALL(esa,esame,emu) .long esa |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 130fb02305c1..73845a9e587c 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -1,3 +1,4 @@ | |||
1 | |||
1 | /* | 2 | /* |
2 | * arch/s390/kernel/entry64.S | 3 | * arch/s390/kernel/entry64.S |
3 | * S390 low-level entry points. | 4 | * S390 low-level entry points. |
@@ -19,32 +20,22 @@ | |||
19 | #include <asm/unistd.h> | 20 | #include <asm/unistd.h> |
20 | #include <asm/page.h> | 21 | #include <asm/page.h> |
21 | 22 | ||
22 | /* | 23 | __PT_R0 = __PT_GPRS |
23 | * Stack layout for the system_call stack entry. | 24 | __PT_R1 = __PT_GPRS + 8 |
24 | * The first few entries are identical to the user_regs_struct. | 25 | __PT_R2 = __PT_GPRS + 16 |
25 | */ | 26 | __PT_R3 = __PT_GPRS + 24 |
26 | SP_PTREGS = STACK_FRAME_OVERHEAD | 27 | __PT_R4 = __PT_GPRS + 32 |
27 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | 28 | __PT_R5 = __PT_GPRS + 40 |
28 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | 29 | __PT_R6 = __PT_GPRS + 48 |
29 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | 30 | __PT_R7 = __PT_GPRS + 56 |
30 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | 31 | __PT_R8 = __PT_GPRS + 64 |
31 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | 32 | __PT_R9 = __PT_GPRS + 72 |
32 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | 33 | __PT_R10 = __PT_GPRS + 80 |
33 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | 34 | __PT_R11 = __PT_GPRS + 88 |
34 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | 35 | __PT_R12 = __PT_GPRS + 96 |
35 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | 36 | __PT_R13 = __PT_GPRS + 104 |
36 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | 37 | __PT_R14 = __PT_GPRS + 112 |
37 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64 | 38 | __PT_R15 = __PT_GPRS + 120 |
38 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72 | ||
39 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80 | ||
40 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88 | ||
41 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96 | ||
42 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | ||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | ||
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | ||
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | ||
46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE | ||
47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | ||
48 | 39 | ||
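The old SP_* constants folded STACK_FRAME_OVERHEAD into every offset because the code addressed the register save area through %r15; the new __PT_Rn constants are plain offsets into struct pt_regs, which the rewritten handlers address through %r11. __PT_GPRS itself comes from asm-offsets.c, and each register is 8 bytes on 64-bit. As a hedged C illustration (the struct is a simplified stand-in, not the full s390 pt_regs):

    #include <stddef.h>

    struct pt_regs_sketch {                 /* simplified stand-in */
            unsigned long args[1];          /* __PT_ARGS */
            unsigned long psw[2];           /* __PT_PSW  */
            unsigned long gprs[16];         /* __PT_GPRS */
            unsigned long orig_gpr2;        /* __PT_ORIG_GPR2 */
    };

    /* __PT_Rn in the assembly is simply the offset of gprs[n]. */
    #define PT_R(n) (offsetof(struct pt_regs_sketch, gprs) + 8 * (n))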
49 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 40 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
50 | STACK_SIZE = 1 << STACK_SHIFT | 41 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -59,154 +50,103 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) | |||
59 | 50 | ||
60 | #define BASED(name) name-system_call(%r13) | 51 | #define BASED(name) name-system_call(%r13) |
61 | 52 | ||
62 | .macro SPP newpp | ||
63 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
64 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP | ||
65 | jz .+8 | ||
66 | .insn s,0xb2800000,\newpp | ||
67 | #endif | ||
68 | .endm | ||
69 | |||
70 | .macro HANDLE_SIE_INTERCEPT | ||
71 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
72 | tm __TI_flags+6(%r12),_TIF_SIE>>8 | ||
73 | jz 0f | ||
74 | SPP BASED(.Lhost_id) # set host id | ||
75 | clc SP_PSW+8(8,%r15),BASED(.Lsie_loop) | ||
76 | jl 0f | ||
77 | clc SP_PSW+8(8,%r15),BASED(.Lsie_done) | ||
78 | jhe 0f | ||
79 | mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop) | ||
80 | 0: | ||
81 | #endif | ||
82 | .endm | ||
83 | |||
84 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
85 | .macro TRACE_IRQS_ON | 53 | .macro TRACE_IRQS_ON |
54 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
86 | basr %r2,%r0 | 55 | basr %r2,%r0 |
87 | brasl %r14,trace_hardirqs_on_caller | 56 | brasl %r14,trace_hardirqs_on_caller |
57 | #endif | ||
88 | .endm | 58 | .endm |
89 | 59 | ||
90 | .macro TRACE_IRQS_OFF | 60 | .macro TRACE_IRQS_OFF |
61 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
91 | basr %r2,%r0 | 62 | basr %r2,%r0 |
92 | brasl %r14,trace_hardirqs_off_caller | 63 | brasl %r14,trace_hardirqs_off_caller |
93 | .endm | ||
94 | #else | ||
95 | #define TRACE_IRQS_ON | ||
96 | #define TRACE_IRQS_OFF | ||
97 | #endif | 64 | #endif |
65 | .endm | ||
98 | 66 | ||
99 | #ifdef CONFIG_LOCKDEP | ||
100 | .macro LOCKDEP_SYS_EXIT | 67 | .macro LOCKDEP_SYS_EXIT |
101 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 68 | #ifdef CONFIG_LOCKDEP |
102 | jz 0f | 69 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
70 | jz .+10 | ||
103 | brasl %r14,lockdep_sys_exit | 71 | brasl %r14,lockdep_sys_exit |
104 | 0: | ||
105 | .endm | ||
106 | #else | ||
107 | #define LOCKDEP_SYS_EXIT | ||
108 | #endif | 72 | #endif |
109 | |||
110 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | ||
111 | lg %r10,\lc_from | ||
112 | slg %r10,\lc_to | ||
113 | alg %r10,\lc_sum | ||
114 | stg %r10,\lc_sum | ||
115 | .endm | 73 | .endm |
116 | 74 | ||
117 | /* | 75 | .macro SPP newpp |
118 | * Register usage in interrupt handlers: | 76 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
119 | * R9 - pointer to current task structure | 77 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP |
120 | * R13 - pointer to literal pool | 78 | jz .+8 |
121 | * R14 - return register for function calls | 79 | .insn s,0xb2800000,\newpp |
122 | * R15 - kernel stack pointer | 80 | #endif |
123 | */ | 81 | .endm |
124 | 82 | ||
125 | .macro SAVE_ALL_SVC psworg,savearea | 83 | .macro HANDLE_SIE_INTERCEPT scratch |
126 | stmg %r11,%r15,\savearea | 84 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
127 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | 85 | tm __TI_flags+6(%r12),_TIF_SIE>>8 |
128 | aghi %r15,-SP_SIZE # make room for registers & psw | 86 | jz .+42 |
129 | lg %r11,__LC_LAST_BREAK | 87 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP |
88 | jz .+8 | ||
89 | .insn s,0xb2800000,BASED(.Lhost_id) # set host id | ||
90 | lgr \scratch,%r9 | ||
91 | slg \scratch,BASED(.Lsie_loop) | ||
92 | clg \scratch,BASED(.Lsie_length) | ||
93 | jhe .+10 | ||
94 | lg %r9,BASED(.Lsie_loop) | ||
95 | #endif | ||
130 | .endm | 96 | .endm |
131 | 97 | ||
132 | .macro SAVE_ALL_PGM psworg,savearea | 98 | .macro CHECK_STACK stacksize,savearea |
133 | stmg %r11,%r15,\savearea | ||
134 | tm \psworg+1,0x01 # test problem state bit | ||
135 | #ifdef CONFIG_CHECK_STACK | 99 | #ifdef CONFIG_CHECK_STACK |
136 | jnz 1f | 100 | tml %r15,\stacksize - CONFIG_STACK_GUARD |
137 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 101 | lghi %r14,\savearea |
138 | jnz 2f | 102 | jz stack_overflow |
139 | la %r12,\psworg | ||
140 | j stack_overflow | ||
141 | #else | ||
142 | jz 2f | ||
143 | #endif | 103 | #endif |
144 | 1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
145 | 2: aghi %r15,-SP_SIZE # make room for registers & psw | ||
146 | larl %r13,system_call | ||
147 | lg %r11,__LC_LAST_BREAK | ||
148 | .endm | 104 | .endm |
149 | 105 | ||
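CHECK_STACK is the old CONFIG_CHECK_STACK test from SAVE_ALL_PGM/SAVE_ALL_ASYNC turned into its own macro: tml masks the low bits of %r15 with (stack size - CONFIG_STACK_GUARD), and if they are all zero the stack pointer has dropped into the guard area at the bottom of the stack, so the code branches to stack_overflow with the save-area offset preloaded in %r14. A small sketch of the predicate, assuming the usual power-of-two stack size and a 256-byte guard (the assumed CONFIG_STACK_GUARD default):

    #define STACK_GUARD 256UL   /* assumed CONFIG_STACK_GUARD default */

    /* True if sp has sunk into the lowest STACK_GUARD bytes of a
     * power-of-two sized stack, i.e. the stack has overflowed. */
    static int stack_overflowed(unsigned long sp, unsigned long stack_size)
    {
            return (sp & (stack_size - STACK_GUARD)) == 0;
    }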
150 | .macro SAVE_ALL_ASYNC psworg,savearea | 106 | .macro SWITCH_ASYNC savearea,stack,shift |
151 | stmg %r11,%r15,\savearea | 107 | tmhh %r8,0x0001 # interrupting from user ? |
152 | larl %r13,system_call | 108 | jnz 1f |
153 | lg %r11,__LC_LAST_BREAK | 109 | lgr %r14,%r9 |
154 | la %r12,\psworg | 110 | slg %r14,BASED(.Lcritical_start) |
155 | tm \psworg+1,0x01 # test problem state bit | 111 | clg %r14,BASED(.Lcritical_length) |
156 | jnz 1f # from user -> load kernel stack | ||
157 | clc \psworg+8(8),BASED(.Lcritical_end) | ||
158 | jhe 0f | 112 | jhe 0f |
159 | clc \psworg+8(8),BASED(.Lcritical_start) | 113 | lghi %r11,\savearea # inside critical section, do cleanup |
160 | jl 0f | ||
161 | brasl %r14,cleanup_critical | 114 | brasl %r14,cleanup_critical |
162 | tm 1(%r12),0x01 # retest problem state after cleanup | 115 | tmhh %r8,0x0001 # retest problem state after cleanup |
163 | jnz 1f | 116 | jnz 1f |
164 | 0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? | 117 | 0: lg %r14,\stack # are we already on the target stack? |
165 | slgr %r14,%r15 | 118 | slgr %r14,%r15 |
166 | srag %r14,%r14,STACK_SHIFT | 119 | srag %r14,%r14,\shift |
167 | #ifdef CONFIG_CHECK_STACK | ||
168 | jnz 1f | 120 | jnz 1f |
169 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 121 | CHECK_STACK 1<<\shift,\savearea |
170 | jnz 2f | 122 | j 2f |
171 | j stack_overflow | 123 | 1: lg %r15,\stack # load target stack |
172 | #else | 124 | 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
173 | jz 2f | 125 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
174 | #endif | ||
175 | 1: lg %r15,__LC_ASYNC_STACK # load async stack | ||
176 | 2: aghi %r15,-SP_SIZE # make room for registers & psw | ||
177 | .endm | ||
178 | |||
179 | .macro CREATE_STACK_FRAME savearea | ||
180 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
181 | stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | ||
182 | mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack | ||
183 | stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack | ||
184 | .endm | 126 | .endm |
185 | 127 | ||
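SWITCH_ASYNC merges the old SAVE_ALL_ASYNC/CREATE_STACK_FRAME pair: for interrupts from kernel mode it first checks whether the interrupted address lies inside the critical section (running cleanup_critical if so), then keeps %r15 only if it already points into the target stack; interrupts from user space, or a %r15 outside the target stack, switch to the stack passed as argument. Afterwards the macro carves STACK_FRAME_OVERHEAD + __PT_SIZE off the chosen stack and points %r11 at the pt_regs area. The containment test, roughly, in C:

    /*
     * stack_top mirrors the lowcore value passed as \stack
     * (e.g. __LC_ASYNC_STACK); shift is log2 of the stack size.
     */
    static unsigned long pick_async_stack(unsigned long sp, int from_user,
                                          unsigned long stack_top,
                                          unsigned int shift)
    {
            if (!from_user && ((stack_top - sp) >> shift) == 0)
                    return sp;          /* already on the target stack */
            return stack_top;           /* switch stacks */
    }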
186 | .macro RESTORE_ALL psworg,sync | 128 | .macro UPDATE_VTIME scratch,enter_timer |
187 | mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore | 129 | lg \scratch,__LC_EXIT_TIMER |
188 | .if !\sync | 130 | slg \scratch,\enter_timer |
189 | ni \psworg+1,0xfd # clear wait state bit | 131 | alg \scratch,__LC_USER_TIMER |
190 | .endif | 132 | stg \scratch,__LC_USER_TIMER |
191 | lg %r14,__LC_VDSO_PER_CPU | 133 | lg \scratch,__LC_LAST_UPDATE_TIMER |
192 | lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user | 134 | slg \scratch,__LC_EXIT_TIMER |
193 | stpt __LC_EXIT_TIMER | 135 | alg \scratch,__LC_SYSTEM_TIMER |
194 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | 136 | stg \scratch,__LC_SYSTEM_TIMER |
195 | lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user | 137 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer |
196 | lpswe \psworg # back to caller | ||
197 | .endm | 138 | .endm |
198 | 139 | ||
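UPDATE_VTIME now takes a scratch register and the enter timer and performs both accounting steps that previously needed two macro invocations plus an mvc: user time grows by the CPU time spent since the last kernel exit, system time by the time between the last accounting update and that exit, and the last-update stamp is refreshed from the enter timer. In C, with the lowcore fields as plain variables (the CPU timer is a down-counter, so old minus new is the elapsed amount):

    struct vtime {
            unsigned long long exit_timer, last_update_timer;
            unsigned long long user_timer, system_timer;
    };

    static void update_vtime(struct vtime *v, unsigned long long enter_timer)
    {
            v->user_timer   += v->exit_timer - enter_timer;           /* user ran */
            v->system_timer += v->last_update_timer - v->exit_timer;  /* kernel ran */
            v->last_update_timer = enter_timer;
    }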
199 | .macro LAST_BREAK | 140 | .macro LAST_BREAK scratch |
200 | srag %r10,%r11,23 | 141 | srag \scratch,%r10,23 |
201 | jz 0f | 142 | jz .+10 |
202 | stg %r11,__TI_last_break(%r12) | 143 | stg %r10,__TI_last_break(%r12) |
203 | 0: | ||
204 | .endm | 144 | .endm |
205 | 145 | ||
206 | .macro REENABLE_IRQS | 146 | .macro REENABLE_IRQS |
207 | mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) | 147 | stg %r8,__LC_RETURN_PSW |
208 | ni __SF_EMPTY(%r15),0xbf | 148 | ni __LC_RETURN_PSW,0xbf |
209 | ssm __SF_EMPTY(%r15) | 149 | ssm __LC_RETURN_PSW |
210 | .endm | 150 | .endm |
211 | 151 | ||
212 | .section .kprobes.text, "ax" | 152 | .section .kprobes.text, "ax" |
@@ -245,55 +185,66 @@ __critical_start: | |||
245 | 185 | ||
246 | ENTRY(system_call) | 186 | ENTRY(system_call) |
247 | stpt __LC_SYNC_ENTER_TIMER | 187 | stpt __LC_SYNC_ENTER_TIMER |
248 | sysc_saveall: | 188 | sysc_stmg: |
249 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 189 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
250 | CREATE_STACK_FRAME __LC_SAVE_AREA | 190 | lg %r10,__LC_LAST_BREAK |
251 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 191 | lg %r12,__LC_THREAD_INFO |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | 192 | larl %r13,system_call |
253 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 193 | sysc_per: |
254 | oi __TI_flags+7(%r12),_TIF_SYSCALL | 194 | lg %r15,__LC_KERNEL_STACK |
195 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
196 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | ||
255 | sysc_vtime: | 197 | sysc_vtime: |
256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 198 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER |
257 | sysc_stime: | 199 | LAST_BREAK %r13 |
258 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 200 | stmg %r0,%r7,__PT_R0(%r11) |
259 | sysc_update: | 201 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 202 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW |
261 | LAST_BREAK | 203 | mvc __PT_SVC_CODE(4,%r11),__LC_SVC_ILC |
262 | sysc_do_svc: | 204 | sysc_do_svc: |
263 | llgh %r7,SP_SVC_CODE+2(%r15) | 205 | oi __TI_flags+7(%r12),_TIF_SYSCALL |
264 | slag %r7,%r7,2 # shift and test for svc 0 | 206 | llgh %r8,__PT_SVC_CODE+2(%r11) |
207 | slag %r8,%r8,2 # shift and test for svc 0 | ||
265 | jnz sysc_nr_ok | 208 | jnz sysc_nr_ok |
266 | # svc 0: system call number in %r1 | 209 | # svc 0: system call number in %r1 |
267 | llgfr %r1,%r1 # clear high word in r1 | 210 | llgfr %r1,%r1 # clear high word in r1 |
268 | cghi %r1,NR_syscalls | 211 | cghi %r1,NR_syscalls |
269 | jnl sysc_nr_ok | 212 | jnl sysc_nr_ok |
270 | sth %r1,SP_SVC_CODE+2(%r15) | 213 | sth %r1,__PT_SVC_CODE+2(%r11) |
271 | slag %r7,%r1,2 # shift and test for svc 0 | 214 | slag %r8,%r1,2 |
272 | sysc_nr_ok: | 215 | sysc_nr_ok: |
273 | larl %r10,sys_call_table | 216 | larl %r10,sys_call_table # 64 bit system call table |
274 | #ifdef CONFIG_COMPAT | 217 | #ifdef CONFIG_COMPAT |
275 | tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ? | 218 | tm __TI_flags+5(%r12),(_TIF_31BIT>>16) |
276 | jno sysc_noemu | 219 | jno sysc_noemu |
277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | 220 | larl %r10,sys_call_table_emu # 31 bit system call table |
278 | sysc_noemu: | 221 | sysc_noemu: |
279 | #endif | 222 | #endif |
223 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
224 | stg %r2,__PT_ORIG_GPR2(%r11) | ||
225 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | ||
226 | lgf %r9,0(%r8,%r10) # get system call address ||
280 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 | 227 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | ||
282 | lgf %r8,0(%r7,%r10) # load address of system call routine | ||
283 | jnz sysc_tracesys | 228 | jnz sysc_tracesys |
284 | basr %r14,%r8 # call sys_xxxx | 229 | basr %r14,%r9 # call sys_xxxx |
285 | stg %r2,SP_R2(%r15) # store return value (change R2 on stack) | 230 | stg %r2,__PT_R2(%r11) # store return value |
286 | 231 | ||
287 | sysc_return: | 232 | sysc_return: |
288 | LOCKDEP_SYS_EXIT | 233 | LOCKDEP_SYS_EXIT |
289 | sysc_tif: | 234 | sysc_tif: |
290 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 235 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
291 | jno sysc_restore | 236 | jno sysc_restore |
292 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | 237 | tm __TI_flags+7(%r12),_TIF_WORK_SVC |
293 | jnz sysc_work # there is work to do (signals etc.) | 238 | jnz sysc_work # check for work |
294 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL | 239 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL |
295 | sysc_restore: | 240 | sysc_restore: |
296 | RESTORE_ALL __LC_RETURN_PSW,1 | 241 | lg %r14,__LC_VDSO_PER_CPU |
242 | lmg %r0,%r10,__PT_R0(%r11) | ||
243 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
244 | stpt __LC_EXIT_TIMER | ||
245 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
246 | lmg %r11,%r15,__PT_R11(%r11) | ||
247 | lpswe __LC_RETURN_PSW | ||
297 | sysc_done: | 248 | sysc_done: |
298 | 249 | ||
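The syscall number handling above keeps the old conventions: a zero svc interruption code means the real number was passed in %r1 (svc 0), out-of-range numbers fall back to offset 0 of the table (which, per the comment at sysc_sigpending, returns -ENOSYS), and the offset is number*4 because the tables in .rodata are built from .long entries. A C sketch of the offset computation; NR_syscalls stands in for the value from asm/unistd.h:

    #define NR_syscalls 349     /* illustrative; the real value comes from asm/unistd.h */

    /* Turn the svc interruption code (plus %r1 for svc 0) into a byte
     * offset into sys_call_table / sys_call_table_emu. */
    static unsigned long svc_table_offset(unsigned int svc_code, unsigned long r1)
    {
            unsigned long nr = svc_code & 0xffff;

            if (nr == 0) {                  /* svc 0: number in %r1 */
                    nr = (unsigned int) r1;
                    if (nr >= NR_syscalls)
                            nr = 0;         /* entry 0 returns -ENOSYS */
            }
            return nr * 4;                  /* 4-byte (.long) table entries */
    }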
299 | # | 250 | # |
@@ -317,7 +268,7 @@ sysc_work: | |||
317 | # | 268 | # |
318 | sysc_reschedule: | 269 | sysc_reschedule: |
319 | larl %r14,sysc_return | 270 | larl %r14,sysc_return |
320 | jg schedule # return point is sysc_return | 271 | jg schedule |
321 | 272 | ||
322 | # | 273 | # |
323 | # _TIF_MCCK_PENDING is set, call handler | 274 | # _TIF_MCCK_PENDING is set, call handler |
@@ -331,33 +282,33 @@ sysc_mcck_pending: | |||
331 | # | 282 | # |
332 | sysc_sigpending: | 283 | sysc_sigpending: |
333 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 284 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
334 | la %r2,SP_PTREGS(%r15) # load pt_regs | 285 | lgr %r2,%r11 # pass pointer to pt_regs |
335 | brasl %r14,do_signal # call do_signal | 286 | brasl %r14,do_signal |
336 | tm __TI_flags+7(%r12),_TIF_SYSCALL | 287 | tm __TI_flags+7(%r12),_TIF_SYSCALL |
337 | jno sysc_return | 288 | jno sysc_return |
338 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 289 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
339 | lghi %r7,0 # svc 0 returns -ENOSYS | 290 | lghi %r8,0 # svc 0 returns -ENOSYS |
340 | lh %r1,SP_SVC_CODE+2(%r15) # load new svc number | 291 | lh %r1,__PT_SVC_CODE+2(%r11) # load new svc number |
341 | cghi %r1,NR_syscalls | 292 | cghi %r1,NR_syscalls |
342 | jnl sysc_nr_ok # invalid svc number -> do svc 0 | 293 | jnl sysc_nr_ok # invalid svc number -> do svc 0 |
343 | slag %r7,%r1,2 | 294 | slag %r8,%r1,2 |
344 | j sysc_nr_ok # restart svc | 295 | j sysc_nr_ok # restart svc |
345 | 296 | ||
346 | # | 297 | # |
347 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 298 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
348 | # | 299 | # |
349 | sysc_notify_resume: | 300 | sysc_notify_resume: |
350 | la %r2,SP_PTREGS(%r15) # load pt_regs | 301 | lgr %r2,%r11 # pass pointer to pt_regs |
351 | larl %r14,sysc_return | 302 | larl %r14,sysc_return |
352 | jg do_notify_resume # call do_notify_resume | 303 | jg do_notify_resume |
353 | 304 | ||
354 | # | 305 | # |
355 | # _TIF_PER_TRAP is set, call do_per_trap | 306 | # _TIF_PER_TRAP is set, call do_per_trap |
356 | # | 307 | # |
357 | sysc_singlestep: | 308 | sysc_singlestep: |
358 | ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) | 309 | ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
359 | la %r2,SP_PTREGS(%r15) # address of register-save area | 310 | lgr %r2,%r11 # pass pointer to pt_regs |
360 | larl %r14,sysc_return # load adr. of system return | 311 | larl %r14,sysc_return |
361 | jg do_per_trap | 312 | jg do_per_trap |
362 | 313 | ||
363 | # | 314 | # |
@@ -365,41 +316,41 @@ sysc_singlestep: | |||
365 | # and after the system call | 316 | # and after the system call |
366 | # | 317 | # |
367 | sysc_tracesys: | 318 | sysc_tracesys: |
368 | la %r2,SP_PTREGS(%r15) # load pt_regs | 319 | lgr %r2,%r11 # pass pointer to pt_regs |
369 | la %r3,0 | 320 | la %r3,0 |
370 | llgh %r0,SP_SVC_CODE+2(%r15) | 321 | llgh %r0,__PT_SVC_CODE+2(%r11) |
371 | stg %r0,SP_R2(%r15) | 322 | stg %r0,__PT_R2(%r11) |
372 | brasl %r14,do_syscall_trace_enter | 323 | brasl %r14,do_syscall_trace_enter |
373 | lghi %r0,NR_syscalls | 324 | lghi %r0,NR_syscalls |
374 | clgr %r0,%r2 | 325 | clgr %r0,%r2 |
375 | jnh sysc_tracenogo | 326 | jnh sysc_tracenogo |
376 | sllg %r7,%r2,2 # svc number *4 | 327 | sllg %r8,%r2,2 |
377 | lgf %r8,0(%r7,%r10) | 328 | lgf %r9,0(%r8,%r10) |
378 | sysc_tracego: | 329 | sysc_tracego: |
379 | lmg %r3,%r6,SP_R3(%r15) | 330 | lmg %r3,%r7,__PT_R3(%r11) |
380 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | 331 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
381 | lg %r2,SP_ORIG_R2(%r15) | 332 | lg %r2,__PT_ORIG_GPR2(%r11) |
382 | basr %r14,%r8 # call sys_xxx | 333 | basr %r14,%r9 # call sys_xxx |
383 | stg %r2,SP_R2(%r15) # store return value | 334 | stg %r2,__PT_R2(%r11) # store return value |
384 | sysc_tracenogo: | 335 | sysc_tracenogo: |
385 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 | 336 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
386 | jz sysc_return | 337 | jz sysc_return |
387 | la %r2,SP_PTREGS(%r15) # load pt_regs | 338 | lgr %r2,%r11 # pass pointer to pt_regs |
388 | larl %r14,sysc_return # return point is sysc_return | 339 | larl %r14,sysc_return |
389 | jg do_syscall_trace_exit | 340 | jg do_syscall_trace_exit |
390 | 341 | ||
391 | # | 342 | # |
392 | # a new process exits the kernel with ret_from_fork | 343 | # a new process exits the kernel with ret_from_fork |
393 | # | 344 | # |
394 | ENTRY(ret_from_fork) | 345 | ENTRY(ret_from_fork) |
395 | lg %r13,__LC_SVC_NEW_PSW+8 | 346 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
396 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 347 | lg %r12,__LC_THREAD_INFO |
397 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | 348 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
398 | jo 0f | 349 | jo 0f |
399 | stg %r15,SP_R15(%r15) # store stack pointer for new kthread | 350 | stg %r15,__PT_R15(%r11) # store stack pointer for new kthread |
400 | 0: brasl %r14,schedule_tail | 351 | 0: brasl %r14,schedule_tail |
401 | TRACE_IRQS_ON | 352 | TRACE_IRQS_ON |
402 | stosm 24(%r15),0x03 # reenable interrupts | 353 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
403 | j sysc_tracenogo | 354 | j sysc_tracenogo |
404 | 355 | ||
405 | # | 356 | # |
@@ -409,26 +360,26 @@ ENTRY(ret_from_fork) | |||
409 | ENTRY(kernel_execve) | 360 | ENTRY(kernel_execve) |
410 | stmg %r12,%r15,96(%r15) | 361 | stmg %r12,%r15,96(%r15) |
411 | lgr %r14,%r15 | 362 | lgr %r14,%r15 |
412 | aghi %r15,-SP_SIZE | 363 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
413 | stg %r14,__SF_BACKCHAIN(%r15) | 364 | stg %r14,__SF_BACKCHAIN(%r15) |
414 | la %r12,SP_PTREGS(%r15) | 365 | la %r12,STACK_FRAME_OVERHEAD(%r15) |
415 | xc 0(__PT_SIZE,%r12),0(%r12) | 366 | xc 0(__PT_SIZE,%r12),0(%r12) |
416 | lgr %r5,%r12 | 367 | lgr %r5,%r12 |
417 | brasl %r14,do_execve | 368 | brasl %r14,do_execve |
418 | ltgfr %r2,%r2 | 369 | ltgfr %r2,%r2 |
419 | je 0f | 370 | je 0f |
420 | aghi %r15,SP_SIZE | 371 | aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) |
421 | lmg %r12,%r15,96(%r15) | 372 | lmg %r12,%r15,96(%r15) |
422 | br %r14 | 373 | br %r14 |
423 | # execve succeeded. | 374 | # execve succeeded. |
424 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 375 | 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
425 | lg %r15,__LC_KERNEL_STACK # load ksp | 376 | lg %r15,__LC_KERNEL_STACK # load ksp |
426 | aghi %r15,-SP_SIZE # make room for registers & psw | 377 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
427 | lg %r13,__LC_SVC_NEW_PSW+8 | 378 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
428 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 379 | mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs |
429 | lg %r12,__LC_THREAD_INFO | 380 | lg %r12,__LC_THREAD_INFO |
430 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 381 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
431 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 382 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
432 | brasl %r14,execve_tail | 383 | brasl %r14,execve_tail |
433 | j sysc_return | 384 | j sysc_return |
434 | 385 | ||
@@ -437,127 +388,72 @@ ENTRY(kernel_execve) | |||
437 | */ | 388 | */ |
438 | 389 | ||
439 | ENTRY(pgm_check_handler) | 390 | ENTRY(pgm_check_handler) |
440 | /* | ||
441 | * First we need to check for a special case: | ||
442 | * Single stepping an instruction that disables the PER event mask will | ||
443 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | ||
444 | * For a single stepped SVC the program check handler gets control after | ||
445 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | ||
446 | * then handle the PER event. Therefore we update the SVC old PSW to point | ||
447 | * to the pgm_check_handler and branch to the SVC handler after we checked | ||
448 | * if we have to load the kernel stack register. | ||
449 | * For every other possible cause for PER event without the PER mask set | ||
450 | * we just ignore the PER event (FIXME: is there anything we have to do | ||
451 | * for LPSW?). | ||
452 | */ | ||
453 | stpt __LC_SYNC_ENTER_TIMER | 391 | stpt __LC_SYNC_ENTER_TIMER |
454 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 392 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
455 | jnz pgm_per # got per exception -> special case | 393 | lg %r10,__LC_LAST_BREAK |
456 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 394 | lg %r12,__LC_THREAD_INFO |
457 | CREATE_STACK_FRAME __LC_SAVE_AREA | 395 | larl %r13,system_call |
458 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | 396 | lmg %r8,%r9,__LC_PGM_OLD_PSW |
459 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 397 | HANDLE_SIE_INTERCEPT %r14 |
460 | HANDLE_SIE_INTERCEPT | 398 | tmhh %r8,0x0001 # test problem state bit |
461 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 399 | jnz 1f # -> fault in user space |
462 | jz pgm_no_vtime | 400 | tmhh %r8,0x4000 # PER bit set in old PSW ? |
463 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 401 | jnz 0f # -> enabled, can't be a double fault |
464 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 402 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
465 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 403 | jnz pgm_svcper # -> single stepped svc |
466 | LAST_BREAK | 404 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
467 | pgm_no_vtime: | 405 | j 2f |
468 | stg %r11,SP_ARGS(%r15) | 406 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER |
469 | lgf %r3,__LC_PGM_ILC # load program interruption code | 407 | LAST_BREAK %r14 |
470 | lg %r4,__LC_TRANS_EXC_CODE | 408 | lg %r15,__LC_KERNEL_STACK |
471 | REENABLE_IRQS | 409 | 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
472 | lghi %r8,0x7f | 410 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
473 | ngr %r8,%r3 | 411 | stmg %r0,%r7,__PT_R0(%r11) |
474 | sll %r8,3 | 412 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
475 | larl %r1,pgm_check_table | 413 | stmg %r8,%r9,__PT_PSW(%r11) |
476 | lg %r1,0(%r8,%r1) # load address of handler routine | 414 | stg %r10,__PT_ARGS(%r11) |
477 | la %r2,SP_PTREGS(%r15) # address of register-save area | 415 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
478 | basr %r14,%r1 # branch to interrupt-handler | 416 | jz 0f |
479 | pgm_exit: | ||
480 | j sysc_return | ||
481 | |||
482 | # | ||
483 | # handle per exception | ||
484 | # | ||
485 | pgm_per: | ||
486 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on | ||
487 | jnz pgm_per_std # ok, normal per event from user space | ||
488 | # ok its one of the special cases, now we need to find out which one | ||
489 | clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW | ||
490 | je pgm_svcper | ||
491 | # no interesting special case, ignore PER event | ||
492 | lpswe __LC_PGM_OLD_PSW | ||
493 | |||
494 | # | ||
495 | # Normal per exception | ||
496 | # | ||
497 | pgm_per_std: | ||
498 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | ||
499 | CREATE_STACK_FRAME __LC_SAVE_AREA | ||
500 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | ||
501 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
502 | HANDLE_SIE_INTERCEPT | ||
503 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
504 | jz pgm_no_vtime2 | ||
505 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
506 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
507 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
508 | LAST_BREAK | ||
509 | pgm_no_vtime2: | ||
510 | lg %r1,__TI_task(%r12) | 417 | lg %r1,__TI_task(%r12) |
511 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 418 | tmhh %r8,0x0001 # kernel per event ? |
512 | jz kernel_per | 419 | jz pgm_kprobe |
513 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | 420 | oi __TI_flags+7(%r12),_TIF_PER_TRAP |
514 | mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS | 421 | mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS |
422 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | ||
515 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID | 423 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID |
516 | oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | 424 | 0: lgf %r3,__LC_PGM_ILC # load program interruption code |
517 | lgf %r3,__LC_PGM_ILC # load program interruption code | ||
518 | lg %r4,__LC_TRANS_EXC_CODE | 425 | lg %r4,__LC_TRANS_EXC_CODE |
519 | REENABLE_IRQS | 426 | REENABLE_IRQS |
520 | lghi %r8,0x7f | 427 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
521 | ngr %r8,%r3 # clear per-event-bit and ilc | 428 | lghi %r10,0x7f |
522 | je pgm_exit2 | 429 | ngr %r10,%r3 |
523 | sll %r8,3 | 430 | je sysc_return |
431 | sll %r10,3 | ||
524 | larl %r1,pgm_check_table | 432 | larl %r1,pgm_check_table |
525 | lg %r1,0(%r8,%r1) # load address of handler routine | 433 | lg %r1,0(%r10,%r1) # load address of handler routine |
526 | la %r2,SP_PTREGS(%r15) # address of register-save area | 434 | lgr %r2,%r11 # pass pointer to pt_regs |
527 | basr %r14,%r1 # branch to interrupt-handler | 435 | basr %r14,%r1 # branch to interrupt-handler |
528 | pgm_exit2: | ||
529 | j sysc_return | 436 | j sysc_return |
530 | 437 | ||
531 | # | 438 | # |
532 | # it was a single stepped SVC that is causing all the trouble | 439 | # PER event in supervisor state, must be kprobes |
533 | # | 440 | # |
534 | pgm_svcper: | 441 | pgm_kprobe: |
535 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 442 | REENABLE_IRQS |
536 | CREATE_STACK_FRAME __LC_SAVE_AREA | 443 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
537 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 444 | lgr %r2,%r11 # pass pointer to pt_regs |
538 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | 445 | brasl %r14,do_per_trap |
539 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 446 | j sysc_return |
540 | oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
541 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
542 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
543 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
544 | LAST_BREAK | ||
545 | lg %r8,__TI_task(%r12) | ||
546 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | ||
547 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS | ||
548 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | ||
549 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
550 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
551 | j sysc_do_svc | ||
552 | 447 | ||
553 | # | 448 | # |
554 | # per was called from kernel, must be kprobes | 449 | # single stepped system call |
555 | # | 450 | # |
556 | kernel_per: | 451 | pgm_svcper: |
557 | REENABLE_IRQS | 452 | oi __TI_flags+7(%r12),_TIF_PER_TRAP |
558 | la %r2,SP_PTREGS(%r15) # address of register-save area | 453 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
559 | brasl %r14,do_per_trap | 454 | larl %r14,sysc_per |
560 | j pgm_exit | 455 | stg %r14,__LC_RETURN_PSW+8 |
456 | lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs | ||
561 | 457 | ||
562 | /* | 458 | /* |
563 | * IO interrupt handler routine | 459 | * IO interrupt handler routine |
@@ -565,21 +461,25 @@ kernel_per: | |||
565 | ENTRY(io_int_handler) | 461 | ENTRY(io_int_handler) |
566 | stck __LC_INT_CLOCK | 462 | stck __LC_INT_CLOCK |
567 | stpt __LC_ASYNC_ENTER_TIMER | 463 | stpt __LC_ASYNC_ENTER_TIMER |
568 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 | 464 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
569 | CREATE_STACK_FRAME __LC_SAVE_AREA+40 | 465 | lg %r10,__LC_LAST_BREAK |
570 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | 466 | lg %r12,__LC_THREAD_INFO |
571 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 467 | larl %r13,system_call |
572 | HANDLE_SIE_INTERCEPT | 468 | lmg %r8,%r9,__LC_IO_OLD_PSW |
573 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 469 | HANDLE_SIE_INTERCEPT %r14 |
574 | jz io_no_vtime | 470 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
575 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 471 | tmhh %r8,0x0001 # interrupting from user? |
576 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 472 | jz io_skip |
577 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 473 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
578 | LAST_BREAK | 474 | LAST_BREAK %r14 |
579 | io_no_vtime: | 475 | io_skip: |
476 | stmg %r0,%r7,__PT_R0(%r11) | ||
477 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
478 | stmg %r8,%r9,__PT_PSW(%r11) | ||
580 | TRACE_IRQS_OFF | 479 | TRACE_IRQS_OFF |
581 | la %r2,SP_PTREGS(%r15) # address of register-save area | 480 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
582 | brasl %r14,do_IRQ # call standard irq handler | 481 | lgr %r2,%r11 # pass pointer to pt_regs |
482 | brasl %r14,do_IRQ | ||
583 | io_return: | 483 | io_return: |
584 | LOCKDEP_SYS_EXIT | 484 | LOCKDEP_SYS_EXIT |
585 | TRACE_IRQS_ON | 485 | TRACE_IRQS_ON |
@@ -587,7 +487,14 @@ io_tif: | |||
587 | tm __TI_flags+7(%r12),_TIF_WORK_INT | 487 | tm __TI_flags+7(%r12),_TIF_WORK_INT |
588 | jnz io_work # there is work to do (signals etc.) | 488 | jnz io_work # there is work to do (signals etc.) |
589 | io_restore: | 489 | io_restore: |
590 | RESTORE_ALL __LC_RETURN_PSW,0 | 490 | lg %r14,__LC_VDSO_PER_CPU |
491 | lmg %r0,%r10,__PT_R0(%r11) | ||
492 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
493 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit | ||
494 | stpt __LC_EXIT_TIMER | ||
495 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
496 | lmg %r11,%r15,__PT_R11(%r11) | ||
497 | lpswe __LC_RETURN_PSW | ||
591 | io_done: | 498 | io_done: |
592 | 499 | ||
593 | # | 500 | # |
@@ -600,7 +507,7 @@ io_done: | |||
600 | # Before any work can be done, a switch to the kernel stack is required. | 507 | # Before any work can be done, a switch to the kernel stack is required. |
601 | # | 508 | # |
602 | io_work: | 509 | io_work: |
603 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 510 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
604 | jo io_work_user # yes -> do resched & signal | 511 | jo io_work_user # yes -> do resched & signal |
605 | #ifdef CONFIG_PREEMPT | 512 | #ifdef CONFIG_PREEMPT |
606 | # check for preemptive scheduling | 513 | # check for preemptive scheduling |
@@ -609,10 +516,11 @@ io_work: | |||
609 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 516 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
610 | jno io_restore | 517 | jno io_restore |
611 | # switch to kernel stack | 518 | # switch to kernel stack |
612 | lg %r1,SP_R15(%r15) | 519 | lg %r1,__PT_R15(%r11) |
613 | aghi %r1,-SP_SIZE | 520 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
614 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 521 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
615 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 522 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
523 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
616 | lgr %r15,%r1 | 524 | lgr %r15,%r1 |
617 | # TRACE_IRQS_ON already done at io_return, call | 525 | # TRACE_IRQS_ON already done at io_return, call |
618 | # TRACE_IRQS_OFF to keep things symmetrical | 526 | # TRACE_IRQS_OFF to keep things symmetrical |
@@ -628,9 +536,10 @@ io_work: | |||
628 | # | 536 | # |
629 | io_work_user: | 537 | io_work_user: |
630 | lg %r1,__LC_KERNEL_STACK | 538 | lg %r1,__LC_KERNEL_STACK |
631 | aghi %r1,-SP_SIZE | 539 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
632 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 540 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
633 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 541 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
542 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
634 | lgr %r15,%r1 | 543 | lgr %r15,%r1 |
635 | 544 | ||
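(Editorial note on the two stack switches just above.) The io_work preemption path and io_work_user use the same pattern, which recurs again in the machine-check path further down: reserve STACK_FRAME_OVERHEAD plus __PT_SIZE on the target stack, copy the current pt_regs there, clear the back chain, and continue with %r11/%r15 pointing at the new area. A rough C model of that pattern follows; the struct layout, the 160-byte frame size and the helper name are stand-ins for the asm-offsets definitions, not code from this patch.

#include <string.h>

#define STACK_FRAME_OVERHEAD	160	/* assumed 64-bit stack frame size */

struct pt_regs {			/* stand-in; real layout comes from asm-offsets */
	unsigned long psw[2];
	unsigned long gprs[16];
	unsigned long orig_gpr2;
	unsigned int svc_code;
};

/* hypothetical helper mirroring the asm sequence */
static unsigned long switch_stack(unsigned long new_stack,
				  const struct pt_regs *old,
				  struct pt_regs **new_regs)
{
	unsigned long sp;

	/* aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) */
	sp = new_stack - (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
	/* mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) */
	memcpy((void *)(sp + STACK_FRAME_OVERHEAD), old, sizeof(struct pt_regs));
	/* xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) */
	*(unsigned long *)sp = 0;
	/* la %r11,STACK_FRAME_OVERHEAD(%r1) */
	*new_regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
	return sp;			/* becomes the new %r15 */
}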
636 | # | 545 | # |
@@ -663,9 +572,9 @@ io_mcck_pending: | |||
663 | # | 572 | # |
664 | io_reschedule: | 573 | io_reschedule: |
665 | # TRACE_IRQS_ON already done at io_return | 574 | # TRACE_IRQS_ON already done at io_return |
666 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 575 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
667 | brasl %r14,schedule # call scheduler | 576 | brasl %r14,schedule # call scheduler |
668 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 577 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
669 | TRACE_IRQS_OFF | 578 | TRACE_IRQS_OFF |
670 | j io_return | 579 | j io_return |
671 | 580 | ||
@@ -674,10 +583,10 @@ io_reschedule: | |||
674 | # | 583 | # |
675 | io_sigpending: | 584 | io_sigpending: |
676 | # TRACE_IRQS_ON already done at io_return | 585 | # TRACE_IRQS_ON already done at io_return |
677 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 586 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
678 | la %r2,SP_PTREGS(%r15) # load pt_regs | 587 | lgr %r2,%r11 # pass pointer to pt_regs |
679 | brasl %r14,do_signal # call do_signal | 588 | brasl %r14,do_signal |
680 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 589 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
681 | TRACE_IRQS_OFF | 590 | TRACE_IRQS_OFF |
682 | j io_return | 591 | j io_return |
683 | 592 | ||
@@ -686,10 +595,10 @@ io_sigpending: | |||
686 | # | 595 | # |
687 | io_notify_resume: | 596 | io_notify_resume: |
688 | # TRACE_IRQS_ON already done at io_return | 597 | # TRACE_IRQS_ON already done at io_return |
689 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 598 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
690 | la %r2,SP_PTREGS(%r15) # load pt_regs | 599 | lgr %r2,%r11 # pass pointer to pt_regs |
691 | brasl %r14,do_notify_resume # call do_notify_resume | 600 | brasl %r14,do_notify_resume |
692 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 601 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
693 | TRACE_IRQS_OFF | 602 | TRACE_IRQS_OFF |
694 | j io_return | 603 | j io_return |
695 | 604 | ||
@@ -699,21 +608,24 @@ io_notify_resume: | |||
699 | ENTRY(ext_int_handler) | 608 | ENTRY(ext_int_handler) |
700 | stck __LC_INT_CLOCK | 609 | stck __LC_INT_CLOCK |
701 | stpt __LC_ASYNC_ENTER_TIMER | 610 | stpt __LC_ASYNC_ENTER_TIMER |
702 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 | 611 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
703 | CREATE_STACK_FRAME __LC_SAVE_AREA+40 | 612 | lg %r10,__LC_LAST_BREAK |
704 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | 613 | lg %r12,__LC_THREAD_INFO |
705 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 614 | larl %r13,system_call |
706 | HANDLE_SIE_INTERCEPT | 615 | lmg %r8,%r9,__LC_EXT_OLD_PSW |
707 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 616 | HANDLE_SIE_INTERCEPT %r14 |
708 | jz ext_no_vtime | 617 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
709 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 618 | tmhh %r8,0x0001 # interrupting from user ? |
710 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 619 | jz ext_skip |
711 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 620 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
712 | LAST_BREAK | 621 | LAST_BREAK %r14 |
713 | ext_no_vtime: | 622 | ext_skip: |
623 | stmg %r0,%r7,__PT_R0(%r11) | ||
624 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
625 | stmg %r8,%r9,__PT_PSW(%r11) | ||
714 | TRACE_IRQS_OFF | 626 | TRACE_IRQS_OFF |
715 | lghi %r1,4096 | 627 | lghi %r1,4096 |
716 | la %r2,SP_PTREGS(%r15) # address of register-save area | 628 | lgr %r2,%r11 # pass pointer to pt_regs |
717 | llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code | 629 | llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code |
718 | llgf %r4,__LC_EXT_PARAMS # get external parameter | 630 | llgf %r4,__LC_EXT_PARAMS # get external parameter |
719 | lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter | 631 | lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter |
@@ -730,81 +642,77 @@ ENTRY(mcck_int_handler) | |||
730 | la %r1,4095 # revalidate r1 | 642 | la %r1,4095 # revalidate r1 |
731 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer | 643 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer |
732 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs | 644 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs |
733 | stmg %r11,%r15,__LC_SAVE_AREA+80 | 645 | lg %r10,__LC_LAST_BREAK |
646 | lg %r12,__LC_THREAD_INFO | ||
734 | larl %r13,system_call | 647 | larl %r13,system_call |
735 | lg %r11,__LC_LAST_BREAK | 648 | lmg %r8,%r9,__LC_MCK_OLD_PSW |
736 | la %r12,__LC_MCK_OLD_PSW | 649 | HANDLE_SIE_INTERCEPT %r14 |
737 | tm __LC_MCCK_CODE,0x80 # system damage? | 650 | tm __LC_MCCK_CODE,0x80 # system damage? |
738 | jo mcck_int_main # yes -> rest of mcck code invalid | 651 | jo mcck_panic # yes -> rest of mcck code invalid |
739 | la %r14,4095 | 652 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA |
740 | mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) | 653 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
741 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 654 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
742 | jo 1f | 655 | jo 3f |
743 | la %r14,__LC_SYNC_ENTER_TIMER | 656 | la %r14,__LC_SYNC_ENTER_TIMER |
744 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | 657 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER |
745 | jl 0f | 658 | jl 0f |
746 | la %r14,__LC_ASYNC_ENTER_TIMER | 659 | la %r14,__LC_ASYNC_ENTER_TIMER |
747 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | 660 | 0: clc 0(8,%r14),__LC_EXIT_TIMER |
748 | jl 0f | 661 | jl 1f |
749 | la %r14,__LC_EXIT_TIMER | 662 | la %r14,__LC_EXIT_TIMER |
750 | 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | 663 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER |
751 | jl 0f | 664 | jl 2f |
752 | la %r14,__LC_LAST_UPDATE_TIMER | 665 | la %r14,__LC_LAST_UPDATE_TIMER |
753 | 0: spt 0(%r14) | 666 | 2: spt 0(%r14) |
754 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 667 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
755 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 668 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
756 | jno mcck_int_main # no -> skip cleanup critical | 669 | jno mcck_panic # no -> skip cleanup critical |
757 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 670 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT |
758 | jnz mcck_int_main # from user -> load kernel stack | 671 | tm %r8,0x0001 # interrupting from user ? |
759 | clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end) | 672 | jz mcck_skip |
760 | jhe mcck_int_main | 673 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
761 | clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start) | 674 | LAST_BREAK %r14 |
762 | jl mcck_int_main | 675 | mcck_skip: |
763 | brasl %r14,cleanup_critical | 676 | lghi %r14,__LC_GPREGS_SAVE_AREA |
764 | mcck_int_main: | 677 | mvc __PT_R0(128,%r11),0(%r14) |
765 | lg %r14,__LC_PANIC_STACK # are we already on the panic stack? | 678 | stmg %r8,%r9,__PT_PSW(%r11) |
766 | slgr %r14,%r15 | 679 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
767 | srag %r14,%r14,PAGE_SHIFT | 680 | lgr %r2,%r11 # pass pointer to pt_regs |
768 | jz 0f | ||
769 | lg %r15,__LC_PANIC_STACK # load panic stack | ||
770 | 0: aghi %r15,-SP_SIZE # make room for registers & psw | ||
771 | CREATE_STACK_FRAME __LC_SAVE_AREA+80 | ||
772 | mvc SP_PSW(16,%r15),0(%r12) | ||
773 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
774 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | ||
775 | jno mcck_no_vtime # no -> no timer update | ||
776 | HANDLE_SIE_INTERCEPT | ||
777 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
778 | jz mcck_no_vtime | ||
779 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER | ||
780 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
781 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
782 | LAST_BREAK | ||
783 | mcck_no_vtime: | ||
784 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
785 | brasl %r14,s390_do_machine_check | 681 | brasl %r14,s390_do_machine_check |
786 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 682 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
787 | jno mcck_return | 683 | jno mcck_return |
788 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack | 684 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack |
789 | aghi %r1,-SP_SIZE | 685 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
790 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 686 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
791 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 687 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
688 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
792 | lgr %r15,%r1 | 689 | lgr %r15,%r1 |
793 | stosm __SF_EMPTY(%r15),0x04 # turn dat on | 690 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
794 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 691 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
795 | jno mcck_return | 692 | jno mcck_return |
796 | TRACE_IRQS_OFF | 693 | TRACE_IRQS_OFF |
797 | brasl %r14,s390_handle_mcck | 694 | brasl %r14,s390_handle_mcck |
798 | TRACE_IRQS_ON | 695 | TRACE_IRQS_ON |
799 | mcck_return: | 696 | mcck_return: |
800 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW | 697 | lg %r14,__LC_VDSO_PER_CPU |
698 | lmg %r0,%r10,__PT_R0(%r11) | ||
699 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | ||
801 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 700 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
802 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 | ||
803 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 701 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
804 | jno 0f | 702 | jno 0f |
805 | stpt __LC_EXIT_TIMER | 703 | stpt __LC_EXIT_TIMER |
806 | 0: lpswe __LC_RETURN_MCCK_PSW # back to caller | 704 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
807 | mcck_done: | 705 | 0: lmg %r11,%r15,__PT_R11(%r11) |
706 | lpswe __LC_RETURN_MCCK_PSW | ||
707 | |||
708 | mcck_panic: | ||
709 | lg %r14,__LC_PANIC_STACK | ||
710 | slgr %r14,%r15 | ||
711 | srag %r14,%r14,PAGE_SHIFT | ||
712 | jz 0f | ||
713 | lg %r15,__LC_PANIC_STACK | ||
714 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
715 | j mcck_skip | ||
808 | 716 | ||
809 | /* | 717 | /* |
810 | * Restart interruption handler, kick starter for additional CPUs | 718 | * Restart interruption handler, kick starter for additional CPUs |
@@ -818,17 +726,18 @@ restart_base: | |||
818 | stck __LC_LAST_UPDATE_CLOCK | 726 | stck __LC_LAST_UPDATE_CLOCK |
819 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) | 727 | mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) |
820 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) | 728 | mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) |
821 | lg %r15,__LC_SAVE_AREA+120 # load ksp | 729 | lghi %r10,__LC_GPREGS_SAVE_AREA |
730 | lg %r15,120(%r10) # load ksp | ||
822 | lghi %r10,__LC_CREGS_SAVE_AREA | 731 | lghi %r10,__LC_CREGS_SAVE_AREA |
823 | lctlg %c0,%c15,0(%r10) # get new ctl regs | 732 | lctlg %c0,%c15,0(%r10) # get new ctl regs |
824 | lghi %r10,__LC_AREGS_SAVE_AREA | 733 | lghi %r10,__LC_AREGS_SAVE_AREA |
825 | lam %a0,%a15,0(%r10) | 734 | lam %a0,%a15,0(%r10) |
826 | lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone | 735 | lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone |
827 | lg %r1,__LC_THREAD_INFO | 736 | lg %r1,__LC_THREAD_INFO |
828 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) | 737 | mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) |
829 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) | 738 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) |
830 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER | 739 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER |
831 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 740 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
832 | brasl %r14,start_secondary | 741 | brasl %r14,start_secondary |
833 | .align 8 | 742 | .align 8 |
834 | restart_vtime: | 743 | restart_vtime: |
@@ -852,16 +761,16 @@ restart_go: | |||
852 | # PSW restart interrupt handler | 761 | # PSW restart interrupt handler |
853 | # | 762 | # |
854 | ENTRY(psw_restart_int_handler) | 763 | ENTRY(psw_restart_int_handler) |
855 | stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 | 764 | stg %r15,__LC_SAVE_AREA_RESTART |
856 | larl %r15,restart_stack # load restart stack | 765 | larl %r15,restart_stack # load restart stack |
857 | lg %r15,0(%r15) | 766 | lg %r15,0(%r15) |
858 | aghi %r15,-SP_SIZE # make room for pt_regs | 767 | aghi %r15,-__PT_SIZE # create pt_regs on stack |
859 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 768 | stmg %r0,%r14,__PT_R0(%r15) |
860 | mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack | 769 | mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART |
861 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw | 770 | mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw |
862 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 771 | aghi %r15,-STACK_FRAME_OVERHEAD |
772 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
863 | brasl %r14,do_restart | 773 | brasl %r14,do_restart |
864 | |||
865 | larl %r14,restart_psw_crash # load disabled wait PSW if | 774 | larl %r14,restart_psw_crash # load disabled wait PSW if |
866 | lpswe 0(%r14) # do_restart returns | 775 | lpswe 0(%r14) # do_restart returns |
867 | .align 8 | 776 | .align 8 |
@@ -877,172 +786,153 @@ restart_psw_crash: | |||
877 | * Setup a pt_regs so that show_trace can provide a good call trace. | 786 | * Setup a pt_regs so that show_trace can provide a good call trace. |
878 | */ | 787 | */ |
879 | stack_overflow: | 788 | stack_overflow: |
880 | lg %r15,__LC_PANIC_STACK # change to panic stack | 789 | lg %r11,__LC_PANIC_STACK # change to panic stack |
881 | aghi %r15,-SP_SIZE | 790 | aghi %r11,-__PT_SIZE # create pt_regs |
882 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | 791 | stmg %r0,%r7,__PT_R0(%r11) |
883 | stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack | 792 | stmg %r8,%r9,__PT_PSW(%r11) |
884 | la %r1,__LC_SAVE_AREA | 793 | mvc __PT_R8(64,%r11),0(%r14) |
885 | chi %r12,__LC_SVC_OLD_PSW | 794 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 |
886 | je 0f | 795 | lgr %r15,%r11 |
887 | chi %r12,__LC_PGM_OLD_PSW | 796 | aghi %r15,-STACK_FRAME_OVERHEAD |
888 | je 0f | 797 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
889 | la %r1,__LC_SAVE_AREA+40 | 798 | lgr %r2,%r11 # pass pointer to pt_regs |
890 | 0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack | ||
891 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | ||
892 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain | ||
893 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
894 | jg kernel_stack_overflow | 799 | jg kernel_stack_overflow |
895 | #endif | 800 | #endif |
896 | 801 | ||
897 | cleanup_table_system_call: | 802 | .align 8 |
898 | .quad system_call, sysc_do_svc | 803 | cleanup_table: |
899 | cleanup_table_sysc_tif: | 804 | .quad system_call |
900 | .quad sysc_tif, sysc_restore | 805 | .quad sysc_do_svc |
901 | cleanup_table_sysc_restore: | 806 | .quad sysc_tif |
902 | .quad sysc_restore, sysc_done | 807 | .quad sysc_restore |
903 | cleanup_table_io_tif: | 808 | .quad sysc_done |
904 | .quad io_tif, io_restore | 809 | .quad io_tif |
905 | cleanup_table_io_restore: | 810 | .quad io_restore |
906 | .quad io_restore, io_done | 811 | .quad io_done |
907 | 812 | ||
908 | cleanup_critical: | 813 | cleanup_critical: |
909 | clc 8(8,%r12),BASED(cleanup_table_system_call) | 814 | clg %r9,BASED(cleanup_table) # system_call |
910 | jl 0f | 815 | jl 0f |
911 | clc 8(8,%r12),BASED(cleanup_table_system_call+8) | 816 | clg %r9,BASED(cleanup_table+8) # sysc_do_svc |
912 | jl cleanup_system_call | 817 | jl cleanup_system_call |
913 | 0: | 818 | clg %r9,BASED(cleanup_table+16) # sysc_tif |
914 | clc 8(8,%r12),BASED(cleanup_table_sysc_tif) | ||
915 | jl 0f | 819 | jl 0f |
916 | clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8) | 820 | clg %r9,BASED(cleanup_table+24) # sysc_restore |
917 | jl cleanup_sysc_tif | 821 | jl cleanup_sysc_tif |
918 | 0: | 822 | clg %r9,BASED(cleanup_table+32) # sysc_done |
919 | clc 8(8,%r12),BASED(cleanup_table_sysc_restore) | ||
920 | jl 0f | ||
921 | clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8) | ||
922 | jl cleanup_sysc_restore | 823 | jl cleanup_sysc_restore |
923 | 0: | 824 | clg %r9,BASED(cleanup_table+40) # io_tif |
924 | clc 8(8,%r12),BASED(cleanup_table_io_tif) | ||
925 | jl 0f | 825 | jl 0f |
926 | clc 8(8,%r12),BASED(cleanup_table_io_tif+8) | 826 | clg %r9,BASED(cleanup_table+48) # io_restore |
927 | jl cleanup_io_tif | 827 | jl cleanup_io_tif |
928 | 0: | 828 | clg %r9,BASED(cleanup_table+56) # io_done |
929 | clc 8(8,%r12),BASED(cleanup_table_io_restore) | ||
930 | jl 0f | ||
931 | clc 8(8,%r12),BASED(cleanup_table_io_restore+8) | ||
932 | jl cleanup_io_restore | 829 | jl cleanup_io_restore |
933 | 0: | 830 | 0: br %r14 |
934 | br %r14 | 831 | |
935 | 832 | ||
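(Editorial note.) cleanup_critical above replaces the old per-label clc compares with a single sorted table of label addresses: %r9 holds the address at which the critical section was interrupted, and the first table entry that lies above it selects the fixup routine. A compact C rendition of that dispatch follows, returning an index instead of branching; the table contents are the eight labels listed in cleanup_table.

#define CLEANUP_TABLE_ENTRIES	8

/* filled with the label addresses, ascending:
 * system_call, sysc_do_svc, sysc_tif, sysc_restore, sysc_done,
 * io_tif, io_restore, io_done
 */
static unsigned long cleanup_table[CLEANUP_TABLE_ENTRIES];

static int pick_cleanup(unsigned long addr)
{
	int i;

	/* first entry still above the interrupted address */
	for (i = 0; i < CLEANUP_TABLE_ENTRIES; i++)
		if (addr < cleanup_table[i])	/* clg %r9,...; jl */
			break;
	switch (i) {
	case 1: return 0;	/* system_call  <= addr < sysc_do_svc  -> cleanup_system_call  */
	case 3: return 1;	/* sysc_tif     <= addr < sysc_restore -> cleanup_sysc_tif     */
	case 4: return 2;	/* sysc_restore <= addr < sysc_done    -> cleanup_sysc_restore */
	case 6: return 3;	/* io_tif       <= addr < io_restore   -> cleanup_io_tif       */
	case 7: return 4;	/* io_restore   <= addr < io_done      -> cleanup_io_restore   */
	default: return -1;	/* outside every critical range: plain br %r14 */
	}
}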
936 | cleanup_system_call: | 833 | cleanup_system_call: |
937 | mvc __LC_RETURN_PSW(16),0(%r12) | 834 | # check if stpt has been executed |
938 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | 835 | clg %r9,BASED(cleanup_system_call_insn) |
939 | jh 0f | 836 | jh 0f |
940 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
941 | cghi %r12,__LC_MCK_OLD_PSW | ||
942 | je 0f | ||
943 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 837 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
944 | 0: cghi %r12,__LC_MCK_OLD_PSW | 838 | cghi %r11,__LC_SAVE_AREA_ASYNC |
945 | la %r12,__LC_SAVE_AREA+80 | ||
946 | je 0f | 839 | je 0f |
947 | la %r12,__LC_SAVE_AREA+40 | 840 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
948 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | 841 | 0: # check if stmg has been executed |
949 | jhe cleanup_vtime | 842 | clg %r9,BASED(cleanup_system_call_insn+8) |
950 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | ||
951 | jh 0f | 843 | jh 0f |
952 | mvc __LC_SAVE_AREA(40),0(%r12) | 844 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) |
953 | 0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | 845 | 0: # check if base register setup + TIF bit load has been done |
954 | aghi %r15,-SP_SIZE # make room for registers & psw | 846 | clg %r9,BASED(cleanup_system_call_insn+16) |
955 | stg %r15,32(%r12) | 847 | jhe 0f |
956 | stg %r11,0(%r12) | 848 | # set up saved registers r10 and r12 |
957 | CREATE_STACK_FRAME __LC_SAVE_AREA | 849 | stg %r10,16(%r11) # r10 last break |
958 | mvc 8(8,%r12),__LC_THREAD_INFO | 850 | stg %r12,32(%r11) # r12 thread-info pointer |
959 | lg %r12,__LC_THREAD_INFO | 851 | 0: # check if the user time update has been done |
960 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | 852 | clg %r9,BASED(cleanup_system_call_insn+24) |
961 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | 853 | jh 0f |
962 | oi __TI_flags+7(%r12),_TIF_SYSCALL | 854 | lg %r15,__LC_EXIT_TIMER |
963 | cleanup_vtime: | 855 | slg %r15,__LC_SYNC_ENTER_TIMER |
964 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 856 | alg %r15,__LC_USER_TIMER |
965 | jhe cleanup_stime | 857 | stg %r15,__LC_USER_TIMER |
966 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 858 | 0: # check if the system time update has been done |
967 | cleanup_stime: | 859 | clg %r9,BASED(cleanup_system_call_insn+32) |
968 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32) | 860 | jh 0f |
969 | jh cleanup_update | 861 | lg %r15,__LC_LAST_UPDATE_TIMER |
970 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 862 | slg %r15,__LC_EXIT_TIMER |
971 | cleanup_update: | 863 | alg %r15,__LC_SYSTEM_TIMER |
864 | stg %r15,__LC_SYSTEM_TIMER | ||
865 | 0: # update accounting time stamp | ||
972 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 866 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
973 | srag %r12,%r11,23 | 867 | # do LAST_BREAK |
974 | lg %r12,__LC_THREAD_INFO | 868 | lg %r9,16(%r11) |
869 | srag %r9,%r9,23 | ||
975 | jz 0f | 870 | jz 0f |
976 | stg %r11,__TI_last_break(%r12) | 871 | mvc __TI_last_break(8,%r12),16(%r11) |
977 | 0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | 872 | 0: # set up saved register r11 |
978 | la %r12,__LC_RETURN_PSW | 873 | lg %r15,__LC_KERNEL_STACK |
874 | aghi %r15,-__PT_SIZE | ||
875 | stg %r15,24(%r11) # r11 pt_regs pointer | ||
876 | # fill pt_regs | ||
877 | mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC | ||
878 | stmg %r0,%r7,__PT_R0(%r15) | ||
879 | mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
880 | mvc __PT_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
881 | # setup saved register r15 | ||
882 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
883 | stg %r15,56(%r11) # r15 stack pointer | ||
884 | # set new psw address and exit | ||
885 | larl %r9,sysc_do_svc | ||
979 | br %r14 | 886 | br %r14 |
980 | cleanup_system_call_insn: | 887 | cleanup_system_call_insn: |
981 | .quad sysc_saveall | ||
982 | .quad system_call | 888 | .quad system_call |
983 | .quad sysc_vtime | 889 | .quad sysc_stmg |
984 | .quad sysc_stime | 890 | .quad sysc_per |
985 | .quad sysc_update | 891 | .quad sysc_vtime+18 |
892 | .quad sysc_vtime+42 | ||
986 | 893 | ||
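(Editorial note.) The two 0:-labelled blocks in cleanup_system_call that replay a missed UPDATE_VTIME simply fold the CPU-timer deltas into the accounting fields and refresh the accounting time stamp. The same arithmetic in C, keeping the lowcore field names but using a stand-in struct:

struct vtime_fields {			/* stand-in for the lowcore members involved */
	unsigned long long sync_enter_timer;
	unsigned long long exit_timer;
	unsigned long long last_update_timer;
	unsigned long long user_timer;
	unsigned long long system_timer;
};

static void replay_vtime(struct vtime_fields *lc)
{
	/* lg/slg/alg/stg on __LC_USER_TIMER */
	lc->user_timer += lc->exit_timer - lc->sync_enter_timer;
	/* lg/slg/alg/stg on __LC_SYSTEM_TIMER */
	lc->system_timer += lc->last_update_timer - lc->exit_timer;
	/* mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER */
	lc->last_update_timer = lc->sync_enter_timer;
}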
987 | cleanup_sysc_tif: | 894 | cleanup_sysc_tif: |
988 | mvc __LC_RETURN_PSW(8),0(%r12) | 895 | larl %r9,sysc_tif |
989 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif) | ||
990 | la %r12,__LC_RETURN_PSW | ||
991 | br %r14 | 896 | br %r14 |
992 | 897 | ||
993 | cleanup_sysc_restore: | 898 | cleanup_sysc_restore: |
994 | clc 8(8,%r12),BASED(cleanup_sysc_restore_insn) | 899 | clg %r9,BASED(cleanup_sysc_restore_insn) |
995 | je 2f | ||
996 | clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8) | ||
997 | jhe 0f | ||
998 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
999 | cghi %r12,__LC_MCK_OLD_PSW | ||
1000 | je 0f | 900 | je 0f |
1001 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 901 | lg %r9,24(%r11) # get saved pointer to pt_regs |
1002 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 902 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1003 | cghi %r12,__LC_MCK_OLD_PSW | 903 | mvc 0(64,%r11),__PT_R8(%r9) |
1004 | la %r12,__LC_SAVE_AREA+80 | 904 | lmg %r0,%r7,__PT_R0(%r9) |
1005 | je 1f | 905 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
1006 | la %r12,__LC_SAVE_AREA+40 | ||
1007 | 1: mvc 0(40,%r12),SP_R11(%r15) | ||
1008 | lmg %r0,%r10,SP_R0(%r15) | ||
1009 | lg %r15,SP_R15(%r15) | ||
1010 | 2: la %r12,__LC_RETURN_PSW | ||
1011 | br %r14 | 906 | br %r14 |
1012 | cleanup_sysc_restore_insn: | 907 | cleanup_sysc_restore_insn: |
1013 | .quad sysc_done - 4 | 908 | .quad sysc_done - 4 |
1014 | .quad sysc_done - 16 | ||
1015 | 909 | ||
1016 | cleanup_io_tif: | 910 | cleanup_io_tif: |
1017 | mvc __LC_RETURN_PSW(8),0(%r12) | 911 | larl %r9,io_tif |
1018 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif) | ||
1019 | la %r12,__LC_RETURN_PSW | ||
1020 | br %r14 | 912 | br %r14 |
1021 | 913 | ||
1022 | cleanup_io_restore: | 914 | cleanup_io_restore: |
1023 | clc 8(8,%r12),BASED(cleanup_io_restore_insn) | 915 | clg %r9,BASED(cleanup_io_restore_insn) |
1024 | je 1f | 916 | je 0f |
1025 | clc 8(8,%r12),BASED(cleanup_io_restore_insn+8) | 917 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
1026 | jhe 0f | 918 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1027 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | 919 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit |
1028 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 920 | mvc 0(64,%r11),__PT_R8(%r9) |
1029 | mvc __LC_SAVE_AREA+80(40),SP_R11(%r15) | 921 | lmg %r0,%r7,__PT_R0(%r9) |
1030 | lmg %r0,%r10,SP_R0(%r15) | 922 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
1031 | lg %r15,SP_R15(%r15) | ||
1032 | 1: la %r12,__LC_RETURN_PSW | ||
1033 | br %r14 | 923 | br %r14 |
1034 | cleanup_io_restore_insn: | 924 | cleanup_io_restore_insn: |
1035 | .quad io_done - 4 | 925 | .quad io_done - 4 |
1036 | .quad io_done - 16 | ||
1037 | 926 | ||
1038 | /* | 927 | /* |
1039 | * Integer constants | 928 | * Integer constants |
1040 | */ | 929 | */ |
1041 | .align 4 | 930 | .align 8 |
1042 | .Lcritical_start: | 931 | .Lcritical_start: |
1043 | .quad __critical_start | 932 | .quad __critical_start |
1044 | .Lcritical_end: | 933 | .Lcritical_length: |
1045 | .quad __critical_end | 934 | .quad __critical_end - __critical_start |
935 | |||
1046 | 936 | ||
1047 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 937 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
1048 | /* | 938 | /* |
@@ -1094,8 +984,8 @@ sie_fault: | |||
1094 | .align 8 | 984 | .align 8 |
1095 | .Lsie_loop: | 985 | .Lsie_loop: |
1096 | .quad sie_loop | 986 | .quad sie_loop |
1097 | .Lsie_done: | 987 | .Lsie_length: |
1098 | .quad sie_done | 988 | .quad sie_done - sie_loop |
1099 | .Lhost_id: | 989 | .Lhost_id: |
1100 | .quad 0 | 990 | .quad 0 |
1101 | 991 | ||
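(Editorial note on the restart path.) With the old save_area[] gone, the stack pointer for a freshly started CPU travels through the gprs save area instead: __cpu_up() (smp.c hunk below) stores the prepared stack frame in gpregs_save_area[15], and restart_base picks it up with "lg %r15,120(%r10)" (offset 120 being the %r15 slot). A minimal sketch of the C side, with stand-in types and a hypothetical helper name:

#include <string.h>

struct stack_frame {			/* stand-in; gprs[] covers %r6..%r15 */
	unsigned long back_chain;
	unsigned long empty1[5];
	unsigned long gprs[10];
};

struct lowcore_stub {			/* only the field used in this sketch */
	unsigned long gpregs_save_area[16];
};

/* hypothetical helper: park the initial kernel stack of a secondary CPU */
static void park_secondary_stack(struct lowcore_stub *lc, unsigned long stack_top)
{
	struct stack_frame *sf;

	sf = (struct stack_frame *)(stack_top - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(*sf));
	sf->gprs[9] = (unsigned long)sf;		/* %r15 slot inside the frame */
	lc->gpregs_save_area[15] = (unsigned long)sf;	/* read back by "lg %r15,120(%r10)" */
}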
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 900068d2bf92..c27a0727f930 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -329,8 +329,8 @@ iplstart: | |||
329 | # | 329 | # |
330 | # reset files in VM reader | 330 | # reset files in VM reader |
331 | # | 331 | # |
332 | stidp __LC_SAVE_AREA # store cpuid | 332 | stidp __LC_SAVE_AREA_SYNC # store cpuid |
333 | tm __LC_SAVE_AREA,0xff # running VM ? | 333 | tm __LC_SAVE_AREA_SYNC,0xff# running VM ? |
334 | bno .Lnoreset | 334 | bno .Lnoreset |
335 | la %r2,.Lreset | 335 | la %r2,.Lreset |
336 | lhi %r3,26 | 336 | lhi %r3,26 |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 732a793ec53a..36b32658fb24 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -17,11 +17,11 @@ | |||
17 | # | 17 | # |
18 | ENTRY(store_status) | 18 | ENTRY(store_status) |
19 | /* Save register one and load save area base */ | 19 | /* Save register one and load save area base */ |
20 | stg %r1,__LC_SAVE_AREA+120(%r0) | 20 | stg %r1,__LC_SAVE_AREA_RESTART |
21 | lghi %r1,SAVE_AREA_BASE | 21 | lghi %r1,SAVE_AREA_BASE |
22 | /* General purpose registers */ | 22 | /* General purpose registers */ |
23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
24 | lg %r2,__LC_SAVE_AREA+120(%r0) | 24 | lg %r2,__LC_SAVE_AREA_RESTART |
25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | 25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) |
26 | /* Control registers */ | 26 | /* Control registers */ |
27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 9cf01e455e50..109e7422bb20 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -654,7 +654,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
654 | - sizeof(struct stack_frame)); | 654 | - sizeof(struct stack_frame)); |
655 | memset(sf, 0, sizeof(struct stack_frame)); | 655 | memset(sf, 0, sizeof(struct stack_frame)); |
656 | sf->gprs[9] = (unsigned long) sf; | 656 | sf->gprs[9] = (unsigned long) sf; |
657 | cpu_lowcore->save_area[15] = (unsigned long) sf; | 657 | cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf; |
658 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); | 658 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); |
659 | atomic_inc(&init_mm.context.attach_count); | 659 | atomic_inc(&init_mm.context.attach_count); |
660 | asm volatile( | 660 | asm volatile( |