author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-19 14:35:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-19 14:35:30 -0400
commit     ba0234ec35127fe21d373db53cbaf9fe20620cb6 (patch)
tree       a2cbef204482512ae9e723f2bf4d22051975ef45 /arch/s390/kernel
parent     537b60d17894b7c19a6060feae40299d7109d6e7 (diff)
parent     939e379e9e183ae6291ac7caa4a5e1dfadae4ccc (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (24 commits)
[S390] drivers/s390/char: Use kmemdup
[S390] drivers/s390/char: Use kstrdup
[S390] debug: enable exception-trace debug facility
[S390] s390_hypfs: Add new attributes
[S390] qdio: remove API wrappers
[S390] qdio: set correct bit in dsci
[S390] qdio: dont convert timestamps to microseconds
[S390] qdio: remove memset hack
[S390] qdio: prevent starvation on PCI devices
[S390] qdio: count number of qdio interrupts
[S390] user space fault: report fault before calling do_exit
[S390] topology: expose core identifier
[S390] dasd: remove uid from devmap
[S390] dasd: add dynamic pav toleration
[S390] vdso: add missing vdso_install target
[S390] vdso: remove redundant check for CONFIG_64BIT
[S390] avoid default_llseek in s390 drivers
[S390] vmcp: disallow modular build
[S390] add breaking event address for user space
[S390] virtualization aware cpu measurement
...
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/asm-offsets.c    6
-rw-r--r--  arch/s390/kernel/debug.c          1
-rw-r--r--  arch/s390/kernel/early.c          4
-rw-r--r--  arch/s390/kernel/entry.S        324
-rw-r--r--  arch/s390/kernel/entry64.S      617
-rw-r--r--  arch/s390/kernel/head.S           4
-rw-r--r--  arch/s390/kernel/nmi.c            3
-rw-r--r--  arch/s390/kernel/processor.c     37
-rw-r--r--  arch/s390/kernel/ptrace.c        68
-rw-r--r--  arch/s390/kernel/s390_ext.c       3
-rw-r--r--  arch/s390/kernel/setup.c         27
-rw-r--r--  arch/s390/kernel/signal.c         2
-rw-r--r--  arch/s390/kernel/topology.c       7
-rw-r--r--  arch/s390/kernel/traps.c         31
-rw-r--r--  arch/s390/kernel/vdso.c           4
-rw-r--r--  arch/s390/kernel/vtime.c         15
16 files changed, 580 insertions, 573 deletions
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a09408952ed0..d9b490a2716e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ int main(void)
39 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); | 39 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); |
40 | DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); | 40 | DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); |
41 | DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); | 41 | DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); |
42 | DEFINE(__TI_last_break, offsetof(struct thread_info, last_break)); | ||
42 | BLANK(); | 43 | BLANK(); |
43 | DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); | 44 | DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); |
44 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); | 45 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); |
@@ -112,6 +113,7 @@ int main(void)
112 | DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); | 113 | DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); |
113 | DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); | 114 | DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); |
114 | DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); | 115 | DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); |
116 | DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer)); | ||
115 | DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); | 117 | DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); |
116 | DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); | 118 | DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); |
117 | DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); | 119 | DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); |
@@ -126,10 +128,12 @@ int main(void)
126 | DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); | 128 | DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); |
127 | DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); | 129 | DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); |
128 | DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce)); | 130 | DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce)); |
129 | DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id)); | ||
130 | DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); | 131 | DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); |
132 | DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); | ||
131 | DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); | 133 | DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); |
132 | DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); | 134 | DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); |
135 | DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook)); | ||
136 | DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); | ||
133 | DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); | 137 | DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); |
134 | DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); | 138 | DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); |
135 | DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); | 139 | DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
655 | p_info->act_entry_offset = 0; | 655 | p_info->act_entry_offset = 0; |
656 | file->private_data = p_info; | 656 | file->private_data = p_info; |
657 | debug_info_get(debug_info); | 657 | debug_info_get(debug_info); |
658 | nonseekable_open(inode, file); | ||
658 | out: | 659 | out: |
659 | mutex_unlock(&debug_mutex); | 660 | mutex_unlock(&debug_mutex); |
660 | return rc; | 661 | return rc; |
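
Note: the debug.c hunk above adds a nonseekable_open() call once the open handler has set up its per-open state. Below is a minimal sketch of that idiom in a file ->open handler; the handler name and the state pointer are placeholders for this note, not code from the patch.

#include <linux/fs.h>

/* Hypothetical ->open handler showing the same idiom as debug_open above:
 * set up per-open state, then mark the file non-seekable so lseek() on it
 * is rejected. */
static int example_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;	/* would point to real per-open state */
	nonseekable_open(inode, file);	/* mark the file non-seekable */
	return 0;
}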
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2d92c2cf92d7..c00856ad4e5a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -356,6 +356,7 @@ static __init void detect_machine_facilities(void)
356 | { | 356 | { |
357 | #ifdef CONFIG_64BIT | 357 | #ifdef CONFIG_64BIT |
358 | unsigned int facilities; | 358 | unsigned int facilities; |
359 | unsigned long long facility_bits; | ||
359 | 360 | ||
360 | facilities = stfl(); | 361 | facilities = stfl(); |
361 | if (facilities & (1 << 28)) | 362 | if (facilities & (1 << 28)) |
@@ -364,6 +365,9 @@ static __init void detect_machine_facilities(void)
364 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; | 365 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; |
365 | if (facilities & (1 << 4)) | 366 | if (facilities & (1 << 4)) |
366 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; | 367 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; |
368 | if ((stfle(&facility_bits, 1) > 0) && | ||
369 | (facility_bits & (1ULL << (63 - 40)))) | ||
370 | S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; | ||
367 | #endif | 371 | #endif |
368 | } | 372 | } |
369 | 373 | ||
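
Note: in the MACHINE_FLAG_SPP hunk above, the stfle facility list stores facility bit 0 in the most significant bit of the first doubleword, which is why facility 40 is tested as (1ULL << (63 - 40)). A small illustrative helper follows; the function name is made up for this note and is not part of the patch.

/* Illustrative only: test facility number `nr` (0..63) in the first
 * stfle doubleword, where bit 0 is the most significant bit. */
static inline int facility_bit_set(unsigned long long dword0, unsigned int nr)
{
	return (dword0 & (1ULL << (63 - nr))) != 0;
}

/* The new check above is then equivalent to: facility_bit_set(facility_bits, 40) */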
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a8..d5e3e6007447 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -73,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
73 | basr %r14,%r1 | 73 | basr %r14,%r1 |
74 | .endm | 74 | .endm |
75 | 75 | ||
76 | .macro TRACE_IRQS_CHECK | 76 | .macro TRACE_IRQS_CHECK_ON |
77 | basr %r2,%r0 | ||
78 | tm SP_PSW(%r15),0x03 # irqs enabled? | 77 | tm SP_PSW(%r15),0x03 # irqs enabled? |
79 | jz 0f | 78 | bz BASED(0f) |
80 | l %r1,BASED(.Ltrace_irq_on_caller) | 79 | TRACE_IRQS_ON |
81 | basr %r14,%r1 | 80 | 0: |
82 | j 1f | 81 | .endm |
83 | 0: l %r1,BASED(.Ltrace_irq_off_caller) | 82 | |
84 | basr %r14,%r1 | 83 | .macro TRACE_IRQS_CHECK_OFF |
85 | 1: | 84 | tm SP_PSW(%r15),0x03 # irqs enabled? |
85 | bz BASED(0f) | ||
86 | TRACE_IRQS_OFF | ||
87 | 0: | ||
86 | .endm | 88 | .endm |
87 | #else | 89 | #else |
88 | #define TRACE_IRQS_ON | 90 | #define TRACE_IRQS_ON |
89 | #define TRACE_IRQS_OFF | 91 | #define TRACE_IRQS_OFF |
90 | #define TRACE_IRQS_CHECK | 92 | #define TRACE_IRQS_CHECK_ON |
93 | #define TRACE_IRQS_CHECK_OFF | ||
91 | #endif | 94 | #endif |
92 | 95 | ||
93 | #ifdef CONFIG_LOCKDEP | 96 | #ifdef CONFIG_LOCKDEP |
@@ -177,9 +180,9 @@ STACK_SIZE = 1 << STACK_SHIFT
177 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 180 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
178 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | 181 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack |
179 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | 182 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 |
180 | icm %r12,3,__LC_SVC_ILC | 183 | icm %r12,12,__LC_SVC_ILC |
181 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 184 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack |
182 | st %r12,SP_SVCNR(%r15) | 185 | st %r12,SP_ILC(%r15) |
183 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack | 186 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack |
184 | la %r12,0 | 187 | la %r12,0 |
185 | st %r12,__SF_BACKCHAIN(%r15) # clear back chain | 188 | st %r12,__SF_BACKCHAIN(%r15) # clear back chain |
@@ -273,66 +276,45 @@ sysc_do_restart:
273 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) | 276 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) |
274 | 277 | ||
275 | sysc_return: | 278 | sysc_return: |
279 | LOCKDEP_SYS_EXIT | ||
280 | sysc_tif: | ||
276 | tm __TI_flags+3(%r9),_TIF_WORK_SVC | 281 | tm __TI_flags+3(%r9),_TIF_WORK_SVC |
277 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 282 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
278 | sysc_restore: | 283 | sysc_restore: |
279 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
280 | la %r1,BASED(sysc_restore_trace_psw_addr) | ||
281 | l %r1,0(%r1) | ||
282 | lpsw 0(%r1) | ||
283 | sysc_restore_trace: | ||
284 | TRACE_IRQS_CHECK | ||
285 | LOCKDEP_SYS_EXIT | ||
286 | #endif | ||
287 | sysc_leave: | ||
288 | RESTORE_ALL __LC_RETURN_PSW,1 | 284 | RESTORE_ALL __LC_RETURN_PSW,1 |
289 | sysc_done: | 285 | sysc_done: |
290 | 286 | ||
291 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
292 | sysc_restore_trace_psw_addr: | ||
293 | .long sysc_restore_trace_psw | ||
294 | |||
295 | .section .data,"aw",@progbits | ||
296 | .align 8 | ||
297 | .globl sysc_restore_trace_psw | ||
298 | sysc_restore_trace_psw: | ||
299 | .long 0, sysc_restore_trace + 0x80000000 | ||
300 | .previous | ||
301 | #endif | ||
302 | |||
303 | # | ||
304 | # recheck if there is more work to do | ||
305 | # | 287 | # |
306 | sysc_work_loop: | 288 | # There is work to do, but first we need to check if we return to userspace. |
307 | tm __TI_flags+3(%r9),_TIF_WORK_SVC | ||
308 | bz BASED(sysc_restore) # there is no work to do | ||
309 | # | ||
310 | # One of the work bits is on. Find out which one. | ||
311 | # | 289 | # |
312 | sysc_work: | 290 | sysc_work: |
313 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 291 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
314 | bno BASED(sysc_restore) | 292 | bno BASED(sysc_restore) |
293 | |||
294 | # | ||
295 | # One of the work bits is on. Find out which one. | ||
296 | # | ||
297 | sysc_work_tif: | ||
315 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING | 298 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING |
316 | bo BASED(sysc_mcck_pending) | 299 | bo BASED(sysc_mcck_pending) |
317 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | 300 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED |
318 | bo BASED(sysc_reschedule) | 301 | bo BASED(sysc_reschedule) |
319 | tm __TI_flags+3(%r9),_TIF_SIGPENDING | 302 | tm __TI_flags+3(%r9),_TIF_SIGPENDING |
320 | bnz BASED(sysc_sigpending) | 303 | bo BASED(sysc_sigpending) |
321 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME | 304 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME |
322 | bnz BASED(sysc_notify_resume) | 305 | bo BASED(sysc_notify_resume) |
323 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC | 306 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC |
324 | bo BASED(sysc_restart) | 307 | bo BASED(sysc_restart) |
325 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | 308 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP |
326 | bo BASED(sysc_singlestep) | 309 | bo BASED(sysc_singlestep) |
327 | b BASED(sysc_restore) | 310 | b BASED(sysc_return) # beware of critical section cleanup |
328 | sysc_work_done: | ||
329 | 311 | ||
330 | # | 312 | # |
331 | # _TIF_NEED_RESCHED is set, call schedule | 313 | # _TIF_NEED_RESCHED is set, call schedule |
332 | # | 314 | # |
333 | sysc_reschedule: | 315 | sysc_reschedule: |
334 | l %r1,BASED(.Lschedule) | 316 | l %r1,BASED(.Lschedule) |
335 | la %r14,BASED(sysc_work_loop) | 317 | la %r14,BASED(sysc_return) |
336 | br %r1 # call scheduler | 318 | br %r1 # call scheduler |
337 | 319 | ||
338 | # | 320 | # |
@@ -340,7 +322,7 @@ sysc_reschedule:
340 | # | 322 | # |
341 | sysc_mcck_pending: | 323 | sysc_mcck_pending: |
342 | l %r1,BASED(.Ls390_handle_mcck) | 324 | l %r1,BASED(.Ls390_handle_mcck) |
343 | la %r14,BASED(sysc_work_loop) | 325 | la %r14,BASED(sysc_return) |
344 | br %r1 # TIF bit will be cleared by handler | 326 | br %r1 # TIF bit will be cleared by handler |
345 | 327 | ||
346 | # | 328 | # |
@@ -355,7 +337,7 @@ sysc_sigpending:
355 | bo BASED(sysc_restart) | 337 | bo BASED(sysc_restart) |
356 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | 338 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP |
357 | bo BASED(sysc_singlestep) | 339 | bo BASED(sysc_singlestep) |
358 | b BASED(sysc_work_loop) | 340 | b BASED(sysc_return) |
359 | 341 | ||
360 | # | 342 | # |
361 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 343 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -363,7 +345,7 @@ sysc_sigpending:
363 | sysc_notify_resume: | 345 | sysc_notify_resume: |
364 | la %r2,SP_PTREGS(%r15) # load pt_regs | 346 | la %r2,SP_PTREGS(%r15) # load pt_regs |
365 | l %r1,BASED(.Ldo_notify_resume) | 347 | l %r1,BASED(.Ldo_notify_resume) |
366 | la %r14,BASED(sysc_work_loop) | 348 | la %r14,BASED(sysc_return) |
367 | br %r1 # call do_notify_resume | 349 | br %r1 # call do_notify_resume |
368 | 350 | ||
369 | 351 | ||
@@ -458,11 +440,13 @@ kernel_execve:
458 | br %r14 | 440 | br %r14 |
459 | # execve succeeded. | 441 | # execve succeeded. |
460 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 442 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts |
443 | TRACE_IRQS_OFF | ||
461 | l %r15,__LC_KERNEL_STACK # load ksp | 444 | l %r15,__LC_KERNEL_STACK # load ksp |
462 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 445 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
463 | l %r9,__LC_THREAD_INFO | 446 | l %r9,__LC_THREAD_INFO |
464 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 447 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs |
465 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 448 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
449 | TRACE_IRQS_ON | ||
466 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 450 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
467 | l %r1,BASED(.Lexecve_tail) | 451 | l %r1,BASED(.Lexecve_tail) |
468 | basr %r14,%r1 | 452 | basr %r14,%r1 |
@@ -499,8 +483,8 @@ pgm_check_handler:
499 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 483 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
500 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 484 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
501 | pgm_no_vtime: | 485 | pgm_no_vtime: |
486 | TRACE_IRQS_CHECK_OFF | ||
502 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 487 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
503 | TRACE_IRQS_OFF | ||
504 | l %r3,__LC_PGM_ILC # load program interruption code | 488 | l %r3,__LC_PGM_ILC # load program interruption code |
505 | la %r8,0x7f | 489 | la %r8,0x7f |
506 | nr %r8,%r3 | 490 | nr %r8,%r3 |
@@ -509,8 +493,10 @@ pgm_do_call:
509 | sll %r8,2 | 493 | sll %r8,2 |
510 | l %r7,0(%r8,%r7) # load address of handler routine | 494 | l %r7,0(%r8,%r7) # load address of handler routine |
511 | la %r2,SP_PTREGS(%r15) # address of register-save area | 495 | la %r2,SP_PTREGS(%r15) # address of register-save area |
512 | la %r14,BASED(sysc_return) | 496 | basr %r14,%r7 # branch to interrupt-handler |
513 | br %r7 # branch to interrupt-handler | 497 | pgm_exit: |
498 | TRACE_IRQS_CHECK_ON | ||
499 | b BASED(sysc_return) | ||
514 | 500 | ||
515 | # | 501 | # |
516 | # handle per exception | 502 | # handle per exception |
@@ -537,19 +523,19 @@ pgm_per_std:
537 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 523 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
538 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 524 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
539 | pgm_no_vtime2: | 525 | pgm_no_vtime2: |
526 | TRACE_IRQS_CHECK_OFF | ||
540 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 527 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
541 | TRACE_IRQS_OFF | ||
542 | l %r1,__TI_task(%r9) | 528 | l %r1,__TI_task(%r9) |
529 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | ||
530 | bz BASED(kernel_per) | ||
543 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | 531 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID |
544 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS | 532 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS |
545 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | 533 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID |
546 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 534 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
547 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | ||
548 | bz BASED(kernel_per) | ||
549 | l %r3,__LC_PGM_ILC # load program interruption code | 535 | l %r3,__LC_PGM_ILC # load program interruption code |
550 | la %r8,0x7f | 536 | la %r8,0x7f |
551 | nr %r8,%r3 # clear per-event-bit and ilc | 537 | nr %r8,%r3 # clear per-event-bit and ilc |
552 | be BASED(sysc_return) # only per or per+check ? | 538 | be BASED(pgm_exit) # only per or per+check ? |
553 | b BASED(pgm_do_call) | 539 | b BASED(pgm_do_call) |
554 | 540 | ||
555 | # | 541 | # |
@@ -570,8 +556,8 @@ pgm_svcper:
570 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 556 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
571 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 557 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
572 | TRACE_IRQS_ON | 558 | TRACE_IRQS_ON |
573 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
574 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 559 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
560 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
575 | b BASED(sysc_do_svc) | 561 | b BASED(sysc_do_svc) |
576 | 562 | ||
577 | # | 563 | # |
@@ -582,8 +568,8 @@ kernel_per:
582 | mvi SP_SVCNR+1(%r15),0xff | 568 | mvi SP_SVCNR+1(%r15),0xff |
583 | la %r2,SP_PTREGS(%r15) # address of register-save area | 569 | la %r2,SP_PTREGS(%r15) # address of register-save area |
584 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 570 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
585 | la %r14,BASED(sysc_restore)# load adr. of system return | 571 | basr %r14,%r1 # branch to do_single_step |
586 | br %r1 # branch to do_single_step | 572 | b BASED(pgm_exit) |
587 | 573 | ||
588 | /* | 574 | /* |
589 | * IO interrupt handler routine | 575 | * IO interrupt handler routine |
@@ -602,134 +588,126 @@ io_int_handler:
602 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 588 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
603 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 589 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
604 | io_no_vtime: | 590 | io_no_vtime: |
605 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
606 | TRACE_IRQS_OFF | 591 | TRACE_IRQS_OFF |
592 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
607 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | 593 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ |
608 | la %r2,SP_PTREGS(%r15) # address of register-save area | 594 | la %r2,SP_PTREGS(%r15) # address of register-save area |
609 | basr %r14,%r1 # branch to standard irq handler | 595 | basr %r14,%r1 # branch to standard irq handler |
610 | io_return: | 596 | io_return: |
597 | LOCKDEP_SYS_EXIT | ||
598 | TRACE_IRQS_ON | ||
599 | io_tif: | ||
611 | tm __TI_flags+3(%r9),_TIF_WORK_INT | 600 | tm __TI_flags+3(%r9),_TIF_WORK_INT |
612 | bnz BASED(io_work) # there is work to do (signals etc.) | 601 | bnz BASED(io_work) # there is work to do (signals etc.) |
613 | io_restore: | 602 | io_restore: |
614 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
615 | la %r1,BASED(io_restore_trace_psw_addr) | ||
616 | l %r1,0(%r1) | ||
617 | lpsw 0(%r1) | ||
618 | io_restore_trace: | ||
619 | TRACE_IRQS_CHECK | ||
620 | LOCKDEP_SYS_EXIT | ||
621 | #endif | ||
622 | io_leave: | ||
623 | RESTORE_ALL __LC_RETURN_PSW,0 | 603 | RESTORE_ALL __LC_RETURN_PSW,0 |
624 | io_done: | 604 | io_done: |
625 | 605 | ||
626 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
627 | io_restore_trace_psw_addr: | ||
628 | .long io_restore_trace_psw | ||
629 | |||
630 | .section .data,"aw",@progbits | ||
631 | .align 8 | ||
632 | .globl io_restore_trace_psw | ||
633 | io_restore_trace_psw: | ||
634 | .long 0, io_restore_trace + 0x80000000 | ||
635 | .previous | ||
636 | #endif | ||
637 | |||
638 | # | 606 | # |
639 | # switch to kernel stack, then check the TIF bits | 607 | # There is work todo, find out in which context we have been interrupted: |
608 | # 1) if we return to user space we can do all _TIF_WORK_INT work | ||
609 | # 2) if we return to kernel code and preemptive scheduling is enabled check | ||
610 | # the preemption counter and if it is zero call preempt_schedule_irq | ||
611 | # Before any work can be done, a switch to the kernel stack is required. | ||
640 | # | 612 | # |
641 | io_work: | 613 | io_work: |
642 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 614 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
643 | #ifndef CONFIG_PREEMPT | 615 | bo BASED(io_work_user) # yes -> do resched & signal |
644 | bno BASED(io_restore) # no-> skip resched & signal | 616 | #ifdef CONFIG_PREEMPT |
645 | #else | ||
646 | bnz BASED(io_work_user) # no -> check for preemptive scheduling | ||
647 | # check for preemptive scheduling | 617 | # check for preemptive scheduling |
648 | icm %r0,15,__TI_precount(%r9) | 618 | icm %r0,15,__TI_precount(%r9) |
649 | bnz BASED(io_restore) # preemption disabled | 619 | bnz BASED(io_restore) # preemption disabled |
620 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | ||
621 | bno BASED(io_restore) | ||
622 | # switch to kernel stack | ||
650 | l %r1,SP_R15(%r15) | 623 | l %r1,SP_R15(%r15) |
651 | s %r1,BASED(.Lc_spsize) | 624 | s %r1,BASED(.Lc_spsize) |
652 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 625 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) |
653 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 626 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
654 | lr %r15,%r1 | 627 | lr %r15,%r1 |
655 | io_resume_loop: | 628 | # TRACE_IRQS_ON already done at io_return, call |
656 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | 629 | # TRACE_IRQS_OFF to keep things symmetrical |
657 | bno BASED(io_restore) | 630 | TRACE_IRQS_OFF |
658 | l %r1,BASED(.Lpreempt_schedule_irq) | 631 | l %r1,BASED(.Lpreempt_schedule_irq) |
659 | la %r14,BASED(io_resume_loop) | 632 | basr %r14,%r1 # call preempt_schedule_irq |
660 | br %r1 # call schedule | 633 | b BASED(io_return) |
634 | #else | ||
635 | b BASED(io_restore) | ||
661 | #endif | 636 | #endif |
662 | 637 | ||
638 | # | ||
639 | # Need to do work before returning to userspace, switch to kernel stack | ||
640 | # | ||
663 | io_work_user: | 641 | io_work_user: |
664 | l %r1,__LC_KERNEL_STACK | 642 | l %r1,__LC_KERNEL_STACK |
665 | s %r1,BASED(.Lc_spsize) | 643 | s %r1,BASED(.Lc_spsize) |
666 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 644 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) |
667 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 645 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
668 | lr %r15,%r1 | 646 | lr %r15,%r1 |
647 | |||
669 | # | 648 | # |
670 | # One of the work bits is on. Find out which one. | 649 | # One of the work bits is on. Find out which one. |
671 | # Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED | 650 | # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED |
672 | # and _TIF_MCCK_PENDING | 651 | # and _TIF_MCCK_PENDING |
673 | # | 652 | # |
674 | io_work_loop: | 653 | io_work_tif: |
675 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING | 654 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING |
676 | bo BASED(io_mcck_pending) | 655 | bo BASED(io_mcck_pending) |
677 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | 656 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED |
678 | bo BASED(io_reschedule) | 657 | bo BASED(io_reschedule) |
679 | tm __TI_flags+3(%r9),_TIF_SIGPENDING | 658 | tm __TI_flags+3(%r9),_TIF_SIGPENDING |
680 | bnz BASED(io_sigpending) | 659 | bo BASED(io_sigpending) |
681 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME | 660 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME |
682 | bnz BASED(io_notify_resume) | 661 | bo BASED(io_notify_resume) |
683 | b BASED(io_restore) | 662 | b BASED(io_return) # beware of critical section cleanup |
684 | io_work_done: | ||
685 | 663 | ||
686 | # | 664 | # |
687 | # _TIF_MCCK_PENDING is set, call handler | 665 | # _TIF_MCCK_PENDING is set, call handler |
688 | # | 666 | # |
689 | io_mcck_pending: | 667 | io_mcck_pending: |
668 | # TRACE_IRQS_ON already done at io_return | ||
690 | l %r1,BASED(.Ls390_handle_mcck) | 669 | l %r1,BASED(.Ls390_handle_mcck) |
691 | basr %r14,%r1 # TIF bit will be cleared by handler | 670 | basr %r14,%r1 # TIF bit will be cleared by handler |
692 | b BASED(io_work_loop) | 671 | TRACE_IRQS_OFF |
672 | b BASED(io_return) | ||
693 | 673 | ||
694 | # | 674 | # |
695 | # _TIF_NEED_RESCHED is set, call schedule | 675 | # _TIF_NEED_RESCHED is set, call schedule |
696 | # | 676 | # |
697 | io_reschedule: | 677 | io_reschedule: |
698 | TRACE_IRQS_ON | 678 | # TRACE_IRQS_ON already done at io_return |
699 | l %r1,BASED(.Lschedule) | 679 | l %r1,BASED(.Lschedule) |
700 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 680 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
701 | basr %r14,%r1 # call scheduler | 681 | basr %r14,%r1 # call scheduler |
702 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 682 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
703 | TRACE_IRQS_OFF | 683 | TRACE_IRQS_OFF |
704 | tm __TI_flags+3(%r9),_TIF_WORK_INT | 684 | b BASED(io_return) |
705 | bz BASED(io_restore) # there is no work to do | ||
706 | b BASED(io_work_loop) | ||
707 | 685 | ||
708 | # | 686 | # |
709 | # _TIF_SIGPENDING is set, call do_signal | 687 | # _TIF_SIGPENDING is set, call do_signal |
710 | # | 688 | # |
711 | io_sigpending: | 689 | io_sigpending: |
712 | TRACE_IRQS_ON | 690 | # TRACE_IRQS_ON already done at io_return |
713 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 691 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
714 | la %r2,SP_PTREGS(%r15) # load pt_regs | 692 | la %r2,SP_PTREGS(%r15) # load pt_regs |
715 | l %r1,BASED(.Ldo_signal) | 693 | l %r1,BASED(.Ldo_signal) |
716 | basr %r14,%r1 # call do_signal | 694 | basr %r14,%r1 # call do_signal |
717 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 695 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
718 | TRACE_IRQS_OFF | 696 | TRACE_IRQS_OFF |
719 | b BASED(io_work_loop) | 697 | b BASED(io_return) |
720 | 698 | ||
721 | # | 699 | # |
722 | # _TIF_SIGPENDING is set, call do_signal | 700 | # _TIF_SIGPENDING is set, call do_signal |
723 | # | 701 | # |
724 | io_notify_resume: | 702 | io_notify_resume: |
725 | TRACE_IRQS_ON | 703 | # TRACE_IRQS_ON already done at io_return |
726 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 704 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
727 | la %r2,SP_PTREGS(%r15) # load pt_regs | 705 | la %r2,SP_PTREGS(%r15) # load pt_regs |
728 | l %r1,BASED(.Ldo_notify_resume) | 706 | l %r1,BASED(.Ldo_notify_resume) |
729 | basr %r14,%r1 # call do_signal | 707 | basr %r14,%r1 # call do_signal |
730 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 708 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
731 | TRACE_IRQS_OFF | 709 | TRACE_IRQS_OFF |
732 | b BASED(io_work_loop) | 710 | b BASED(io_return) |
733 | 711 | ||
734 | /* | 712 | /* |
735 | * External interrupt handler routine | 713 | * External interrupt handler routine |
@@ -764,15 +742,14 @@ __critical_end:
764 | 742 | ||
765 | .globl mcck_int_handler | 743 | .globl mcck_int_handler |
766 | mcck_int_handler: | 744 | mcck_int_handler: |
767 | stck __LC_INT_CLOCK | 745 | stck __LC_MCCK_CLOCK |
768 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer | 746 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer |
769 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | 747 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs |
770 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 748 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
771 | la %r12,__LC_MCK_OLD_PSW | 749 | la %r12,__LC_MCK_OLD_PSW |
772 | tm __LC_MCCK_CODE,0x80 # system damage? | 750 | tm __LC_MCCK_CODE,0x80 # system damage? |
773 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid | 751 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid |
774 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER | 752 | mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA |
775 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | ||
776 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 753 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
777 | bo BASED(1f) | 754 | bo BASED(1f) |
778 | la %r14,__LC_SYNC_ENTER_TIMER | 755 | la %r14,__LC_SYNC_ENTER_TIMER |
@@ -786,7 +763,7 @@ mcck_int_handler:
786 | bl BASED(0f) | 763 | bl BASED(0f) |
787 | la %r14,__LC_LAST_UPDATE_TIMER | 764 | la %r14,__LC_LAST_UPDATE_TIMER |
788 | 0: spt 0(%r14) | 765 | 0: spt 0(%r14) |
789 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 766 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
790 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 767 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
791 | bno BASED(mcck_int_main) # no -> skip cleanup critical | 768 | bno BASED(mcck_int_main) # no -> skip cleanup critical |
792 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 769 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
@@ -808,9 +785,9 @@ mcck_int_main:
808 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical | 785 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical |
809 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 786 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
810 | bz BASED(mcck_no_vtime) | 787 | bz BASED(mcck_no_vtime) |
811 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 788 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER |
812 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 789 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
813 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 790 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER |
814 | mcck_no_vtime: | 791 | mcck_no_vtime: |
815 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 792 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
816 | la %r2,SP_PTREGS(%r15) # load pt_regs | 793 | la %r2,SP_PTREGS(%r15) # load pt_regs |
@@ -833,7 +810,6 @@ mcck_no_vtime:
833 | mcck_return: | 810 | mcck_return: |
834 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW | 811 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW |
835 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 812 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
836 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 | ||
837 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 813 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
838 | bno BASED(0f) | 814 | bno BASED(0f) |
839 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 815 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
@@ -917,18 +893,14 @@ stack_overflow:
917 | 893 | ||
918 | cleanup_table_system_call: | 894 | cleanup_table_system_call: |
919 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 | 895 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 |
920 | cleanup_table_sysc_return: | 896 | cleanup_table_sysc_tif: |
921 | .long sysc_return + 0x80000000, sysc_leave + 0x80000000 | 897 | .long sysc_tif + 0x80000000, sysc_restore + 0x80000000 |
922 | cleanup_table_sysc_leave: | 898 | cleanup_table_sysc_restore: |
923 | .long sysc_leave + 0x80000000, sysc_done + 0x80000000 | 899 | .long sysc_restore + 0x80000000, sysc_done + 0x80000000 |
924 | cleanup_table_sysc_work_loop: | 900 | cleanup_table_io_tif: |
925 | .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 | 901 | .long io_tif + 0x80000000, io_restore + 0x80000000 |
926 | cleanup_table_io_return: | 902 | cleanup_table_io_restore: |
927 | .long io_return + 0x80000000, io_leave + 0x80000000 | 903 | .long io_restore + 0x80000000, io_done + 0x80000000 |
928 | cleanup_table_io_leave: | ||
929 | .long io_leave + 0x80000000, io_done + 0x80000000 | ||
930 | cleanup_table_io_work_loop: | ||
931 | .long io_work_loop + 0x80000000, io_work_done + 0x80000000 | ||
932 | 904 | ||
933 | cleanup_critical: | 905 | cleanup_critical: |
934 | clc 4(4,%r12),BASED(cleanup_table_system_call) | 906 | clc 4(4,%r12),BASED(cleanup_table_system_call) |
@@ -936,49 +908,40 @@ cleanup_critical:
936 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) | 908 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) |
937 | bl BASED(cleanup_system_call) | 909 | bl BASED(cleanup_system_call) |
938 | 0: | 910 | 0: |
939 | clc 4(4,%r12),BASED(cleanup_table_sysc_return) | 911 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif) |
940 | bl BASED(0f) | ||
941 | clc 4(4,%r12),BASED(cleanup_table_sysc_return+4) | ||
942 | bl BASED(cleanup_sysc_return) | ||
943 | 0: | ||
944 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave) | ||
945 | bl BASED(0f) | ||
946 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) | ||
947 | bl BASED(cleanup_sysc_leave) | ||
948 | 0: | ||
949 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) | ||
950 | bl BASED(0f) | 912 | bl BASED(0f) |
951 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) | 913 | clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4) |
952 | bl BASED(cleanup_sysc_return) | 914 | bl BASED(cleanup_sysc_tif) |
953 | 0: | 915 | 0: |
954 | clc 4(4,%r12),BASED(cleanup_table_io_return) | 916 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore) |
955 | bl BASED(0f) | 917 | bl BASED(0f) |
956 | clc 4(4,%r12),BASED(cleanup_table_io_return+4) | 918 | clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4) |
957 | bl BASED(cleanup_io_return) | 919 | bl BASED(cleanup_sysc_restore) |
958 | 0: | 920 | 0: |
959 | clc 4(4,%r12),BASED(cleanup_table_io_leave) | 921 | clc 4(4,%r12),BASED(cleanup_table_io_tif) |
960 | bl BASED(0f) | 922 | bl BASED(0f) |
961 | clc 4(4,%r12),BASED(cleanup_table_io_leave+4) | 923 | clc 4(4,%r12),BASED(cleanup_table_io_tif+4) |
962 | bl BASED(cleanup_io_leave) | 924 | bl BASED(cleanup_io_tif) |
963 | 0: | 925 | 0: |
964 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop) | 926 | clc 4(4,%r12),BASED(cleanup_table_io_restore) |
965 | bl BASED(0f) | 927 | bl BASED(0f) |
966 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) | 928 | clc 4(4,%r12),BASED(cleanup_table_io_restore+4) |
967 | bl BASED(cleanup_io_work_loop) | 929 | bl BASED(cleanup_io_restore) |
968 | 0: | 930 | 0: |
969 | br %r14 | 931 | br %r14 |
970 | 932 | ||
971 | cleanup_system_call: | 933 | cleanup_system_call: |
972 | mvc __LC_RETURN_PSW(8),0(%r12) | 934 | mvc __LC_RETURN_PSW(8),0(%r12) |
973 | c %r12,BASED(.Lmck_old_psw) | ||
974 | be BASED(0f) | ||
975 | la %r12,__LC_SAVE_AREA+16 | ||
976 | b BASED(1f) | ||
977 | 0: la %r12,__LC_SAVE_AREA+32 | ||
978 | 1: | ||
979 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | 935 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) |
980 | bh BASED(0f) | 936 | bh BASED(0f) |
937 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
938 | c %r12,BASED(.Lmck_old_psw) | ||
939 | be BASED(0f) | ||
981 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 940 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
941 | 0: c %r12,BASED(.Lmck_old_psw) | ||
942 | la %r12,__LC_SAVE_AREA+32 | ||
943 | be BASED(0f) | ||
944 | la %r12,__LC_SAVE_AREA+16 | ||
982 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | 945 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) |
983 | bhe BASED(cleanup_vtime) | 946 | bhe BASED(cleanup_vtime) |
984 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | 947 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) |
@@ -1011,61 +974,54 @@ cleanup_system_call_insn:
1011 | .long sysc_stime + 0x80000000 | 974 | .long sysc_stime + 0x80000000 |
1012 | .long sysc_update + 0x80000000 | 975 | .long sysc_update + 0x80000000 |
1013 | 976 | ||
1014 | cleanup_sysc_return: | 977 | cleanup_sysc_tif: |
1015 | mvc __LC_RETURN_PSW(4),0(%r12) | 978 | mvc __LC_RETURN_PSW(4),0(%r12) |
1016 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) | 979 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif) |
1017 | la %r12,__LC_RETURN_PSW | 980 | la %r12,__LC_RETURN_PSW |
1018 | br %r14 | 981 | br %r14 |
1019 | 982 | ||
1020 | cleanup_sysc_leave: | 983 | cleanup_sysc_restore: |
1021 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) | 984 | clc 4(4,%r12),BASED(cleanup_sysc_restore_insn) |
1022 | be BASED(2f) | 985 | be BASED(2f) |
986 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
987 | c %r12,BASED(.Lmck_old_psw) | ||
988 | be BASED(0f) | ||
1023 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 989 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1024 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) | 990 | 0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4) |
1025 | be BASED(2f) | 991 | be BASED(2f) |
1026 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 992 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1027 | c %r12,BASED(.Lmck_old_psw) | 993 | c %r12,BASED(.Lmck_old_psw) |
1028 | bne BASED(0f) | 994 | la %r12,__LC_SAVE_AREA+32 |
1029 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | 995 | be BASED(1f) |
1030 | b BASED(1f) | 996 | la %r12,__LC_SAVE_AREA+16 |
1031 | 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) | 997 | 1: mvc 0(16,%r12),SP_R12(%r15) |
1032 | 1: lm %r0,%r11,SP_R0(%r15) | 998 | lm %r0,%r11,SP_R0(%r15) |
1033 | l %r15,SP_R15(%r15) | 999 | l %r15,SP_R15(%r15) |
1034 | 2: la %r12,__LC_RETURN_PSW | 1000 | 2: la %r12,__LC_RETURN_PSW |
1035 | br %r14 | 1001 | br %r14 |
1036 | cleanup_sysc_leave_insn: | 1002 | cleanup_sysc_restore_insn: |
1037 | .long sysc_done - 4 + 0x80000000 | 1003 | .long sysc_done - 4 + 0x80000000 |
1038 | .long sysc_done - 8 + 0x80000000 | 1004 | .long sysc_done - 8 + 0x80000000 |
1039 | 1005 | ||
1040 | cleanup_io_return: | 1006 | cleanup_io_tif: |
1041 | mvc __LC_RETURN_PSW(4),0(%r12) | 1007 | mvc __LC_RETURN_PSW(4),0(%r12) |
1042 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return) | 1008 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif) |
1043 | la %r12,__LC_RETURN_PSW | 1009 | la %r12,__LC_RETURN_PSW |
1044 | br %r14 | 1010 | br %r14 |
1045 | 1011 | ||
1046 | cleanup_io_work_loop: | 1012 | cleanup_io_restore: |
1047 | mvc __LC_RETURN_PSW(4),0(%r12) | 1013 | clc 4(4,%r12),BASED(cleanup_io_restore_insn) |
1048 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) | 1014 | be BASED(1f) |
1049 | la %r12,__LC_RETURN_PSW | 1015 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER |
1050 | br %r14 | 1016 | clc 4(4,%r12),BASED(cleanup_io_restore_insn+4) |
1051 | 1017 | be BASED(1f) | |
1052 | cleanup_io_leave: | ||
1053 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) | ||
1054 | be BASED(2f) | ||
1055 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
1056 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) | ||
1057 | be BASED(2f) | ||
1058 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 1018 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1059 | c %r12,BASED(.Lmck_old_psw) | ||
1060 | bne BASED(0f) | ||
1061 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | 1019 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) |
1062 | b BASED(1f) | 1020 | lm %r0,%r11,SP_R0(%r15) |
1063 | 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) | ||
1064 | 1: lm %r0,%r11,SP_R0(%r15) | ||
1065 | l %r15,SP_R15(%r15) | 1021 | l %r15,SP_R15(%r15) |
1066 | 2: la %r12,__LC_RETURN_PSW | 1022 | 1: la %r12,__LC_RETURN_PSW |
1067 | br %r14 | 1023 | br %r14 |
1068 | cleanup_io_leave_insn: | 1024 | cleanup_io_restore_insn: |
1069 | .long io_done - 4 + 0x80000000 | 1025 | .long io_done - 4 + 0x80000000 |
1070 | .long io_done - 8 + 0x80000000 | 1026 | .long io_done - 8 + 0x80000000 |
1071 | 1027 | ||
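
Note: the io_work comment block in the entry.S hunk above describes the reworked return-from-interrupt policy. A rough C-level sketch of that decision is given below for orientation only; do_tif_work() and the returning_to_user flag are placeholders for this note, and the authoritative logic is the assembly in the patch itself.

/* Pseudocode sketch of the io_work policy described in the comment above. */
static void do_tif_work(void) { /* placeholder: mcck, resched, signals, notify-resume */ }

static void io_work_sketch(int returning_to_user)
{
	if (returning_to_user) {
		do_tif_work();			/* all _TIF_WORK_INT work */
		return;
	}
#ifdef CONFIG_PREEMPT
	/* back to kernel code: preempt only if the preemption counter is zero
	 * and TIF_NEED_RESCHED is set */
	if (preempt_count() == 0 && test_thread_flag(TIF_NEED_RESCHED))
		preempt_schedule_irq();
#endif
}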
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 52106d53271c..178d92536d90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,7 +2,7 @@
2 | * arch/s390/kernel/entry64.S | 2 | * arch/s390/kernel/entry64.S |
3 | * S390 low-level entry points. | 3 | * S390 low-level entry points. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 1999,2006 | 5 | * Copyright (C) IBM Corp. 1999,2010 |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
7 | * Hartmut Penner (hp@de.ibm.com), | 7 | * Hartmut Penner (hp@de.ibm.com), |
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
@@ -59,30 +59,45 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
59 | 59 | ||
60 | #define BASED(name) name-system_call(%r13) | 60 | #define BASED(name) name-system_call(%r13) |
61 | 61 | ||
62 | .macro HANDLE_SIE_INTERCEPT | ||
63 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
64 | lg %r3,__LC_SIE_HOOK | ||
65 | ltgr %r3,%r3 | ||
66 | jz 0f | ||
67 | basr %r14,%r3 | ||
68 | 0: | ||
69 | #endif | ||
70 | .endm | ||
71 | |||
62 | #ifdef CONFIG_TRACE_IRQFLAGS | 72 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | .macro TRACE_IRQS_ON | 73 | .macro TRACE_IRQS_ON |
64 | basr %r2,%r0 | 74 | basr %r2,%r0 |
65 | brasl %r14,trace_hardirqs_on_caller | 75 | brasl %r14,trace_hardirqs_on_caller |
66 | .endm | 76 | .endm |
67 | 77 | ||
68 | .macro TRACE_IRQS_OFF | 78 | .macro TRACE_IRQS_OFF |
69 | basr %r2,%r0 | 79 | basr %r2,%r0 |
70 | brasl %r14,trace_hardirqs_off_caller | 80 | brasl %r14,trace_hardirqs_off_caller |
71 | .endm | 81 | .endm |
72 | 82 | ||
73 | .macro TRACE_IRQS_CHECK | 83 | .macro TRACE_IRQS_CHECK_ON |
74 | basr %r2,%r0 | ||
75 | tm SP_PSW(%r15),0x03 # irqs enabled? | 84 | tm SP_PSW(%r15),0x03 # irqs enabled? |
76 | jz 0f | 85 | jz 0f |
77 | brasl %r14,trace_hardirqs_on_caller | 86 | TRACE_IRQS_ON |
78 | j 1f | 87 | 0: |
79 | 0: brasl %r14,trace_hardirqs_off_caller | 88 | .endm |
80 | 1: | 89 | |
90 | .macro TRACE_IRQS_CHECK_OFF | ||
91 | tm SP_PSW(%r15),0x03 # irqs enabled? | ||
92 | jz 0f | ||
93 | TRACE_IRQS_OFF | ||
94 | 0: | ||
81 | .endm | 95 | .endm |
82 | #else | 96 | #else |
83 | #define TRACE_IRQS_ON | 97 | #define TRACE_IRQS_ON |
84 | #define TRACE_IRQS_OFF | 98 | #define TRACE_IRQS_OFF |
85 | #define TRACE_IRQS_CHECK | 99 | #define TRACE_IRQS_CHECK_ON |
100 | #define TRACE_IRQS_CHECK_OFF | ||
86 | #endif | 101 | #endif |
87 | 102 | ||
88 | #ifdef CONFIG_LOCKDEP | 103 | #ifdef CONFIG_LOCKDEP |
@@ -111,31 +126,35 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
111 | * R15 - kernel stack pointer | 126 | * R15 - kernel stack pointer |
112 | */ | 127 | */ |
113 | 128 | ||
114 | .macro SAVE_ALL_BASE savearea | ||
115 | stmg %r12,%r15,\savearea | ||
116 | larl %r13,system_call | ||
117 | .endm | ||
118 | |||
119 | .macro SAVE_ALL_SVC psworg,savearea | 129 | .macro SAVE_ALL_SVC psworg,savearea |
120 | la %r12,\psworg | 130 | stmg %r11,%r15,\savearea |
121 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | 131 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp |
132 | aghi %r15,-SP_SIZE # make room for registers & psw | ||
133 | lg %r11,__LC_LAST_BREAK | ||
122 | .endm | 134 | .endm |
123 | 135 | ||
124 | .macro SAVE_ALL_SYNC psworg,savearea | 136 | .macro SAVE_ALL_PGM psworg,savearea |
125 | la %r12,\psworg | 137 | stmg %r11,%r15,\savearea |
126 | tm \psworg+1,0x01 # test problem state bit | 138 | tm \psworg+1,0x01 # test problem state bit |
127 | jz 2f # skip stack setup save | ||
128 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
129 | #ifdef CONFIG_CHECK_STACK | 139 | #ifdef CONFIG_CHECK_STACK |
130 | j 3f | 140 | jnz 1f |
131 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 141 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD |
132 | jz stack_overflow | 142 | jnz 2f |
133 | 3: | 143 | la %r12,\psworg |
144 | j stack_overflow | ||
145 | #else | ||
146 | jz 2f | ||
134 | #endif | 147 | #endif |
135 | 2: | 148 | 1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp |
149 | 2: aghi %r15,-SP_SIZE # make room for registers & psw | ||
150 | larl %r13,system_call | ||
151 | lg %r11,__LC_LAST_BREAK | ||
136 | .endm | 152 | .endm |
137 | 153 | ||
138 | .macro SAVE_ALL_ASYNC psworg,savearea | 154 | .macro SAVE_ALL_ASYNC psworg,savearea |
155 | stmg %r11,%r15,\savearea | ||
156 | larl %r13,system_call | ||
157 | lg %r11,__LC_LAST_BREAK | ||
139 | la %r12,\psworg | 158 | la %r12,\psworg |
140 | tm \psworg+1,0x01 # test problem state bit | 159 | tm \psworg+1,0x01 # test problem state bit |
141 | jnz 1f # from user -> load kernel stack | 160 | jnz 1f # from user -> load kernel stack |
@@ -149,27 +168,23 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
149 | 0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? | 168 | 0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? |
150 | slgr %r14,%r15 | 169 | slgr %r14,%r15 |
151 | srag %r14,%r14,STACK_SHIFT | 170 | srag %r14,%r14,STACK_SHIFT |
152 | jz 2f | ||
153 | 1: lg %r15,__LC_ASYNC_STACK # load async stack | ||
154 | #ifdef CONFIG_CHECK_STACK | 171 | #ifdef CONFIG_CHECK_STACK |
155 | j 3f | 172 | jnz 1f |
156 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | 173 | tml %r15,STACK_SIZE - CONFIG_STACK_GUARD |
157 | jz stack_overflow | 174 | jnz 2f |
158 | 3: | 175 | j stack_overflow |
176 | #else | ||
177 | jz 2f | ||
159 | #endif | 178 | #endif |
160 | 2: | 179 | 1: lg %r15,__LC_ASYNC_STACK # load async stack |
180 | 2: aghi %r15,-SP_SIZE # make room for registers & psw | ||
161 | .endm | 181 | .endm |
162 | 182 | ||
163 | .macro CREATE_STACK_FRAME psworg,savearea | 183 | .macro CREATE_STACK_FRAME savearea |
164 | aghi %r15,-SP_SIZE # make room for registers & psw | 184 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
165 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | ||
166 | stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | 185 | stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 |
167 | icm %r12,3,__LC_SVC_ILC | 186 | mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack |
168 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 187 | stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack |
169 | st %r12,SP_SVCNR(%r15) | ||
170 | mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack | ||
171 | la %r12,0 | ||
172 | stg %r12,__SF_BACKCHAIN(%r15) | ||
173 | .endm | 188 | .endm |
174 | 189 | ||
175 | .macro RESTORE_ALL psworg,sync | 190 | .macro RESTORE_ALL psworg,sync |
@@ -185,6 +200,13 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
185 | lpswe \psworg # back to caller | 200 | lpswe \psworg # back to caller |
186 | .endm | 201 | .endm |
187 | 202 | ||
203 | .macro LAST_BREAK | ||
204 | srag %r10,%r11,23 | ||
205 | jz 0f | ||
206 | stg %r11,__TI_last_break(%r12) | ||
207 | 0: | ||
208 | .endm | ||
209 | |||
188 | /* | 210 | /* |
189 | * Scheduler resume function, called by switch_to | 211 | * Scheduler resume function, called by switch_to |
190 | * gpr2 = (task_struct *) prev | 212 | * gpr2 = (task_struct *) prev |
@@ -230,143 +252,129 @@ __critical_start:
230 | system_call: | 252 | system_call: |
231 | stpt __LC_SYNC_ENTER_TIMER | 253 | stpt __LC_SYNC_ENTER_TIMER |
232 | sysc_saveall: | 254 | sysc_saveall: |
233 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
234 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 255 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
235 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 256 | CREATE_STACK_FRAME __LC_SAVE_AREA |
236 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 257 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW |
258 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
259 | stg %r7,SP_ARGS(%r15) | ||
260 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
237 | sysc_vtime: | 261 | sysc_vtime: |
238 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 262 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
239 | sysc_stime: | 263 | sysc_stime: |
240 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 264 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
241 | sysc_update: | 265 | sysc_update: |
242 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 266 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
267 | LAST_BREAK | ||
243 | sysc_do_svc: | 268 | sysc_do_svc: |
244 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 269 | llgh %r7,SP_SVCNR(%r15) |
245 | ltgr %r7,%r7 # test for svc 0 | 270 | slag %r7,%r7,2 # shift and test for svc 0 |
246 | jnz sysc_nr_ok | 271 | jnz sysc_nr_ok |
247 | # svc 0: system call number in %r1 | 272 | # svc 0: system call number in %r1 |
248 | cl %r1,BASED(.Lnr_syscalls) | 273 | llgfr %r1,%r1 # clear high word in r1 |
274 | cghi %r1,NR_syscalls | ||
249 | jnl sysc_nr_ok | 275 | jnl sysc_nr_ok |
250 | lgfr %r7,%r1 # clear high word in r1 | 276 | sth %r1,SP_SVCNR(%r15) |
277 | slag %r7,%r1,2 # shift and test for svc 0 | ||
251 | sysc_nr_ok: | 278 | sysc_nr_ok: |
252 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | ||
253 | sysc_do_restart: | ||
254 | sth %r7,SP_SVCNR(%r15) | ||
255 | sllg %r7,%r7,2 # svc number * 4 | ||
256 | larl %r10,sys_call_table | 279 | larl %r10,sys_call_table |
257 | #ifdef CONFIG_COMPAT | 280 | #ifdef CONFIG_COMPAT |
258 | tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? | 281 | tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ? |
259 | jno sysc_noemu | 282 | jno sysc_noemu |
260 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | 283 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls |
261 | sysc_noemu: | 284 | sysc_noemu: |
262 | #endif | 285 | #endif |
263 | tm __TI_flags+6(%r9),_TIF_SYSCALL | 286 | tm __TI_flags+6(%r12),_TIF_SYSCALL |
264 | lgf %r8,0(%r7,%r10) # load address of system call routine | 287 | lgf %r8,0(%r7,%r10) # load address of system call routine |
265 | jnz sysc_tracesys | 288 | jnz sysc_tracesys |
266 | basr %r14,%r8 # call sys_xxxx | 289 | basr %r14,%r8 # call sys_xxxx |
267 | stg %r2,SP_R2(%r15) # store return value (change R2 on stack) | 290 | stg %r2,SP_R2(%r15) # store return value (change R2 on stack) |
268 | 291 | ||
269 | sysc_return: | 292 | sysc_return: |
270 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | 293 | LOCKDEP_SYS_EXIT |
294 | sysc_tif: | ||
295 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | ||
271 | jnz sysc_work # there is work to do (signals etc.) | 296 | jnz sysc_work # there is work to do (signals etc.) |
272 | sysc_restore: | 297 | sysc_restore: |
273 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
274 | larl %r1,sysc_restore_trace_psw | ||
275 | lpswe 0(%r1) | ||
276 | sysc_restore_trace: | ||
277 | TRACE_IRQS_CHECK | ||
278 | LOCKDEP_SYS_EXIT | ||
279 | #endif | ||
280 | sysc_leave: | ||
281 | RESTORE_ALL __LC_RETURN_PSW,1 | 298 | RESTORE_ALL __LC_RETURN_PSW,1 |
282 | sysc_done: | 299 | sysc_done: |
283 | 300 | ||
284 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
285 | .section .data,"aw",@progbits | ||
286 | .align 8 | ||
287 | .globl sysc_restore_trace_psw | ||
288 | sysc_restore_trace_psw: | ||
289 | .quad 0, sysc_restore_trace | ||
290 | .previous | ||
291 | #endif | ||
292 | |||
293 | # | ||
294 | # recheck if there is more work to do | ||
295 | # | 301 | # |
296 | sysc_work_loop: | 302 | # There is work to do, but first we need to check if we return to userspace. |
297 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | ||
298 | jz sysc_restore # there is no work to do | ||
299 | # | ||
300 | # One of the work bits is on. Find out which one. | ||
301 | # | 303 | # |
302 | sysc_work: | 304 | sysc_work: |
303 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 305 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
304 | jno sysc_restore | 306 | jno sysc_restore |
305 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING | 307 | |
308 | # | ||
309 | # One of the work bits is on. Find out which one. | ||
310 | # | ||
311 | sysc_work_tif: | ||
312 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | ||
306 | jo sysc_mcck_pending | 313 | jo sysc_mcck_pending |
307 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | 314 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
308 | jo sysc_reschedule | 315 | jo sysc_reschedule |
309 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | 316 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
310 | jnz sysc_sigpending | 317 | jo sysc_sigpending |
311 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME | 318 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
312 | jnz sysc_notify_resume | 319 | jo sysc_notify_resume |
313 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC | 320 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC |
314 | jo sysc_restart | 321 | jo sysc_restart |
315 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP | 322 | tm __TI_flags+7(%r12),_TIF_SINGLE_STEP |
316 | jo sysc_singlestep | 323 | jo sysc_singlestep |
317 | j sysc_restore | 324 | j sysc_return # beware of critical section cleanup |
318 | sysc_work_done: | ||
319 | 325 | ||
320 | # | 326 | # |
321 | # _TIF_NEED_RESCHED is set, call schedule | 327 | # _TIF_NEED_RESCHED is set, call schedule |
322 | # | 328 | # |
323 | sysc_reschedule: | 329 | sysc_reschedule: |
324 | larl %r14,sysc_work_loop | 330 | larl %r14,sysc_return |
325 | jg schedule # return point is sysc_return | 331 | jg schedule # return point is sysc_return |
326 | 332 | ||
327 | # | 333 | # |
328 | # _TIF_MCCK_PENDING is set, call handler | 334 | # _TIF_MCCK_PENDING is set, call handler |
329 | # | 335 | # |
330 | sysc_mcck_pending: | 336 | sysc_mcck_pending: |
331 | larl %r14,sysc_work_loop | 337 | larl %r14,sysc_return |
332 | jg s390_handle_mcck # TIF bit will be cleared by handler | 338 | jg s390_handle_mcck # TIF bit will be cleared by handler |
333 | 339 | ||
334 | # | 340 | # |
335 | # _TIF_SIGPENDING is set, call do_signal | 341 | # _TIF_SIGPENDING is set, call do_signal |
336 | # | 342 | # |
337 | sysc_sigpending: | 343 | sysc_sigpending: |
338 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | 344 | ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP |
339 | la %r2,SP_PTREGS(%r15) # load pt_regs | 345 | la %r2,SP_PTREGS(%r15) # load pt_regs |
340 | brasl %r14,do_signal # call do_signal | 346 | brasl %r14,do_signal # call do_signal |
341 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC | 347 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC |
342 | jo sysc_restart | 348 | jo sysc_restart |
343 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP | 349 | tm __TI_flags+7(%r12),_TIF_SINGLE_STEP |
344 | jo sysc_singlestep | 350 | jo sysc_singlestep |
345 | j sysc_work_loop | 351 | j sysc_return |
346 | 352 | ||
347 | # | 353 | # |
348 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 354 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
349 | # | 355 | # |
350 | sysc_notify_resume: | 356 | sysc_notify_resume: |
351 | la %r2,SP_PTREGS(%r15) # load pt_regs | 357 | la %r2,SP_PTREGS(%r15) # load pt_regs |
352 | larl %r14,sysc_work_loop | 358 | larl %r14,sysc_return |
353 | jg do_notify_resume # call do_notify_resume | 359 | jg do_notify_resume # call do_notify_resume |
354 | 360 | ||
355 | # | 361 | # |
356 | # _TIF_RESTART_SVC is set, set up registers and restart svc | 362 | # _TIF_RESTART_SVC is set, set up registers and restart svc |
357 | # | 363 | # |
358 | sysc_restart: | 364 | sysc_restart: |
359 | ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | 365 | ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC |
360 | lg %r7,SP_R2(%r15) # load new svc number | 366 | lg %r7,SP_R2(%r15) # load new svc number |
361 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument | 367 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument |
362 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 368 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
363 | j sysc_do_restart # restart svc | 369 | sth %r7,SP_SVCNR(%r15) |
370 | slag %r7,%r7,2 | ||
371 | j sysc_nr_ok # restart svc | ||
364 | 372 | ||
365 | # | 373 | # |
366 | # _TIF_SINGLE_STEP is set, call do_single_step | 374 | # _TIF_SINGLE_STEP is set, call do_single_step |
367 | # | 375 | # |
368 | sysc_singlestep: | 376 | sysc_singlestep: |
369 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | 377 | ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP |
370 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | 378 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number |
371 | la %r2,SP_PTREGS(%r15) # address of register-save area | 379 | la %r2,SP_PTREGS(%r15) # address of register-save area |
372 | larl %r14,sysc_return # load adr. of system return | 380 | larl %r14,sysc_return # load adr. of system return |
@@ -379,8 +387,8 @@ sysc_singlestep: | |||
379 | sysc_tracesys: | 387 | sysc_tracesys: |
380 | la %r2,SP_PTREGS(%r15) # load pt_regs | 388 | la %r2,SP_PTREGS(%r15) # load pt_regs |
381 | la %r3,0 | 389 | la %r3,0 |
382 | srl %r7,2 | 390 | llgh %r0,SP_SVCNR(%r15) |
383 | stg %r7,SP_R2(%r15) | 391 | stg %r0,SP_R2(%r15) |
384 | brasl %r14,do_syscall_trace_enter | 392 | brasl %r14,do_syscall_trace_enter |
385 | lghi %r0,NR_syscalls | 393 | lghi %r0,NR_syscalls |
386 | clgr %r0,%r2 | 394 | clgr %r0,%r2 |
@@ -393,7 +401,7 @@ sysc_tracego: | |||
393 | basr %r14,%r8 # call sys_xxx | 401 | basr %r14,%r8 # call sys_xxx |
394 | stg %r2,SP_R2(%r15) # store return value | 402 | stg %r2,SP_R2(%r15) # store return value |
395 | sysc_tracenogo: | 403 | sysc_tracenogo: |
396 | tm __TI_flags+6(%r9),_TIF_SYSCALL | 404 | tm __TI_flags+6(%r12),_TIF_SYSCALL |
397 | jz sysc_return | 405 | jz sysc_return |
398 | la %r2,SP_PTREGS(%r15) # load pt_regs | 406 | la %r2,SP_PTREGS(%r15) # load pt_regs |
399 | larl %r14,sysc_return # return point is sysc_return | 407 | larl %r14,sysc_return # return point is sysc_return |
@@ -405,7 +413,7 @@ sysc_tracenogo: | |||
405 | .globl ret_from_fork | 413 | .globl ret_from_fork |
406 | ret_from_fork: | 414 | ret_from_fork: |
407 | lg %r13,__LC_SVC_NEW_PSW+8 | 415 | lg %r13,__LC_SVC_NEW_PSW+8 |
408 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 416 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
409 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | 417 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? |
410 | jo 0f | 418 | jo 0f |
411 | stg %r15,SP_R15(%r15) # store stack pointer for new kthread | 419 | stg %r15,SP_R15(%r15) # store stack pointer for new kthread |
@@ -435,12 +443,14 @@ kernel_execve: | |||
435 | br %r14 | 443 | br %r14 |
436 | # execve succeeded. | 444 | # execve succeeded. |
437 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 445 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts |
446 | # TRACE_IRQS_OFF | ||
438 | lg %r15,__LC_KERNEL_STACK # load ksp | 447 | lg %r15,__LC_KERNEL_STACK # load ksp |
439 | aghi %r15,-SP_SIZE # make room for registers & psw | 448 | aghi %r15,-SP_SIZE # make room for registers & psw |
440 | lg %r13,__LC_SVC_NEW_PSW+8 | 449 | lg %r13,__LC_SVC_NEW_PSW+8 |
441 | lg %r9,__LC_THREAD_INFO | ||
442 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 450 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs |
451 | lg %r12,__LC_THREAD_INFO | ||
443 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 452 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
453 | # TRACE_IRQS_ON | ||
444 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 454 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
445 | brasl %r14,execve_tail | 455 | brasl %r14,execve_tail |
446 | j sysc_return | 456 | j sysc_return |
@@ -465,20 +475,23 @@ pgm_check_handler: | |||
465 | * for LPSW?). | 475 | * for LPSW?). |
466 | */ | 476 | */ |
467 | stpt __LC_SYNC_ENTER_TIMER | 477 | stpt __LC_SYNC_ENTER_TIMER |
468 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
469 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 478 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
470 | jnz pgm_per # got per exception -> special case | 479 | jnz pgm_per # got per exception -> special case |
471 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 480 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
472 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 481 | CREATE_STACK_FRAME __LC_SAVE_AREA |
482 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
483 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | ||
484 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
473 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 485 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
474 | jz pgm_no_vtime | 486 | jz pgm_no_vtime |
475 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 487 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
476 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 488 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
477 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 489 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
490 | LAST_BREAK | ||
478 | pgm_no_vtime: | 491 | pgm_no_vtime: |
479 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 492 | HANDLE_SIE_INTERCEPT |
480 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | 493 | TRACE_IRQS_CHECK_OFF |
481 | TRACE_IRQS_OFF | 494 | stg %r11,SP_ARGS(%r15) |
482 | lgf %r3,__LC_PGM_ILC # load program interruption code | 495 | lgf %r3,__LC_PGM_ILC # load program interruption code |
483 | lghi %r8,0x7f | 496 | lghi %r8,0x7f |
484 | ngr %r8,%r3 | 497 | ngr %r8,%r3 |
@@ -487,8 +500,10 @@ pgm_do_call: | |||
487 | larl %r1,pgm_check_table | 500 | larl %r1,pgm_check_table |
488 | lg %r1,0(%r8,%r1) # load address of handler routine | 501 | lg %r1,0(%r8,%r1) # load address of handler routine |
489 | la %r2,SP_PTREGS(%r15) # address of register-save area | 502 | la %r2,SP_PTREGS(%r15) # address of register-save area |
490 | larl %r14,sysc_return | 503 | basr %r14,%r1 # branch to interrupt-handler |
491 | br %r1 # branch to interrupt-handler | 504 | pgm_exit: |
505 | TRACE_IRQS_CHECK_ON | ||
506 | j sysc_return | ||
492 | 507 | ||
493 | # | 508 | # |
494 | # handle per exception | 509 | # handle per exception |
@@ -500,55 +515,60 @@ pgm_per: | |||
500 | clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW | 515 | clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW |
501 | je pgm_svcper | 516 | je pgm_svcper |
502 | # no interesting special case, ignore PER event | 517 | # no interesting special case, ignore PER event |
503 | lmg %r12,%r15,__LC_SAVE_AREA | ||
504 | lpswe __LC_PGM_OLD_PSW | 518 | lpswe __LC_PGM_OLD_PSW |
505 | 519 | ||
506 | # | 520 | # |
507 | # Normal per exception | 521 | # Normal per exception |
508 | # | 522 | # |
509 | pgm_per_std: | 523 | pgm_per_std: |
510 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 524 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
511 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 525 | CREATE_STACK_FRAME __LC_SAVE_AREA |
526 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | ||
527 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
512 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 528 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
513 | jz pgm_no_vtime2 | 529 | jz pgm_no_vtime2 |
514 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 530 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
515 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 531 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
516 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 532 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
533 | LAST_BREAK | ||
517 | pgm_no_vtime2: | 534 | pgm_no_vtime2: |
518 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 535 | HANDLE_SIE_INTERCEPT |
519 | TRACE_IRQS_OFF | 536 | TRACE_IRQS_CHECK_OFF |
520 | lg %r1,__TI_task(%r9) | 537 | lg %r1,__TI_task(%r12) |
521 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 538 | tm SP_PSW+1(%r15),0x01 # kernel per event ? |
522 | jz kernel_per | 539 | jz kernel_per |
523 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | 540 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID |
524 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS | 541 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS |
525 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | 542 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID |
526 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 543 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
527 | lgf %r3,__LC_PGM_ILC # load program interruption code | 544 | lgf %r3,__LC_PGM_ILC # load program interruption code |
528 | lghi %r8,0x7f | 545 | lghi %r8,0x7f |
529 | ngr %r8,%r3 # clear per-event-bit and ilc | 546 | ngr %r8,%r3 # clear per-event-bit and ilc |
530 | je sysc_return | 547 | je pgm_exit |
531 | j pgm_do_call | 548 | j pgm_do_call |
532 | 549 | ||
533 | # | 550 | # |
534 | # it was a single stepped SVC that is causing all the trouble | 551 | # it was a single stepped SVC that is causing all the trouble |
535 | # | 552 | # |
536 | pgm_svcper: | 553 | pgm_svcper: |
537 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 554 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
538 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 555 | CREATE_STACK_FRAME __LC_SAVE_AREA |
556 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
557 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
558 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
539 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 559 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
540 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 560 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
541 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 561 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
542 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 562 | LAST_BREAK |
543 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 563 | TRACE_IRQS_OFF |
544 | lg %r8,__TI_task(%r9) | 564 | lg %r8,__TI_task(%r12) |
545 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID | 565 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID |
546 | mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS | 566 | mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS |
547 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 567 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
548 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 568 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
549 | TRACE_IRQS_ON | 569 | TRACE_IRQS_ON |
550 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
551 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 570 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
571 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
552 | j sysc_do_svc | 572 | j sysc_do_svc |
553 | 573 | ||
554 | # | 574 | # |
@@ -557,8 +577,8 @@ pgm_svcper: | |||
557 | kernel_per: | 577 | kernel_per: |
558 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | 578 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number |
559 | la %r2,SP_PTREGS(%r15) # address of register-save area | 579 | la %r2,SP_PTREGS(%r15) # address of register-save area |
560 | larl %r14,sysc_restore # load adr. of system ret, no work | 580 | brasl %r14,do_single_step |
561 | jg do_single_step # branch to do_single_step | 581 | j pgm_exit |
562 | 582 | ||
563 | /* | 583 | /* |
564 | * IO interrupt handler routine | 584 | * IO interrupt handler routine |
@@ -567,162 +587,133 @@ kernel_per: | |||
567 | io_int_handler: | 587 | io_int_handler: |
568 | stck __LC_INT_CLOCK | 588 | stck __LC_INT_CLOCK |
569 | stpt __LC_ASYNC_ENTER_TIMER | 589 | stpt __LC_ASYNC_ENTER_TIMER |
570 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 590 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 |
571 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 591 | CREATE_STACK_FRAME __LC_SAVE_AREA+40 |
572 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 592 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack |
593 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
573 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 594 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
574 | jz io_no_vtime | 595 | jz io_no_vtime |
575 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 596 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
576 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 597 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
577 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 598 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
599 | LAST_BREAK | ||
578 | io_no_vtime: | 600 | io_no_vtime: |
579 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 601 | HANDLE_SIE_INTERCEPT |
580 | TRACE_IRQS_OFF | 602 | TRACE_IRQS_OFF |
581 | la %r2,SP_PTREGS(%r15) # address of register-save area | 603 | la %r2,SP_PTREGS(%r15) # address of register-save area |
582 | brasl %r14,do_IRQ # call standard irq handler | 604 | brasl %r14,do_IRQ # call standard irq handler |
583 | io_return: | 605 | io_return: |
584 | tm __TI_flags+7(%r9),_TIF_WORK_INT | 606 | LOCKDEP_SYS_EXIT |
607 | TRACE_IRQS_ON | ||
608 | io_tif: | ||
609 | tm __TI_flags+7(%r12),_TIF_WORK_INT | ||
585 | jnz io_work # there is work to do (signals etc.) | 610 | jnz io_work # there is work to do (signals etc.) |
586 | io_restore: | 611 | io_restore: |
587 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
588 | larl %r1,io_restore_trace_psw | ||
589 | lpswe 0(%r1) | ||
590 | io_restore_trace: | ||
591 | TRACE_IRQS_CHECK | ||
592 | LOCKDEP_SYS_EXIT | ||
593 | #endif | ||
594 | io_leave: | ||
595 | RESTORE_ALL __LC_RETURN_PSW,0 | 612 | RESTORE_ALL __LC_RETURN_PSW,0 |
596 | io_done: | 613 | io_done: |
597 | 614 | ||
598 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
599 | .section .data,"aw",@progbits | ||
600 | .align 8 | ||
601 | .globl io_restore_trace_psw | ||
602 | io_restore_trace_psw: | ||
603 | .quad 0, io_restore_trace | ||
604 | .previous | ||
605 | #endif | ||
606 | |||
607 | # | 615 | # |
608 | # There is work to do, we need to check if we return to userspace, then | 616 | # There is work to do, find out in which context we have been interrupted: |
609 | # check, if we are in SIE, if yes leave it | 617 | # 1) if we return to user space we can do all _TIF_WORK_INT work |
618 | # 2) if we return to kernel code and kvm is enabled check if we need to | ||
619 | # modify the psw to leave SIE | ||
620 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
621 | # the preemption counter and if it is zero call preempt_schedule_irq | ||
622 | # Before any work can be done, a switch to the kernel stack is required. | ||
610 | # | 623 | # |
611 | io_work: | 624 | io_work: |
612 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 625 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
613 | #ifndef CONFIG_PREEMPT | 626 | jo io_work_user # yes -> do resched & signal |
614 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 627 | #ifdef CONFIG_PREEMPT |
615 | jnz io_work_user # yes -> no need to check for SIE | ||
616 | la %r1, BASED(sie_opcode) # we return to kernel here | ||
617 | lg %r2, SP_PSW+8(%r15) | ||
618 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | ||
619 | jne io_restore # no-> return to kernel | ||
620 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | ||
621 | aghi %r1, 4 | ||
622 | stg %r1, SP_PSW+8(%r15) | ||
623 | j io_restore # return to kernel | ||
624 | #else | ||
625 | jno io_restore # no-> skip resched & signal | ||
626 | #endif | ||
627 | #else | ||
628 | jnz io_work_user # yes -> do resched & signal | ||
629 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
630 | la %r1, BASED(sie_opcode) | ||
631 | lg %r2, SP_PSW+8(%r15) | ||
632 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | ||
633 | jne 0f # no -> leave PSW alone | ||
634 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | ||
635 | aghi %r1, 4 | ||
636 | stg %r1, SP_PSW+8(%r15) | ||
637 | 0: | ||
638 | #endif | ||
639 | # check for preemptive scheduling | 628 | # check for preemptive scheduling |
640 | icm %r0,15,__TI_precount(%r9) | 629 | icm %r0,15,__TI_precount(%r12) |
641 | jnz io_restore # preemption is disabled | 630 | jnz io_restore # preemption is disabled |
631 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
632 | jno io_restore | ||
642 | # switch to kernel stack | 633 | # switch to kernel stack |
643 | lg %r1,SP_R15(%r15) | 634 | lg %r1,SP_R15(%r15) |
644 | aghi %r1,-SP_SIZE | 635 | aghi %r1,-SP_SIZE |
645 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 636 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) |
646 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 637 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
647 | lgr %r15,%r1 | 638 | lgr %r15,%r1 |
648 | io_resume_loop: | 639 | # TRACE_IRQS_ON already done at io_return, call |
649 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | 640 | # TRACE_IRQS_OFF to keep things symmetrical |
650 | jno io_restore | 641 | TRACE_IRQS_OFF |
651 | larl %r14,io_resume_loop | 642 | brasl %r14,preempt_schedule_irq |
652 | jg preempt_schedule_irq | 643 | j io_return |
644 | #else | ||
645 | j io_restore | ||
653 | #endif | 646 | #endif |
654 | 647 | ||
648 | # | ||
649 | # Need to do work before returning to userspace, switch to kernel stack | ||
650 | # | ||
655 | io_work_user: | 651 | io_work_user: |
656 | lg %r1,__LC_KERNEL_STACK | 652 | lg %r1,__LC_KERNEL_STACK |
657 | aghi %r1,-SP_SIZE | 653 | aghi %r1,-SP_SIZE |
658 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | 654 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) |
659 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 655 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
660 | lgr %r15,%r1 | 656 | lgr %r15,%r1 |
657 | |||
661 | # | 658 | # |
662 | # One of the work bits is on. Find out which one. | 659 | # One of the work bits is on. Find out which one. |
663 | # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED | 660 | # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED |
664 | # and _TIF_MCCK_PENDING | 661 | # and _TIF_MCCK_PENDING |
665 | # | 662 | # |
666 | io_work_loop: | 663 | io_work_tif: |
667 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING | 664 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
668 | jo io_mcck_pending | 665 | jo io_mcck_pending |
669 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | 666 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
670 | jo io_reschedule | 667 | jo io_reschedule |
671 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | 668 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
672 | jnz io_sigpending | 669 | jo io_sigpending |
673 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME | 670 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
674 | jnz io_notify_resume | 671 | jo io_notify_resume |
675 | j io_restore | 672 | j io_return # beware of critical section cleanup |
676 | io_work_done: | ||
677 | |||
678 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | ||
679 | sie_opcode: | ||
680 | .long 0xb2140000 | ||
681 | #endif | ||
682 | 673 | ||
683 | # | 674 | # |
684 | # _TIF_MCCK_PENDING is set, call handler | 675 | # _TIF_MCCK_PENDING is set, call handler |
685 | # | 676 | # |
686 | io_mcck_pending: | 677 | io_mcck_pending: |
678 | # TRACE_IRQS_ON already done at io_return | ||
687 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler | 679 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler |
688 | j io_work_loop | 680 | TRACE_IRQS_OFF |
681 | j io_return | ||
689 | 682 | ||
690 | # | 683 | # |
691 | # _TIF_NEED_RESCHED is set, call schedule | 684 | # _TIF_NEED_RESCHED is set, call schedule |
692 | # | 685 | # |
693 | io_reschedule: | 686 | io_reschedule: |
694 | TRACE_IRQS_ON | 687 | # TRACE_IRQS_ON already done at io_return |
695 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 688 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
696 | brasl %r14,schedule # call scheduler | 689 | brasl %r14,schedule # call scheduler |
697 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 690 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
698 | TRACE_IRQS_OFF | 691 | TRACE_IRQS_OFF |
699 | tm __TI_flags+7(%r9),_TIF_WORK_INT | 692 | j io_return |
700 | jz io_restore # there is no work to do | ||
701 | j io_work_loop | ||
702 | 693 | ||
703 | # | 694 | # |
704 | # _TIF_SIGPENDING is set, call do_signal | 695 | # _TIF_SIGPENDING is set, call do_signal |
705 | # | 696 | # |
706 | io_sigpending: | 697 | io_sigpending: |
707 | TRACE_IRQS_ON | 698 | # TRACE_IRQS_ON already done at io_return |
708 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 699 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
709 | la %r2,SP_PTREGS(%r15) # load pt_regs | 700 | la %r2,SP_PTREGS(%r15) # load pt_regs |
710 | brasl %r14,do_signal # call do_signal | 701 | brasl %r14,do_signal # call do_signal |
711 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 702 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
712 | TRACE_IRQS_OFF | 703 | TRACE_IRQS_OFF |
713 | j io_work_loop | 704 | j io_return |
714 | 705 | ||
715 | # | 706 | # |
716 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 707 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
717 | # | 708 | # |
718 | io_notify_resume: | 709 | io_notify_resume: |
719 | TRACE_IRQS_ON | 710 | # TRACE_IRQS_ON already done at io_return |
720 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 711 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
721 | la %r2,SP_PTREGS(%r15) # load pt_regs | 712 | la %r2,SP_PTREGS(%r15) # load pt_regs |
722 | brasl %r14,do_notify_resume # call do_notify_resume | 713 | brasl %r14,do_notify_resume # call do_notify_resume |
723 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | 714 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts |
724 | TRACE_IRQS_OFF | 715 | TRACE_IRQS_OFF |
725 | j io_work_loop | 716 | j io_return |
726 | 717 | ||
727 | /* | 718 | /* |
728 | * External interrupt handler routine | 719 | * External interrupt handler routine |
@@ -731,16 +722,18 @@ io_notify_resume: | |||
731 | ext_int_handler: | 722 | ext_int_handler: |
732 | stck __LC_INT_CLOCK | 723 | stck __LC_INT_CLOCK |
733 | stpt __LC_ASYNC_ENTER_TIMER | 724 | stpt __LC_ASYNC_ENTER_TIMER |
734 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 725 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 |
735 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 726 | CREATE_STACK_FRAME __LC_SAVE_AREA+40 |
736 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 727 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack |
728 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
737 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 729 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
738 | jz ext_no_vtime | 730 | jz ext_no_vtime |
739 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 731 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
740 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 732 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
741 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 733 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
734 | LAST_BREAK | ||
742 | ext_no_vtime: | 735 | ext_no_vtime: |
743 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 736 | HANDLE_SIE_INTERCEPT |
744 | TRACE_IRQS_OFF | 737 | TRACE_IRQS_OFF |
745 | la %r2,SP_PTREGS(%r15) # address of register-save area | 738 | la %r2,SP_PTREGS(%r15) # address of register-save area |
746 | llgh %r3,__LC_EXT_INT_CODE # get interruption code | 739 | llgh %r3,__LC_EXT_INT_CODE # get interruption code |
@@ -754,17 +747,18 @@ __critical_end: | |||
754 | */ | 747 | */ |
755 | .globl mcck_int_handler | 748 | .globl mcck_int_handler |
756 | mcck_int_handler: | 749 | mcck_int_handler: |
757 | stck __LC_INT_CLOCK | 750 | stck __LC_MCCK_CLOCK |
758 | la %r1,4095 # revalidate r1 | 751 | la %r1,4095 # revalidate r1 |
759 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer | 752 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer |
760 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs | 753 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs |
761 | SAVE_ALL_BASE __LC_SAVE_AREA+64 | 754 | stmg %r11,%r15,__LC_SAVE_AREA+80 |
755 | larl %r13,system_call | ||
756 | lg %r11,__LC_LAST_BREAK | ||
762 | la %r12,__LC_MCK_OLD_PSW | 757 | la %r12,__LC_MCK_OLD_PSW |
763 | tm __LC_MCCK_CODE,0x80 # system damage? | 758 | tm __LC_MCCK_CODE,0x80 # system damage? |
764 | jo mcck_int_main # yes -> rest of mcck code invalid | 759 | jo mcck_int_main # yes -> rest of mcck code invalid |
765 | la %r14,4095 | 760 | la %r14,4095 |
766 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER | 761 | mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) |
767 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) | ||
768 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 762 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
769 | jo 1f | 763 | jo 1f |
770 | la %r14,__LC_SYNC_ENTER_TIMER | 764 | la %r14,__LC_SYNC_ENTER_TIMER |
@@ -778,7 +772,7 @@ mcck_int_handler: | |||
778 | jl 0f | 772 | jl 0f |
779 | la %r14,__LC_LAST_UPDATE_TIMER | 773 | la %r14,__LC_LAST_UPDATE_TIMER |
780 | 0: spt 0(%r14) | 774 | 0: spt 0(%r14) |
781 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 775 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
782 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 776 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
783 | jno mcck_int_main # no -> skip cleanup critical | 777 | jno mcck_int_main # no -> skip cleanup critical |
784 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 778 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
@@ -794,16 +788,19 @@ mcck_int_main: | |||
794 | srag %r14,%r14,PAGE_SHIFT | 788 | srag %r14,%r14,PAGE_SHIFT |
795 | jz 0f | 789 | jz 0f |
796 | lg %r15,__LC_PANIC_STACK # load panic stack | 790 | lg %r15,__LC_PANIC_STACK # load panic stack |
797 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 | 791 | 0: aghi %r15,-SP_SIZE # make room for registers & psw |
792 | CREATE_STACK_FRAME __LC_SAVE_AREA+80 | ||
793 | mvc SP_PSW(16,%r15),0(%r12) | ||
794 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | ||
798 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | 795 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
799 | jno mcck_no_vtime # no -> no timer update | 796 | jno mcck_no_vtime # no -> no timer update |
800 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 797 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
801 | jz mcck_no_vtime | 798 | jz mcck_no_vtime |
802 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 799 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER |
803 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 800 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
804 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 801 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER |
802 | LAST_BREAK | ||
805 | mcck_no_vtime: | 803 | mcck_no_vtime: |
806 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
807 | la %r2,SP_PTREGS(%r15) # load pt_regs | 804 | la %r2,SP_PTREGS(%r15) # load pt_regs |
808 | brasl %r14,s390_do_machine_check | 805 | brasl %r14,s390_do_machine_check |
809 | tm SP_PSW+1(%r15),0x01 # returning to user ? | 806 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
@@ -814,8 +811,9 @@ mcck_no_vtime: | |||
814 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | 811 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
815 | lgr %r15,%r1 | 812 | lgr %r15,%r1 |
816 | stosm __SF_EMPTY(%r15),0x04 # turn dat on | 813 | stosm __SF_EMPTY(%r15),0x04 # turn dat on |
817 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING | 814 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
818 | jno mcck_return | 815 | jno mcck_return |
816 | HANDLE_SIE_INTERCEPT | ||
819 | TRACE_IRQS_OFF | 817 | TRACE_IRQS_OFF |
820 | brasl %r14,s390_handle_mcck | 818 | brasl %r14,s390_handle_mcck |
821 | TRACE_IRQS_ON | 819 | TRACE_IRQS_ON |
@@ -823,11 +821,11 @@ mcck_return: | |||
823 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW | 821 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW |
824 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 822 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
825 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 823 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
826 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 | ||
827 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 824 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
828 | jno 0f | 825 | jno 0f |
829 | stpt __LC_EXIT_TIMER | 826 | stpt __LC_EXIT_TIMER |
830 | 0: lpswe __LC_RETURN_MCCK_PSW # back to caller | 827 | 0: lpswe __LC_RETURN_MCCK_PSW # back to caller |
828 | mcck_done: | ||
831 | 829 | ||
832 | /* | 830 | /* |
833 | * Restart interruption handler, kick starter for additional CPUs | 831 | * Restart interruption handler, kick starter for additional CPUs |
@@ -883,14 +881,14 @@ stack_overflow: | |||
883 | lg %r15,__LC_PANIC_STACK # change to panic stack | 881 | lg %r15,__LC_PANIC_STACK # change to panic stack |
884 | aghi %r15,-SP_SIZE | 882 | aghi %r15,-SP_SIZE |
885 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | 883 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack |
886 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | 884 | stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack |
887 | la %r1,__LC_SAVE_AREA | 885 | la %r1,__LC_SAVE_AREA |
888 | chi %r12,__LC_SVC_OLD_PSW | 886 | chi %r12,__LC_SVC_OLD_PSW |
889 | je 0f | 887 | je 0f |
890 | chi %r12,__LC_PGM_OLD_PSW | 888 | chi %r12,__LC_PGM_OLD_PSW |
891 | je 0f | 889 | je 0f |
892 | la %r1,__LC_SAVE_AREA+32 | 890 | la %r1,__LC_SAVE_AREA+40 |
893 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack | 891 | 0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack |
894 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | 892 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK |
895 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain | 893 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain |
896 | la %r2,SP_PTREGS(%r15) # load pt_regs | 894 | la %r2,SP_PTREGS(%r15) # load pt_regs |
@@ -899,18 +897,14 @@ stack_overflow: | |||
899 | 897 | ||
900 | cleanup_table_system_call: | 898 | cleanup_table_system_call: |
901 | .quad system_call, sysc_do_svc | 899 | .quad system_call, sysc_do_svc |
902 | cleanup_table_sysc_return: | 900 | cleanup_table_sysc_tif: |
903 | .quad sysc_return, sysc_leave | 901 | .quad sysc_tif, sysc_restore |
904 | cleanup_table_sysc_leave: | 902 | cleanup_table_sysc_restore: |
905 | .quad sysc_leave, sysc_done | 903 | .quad sysc_restore, sysc_done |
906 | cleanup_table_sysc_work_loop: | 904 | cleanup_table_io_tif: |
907 | .quad sysc_work_loop, sysc_work_done | 905 | .quad io_tif, io_restore |
908 | cleanup_table_io_return: | 906 | cleanup_table_io_restore: |
909 | .quad io_return, io_leave | 907 | .quad io_restore, io_done |
910 | cleanup_table_io_leave: | ||
911 | .quad io_leave, io_done | ||
912 | cleanup_table_io_work_loop: | ||
913 | .quad io_work_loop, io_work_done | ||
914 | 908 | ||
915 | cleanup_critical: | 909 | cleanup_critical: |
916 | clc 8(8,%r12),BASED(cleanup_table_system_call) | 910 | clc 8(8,%r12),BASED(cleanup_table_system_call) |
@@ -918,61 +912,54 @@ cleanup_critical: | |||
918 | clc 8(8,%r12),BASED(cleanup_table_system_call+8) | 912 | clc 8(8,%r12),BASED(cleanup_table_system_call+8) |
919 | jl cleanup_system_call | 913 | jl cleanup_system_call |
920 | 0: | 914 | 0: |
921 | clc 8(8,%r12),BASED(cleanup_table_sysc_return) | 915 | clc 8(8,%r12),BASED(cleanup_table_sysc_tif) |
922 | jl 0f | ||
923 | clc 8(8,%r12),BASED(cleanup_table_sysc_return+8) | ||
924 | jl cleanup_sysc_return | ||
925 | 0: | ||
926 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave) | ||
927 | jl 0f | 916 | jl 0f |
928 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) | 917 | clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8) |
929 | jl cleanup_sysc_leave | 918 | jl cleanup_sysc_tif |
930 | 0: | 919 | 0: |
931 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) | 920 | clc 8(8,%r12),BASED(cleanup_table_sysc_restore) |
932 | jl 0f | 921 | jl 0f |
933 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) | 922 | clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8) |
934 | jl cleanup_sysc_return | 923 | jl cleanup_sysc_restore |
935 | 0: | 924 | 0: |
936 | clc 8(8,%r12),BASED(cleanup_table_io_return) | 925 | clc 8(8,%r12),BASED(cleanup_table_io_tif) |
937 | jl 0f | 926 | jl 0f |
938 | clc 8(8,%r12),BASED(cleanup_table_io_return+8) | 927 | clc 8(8,%r12),BASED(cleanup_table_io_tif+8) |
939 | jl cleanup_io_return | 928 | jl cleanup_io_tif |
940 | 0: | 929 | 0: |
941 | clc 8(8,%r12),BASED(cleanup_table_io_leave) | 930 | clc 8(8,%r12),BASED(cleanup_table_io_restore) |
942 | jl 0f | 931 | jl 0f |
943 | clc 8(8,%r12),BASED(cleanup_table_io_leave+8) | 932 | clc 8(8,%r12),BASED(cleanup_table_io_restore+8) |
944 | jl cleanup_io_leave | 933 | jl cleanup_io_restore |
945 | 0: | ||
946 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop) | ||
947 | jl 0f | ||
948 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) | ||
949 | jl cleanup_io_work_loop | ||
950 | 0: | 934 | 0: |
951 | br %r14 | 935 | br %r14 |
952 | 936 | ||
953 | cleanup_system_call: | 937 | cleanup_system_call: |
954 | mvc __LC_RETURN_PSW(16),0(%r12) | 938 | mvc __LC_RETURN_PSW(16),0(%r12) |
955 | cghi %r12,__LC_MCK_OLD_PSW | ||
956 | je 0f | ||
957 | la %r12,__LC_SAVE_AREA+32 | ||
958 | j 1f | ||
959 | 0: la %r12,__LC_SAVE_AREA+64 | ||
960 | 1: | ||
961 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | 939 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) |
962 | jh 0f | 940 | jh 0f |
941 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
942 | cghi %r12,__LC_MCK_OLD_PSW | ||
943 | je 0f | ||
963 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 944 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
945 | 0: cghi %r12,__LC_MCK_OLD_PSW | ||
946 | la %r12,__LC_SAVE_AREA+80 | ||
947 | je 0f | ||
948 | la %r12,__LC_SAVE_AREA+40 | ||
964 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | 949 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) |
965 | jhe cleanup_vtime | 950 | jhe cleanup_vtime |
966 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | 951 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) |
967 | jh 0f | 952 | jh 0f |
968 | mvc __LC_SAVE_AREA(32),0(%r12) | 953 | mvc __LC_SAVE_AREA(40),0(%r12) |
969 | 0: stg %r13,8(%r12) | 954 | 0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp |
970 | stg %r12,__LC_SAVE_AREA+96 # argh | 955 | aghi %r15,-SP_SIZE # make room for registers & psw |
971 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 956 | stg %r15,32(%r12) |
972 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 957 | stg %r11,0(%r12) |
973 | lg %r12,__LC_SAVE_AREA+96 # argh | 958 | CREATE_STACK_FRAME __LC_SAVE_AREA |
974 | stg %r15,24(%r12) | 959 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW |
975 | llgh %r7,__LC_SVC_INT_CODE | 960 | mvc SP_ILC(4,%r15),__LC_SVC_ILC |
961 | stg %r7,SP_ARGS(%r15) | ||
962 | mvc 8(8,%r12),__LC_THREAD_INFO | ||
976 | cleanup_vtime: | 963 | cleanup_vtime: |
977 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 964 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
978 | jhe cleanup_stime | 965 | jhe cleanup_stime |
@@ -983,7 +970,11 @@ cleanup_stime: | |||
983 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 970 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
984 | cleanup_update: | 971 | cleanup_update: |
985 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 972 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
986 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | 973 | srag %r12,%r11,23 |
974 | lg %r12,__LC_THREAD_INFO | ||
975 | jz 0f | ||
976 | stg %r11,__TI_last_break(%r12) | ||
977 | 0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | ||
987 | la %r12,__LC_RETURN_PSW | 978 | la %r12,__LC_RETURN_PSW |
988 | br %r14 | 979 | br %r14 |
989 | cleanup_system_call_insn: | 980 | cleanup_system_call_insn: |
@@ -993,61 +984,54 @@ cleanup_system_call_insn: | |||
993 | .quad sysc_stime | 984 | .quad sysc_stime |
994 | .quad sysc_update | 985 | .quad sysc_update |
995 | 986 | ||
996 | cleanup_sysc_return: | 987 | cleanup_sysc_tif: |
997 | mvc __LC_RETURN_PSW(8),0(%r12) | 988 | mvc __LC_RETURN_PSW(8),0(%r12) |
998 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) | 989 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif) |
999 | la %r12,__LC_RETURN_PSW | 990 | la %r12,__LC_RETURN_PSW |
1000 | br %r14 | 991 | br %r14 |
1001 | 992 | ||
1002 | cleanup_sysc_leave: | 993 | cleanup_sysc_restore: |
1003 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) | 994 | clc 8(8,%r12),BASED(cleanup_sysc_restore_insn) |
1004 | je 3f | 995 | je 2f |
1005 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) | 996 | clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8) |
1006 | jhe 0f | 997 | jhe 0f |
998 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
999 | cghi %r12,__LC_MCK_OLD_PSW | ||
1000 | je 0f | ||
1007 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1001 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1008 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 1002 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1009 | cghi %r12,__LC_MCK_OLD_PSW | 1003 | cghi %r12,__LC_MCK_OLD_PSW |
1010 | jne 1f | 1004 | la %r12,__LC_SAVE_AREA+80 |
1011 | mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) | 1005 | je 1f |
1012 | j 2f | 1006 | la %r12,__LC_SAVE_AREA+40 |
1013 | 1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) | 1007 | 1: mvc 0(40,%r12),SP_R11(%r15) |
1014 | 2: lmg %r0,%r11,SP_R0(%r15) | 1008 | lmg %r0,%r10,SP_R0(%r15) |
1015 | lg %r15,SP_R15(%r15) | 1009 | lg %r15,SP_R15(%r15) |
1016 | 3: la %r12,__LC_RETURN_PSW | 1010 | 2: la %r12,__LC_RETURN_PSW |
1017 | br %r14 | 1011 | br %r14 |
1018 | cleanup_sysc_leave_insn: | 1012 | cleanup_sysc_restore_insn: |
1019 | .quad sysc_done - 4 | 1013 | .quad sysc_done - 4 |
1020 | .quad sysc_done - 16 | 1014 | .quad sysc_done - 16 |
1021 | 1015 | ||
1022 | cleanup_io_return: | 1016 | cleanup_io_tif: |
1023 | mvc __LC_RETURN_PSW(8),0(%r12) | ||
1024 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return) | ||
1025 | la %r12,__LC_RETURN_PSW | ||
1026 | br %r14 | ||
1027 | |||
1028 | cleanup_io_work_loop: | ||
1029 | mvc __LC_RETURN_PSW(8),0(%r12) | 1017 | mvc __LC_RETURN_PSW(8),0(%r12) |
1030 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) | 1018 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif) |
1031 | la %r12,__LC_RETURN_PSW | 1019 | la %r12,__LC_RETURN_PSW |
1032 | br %r14 | 1020 | br %r14 |
1033 | 1021 | ||
1034 | cleanup_io_leave: | 1022 | cleanup_io_restore: |
1035 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) | 1023 | clc 8(8,%r12),BASED(cleanup_io_restore_insn) |
1036 | je 3f | 1024 | je 1f |
1037 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) | 1025 | clc 8(8,%r12),BASED(cleanup_io_restore_insn+8) |
1038 | jhe 0f | 1026 | jhe 0f |
1039 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1027 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER |
1040 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 1028 | 0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1041 | cghi %r12,__LC_MCK_OLD_PSW | 1029 | mvc __LC_SAVE_AREA+80(40),SP_R11(%r15) |
1042 | jne 1f | 1030 | lmg %r0,%r10,SP_R0(%r15) |
1043 | mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) | ||
1044 | j 2f | ||
1045 | 1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) | ||
1046 | 2: lmg %r0,%r11,SP_R0(%r15) | ||
1047 | lg %r15,SP_R15(%r15) | 1031 | lg %r15,SP_R15(%r15) |
1048 | 3: la %r12,__LC_RETURN_PSW | 1032 | 1: la %r12,__LC_RETURN_PSW |
1049 | br %r14 | 1033 | br %r14 |
1050 | cleanup_io_leave_insn: | 1034 | cleanup_io_restore_insn: |
1051 | .quad io_done - 4 | 1035 | .quad io_done - 4 |
1052 | .quad io_done - 16 | 1036 | .quad io_done - 16 |
1053 | 1037 | ||
@@ -1055,13 +1039,6 @@ cleanup_io_leave_insn: | |||
1055 | * Integer constants | 1039 | * Integer constants |
1056 | */ | 1040 | */ |
1057 | .align 4 | 1041 | .align 4 |
1058 | .Lconst: | ||
1059 | .Lnr_syscalls: .long NR_syscalls | ||
1060 | .L0x0130: .short 0x130 | ||
1061 | .L0x0140: .short 0x140 | ||
1062 | .L0x0150: .short 0x150 | ||
1063 | .L0x0160: .short 0x160 | ||
1064 | .L0x0170: .short 0x170 | ||
1065 | .Lcritical_start: | 1042 | .Lcritical_start: |
1066 | .quad __critical_start | 1043 | .quad __critical_start |
1067 | .Lcritical_end: | 1044 | .Lcritical_end: |
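
The reworked exit paths above funnel every pending-work check through sysc_work_tif and io_work_tif: the thread_info flag byte is tested in a fixed priority order (machine-check pending, reschedule, signals, notify-resume, plus restart and single-step on the svc path) and control branches back to sysc_return or io_return after each piece of work. A rough C sketch of that priority dispatch, with hypothetical flag values and stand-in actions rather than the real kernel definitions:

/*
 * Illustrative sketch only: mirrors the tm/jo priority order of
 * sysc_work_tif/io_work_tif above.  Bit positions are made up;
 * the real flags live in <asm/thread_info.h> and the real work is
 * done in entry64.S.
 */
#include <stdio.h>

#define TIF_MCCK_PENDING   (1u << 0)   /* hypothetical bit values */
#define TIF_NEED_RESCHED   (1u << 1)
#define TIF_SIGPENDING     (1u << 2)
#define TIF_NOTIFY_RESUME  (1u << 3)

static const char *dispatch_work(unsigned int tif)
{
	/* Highest priority first, as in the assembler sequence. */
	if (tif & TIF_MCCK_PENDING)
		return "handle pending machine check";
	if (tif & TIF_NEED_RESCHED)
		return "call schedule()";
	if (tif & TIF_SIGPENDING)
		return "call do_signal()";
	if (tif & TIF_NOTIFY_RESUME)
		return "call do_notify_resume()";
	return "no work left, restore registers and return";
}

int main(void)
{
	printf("%s\n", dispatch_work(TIF_NEED_RESCHED | TIF_SIGPENDING));
	return 0;
}

In the real code each handler jumps back to sysc_return/io_return, so lower-priority work that becomes pending while a higher-priority item is handled is still picked up before the final restore.
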
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 9d1f76702d47..51838ad42d56 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -328,8 +328,8 @@ iplstart: | |||
328 | # | 328 | # |
329 | # reset files in VM reader | 329 | # reset files in VM reader |
330 | # | 330 | # |
331 | stidp __LC_CPUID # store cpuid | 331 | stidp __LC_SAVE_AREA # store cpuid |
332 | tm __LC_CPUID,0xff # running VM ? | 332 | tm __LC_SAVE_AREA,0xff # running VM ? |
333 | bno .Lnoreset | 333 | bno .Lnoreset |
334 | la %r2,.Lreset | 334 | la %r2,.Lreset |
335 | lhi %r3,26 | 335 | lhi %r3,26 |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 015e27da40eb..ac151399ef34 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
255 | int umode; | 255 | int umode; |
256 | 256 | ||
257 | nmi_enter(); | 257 | nmi_enter(); |
258 | s390_idle_check(); | 258 | s390_idle_check(regs, S390_lowcore.mcck_clock, |
259 | S390_lowcore.mcck_enter_timer); | ||
259 | 260 | ||
260 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; | 261 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; |
261 | mcck = &__get_cpu_var(cpu_mcck); | 262 | mcck = &__get_cpu_var(cpu_mcck); |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 0729f36c2fe3..ecb2d02b02e4 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -18,24 +18,42 @@ | |||
18 | #include <asm/lowcore.h> | 18 | #include <asm/lowcore.h> |
19 | #include <asm/param.h> | 19 | #include <asm/param.h> |
20 | 20 | ||
21 | static DEFINE_PER_CPU(struct cpuid, cpu_id); | ||
22 | |||
23 | /* | ||
24 | * cpu_init - initializes state that is per-CPU. | ||
25 | */ | ||
26 | void __cpuinit cpu_init(void) | ||
27 | { | ||
28 | struct cpuid *id = &per_cpu(cpu_id, smp_processor_id()); | ||
29 | |||
30 | get_cpu_id(id); | ||
31 | atomic_inc(&init_mm.mm_count); | ||
32 | current->active_mm = &init_mm; | ||
33 | BUG_ON(current->mm); | ||
34 | enter_lazy_tlb(&init_mm, current); | ||
35 | } | ||
36 | |||
37 | /* | ||
38 | * print_cpu_info - print basic information about a cpu | ||
39 | */ | ||
21 | void __cpuinit print_cpu_info(void) | 40 | void __cpuinit print_cpu_info(void) |
22 | { | 41 | { |
42 | struct cpuid *id = &per_cpu(cpu_id, smp_processor_id()); | ||
43 | |||
23 | pr_info("Processor %d started, address %d, identification %06X\n", | 44 | pr_info("Processor %d started, address %d, identification %06X\n", |
24 | S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, | 45 | S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident); |
25 | S390_lowcore.cpu_id.ident); | ||
26 | } | 46 | } |
27 | 47 | ||
28 | /* | 48 | /* |
29 | * show_cpuinfo - Get information on one CPU for use by procfs. | 49 | * show_cpuinfo - Get information on one CPU for use by procfs. |
30 | */ | 50 | */ |
31 | |||
32 | static int show_cpuinfo(struct seq_file *m, void *v) | 51 | static int show_cpuinfo(struct seq_file *m, void *v) |
33 | { | 52 | { |
34 | static const char *hwcap_str[10] = { | 53 | static const char *hwcap_str[10] = { |
35 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", | 54 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", |
36 | "edat", "etf3eh", "highgprs" | 55 | "edat", "etf3eh", "highgprs" |
37 | }; | 56 | }; |
38 | struct _lowcore *lc; | ||
39 | unsigned long n = (unsigned long) v - 1; | 57 | unsigned long n = (unsigned long) v - 1; |
40 | int i; | 58 | int i; |
41 | 59 | ||
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
55 | } | 73 | } |
56 | 74 | ||
57 | if (cpu_online(n)) { | 75 | if (cpu_online(n)) { |
58 | #ifdef CONFIG_SMP | 76 | struct cpuid *id = &per_cpu(cpu_id, n); |
59 | lc = (smp_processor_id() == n) ? | ||
60 | &S390_lowcore : lowcore_ptr[n]; | ||
61 | #else | ||
62 | lc = &S390_lowcore; | ||
63 | #endif | ||
64 | seq_printf(m, "processor %li: " | 77 | seq_printf(m, "processor %li: " |
65 | "version = %02X, " | 78 | "version = %02X, " |
66 | "identification = %06X, " | 79 | "identification = %06X, " |
67 | "machine = %04X\n", | 80 | "machine = %04X\n", |
68 | n, lc->cpu_id.version, | 81 | n, id->version, id->ident, id->machine); |
69 | lc->cpu_id.ident, | ||
70 | lc->cpu_id.machine); | ||
71 | } | 82 | } |
72 | preempt_enable(); | 83 | preempt_enable(); |
73 | return 0; | 84 | return 0; |
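
With the cpuid now cached in a per-cpu variable by cpu_init(), show_cpuinfo() above prints version, identification and machine for each online cpu from that cache instead of dereferencing the remote lowcore. A minimal user-space sketch of parsing such a /proc/cpuinfo line (the sample values are made up):

/* Sketch: parse the per-cpu line emitted by show_cpuinfo() above. */
#include <stdio.h>

int main(void)
{
	const char *line =
		"processor 0: version = FF, identification = 123456, machine = 2097";
	long n;
	unsigned int version, ident, machine;

	if (sscanf(line, "processor %ld: version = %x, "
			 "identification = %x, machine = %x",
		   &n, &version, &ident, &machine) == 4)
		printf("cpu %ld: machine type %04X, id %06X\n",
		       n, machine, ident);
	return 0;
}
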
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 9f654da4cecc..83339d33c4b1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -57,6 +57,7 @@ | |||
57 | enum s390_regset { | 57 | enum s390_regset { |
58 | REGSET_GENERAL, | 58 | REGSET_GENERAL, |
59 | REGSET_FP, | 59 | REGSET_FP, |
60 | REGSET_LAST_BREAK, | ||
60 | REGSET_GENERAL_EXTENDED, | 61 | REGSET_GENERAL_EXTENDED, |
61 | }; | 62 | }; |
62 | 63 | ||
@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
381 | copied += sizeof(unsigned long); | 382 | copied += sizeof(unsigned long); |
382 | } | 383 | } |
383 | return 0; | 384 | return 0; |
385 | case PTRACE_GET_LAST_BREAK: | ||
386 | put_user(task_thread_info(child)->last_break, | ||
387 | (unsigned long __user *) data); | ||
388 | return 0; | ||
384 | default: | 389 | default: |
385 | /* Removing high order bit from addr (only for 31 bit). */ | 390 | /* Removing high order bit from addr (only for 31 bit). */ |
386 | addr &= PSW_ADDR_INSN; | 391 | addr &= PSW_ADDR_INSN; |
@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
633 | copied += sizeof(unsigned int); | 638 | copied += sizeof(unsigned int); |
634 | } | 639 | } |
635 | return 0; | 640 | return 0; |
641 | case PTRACE_GET_LAST_BREAK: | ||
642 | put_user(task_thread_info(child)->last_break, | ||
643 | (unsigned int __user *) data); | ||
644 | return 0; | ||
636 | } | 645 | } |
637 | return compat_ptrace_request(child, request, addr, data); | 646 | return compat_ptrace_request(child, request, addr, data); |
638 | } | 647 | } |
@@ -797,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target, | |||
797 | return rc; | 806 | return rc; |
798 | } | 807 | } |
799 | 808 | ||
809 | #ifdef CONFIG_64BIT | ||
810 | |||
811 | static int s390_last_break_get(struct task_struct *target, | ||
812 | const struct user_regset *regset, | ||
813 | unsigned int pos, unsigned int count, | ||
814 | void *kbuf, void __user *ubuf) | ||
815 | { | ||
816 | if (count > 0) { | ||
817 | if (kbuf) { | ||
818 | unsigned long *k = kbuf; | ||
819 | *k = task_thread_info(target)->last_break; | ||
820 | } else { | ||
821 | unsigned long __user *u = ubuf; | ||
822 | if (__put_user(task_thread_info(target)->last_break, u)) | ||
823 | return -EFAULT; | ||
824 | } | ||
825 | } | ||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | #endif | ||
830 | |||
800 | static const struct user_regset s390_regsets[] = { | 831 | static const struct user_regset s390_regsets[] = { |
801 | [REGSET_GENERAL] = { | 832 | [REGSET_GENERAL] = { |
802 | .core_note_type = NT_PRSTATUS, | 833 | .core_note_type = NT_PRSTATUS, |
@@ -814,6 +845,15 @@ static const struct user_regset s390_regsets[] = { | |||
814 | .get = s390_fpregs_get, | 845 | .get = s390_fpregs_get, |
815 | .set = s390_fpregs_set, | 846 | .set = s390_fpregs_set, |
816 | }, | 847 | }, |
848 | #ifdef CONFIG_64BIT | ||
849 | [REGSET_LAST_BREAK] = { | ||
850 | .core_note_type = NT_S390_LAST_BREAK, | ||
851 | .n = 1, | ||
852 | .size = sizeof(long), | ||
853 | .align = sizeof(long), | ||
854 | .get = s390_last_break_get, | ||
855 | }, | ||
856 | #endif | ||
817 | }; | 857 | }; |
818 | 858 | ||
819 | static const struct user_regset_view user_s390_view = { | 859 | static const struct user_regset_view user_s390_view = { |
@@ -948,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target, | |||
948 | return rc; | 988 | return rc; |
949 | } | 989 | } |
950 | 990 | ||
991 | static int s390_compat_last_break_get(struct task_struct *target, | ||
992 | const struct user_regset *regset, | ||
993 | unsigned int pos, unsigned int count, | ||
994 | void *kbuf, void __user *ubuf) | ||
995 | { | ||
996 | compat_ulong_t last_break; | ||
997 | |||
998 | if (count > 0) { | ||
999 | last_break = task_thread_info(target)->last_break; | ||
1000 | if (kbuf) { | ||
1001 | unsigned long *k = kbuf; | ||
1002 | *k = last_break; | ||
1003 | } else { | ||
1004 | unsigned long __user *u = ubuf; | ||
1005 | if (__put_user(last_break, u)) | ||
1006 | return -EFAULT; | ||
1007 | } | ||
1008 | } | ||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
951 | static const struct user_regset s390_compat_regsets[] = { | 1012 | static const struct user_regset s390_compat_regsets[] = { |
952 | [REGSET_GENERAL] = { | 1013 | [REGSET_GENERAL] = { |
953 | .core_note_type = NT_PRSTATUS, | 1014 | .core_note_type = NT_PRSTATUS, |
@@ -965,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = { | |||
965 | .get = s390_fpregs_get, | 1026 | .get = s390_fpregs_get, |
966 | .set = s390_fpregs_set, | 1027 | .set = s390_fpregs_set, |
967 | }, | 1028 | }, |
1029 | [REGSET_LAST_BREAK] = { | ||
1030 | .core_note_type = NT_S390_LAST_BREAK, | ||
1031 | .n = 1, | ||
1032 | .size = sizeof(long), | ||
1033 | .align = sizeof(long), | ||
1034 | .get = s390_compat_last_break_get, | ||
1035 | }, | ||
968 | [REGSET_GENERAL_EXTENDED] = { | 1036 | [REGSET_GENERAL_EXTENDED] = { |
969 | .core_note_type = NT_S390_HIGH_GPRS, | 1037 | .core_note_type = NT_S390_HIGH_GPRS, |
970 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), | 1038 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), |
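
The new PTRACE_GET_LAST_BREAK request above copies the traced task's last breaking-event address to the user buffer passed as the ptrace data argument (via put_user()), and the same value is exported as the NT_S390_LAST_BREAK regset. A hedged user-space sketch of querying a stopped tracee; the fallback request value below is an assumption, the real definition lives in the s390 ptrace headers:

/*
 * Sketch only: query the last breaking-event address of a stopped
 * tracee.  The numeric fallback for PTRACE_GET_LAST_BREAK is an
 * assumption; normally the constant comes from <asm/ptrace.h>.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_GET_LAST_BREAK
#define PTRACE_GET_LAST_BREAK 0x5006	/* assumed value, see asm/ptrace.h */
#endif

int main(void)
{
	unsigned long last_break = 0;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* stop so the parent can query us */
		_exit(0);
	}
	waitpid(child, NULL, 0);		/* wait for the SIGSTOP */

	/* the data argument is the buffer the kernel fills via put_user() */
	if (ptrace(PTRACE_GET_LAST_BREAK, child, NULL, &last_break) == 0)
		printf("last breaking-event address: %#lx\n", last_break);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

The 31-bit compat path above stores the same value through an unsigned int pointer instead.
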
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index 59618bcd99b7..9ce641b5291f 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c | |||
@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) | |||
120 | struct pt_regs *old_regs; | 120 | struct pt_regs *old_regs; |
121 | 121 | ||
122 | old_regs = set_irq_regs(regs); | 122 | old_regs = set_irq_regs(regs); |
123 | s390_idle_check(); | 123 | s390_idle_check(regs, S390_lowcore.int_clock, |
124 | S390_lowcore.async_enter_timer); | ||
124 | irq_enter(); | 125 | irq_enter(); |
125 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | 126 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) |
126 | /* Serve timer interrupts first. */ | 127 | /* Serve timer interrupts first. */ |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 91625f759ccd..7d893248d265 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/s390/kernel/setup.c | 2 | * arch/s390/kernel/setup.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) IBM Corp. 1999,2010 |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | 6 | * Author(s): Hartmut Penner (hp@de.ibm.com), |
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
8 | * | 8 | * |
@@ -113,22 +113,6 @@ static struct resource data_resource = { | |||
113 | }; | 113 | }; |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * cpu_init() initializes state that is per-CPU. | ||
117 | */ | ||
118 | void __cpuinit cpu_init(void) | ||
119 | { | ||
120 | /* | ||
121 | * Store processor id in lowcore (used e.g. in timer_interrupt) | ||
122 | */ | ||
123 | get_cpu_id(&S390_lowcore.cpu_id); | ||
124 | |||
125 | atomic_inc(&init_mm.mm_count); | ||
126 | current->active_mm = &init_mm; | ||
127 | BUG_ON(current->mm); | ||
128 | enter_lazy_tlb(&init_mm, current); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * condev= and conmode= setup parameter. | 116 | * condev= and conmode= setup parameter. |
133 | */ | 117 | */ |
134 | 118 | ||
@@ -385,10 +369,6 @@ static void setup_addressing_mode(void) | |||
385 | pr_info("Address spaces switched, " | 369 | pr_info("Address spaces switched, " |
386 | "mvcos not available\n"); | 370 | "mvcos not available\n"); |
387 | } | 371 | } |
388 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
389 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | ||
390 | io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | ||
391 | #endif | ||
392 | } | 372 | } |
393 | 373 | ||
394 | static void __init | 374 | static void __init |
@@ -421,6 +401,7 @@ setup_lowcore(void) | |||
421 | lc->io_new_psw.mask = psw_kernel_bits; | 401 | lc->io_new_psw.mask = psw_kernel_bits; |
422 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 402 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
423 | lc->clock_comparator = -1ULL; | 403 | lc->clock_comparator = -1ULL; |
404 | lc->cmf_hpp = -1ULL; | ||
424 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 405 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
425 | lc->async_stack = (unsigned long) | 406 | lc->async_stack = (unsigned long) |
426 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | 407 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; |
@@ -695,6 +676,7 @@ static void __init setup_hwcaps(void) | |||
695 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; | 676 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; |
696 | unsigned long long facility_list_extended; | 677 | unsigned long long facility_list_extended; |
697 | unsigned int facility_list; | 678 | unsigned int facility_list; |
679 | struct cpuid cpu_id; | ||
698 | int i; | 680 | int i; |
699 | 681 | ||
700 | facility_list = stfl(); | 682 | facility_list = stfl(); |
@@ -756,7 +738,8 @@ static void __init setup_hwcaps(void) | |||
756 | */ | 738 | */ |
757 | elf_hwcap |= HWCAP_S390_HIGH_GPRS; | 739 | elf_hwcap |= HWCAP_S390_HIGH_GPRS; |
758 | 740 | ||
759 | switch (S390_lowcore.cpu_id.machine) { | 741 | get_cpu_id(&cpu_id); |
742 | switch (cpu_id.machine) { | ||
760 | case 0x9672: | 743 | case 0x9672: |
761 | #if !defined(CONFIG_64BIT) | 744 | #if !defined(CONFIG_64BIT) |
762 | default: /* Use "g5" as default for 31 bit kernels. */ | 745 | default: /* Use "g5" as default for 31 bit kernels. */ |
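The setup_hwcaps() hunk above reads the CPU id with get_cpu_id() into a local struct instead of relying on the copy in lowcore; the HWCAP bits and ELF platform string assembled there are what user space later sees in the auxiliary vector. A minimal user-space sketch of that consumer side follows; it is an illustration only, and the HWCAP_S390_HIGH_GPRS value (512) is an assumption taken from the kernel headers of this period rather than from a system header:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_S390_HIGH_GPRS 512UL   /* assumed bit value, see note above */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* AT_PLATFORM carries the "g5"/"z990"/... string chosen in setup_hwcaps() */
        printf("platform:  %s\n", (const char *) getauxval(AT_PLATFORM));
        printf("high gprs: %s\n",
               (hwcap & HWCAP_S390_HIGH_GPRS) ? "yes" : "no");
        return 0;
    }

On hardware (or kernels) without the facility the bit simply stays clear, so the program is safe to run anywhere.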
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6289945562b0..ee7ac8b11782 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
313 | To avoid breaking binary compatibility, they are passed as args. */ | 313 | To avoid breaking binary compatibility, they are passed as args. */ |
314 | regs->gprs[4] = current->thread.trap_no; | 314 | regs->gprs[4] = current->thread.trap_no; |
315 | regs->gprs[5] = current->thread.prot_addr; | 315 | regs->gprs[5] = current->thread.prot_addr; |
316 | regs->gprs[6] = task_thread_info(current)->last_break; | ||
316 | 317 | ||
317 | /* Place signal number on stack to allow backtrace from handler. */ | 318 | /* Place signal number on stack to allow backtrace from handler. */ |
318 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | 319 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) |
@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
376 | regs->gprs[2] = map_signal(sig); | 377 | regs->gprs[2] = map_signal(sig); |
377 | regs->gprs[3] = (unsigned long) &frame->info; | 378 | regs->gprs[3] = (unsigned long) &frame->info; |
378 | regs->gprs[4] = (unsigned long) &frame->uc; | 379 | regs->gprs[4] = (unsigned long) &frame->uc; |
380 | regs->gprs[5] = task_thread_info(current)->last_break; | ||
379 | return 0; | 381 | return 0; |
380 | 382 | ||
381 | give_sigsegv: | 383 | give_sigsegv: |
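Both hunks forward the breaking-event address recorded in thread_info->last_break to the signal frame: gpr 6 for the old-style frame, gpr 5 for the rt frame. Because the s390 C ABI passes the first integer arguments in r2-r6, a handler that declares one extra parameter can observe the value. The sketch below relies on exactly that; it is s390-specific and deliberately non-portable, and the extra parameter plus the function-pointer cast are illustration, not a documented interface:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Fourth parameter: with the hunk above, the kernel leaves the
     * breaking-event address in the next argument register when it
     * builds the rt signal frame, so an s390 handler declared this way
     * can read it. */
    static void handler(int sig, siginfo_t *info, void *uc,
                        unsigned long last_break)
    {
        char buf[80];
        int n;

        (void) info;
        (void) uc;
        n = snprintf(buf, sizeof(buf),
                     "signal %d, breaking-event address 0x%lx\n",
                     sig, last_break);
        write(STDERR_FILENO, buf, n);   /* async-signal-safe output */
        _exit(0);
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_flags = SA_SIGINFO;
        sa.sa_sigaction = (void (*)(int, siginfo_t *, void *)) handler;
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *) 0 = 0;        /* fault on purpose */
        return 0;
    }

On other architectures the extra parameter is simply whatever happens to be in the corresponding register, so treat this strictly as a demonstration of the ABI side effect.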
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 247b4c2d1e51..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu { | |||
37 | }; | 37 | }; |
38 | 38 | ||
39 | struct tl_container { | 39 | struct tl_container { |
40 | unsigned char reserved[8]; | 40 | unsigned char reserved[7]; |
41 | unsigned char id; | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | union tl_entry { | 44 | union tl_entry { |
@@ -58,6 +59,7 @@ struct tl_info { | |||
58 | 59 | ||
59 | struct core_info { | 60 | struct core_info { |
60 | struct core_info *next; | 61 | struct core_info *next; |
62 | unsigned char id; | ||
61 | cpumask_t mask; | 63 | cpumask_t mask; |
62 | }; | 64 | }; |
63 | 65 | ||
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn); | |||
73 | static DEFINE_SPINLOCK(topology_lock); | 75 | static DEFINE_SPINLOCK(topology_lock); |
74 | 76 | ||
75 | cpumask_t cpu_core_map[NR_CPUS]; | 77 | cpumask_t cpu_core_map[NR_CPUS]; |
78 | unsigned char cpu_core_id[NR_CPUS]; | ||
76 | 79 | ||
77 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | 80 | static cpumask_t cpu_coregroup_map(unsigned int cpu) |
78 | { | 81 | { |
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | |||
116 | for_each_present_cpu(lcpu) { | 119 | for_each_present_cpu(lcpu) { |
117 | if (cpu_logical_map(lcpu) == rcpu) { | 120 | if (cpu_logical_map(lcpu) == rcpu) { |
118 | cpu_set(lcpu, core->mask); | 121 | cpu_set(lcpu, core->mask); |
122 | cpu_core_id[lcpu] = core->id; | ||
119 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | 123 | smp_cpu_polarization[lcpu] = tl_cpu->pp; |
120 | } | 124 | } |
121 | } | 125 | } |
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info) | |||
158 | break; | 162 | break; |
159 | case 1: | 163 | case 1: |
160 | core = core->next; | 164 | core = core->next; |
165 | core->id = tle->container.id; | ||
161 | break; | 166 | break; |
162 | case 0: | 167 | case 0: |
163 | add_cpus_to_core(&tle->cpu, core); | 168 | add_cpus_to_core(&tle->cpu, core); |
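The new cpu_core_id[] array records, per logical CPU, the container id delivered in the topology information block. The natural way for this to surface is through the generic per-CPU topology attributes in sysfs; assuming it is wired up there (which the "topology: expose core identifier" commit in this merge suggests), a small reader looks like this:

    #include <stdio.h>

    int main(void)
    {
        char path[128];
        int cpu, core_id;
        FILE *f;

        for (cpu = 0; cpu < 16; cpu++) {
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
            f = fopen(path, "r");
            if (!f)
                continue;               /* CPU not present or not online */
            if (fscanf(f, "%d", &core_id) == 1)
                printf("cpu%d: core_id %d\n", cpu, core_id);
            fclose(f);
        }
        return 0;
    }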
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6e7ad63854c0..5d8f0f3d0250 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -46,13 +46,7 @@ | |||
46 | 46 | ||
47 | pgm_check_handler_t *pgm_check_table[128]; | 47 | pgm_check_handler_t *pgm_check_table[128]; |
48 | 48 | ||
49 | #ifdef CONFIG_SYSCTL | 49 | int show_unhandled_signals; |
50 | #ifdef CONFIG_PROCESS_DEBUG | ||
51 | int sysctl_userprocess_debug = 1; | ||
52 | #else | ||
53 | int sysctl_userprocess_debug = 0; | ||
54 | #endif | ||
55 | #endif | ||
56 | 50 | ||
57 | extern pgm_check_handler_t do_protection_exception; | 51 | extern pgm_check_handler_t do_protection_exception; |
58 | extern pgm_check_handler_t do_dat_exception; | 52 | extern pgm_check_handler_t do_dat_exception; |
@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
315 | do_exit(SIGSEGV); | 309 | do_exit(SIGSEGV); |
316 | } | 310 | } |
317 | 311 | ||
318 | static void inline | 312 | static void inline report_user_fault(struct pt_regs *regs, long int_code, |
319 | report_user_fault(long interruption_code, struct pt_regs *regs) | 313 | int signr) |
320 | { | 314 | { |
321 | #if defined(CONFIG_SYSCTL) | 315 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) |
322 | if (!sysctl_userprocess_debug) | ||
323 | return; | 316 | return; |
324 | #endif | 317 | if (!unhandled_signal(current, signr)) |
325 | #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) | 318 | return; |
326 | printk("User process fault: interruption code 0x%lX\n", | 319 | if (!printk_ratelimit()) |
327 | interruption_code); | 320 | return; |
321 | printk("User process fault: interruption code 0x%lX ", int_code); | ||
322 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); | ||
323 | printk("\n"); | ||
328 | show_regs(regs); | 324 | show_regs(regs); |
329 | #endif | ||
330 | } | 325 | } |
331 | 326 | ||
332 | int is_valid_bugaddr(unsigned long addr) | 327 | int is_valid_bugaddr(unsigned long addr) |
@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr, | |||
354 | 349 | ||
355 | tsk->thread.trap_no = interruption_code & 0xffff; | 350 | tsk->thread.trap_no = interruption_code & 0xffff; |
356 | force_sig_info(signr, info, tsk); | 351 | force_sig_info(signr, info, tsk); |
357 | report_user_fault(interruption_code, regs); | 352 | report_user_fault(regs, interruption_code, signr); |
358 | } else { | 353 | } else { |
359 | const struct exception_table_entry *fixup; | 354 | const struct exception_table_entry *fixup; |
360 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 355 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); |
@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code) | |||
390 | { | 385 | { |
391 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 386 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
392 | local_irq_enable(); | 387 | local_irq_enable(); |
388 | report_user_fault(regs, interruption_code, SIGSEGV); | ||
393 | do_exit(SIGSEGV); | 389 | do_exit(SIGSEGV); |
394 | report_user_fault(interruption_code, regs); | ||
395 | } else | 390 | } else |
396 | die("Unknown program exception", regs, interruption_code); | 391 | die("Unknown program exception", regs, interruption_code); |
397 | } | 392 | } |
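The private sysctl_userprocess_debug knob is gone: user-fault reporting is now gated by the generic show_unhandled_signals flag, skipped for signals the task handles itself, and rate-limited. Assuming the flag is exposed through the usual debug.exception-trace sysctl (the "debug: enable exception-trace debug facility" commit in this merge points that way), it can be toggled at run time, e.g.:

    #include <stdio.h>

    int main(int argc, char **argv)
    {
        /* Path is an assumption: the generic show_unhandled_signals sysctl. */
        FILE *f = fopen("/proc/sys/debug/exception-trace", "w");

        if (!f) {
            perror("exception-trace");
            return 1;
        }
        fputs(argc > 1 ? argv[1] : "1", f);  /* "1" enables the reports */
        fclose(f);
        return 0;
    }

This is equivalent to setting debug.exception-trace with the sysctl utility; the default value is whatever the architecture initializes show_unhandled_signals to.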
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6bc9c197aa91..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -102,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd) | |||
102 | /* | 102 | /* |
103 | * Allocate/free per cpu vdso data. | 103 | * Allocate/free per cpu vdso data. |
104 | */ | 104 | */ |
105 | #ifdef CONFIG_64BIT | ||
106 | #define SEGMENT_ORDER 2 | 105 | #define SEGMENT_ORDER 2 |
107 | #else | ||
108 | #define SEGMENT_ORDER 1 | ||
109 | #endif | ||
110 | 106 | ||
111 | int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) | 107 | int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) |
112 | { | 108 | { |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b59a812a010e..3479f1b0d4e0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk) | |||
121 | } | 121 | } |
122 | EXPORT_SYMBOL_GPL(account_system_vtime); | 122 | EXPORT_SYMBOL_GPL(account_system_vtime); |
123 | 123 | ||
124 | void vtime_start_cpu(void) | 124 | void vtime_start_cpu(__u64 int_clock, __u64 enter_timer) |
125 | { | 125 | { |
126 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); | 126 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); |
127 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); | 127 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); |
128 | __u64 idle_time, expires; | 128 | __u64 idle_time, expires; |
129 | 129 | ||
130 | if (idle->idle_enter == 0ULL) | ||
131 | return; | ||
132 | |||
130 | /* Account time spent with enabled wait psw loaded as idle time. */ | 133 | /* Account time spent with enabled wait psw loaded as idle time. */ |
131 | idle_time = S390_lowcore.int_clock - idle->idle_enter; | 134 | idle_time = int_clock - idle->idle_enter; |
132 | account_idle_time(idle_time); | 135 | account_idle_time(idle_time); |
133 | S390_lowcore.steal_timer += | 136 | S390_lowcore.steal_timer += |
134 | idle->idle_enter - S390_lowcore.last_update_clock; | 137 | idle->idle_enter - S390_lowcore.last_update_clock; |
135 | S390_lowcore.last_update_clock = S390_lowcore.int_clock; | 138 | S390_lowcore.last_update_clock = int_clock; |
136 | 139 | ||
137 | /* Account system time spent going idle. */ | 140 | /* Account system time spent going idle. */ |
138 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; | 141 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; |
139 | S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer; | 142 | S390_lowcore.last_update_timer = enter_timer; |
140 | 143 | ||
141 | /* Restart vtime CPU timer */ | 144 | /* Restart vtime CPU timer */ |
142 | if (vq->do_spt) { | 145 | if (vq->do_spt) { |
143 | /* Program old expire value but first save progress. */ | 146 | /* Program old expire value but first save progress. */ |
144 | expires = vq->idle - S390_lowcore.async_enter_timer; | 147 | expires = vq->idle - enter_timer; |
145 | expires += get_vtimer(); | 148 | expires += get_vtimer(); |
146 | set_vtimer(expires); | 149 | set_vtimer(expires); |
147 | } else { | 150 | } else { |
148 | /* Don't account the CPU timer delta while the cpu was idle. */ | 151 | /* Don't account the CPU timer delta while the cpu was idle. */ |
149 | vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; | 152 | vq->elapsed -= vq->idle - enter_timer; |
150 | } | 153 | } |
151 | 154 | ||
152 | idle->sequence++; | 155 | idle->sequence++; |
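vtime_start_cpu() now receives the interrupt clock and the timer value at kernel entry as explicit parameters instead of reading them from lowcore, and it returns early when the CPU never actually entered an enabled wait. The arithmetic itself is unchanged; a stand-alone sketch with purely local names (not kernel symbols) makes the idle/steal split explicit:

    #include <stdio.h>

    typedef unsigned long long u64;

    struct accounting {
        u64 idle_time;      /* wall-clock time spent in the enabled wait */
        u64 steal_time;     /* remainder between last update and idle entry */
    };

    /* Mirrors the calculation in the hunk above: int_clock is the clock at
     * the interrupt that ended the wait, idle_enter the clock when the wait
     * started, last_update the clock of the previous accounting pass. */
    static void account_wakeup(struct accounting *acc, u64 int_clock,
                               u64 idle_enter, u64 last_update)
    {
        if (idle_enter == 0ULL)         /* CPU never really went idle */
            return;
        acc->idle_time  += int_clock - idle_enter;
        acc->steal_time += idle_enter - last_update;
    }

    int main(void)
    {
        struct accounting acc = { 0, 0 };

        account_wakeup(&acc, 2000, 1200, 1000);
        printf("idle %llu, steal %llu\n", acc.idle_time, acc.steal_time);
        return 0;
    }

With the example values this charges 800 clock units as idle and 200 as steal, matching the two assignments in the diff.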