Diffstat (limited to 'arch/s390/kernel')
36 files changed, 1629 insertions, 369 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index df3732249baa..dd4f07640919 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) | |||
48 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 48 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
49 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 49 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
50 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | 50 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o |
51 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | ||
51 | 52 | ||
52 | # Kexec part | 53 | # Kexec part |
53 | S390_KEXEC_OBJS := machine_kexec.o crash.o | 54 | S390_KEXEC_OBJS := machine_kexec.o crash.o |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2b45591e1582..751318765e2e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,8 +45,7 @@ int main(void) | |||
45 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); | 45 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); |
46 | DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); | 46 | DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); |
47 | DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); | 47 | DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); |
48 | DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc)); | 48 | DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code)); |
49 | DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr)); | ||
50 | DEFINE(__PT_SIZE, sizeof(struct pt_regs)); | 49 | DEFINE(__PT_SIZE, sizeof(struct pt_regs)); |
51 | BLANK(); | 50 | BLANK(); |
52 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); | 51 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); |
@@ -141,7 +140,6 @@ int main(void) | |||
141 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); | 140 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); |
142 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); | 141 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); |
143 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); | 142 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); |
144 | DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64)); | ||
145 | #ifdef CONFIG_32BIT | 143 | #ifdef CONFIG_32BIT |
146 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | 144 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); |
147 | #else /* CONFIG_32BIT */ | 145 | #else /* CONFIG_32BIT */ |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 255435663bf8..f8828d38fa6e 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -86,6 +86,8 @@ s390_base_pgm_handler_fn: | |||
86 | ENTRY(diag308_reset) | 86 | ENTRY(diag308_reset) |
87 | larl %r4,.Lctlregs # Save control registers | 87 | larl %r4,.Lctlregs # Save control registers |
88 | stctg %c0,%c15,0(%r4) | 88 | stctg %c0,%c15,0(%r4) |
89 | larl %r4,.Lfpctl # Floating point control register | ||
90 | stfpc 0(%r4) | ||
89 | larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 | 91 | larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 |
90 | lghi %r3,0 | 92 | lghi %r3,0 |
91 | lg %r4,0(%r4) # Save PSW | 93 | lg %r4,0(%r4) # Save PSW |
@@ -99,6 +101,8 @@ ENTRY(diag308_reset) | |||
99 | sam64 # Switch to 64 bit addressing mode | 101 | sam64 # Switch to 64 bit addressing mode |
100 | larl %r4,.Lctlregs # Restore control registers | 102 | larl %r4,.Lctlregs # Restore control registers |
101 | lctlg %c0,%c15,0(%r4) | 103 | lctlg %c0,%c15,0(%r4) |
104 | larl %r4,.Lfpctl # Restore floating point ctl register | ||
105 | lfpc 0(%r4) | ||
102 | br %r14 | 106 | br %r14 |
103 | .align 16 | 107 | .align 16 |
104 | .Lrestart_psw: | 108 | .Lrestart_psw: |
@@ -110,6 +114,8 @@ ENTRY(diag308_reset) | |||
110 | .rept 16 | 114 | .rept 16 |
111 | .quad 0 | 115 | .quad 0 |
112 | .endr | 116 | .endr |
117 | .Lfpctl: | ||
118 | .long 0 | ||
113 | .previous | 119 | .previous |
114 | 120 | ||
115 | #else /* CONFIG_64BIT */ | 121 | #else /* CONFIG_64BIT */ |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 53acaa86dd94..84a982898448 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -60,12 +60,9 @@ | |||
60 | 60 | ||
61 | #include "compat_linux.h" | 61 | #include "compat_linux.h" |
62 | 62 | ||
63 | long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 63 | u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | |
64 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 64 | PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK | |
65 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 65 | PSW32_MASK_PSTATE | PSW32_ASC_HOME; |
66 | long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | | ||
67 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
68 | PSW32_MASK_PSTATE); | ||
69 | 66 | ||
70 | /* For this source file, we want overflow handling. */ | 67 | /* For this source file, we want overflow handling. */ |
71 | 68 | ||
@@ -365,12 +362,7 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
365 | if (set) { | 362 | if (set) { |
366 | if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) | 363 | if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) |
367 | return -EFAULT; | 364 | return -EFAULT; |
368 | switch (_NSIG_WORDS) { | 365 | s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); |
369 | case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); | ||
370 | case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); | ||
371 | case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); | ||
372 | case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); | ||
373 | } | ||
374 | } | 366 | } |
375 | set_fs (KERNEL_DS); | 367 | set_fs (KERNEL_DS); |
376 | ret = sys_rt_sigprocmask(how, | 368 | ret = sys_rt_sigprocmask(how, |
@@ -380,12 +372,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
380 | set_fs (old_fs); | 372 | set_fs (old_fs); |
381 | if (ret) return ret; | 373 | if (ret) return ret; |
382 | if (oset) { | 374 | if (oset) { |
383 | switch (_NSIG_WORDS) { | 375 | s32.sig[1] = (s.sig[0] >> 32); |
384 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | 376 | s32.sig[0] = s.sig[0]; |
385 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
386 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
387 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
388 | } | ||
389 | if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) | 377 | if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) |
390 | return -EFAULT; | 378 | return -EFAULT; |
391 | } | 379 | } |
@@ -404,12 +392,8 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, | |||
404 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); | 392 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); |
405 | set_fs (old_fs); | 393 | set_fs (old_fs); |
406 | if (!ret) { | 394 | if (!ret) { |
407 | switch (_NSIG_WORDS) { | 395 | s32.sig[1] = (s.sig[0] >> 32); |
408 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | 396 | s32.sig[0] = s.sig[0]; |
409 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
410 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
411 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
412 | } | ||
413 | if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) | 397 | if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) |
414 | return -EFAULT; | 398 | return -EFAULT; |
415 | } | 399 | } |
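
The compat_linux.c hunks above all make the same simplification: on 64-bit s390 the kernel sigset_t is a single 64-bit word (_NSIG_WORDS == 1), while a 31-bit compat_sigset_t carries two 32-bit words, so the old _NSIG_WORDS switch with its fall-through cases reduces to one pack/unpack step. A minimal standalone sketch of that conversion (plain C with hypothetical struct names, not the kernel's own types):

    #include <stdint.h>

    /* Hypothetical stand-ins for the kernel's compat and native sigset types. */
    struct compat_sigset { uint32_t sig[2]; };  /* two 32-bit words (31-bit ABI) */
    struct native_sigset { uint64_t sig[1]; };  /* one 64-bit word (_NSIG_WORDS == 1) */

    /* compat -> native: first word is the low half, second word the high half,
     * matching "s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32)" in the patch. */
    static void sigset_from_compat(struct native_sigset *d, const struct compat_sigset *s)
    {
            d->sig[0] = (uint64_t)s->sig[0] | ((uint64_t)s->sig[1] << 32);
    }

    /* native -> compat: split the 64-bit word back into its two 32-bit halves. */
    static void sigset_to_compat(struct compat_sigset *d, const struct native_sigset *s)
    {
            d->sig[0] = (uint32_t)s->sig[0];
            d->sig[1] = (uint32_t)(s->sig[0] >> 32);
    }

With only one native word to fill, the switch over _NSIG_WORDS was dead weight; the direct pack/unpack is what the replacement lines in the patch perform inline.
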
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a9a285b8c4ad..4f68c81d3ffa 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -141,7 +141,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
141 | break; | 141 | break; |
142 | case __SI_FAULT >> 16: | 142 | case __SI_FAULT >> 16: |
143 | err |= __get_user(tmp, &from->si_addr); | 143 | err |= __get_user(tmp, &from->si_addr); |
144 | to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN); | 144 | to->si_addr = (void __force __user *) |
145 | (u64) (tmp & PSW32_ADDR_INSN); | ||
145 | break; | 146 | break; |
146 | case __SI_POLL >> 16: | 147 | case __SI_POLL >> 16: |
147 | err |= __get_user(to->si_band, &from->si_band); | 148 | err |= __get_user(to->si_band, &from->si_band); |
@@ -213,16 +214,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
213 | ret = get_user(sa_handler, &act->sa_handler); | 214 | ret = get_user(sa_handler, &act->sa_handler); |
214 | ret |= __copy_from_user(&set32, &act->sa_mask, | 215 | ret |= __copy_from_user(&set32, &act->sa_mask, |
215 | sizeof(compat_sigset_t)); | 216 | sizeof(compat_sigset_t)); |
216 | switch (_NSIG_WORDS) { | 217 | new_ka.sa.sa_mask.sig[0] = |
217 | case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | 218 | set32.sig[0] | (((long)set32.sig[1]) << 32); |
218 | | (((long)set32.sig[7]) << 32); | ||
219 | case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | ||
220 | | (((long)set32.sig[5]) << 32); | ||
221 | case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | ||
222 | | (((long)set32.sig[3]) << 32); | ||
223 | case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | ||
224 | | (((long)set32.sig[1]) << 32); | ||
225 | } | ||
226 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 219 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
227 | 220 | ||
228 | if (ret) | 221 | if (ret) |
@@ -233,20 +226,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
233 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 226 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
234 | 227 | ||
235 | if (!ret && oact) { | 228 | if (!ret && oact) { |
236 | switch (_NSIG_WORDS) { | 229 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); |
237 | case 4: | 230 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; |
238 | set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); | ||
239 | set32.sig[6] = old_ka.sa.sa_mask.sig[3]; | ||
240 | case 3: | ||
241 | set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); | ||
242 | set32.sig[4] = old_ka.sa.sa_mask.sig[2]; | ||
243 | case 2: | ||
244 | set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); | ||
245 | set32.sig[2] = old_ka.sa.sa_mask.sig[1]; | ||
246 | case 1: | ||
247 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); | ||
248 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; | ||
249 | } | ||
250 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); | 231 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); |
251 | ret |= __copy_to_user(&oact->sa_mask, &set32, | 232 | ret |= __copy_to_user(&oact->sa_mask, &set32, |
252 | sizeof(compat_sigset_t)); | 233 | sizeof(compat_sigset_t)); |
@@ -300,9 +281,10 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | |||
300 | _s390_regs_common32 regs32; | 281 | _s390_regs_common32 regs32; |
301 | int err, i; | 282 | int err, i; |
302 | 283 | ||
303 | regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits, | 284 | regs32.psw.mask = psw32_user_bits | |
304 | (__u32)(regs->psw.mask >> 32)); | 285 | ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER); |
305 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | 286 | regs32.psw.addr = (__u32) regs->psw.addr | |
287 | (__u32)(regs->psw.mask & PSW_MASK_BA); | ||
306 | for (i = 0; i < NUM_GPRS; i++) | 288 | for (i = 0; i < NUM_GPRS; i++) |
307 | regs32.gprs[i] = (__u32) regs->gprs[i]; | 289 | regs32.gprs[i] = (__u32) regs->gprs[i]; |
308 | save_access_regs(current->thread.acrs); | 290 | save_access_regs(current->thread.acrs); |
@@ -327,8 +309,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
327 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); | 309 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); |
328 | if (err) | 310 | if (err) |
329 | return err; | 311 | return err; |
330 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 312 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
331 | (__u64)regs32.psw.mask << 32); | 313 | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | |
314 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); | ||
332 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); | 315 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); |
333 | for (i = 0; i < NUM_GPRS; i++) | 316 | for (i = 0; i < NUM_GPRS; i++) |
334 | regs->gprs[i] = (__u64) regs32.gprs[i]; | 317 | regs->gprs[i] = (__u64) regs32.gprs[i]; |
@@ -342,7 +325,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
342 | return err; | 325 | return err; |
343 | 326 | ||
344 | restore_fp_regs(¤t->thread.fp_regs); | 327 | restore_fp_regs(¤t->thread.fp_regs); |
345 | regs->svcnr = 0; /* disable syscall checks */ | 328 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
346 | return 0; | 329 | return 0; |
347 | } | 330 | } |
348 | 331 | ||
@@ -496,11 +479,11 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
496 | /* Set up to return from userspace. If provided, use a stub | 479 | /* Set up to return from userspace. If provided, use a stub |
497 | already in userspace. */ | 480 | already in userspace. */ |
498 | if (ka->sa.sa_flags & SA_RESTORER) { | 481 | if (ka->sa.sa_flags & SA_RESTORER) { |
499 | regs->gprs[14] = (__u64) ka->sa.sa_restorer; | 482 | regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; |
500 | } else { | 483 | } else { |
501 | regs->gprs[14] = (__u64) frame->retcode; | 484 | regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; |
502 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | 485 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, |
503 | (u16 __user *)(frame->retcode))) | 486 | (u16 __force __user *)(frame->retcode))) |
504 | goto give_sigsegv; | 487 | goto give_sigsegv; |
505 | } | 488 | } |
506 | 489 | ||
@@ -509,11 +492,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
509 | goto give_sigsegv; | 492 | goto give_sigsegv; |
510 | 493 | ||
511 | /* Set up registers for signal handler */ | 494 | /* Set up registers for signal handler */ |
512 | regs->gprs[15] = (__u64) frame; | 495 | regs->gprs[15] = (__force __u64) frame; |
513 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 496 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ |
497 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; | ||
514 | 498 | ||
515 | regs->gprs[2] = map_signal(sig); | 499 | regs->gprs[2] = map_signal(sig); |
516 | regs->gprs[3] = (__u64) &frame->sc; | 500 | regs->gprs[3] = (__force __u64) &frame->sc; |
517 | 501 | ||
518 | /* We forgot to include these in the sigcontext. | 502 | /* We forgot to include these in the sigcontext. |
519 | To avoid breaking binary compatibility, they are passed as args. */ | 503 | To avoid breaking binary compatibility, they are passed as args. */ |
@@ -521,7 +505,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
521 | regs->gprs[5] = current->thread.prot_addr; | 505 | regs->gprs[5] = current->thread.prot_addr; |
522 | 506 | ||
523 | /* Place signal number on stack to allow backtrace from handler. */ | 507 | /* Place signal number on stack to allow backtrace from handler. */ |
524 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | 508 | if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) |
525 | goto give_sigsegv; | 509 | goto give_sigsegv; |
526 | return 0; | 510 | return 0; |
527 | 511 | ||
@@ -564,20 +548,21 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
564 | } else { | 548 | } else { |
565 | regs->gprs[14] = (__u64) frame->retcode; | 549 | regs->gprs[14] = (__u64) frame->retcode; |
566 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 550 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
567 | (u16 __user *)(frame->retcode)); | 551 | (u16 __force __user *)(frame->retcode)); |
568 | } | 552 | } |
569 | 553 | ||
570 | /* Set up backchain. */ | 554 | /* Set up backchain. */ |
571 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | 555 | if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) |
572 | goto give_sigsegv; | 556 | goto give_sigsegv; |
573 | 557 | ||
574 | /* Set up registers for signal handler */ | 558 | /* Set up registers for signal handler */ |
575 | regs->gprs[15] = (__u64) frame; | 559 | regs->gprs[15] = (__force __u64) frame; |
560 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ | ||
576 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 561 | regs->psw.addr = (__u64) ka->sa.sa_handler; |
577 | 562 | ||
578 | regs->gprs[2] = map_signal(sig); | 563 | regs->gprs[2] = map_signal(sig); |
579 | regs->gprs[3] = (__u64) &frame->info; | 564 | regs->gprs[3] = (__force __u64) &frame->info; |
580 | regs->gprs[4] = (__u64) &frame->uc; | 565 | regs->gprs[4] = (__force __u64) &frame->uc; |
581 | return 0; | 566 | return 0; |
582 | 567 | ||
583 | give_sigsegv: | 568 | give_sigsegv: |
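
In save_sigregs32()/restore_sigregs32() above, the old PSW32_MASK_MERGE()/PSW_MASK_MERGE() helpers give way to explicit masking: the fixed system bits come from psw32_user_bits, only the bits covered by the user mask are copied from (or back into) the saved PSW, and the 31-bit addressing-mode bit travels via PSW_MASK_BA / PSW32_ADDR_AMODE. A reduced sketch of the masking idea, using made-up names and bit values rather than the real asm/ptrace.h constants:

    #include <stdint.h>

    /* Invented values; the real ones are psw32_user_bits, PSW32_MASK_USER
     * and friends from asm/ptrace.h. */
    #define COMPAT_PSW_USER_MASK  0x0000ffffu  /* bits user space may change     */
    #define COMPAT_PSW_FIXED_BITS 0x00080000u  /* fixed bits the kernel enforces */

    /* Upper half of the 64-bit PSW mask -> 32-bit compat mask: fixed kernel
     * bits ORed with the user-changeable bits of the saved mask. */
    static uint32_t psw_mask_to_compat(uint64_t psw_mask)
    {
            return COMPAT_PSW_FIXED_BITS |
                   ((uint32_t)(psw_mask >> 32) & COMPAT_PSW_USER_MASK);
    }

    /* Merge a (possibly user-modified) compat mask back into the 64-bit mask,
     * leaving every bit outside the user-changeable set exactly as it was. */
    static uint64_t psw_mask_from_compat(uint64_t old_mask, uint32_t compat_mask)
    {
            uint64_t user = (uint64_t)(compat_mask & COMPAT_PSW_USER_MASK) << 32;

            return (old_mask & ~((uint64_t)COMPAT_PSW_USER_MASK << 32)) | user;
    }
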
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 7526db6bf501..5006a1d9f5d0 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1623,8 +1623,7 @@ ENTRY(sys_syncfs_wrapper) | |||
1623 | lgfr %r2,%r2 # int | 1623 | lgfr %r2,%r2 # int |
1624 | jg sys_syncfs | 1624 | jg sys_syncfs |
1625 | 1625 | ||
1626 | .globl sys_setns_wrapper | 1626 | ENTRY(sys_setns_wrapper) |
1627 | sys_setns_wrapper: | ||
1628 | lgfr %r2,%r2 # int | 1627 | lgfr %r2,%r2 # int |
1629 | lgfr %r3,%r3 # int | 1628 | lgfr %r3,%r3 # int |
1630 | jg sys_setns | 1629 | jg sys_setns |
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
new file mode 100644
index 000000000000..39f8fd4438fc
--- /dev/null
+++ b/arch/s390/kernel/crash_dump.c
@@ -0,0 +1,426 @@ | |||
1 | /* | ||
2 | * S390 kdump implementation | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/crash_dump.h> | ||
9 | #include <asm/lowcore.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/crash_dump.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | #include <linux/elf.h> | ||
17 | #include <asm/ipl.h> | ||
18 | |||
19 | #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) | ||
20 | #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) | ||
21 | #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) | ||
22 | |||
23 | /* | ||
24 | * Copy one page from "oldmem" | ||
25 | * | ||
26 | * For the kdump reserved memory this function performs a swap operation: | ||
27 | * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE]. | ||
28 | * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] | ||
29 | */ | ||
30 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
31 | size_t csize, unsigned long offset, int userbuf) | ||
32 | { | ||
33 | unsigned long src; | ||
34 | |||
35 | if (!csize) | ||
36 | return 0; | ||
37 | |||
38 | src = (pfn << PAGE_SHIFT) + offset; | ||
39 | if (src < OLDMEM_SIZE) | ||
40 | src += OLDMEM_BASE; | ||
41 | else if (src > OLDMEM_BASE && | ||
42 | src < OLDMEM_BASE + OLDMEM_SIZE) | ||
43 | src -= OLDMEM_BASE; | ||
44 | if (userbuf) | ||
45 | copy_to_user_real((void __force __user *) buf, (void *) src, | ||
46 | csize); | ||
47 | else | ||
48 | memcpy_real(buf, (void *) src, csize); | ||
49 | return csize; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Copy memory from old kernel | ||
54 | */ | ||
55 | static int copy_from_oldmem(void *dest, void *src, size_t count) | ||
56 | { | ||
57 | unsigned long copied = 0; | ||
58 | int rc; | ||
59 | |||
60 | if ((unsigned long) src < OLDMEM_SIZE) { | ||
61 | copied = min(count, OLDMEM_SIZE - (unsigned long) src); | ||
62 | rc = memcpy_real(dest, src + OLDMEM_BASE, copied); | ||
63 | if (rc) | ||
64 | return rc; | ||
65 | } | ||
66 | return memcpy_real(dest + copied, src + copied, count - copied); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Alloc memory and panic in case of ENOMEM | ||
71 | */ | ||
72 | static void *kzalloc_panic(int len) | ||
73 | { | ||
74 | void *rc; | ||
75 | |||
76 | rc = kzalloc(len, GFP_KERNEL); | ||
77 | if (!rc) | ||
78 | panic("s390 kdump kzalloc (%d) failed", len); | ||
79 | return rc; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Get memory layout and create hole for oldmem | ||
84 | */ | ||
85 | static struct mem_chunk *get_memory_layout(void) | ||
86 | { | ||
87 | struct mem_chunk *chunk_array; | ||
88 | |||
89 | chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
90 | detect_memory_layout(chunk_array); | ||
91 | create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); | ||
92 | return chunk_array; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Initialize ELF note | ||
97 | */ | ||
98 | static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, | ||
99 | const char *name) | ||
100 | { | ||
101 | Elf64_Nhdr *note; | ||
102 | u64 len; | ||
103 | |||
104 | note = (Elf64_Nhdr *)buf; | ||
105 | note->n_namesz = strlen(name) + 1; | ||
106 | note->n_descsz = d_len; | ||
107 | note->n_type = type; | ||
108 | len = sizeof(Elf64_Nhdr); | ||
109 | |||
110 | memcpy(buf + len, name, note->n_namesz); | ||
111 | len = roundup(len + note->n_namesz, 4); | ||
112 | |||
113 | memcpy(buf + len, desc, note->n_descsz); | ||
114 | len = roundup(len + note->n_descsz, 4); | ||
115 | |||
116 | return PTR_ADD(buf, len); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Initialize prstatus note | ||
121 | */ | ||
122 | static void *nt_prstatus(void *ptr, struct save_area *sa) | ||
123 | { | ||
124 | struct elf_prstatus nt_prstatus; | ||
125 | static int cpu_nr = 1; | ||
126 | |||
127 | memset(&nt_prstatus, 0, sizeof(nt_prstatus)); | ||
128 | memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs)); | ||
129 | memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw)); | ||
130 | memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs)); | ||
131 | nt_prstatus.pr_pid = cpu_nr; | ||
132 | cpu_nr++; | ||
133 | |||
134 | return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus), | ||
135 | "CORE"); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Initialize fpregset (floating point) note | ||
140 | */ | ||
141 | static void *nt_fpregset(void *ptr, struct save_area *sa) | ||
142 | { | ||
143 | elf_fpregset_t nt_fpregset; | ||
144 | |||
145 | memset(&nt_fpregset, 0, sizeof(nt_fpregset)); | ||
146 | memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg)); | ||
147 | memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs)); | ||
148 | |||
149 | return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset), | ||
150 | "CORE"); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Initialize timer note | ||
155 | */ | ||
156 | static void *nt_s390_timer(void *ptr, struct save_area *sa) | ||
157 | { | ||
158 | return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), | ||
159 | KEXEC_CORE_NOTE_NAME); | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Initialize TOD clock comparator note | ||
164 | */ | ||
165 | static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) | ||
166 | { | ||
167 | return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, | ||
168 | sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Initialize TOD programmable register note | ||
173 | */ | ||
174 | static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) | ||
175 | { | ||
176 | return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, | ||
177 | sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Initialize control register note | ||
182 | */ | ||
183 | static void *nt_s390_ctrs(void *ptr, struct save_area *sa) | ||
184 | { | ||
185 | return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, | ||
186 | sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Initialize prefix register note | ||
191 | */ | ||
192 | static void *nt_s390_prefix(void *ptr, struct save_area *sa) | ||
193 | { | ||
194 | return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, | ||
195 | sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Fill ELF notes for one CPU with save area registers | ||
200 | */ | ||
201 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) | ||
202 | { | ||
203 | ptr = nt_prstatus(ptr, sa); | ||
204 | ptr = nt_fpregset(ptr, sa); | ||
205 | ptr = nt_s390_timer(ptr, sa); | ||
206 | ptr = nt_s390_tod_cmp(ptr, sa); | ||
207 | ptr = nt_s390_tod_preg(ptr, sa); | ||
208 | ptr = nt_s390_ctrs(ptr, sa); | ||
209 | ptr = nt_s390_prefix(ptr, sa); | ||
210 | return ptr; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Initialize prpsinfo note (new kernel) | ||
215 | */ | ||
216 | static void *nt_prpsinfo(void *ptr) | ||
217 | { | ||
218 | struct elf_prpsinfo prpsinfo; | ||
219 | |||
220 | memset(&prpsinfo, 0, sizeof(prpsinfo)); | ||
221 | prpsinfo.pr_sname = 'R'; | ||
222 | strcpy(prpsinfo.pr_fname, "vmlinux"); | ||
223 | return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo), | ||
224 | KEXEC_CORE_NOTE_NAME); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Initialize vmcoreinfo note (new kernel) | ||
229 | */ | ||
230 | static void *nt_vmcoreinfo(void *ptr) | ||
231 | { | ||
232 | char nt_name[11], *vmcoreinfo; | ||
233 | Elf64_Nhdr note; | ||
234 | void *addr; | ||
235 | |||
236 | if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) | ||
237 | return ptr; | ||
238 | memset(nt_name, 0, sizeof(nt_name)); | ||
239 | if (copy_from_oldmem(¬e, addr, sizeof(note))) | ||
240 | return ptr; | ||
241 | if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) | ||
242 | return ptr; | ||
243 | if (strcmp(nt_name, "VMCOREINFO") != 0) | ||
244 | return ptr; | ||
245 | vmcoreinfo = kzalloc_panic(note.n_descsz + 1); | ||
246 | if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) | ||
247 | return ptr; | ||
248 | vmcoreinfo[note.n_descsz] = 0; | ||
249 | return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO"); | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Initialize ELF header (new kernel) | ||
254 | */ | ||
255 | static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) | ||
256 | { | ||
257 | memset(ehdr, 0, sizeof(*ehdr)); | ||
258 | memcpy(ehdr->e_ident, ELFMAG, SELFMAG); | ||
259 | ehdr->e_ident[EI_CLASS] = ELFCLASS64; | ||
260 | ehdr->e_ident[EI_DATA] = ELFDATA2MSB; | ||
261 | ehdr->e_ident[EI_VERSION] = EV_CURRENT; | ||
262 | memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); | ||
263 | ehdr->e_type = ET_CORE; | ||
264 | ehdr->e_machine = EM_S390; | ||
265 | ehdr->e_version = EV_CURRENT; | ||
266 | ehdr->e_phoff = sizeof(Elf64_Ehdr); | ||
267 | ehdr->e_ehsize = sizeof(Elf64_Ehdr); | ||
268 | ehdr->e_phentsize = sizeof(Elf64_Phdr); | ||
269 | ehdr->e_phnum = mem_chunk_cnt + 1; | ||
270 | return ehdr + 1; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Return CPU count for ELF header (new kernel) | ||
275 | */ | ||
276 | static int get_cpu_cnt(void) | ||
277 | { | ||
278 | int i, cpus = 0; | ||
279 | |||
280 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
281 | if (zfcpdump_save_areas[i]->pref_reg == 0) | ||
282 | continue; | ||
283 | cpus++; | ||
284 | } | ||
285 | return cpus; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Return memory chunk count for ELF header (new kernel) | ||
290 | */ | ||
291 | static int get_mem_chunk_cnt(void) | ||
292 | { | ||
293 | struct mem_chunk *chunk_array, *mem_chunk; | ||
294 | int i, cnt = 0; | ||
295 | |||
296 | chunk_array = get_memory_layout(); | ||
297 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
298 | mem_chunk = &chunk_array[i]; | ||
299 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
300 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
301 | continue; | ||
302 | if (mem_chunk->size == 0) | ||
303 | continue; | ||
304 | cnt++; | ||
305 | } | ||
306 | kfree(chunk_array); | ||
307 | return cnt; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Relocate pointer in order to allow the vmcore code to access the data | ||
312 | */ | ||
313 | static inline unsigned long relocate(unsigned long addr) | ||
314 | { | ||
315 | return OLDMEM_BASE + addr; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Initialize ELF loads (new kernel) | ||
320 | */ | ||
321 | static int loads_init(Elf64_Phdr *phdr, u64 loads_offset) | ||
322 | { | ||
323 | struct mem_chunk *chunk_array, *mem_chunk; | ||
324 | int i; | ||
325 | |||
326 | chunk_array = get_memory_layout(); | ||
327 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
328 | mem_chunk = &chunk_array[i]; | ||
329 | if (mem_chunk->size == 0) | ||
330 | break; | ||
331 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
332 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
333 | continue; | ||
334 | else | ||
335 | phdr->p_filesz = mem_chunk->size; | ||
336 | phdr->p_type = PT_LOAD; | ||
337 | phdr->p_offset = mem_chunk->addr; | ||
338 | phdr->p_vaddr = mem_chunk->addr; | ||
339 | phdr->p_paddr = mem_chunk->addr; | ||
340 | phdr->p_memsz = mem_chunk->size; | ||
341 | phdr->p_flags = PF_R | PF_W | PF_X; | ||
342 | phdr->p_align = PAGE_SIZE; | ||
343 | phdr++; | ||
344 | } | ||
345 | kfree(chunk_array); | ||
346 | return i; | ||
347 | } | ||
348 | |||
349 | /* | ||
350 | * Initialize notes (new kernel) | ||
351 | */ | ||
352 | static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) | ||
353 | { | ||
354 | struct save_area *sa; | ||
355 | void *ptr_start = ptr; | ||
356 | int i; | ||
357 | |||
358 | ptr = nt_prpsinfo(ptr); | ||
359 | |||
360 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
361 | sa = zfcpdump_save_areas[i]; | ||
362 | if (sa->pref_reg == 0) | ||
363 | continue; | ||
364 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
365 | } | ||
366 | ptr = nt_vmcoreinfo(ptr); | ||
367 | memset(phdr, 0, sizeof(*phdr)); | ||
368 | phdr->p_type = PT_NOTE; | ||
369 | phdr->p_offset = relocate(notes_offset); | ||
370 | phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start); | ||
371 | phdr->p_memsz = phdr->p_filesz; | ||
372 | return ptr; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Create ELF core header (new kernel) | ||
377 | */ | ||
378 | static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) | ||
379 | { | ||
380 | Elf64_Phdr *phdr_notes, *phdr_loads; | ||
381 | int mem_chunk_cnt; | ||
382 | void *ptr, *hdr; | ||
383 | u32 alloc_size; | ||
384 | u64 hdr_off; | ||
385 | |||
386 | mem_chunk_cnt = get_mem_chunk_cnt(); | ||
387 | |||
388 | alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + | ||
389 | mem_chunk_cnt * sizeof(Elf64_Phdr); | ||
390 | hdr = kzalloc_panic(alloc_size); | ||
391 | /* Init elf header */ | ||
392 | ptr = ehdr_init(hdr, mem_chunk_cnt); | ||
393 | /* Init program headers */ | ||
394 | phdr_notes = ptr; | ||
395 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); | ||
396 | phdr_loads = ptr; | ||
397 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt); | ||
398 | /* Init notes */ | ||
399 | hdr_off = PTR_DIFF(ptr, hdr); | ||
400 | ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); | ||
401 | /* Init loads */ | ||
402 | hdr_off = PTR_DIFF(ptr, hdr); | ||
403 | loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off); | ||
404 | *elfcorebuf_sz = hdr_off; | ||
405 | *elfcorebuf = (void *) relocate((unsigned long) hdr); | ||
406 | BUG_ON(*elfcorebuf_sz > alloc_size); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Create kdump ELF core header in new kernel, if it has not been passed via | ||
411 | * the "elfcorehdr" kernel parameter | ||
412 | */ | ||
413 | static int setup_kdump_elfcorehdr(void) | ||
414 | { | ||
415 | size_t elfcorebuf_sz; | ||
416 | char *elfcorebuf; | ||
417 | |||
418 | if (!OLDMEM_BASE || is_kdump_kernel()) | ||
419 | return -EINVAL; | ||
420 | s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz); | ||
421 | elfcorehdr_addr = (unsigned long long) elfcorebuf; | ||
422 | elfcorehdr_size = elfcorebuf_sz; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | subsys_initcall(setup_kdump_elfcorehdr); | ||
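
The address arithmetic in copy_oldmem_page() implements the swap described in the comment at the top of crash_dump.c: reads below OLDMEM_SIZE are redirected up into the relocated old-kernel copy at OLDMEM_BASE, and reads inside [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE) are redirected back down to 0. A small user-space sketch of just that translation, with invented base/size values and mirroring the patch's comparisons:

    #include <assert.h>

    /* Invented layout: a 256 MiB crashkernel area starting at 1 GiB. */
    #define OLDMEM_BASE 0x40000000UL
    #define OLDMEM_SIZE 0x10000000UL

    /* Mirror of the swap in copy_oldmem_page(): [0, SIZE) and
     * [BASE, BASE + SIZE) trade places, every other address is untouched. */
    static unsigned long oldmem_translate(unsigned long src)
    {
            if (src < OLDMEM_SIZE)
                    return src + OLDMEM_BASE;
            if (src > OLDMEM_BASE && src < OLDMEM_BASE + OLDMEM_SIZE)
                    return src - OLDMEM_BASE;
            return src;
    }

    int main(void)
    {
            assert(oldmem_translate(0x1000) == OLDMEM_BASE + 0x1000);  /* new range -> old copy */
            assert(oldmem_translate(OLDMEM_BASE + 0x1000) == 0x1000);  /* old copy -> new range */
            assert(oldmem_translate(OLDMEM_BASE + OLDMEM_SIZE) == OLDMEM_BASE + OLDMEM_SIZE);
            return 0;
    }

The swap lets the kdump kernel read "oldmem" through /proc/vmcore while the crashed kernel's first OLDMEM_SIZE bytes actually live at OLDMEM_BASE.
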
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f297456dba7a..37394b3413e2 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -252,7 +252,7 @@ static noinline __init void setup_lowcore_early(void) | |||
252 | { | 252 | { |
253 | psw_t psw; | 253 | psw_t psw; |
254 | 254 | ||
255 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 255 | psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; |
256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | 256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; |
257 | S390_lowcore.external_new_psw = psw; | 257 | S390_lowcore.external_new_psw = psw; |
258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 02ec8fe7d03f..b13157057e02 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -43,16 +43,15 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 49 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
51 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 50 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
52 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 51 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
53 | _TIF_MCCK_PENDING) | 52 | _TIF_MCCK_PENDING) |
54 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 53 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
55 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 54 | _TIF_SYSCALL_TRACEPOINT) |
56 | 55 | ||
57 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 56 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
58 | STACK_SIZE = 1 << STACK_SHIFT | 57 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -228,9 +227,10 @@ ENTRY(system_call) | |||
228 | sysc_saveall: | 227 | sysc_saveall: |
229 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 228 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
230 | CREATE_STACK_FRAME __LC_SAVE_AREA | 229 | CREATE_STACK_FRAME __LC_SAVE_AREA |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
233 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 230 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
233 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
234 | sysc_vtime: | 234 | sysc_vtime: |
235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
236 | sysc_stime: | 236 | sysc_stime: |
@@ -239,17 +239,17 @@ sysc_update: | |||
239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
240 | sysc_do_svc: | 240 | sysc_do_svc: |
241 | xr %r7,%r7 | 241 | xr %r7,%r7 |
242 | icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0 | 242 | icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 |
243 | bnz BASED(sysc_nr_ok) # svc number > 0 | 243 | bnz BASED(sysc_nr_ok) # svc number > 0 |
244 | # svc 0: system call number in %r1 | 244 | # svc 0: system call number in %r1 |
245 | cl %r1,BASED(.Lnr_syscalls) | 245 | cl %r1,BASED(.Lnr_syscalls) |
246 | bnl BASED(sysc_nr_ok) | 246 | bnl BASED(sysc_nr_ok) |
247 | sth %r1,SP_SVCNR(%r15) | 247 | sth %r1,SP_SVC_CODE+2(%r15) |
248 | lr %r7,%r1 # copy svc number to %r7 | 248 | lr %r7,%r1 # copy svc number to %r7 |
249 | sysc_nr_ok: | 249 | sysc_nr_ok: |
250 | sll %r7,2 # svc number *4 | 250 | sll %r7,2 # svc number *4 |
251 | l %r10,BASED(.Lsysc_table) | 251 | l %r10,BASED(.Lsysc_table) |
252 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 252 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) |
254 | l %r8,0(%r7,%r10) # get system call addr. | 254 | l %r8,0(%r7,%r10) # get system call addr. |
255 | bnz BASED(sysc_tracesys) | 255 | bnz BASED(sysc_tracesys) |
@@ -259,23 +259,19 @@ sysc_nr_ok: | |||
259 | sysc_return: | 259 | sysc_return: |
260 | LOCKDEP_SYS_EXIT | 260 | LOCKDEP_SYS_EXIT |
261 | sysc_tif: | 261 | sysc_tif: |
262 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
263 | bno BASED(sysc_restore) | ||
262 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 264 | tm __TI_flags+3(%r12),_TIF_WORK_SVC |
263 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 265 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
266 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | ||
264 | sysc_restore: | 267 | sysc_restore: |
265 | RESTORE_ALL __LC_RETURN_PSW,1 | 268 | RESTORE_ALL __LC_RETURN_PSW,1 |
266 | sysc_done: | 269 | sysc_done: |
267 | 270 | ||
268 | # | 271 | # |
269 | # There is work to do, but first we need to check if we return to userspace. | ||
270 | # | ||
271 | sysc_work: | ||
272 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
273 | bno BASED(sysc_restore) | ||
274 | |||
275 | # | ||
276 | # One of the work bits is on. Find out which one. | 272 | # One of the work bits is on. Find out which one. |
277 | # | 273 | # |
278 | sysc_work_tif: | 274 | sysc_work: |
279 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 275 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
280 | bo BASED(sysc_mcck_pending) | 276 | bo BASED(sysc_mcck_pending) |
281 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 277 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
@@ -284,8 +280,6 @@ sysc_work_tif: | |||
284 | bo BASED(sysc_sigpending) | 280 | bo BASED(sysc_sigpending) |
285 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 281 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
286 | bo BASED(sysc_notify_resume) | 282 | bo BASED(sysc_notify_resume) |
287 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | ||
288 | bo BASED(sysc_restart) | ||
289 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 283 | tm __TI_flags+3(%r12),_TIF_PER_TRAP |
290 | bo BASED(sysc_singlestep) | 284 | bo BASED(sysc_singlestep) |
291 | b BASED(sysc_return) # beware of critical section cleanup | 285 | b BASED(sysc_return) # beware of critical section cleanup |
@@ -314,11 +308,14 @@ sysc_sigpending: | |||
314 | la %r2,SP_PTREGS(%r15) # load pt_regs | 308 | la %r2,SP_PTREGS(%r15) # load pt_regs |
315 | l %r1,BASED(.Ldo_signal) | 309 | l %r1,BASED(.Ldo_signal) |
316 | basr %r14,%r1 # call do_signal | 310 | basr %r14,%r1 # call do_signal |
317 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | 311 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
318 | bo BASED(sysc_restart) | 312 | bno BASED(sysc_return) |
319 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 313 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
320 | bo BASED(sysc_singlestep) | 314 | xr %r7,%r7 # svc 0 returns -ENOSYS |
321 | b BASED(sysc_return) | 315 | clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) |
316 | bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 | ||
317 | icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number | ||
318 | b BASED(sysc_nr_ok) # restart svc | ||
322 | 319 | ||
323 | # | 320 | # |
324 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 321 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -329,24 +326,11 @@ sysc_notify_resume: | |||
329 | la %r14,BASED(sysc_return) | 326 | la %r14,BASED(sysc_return) |
330 | br %r1 # call do_notify_resume | 327 | br %r1 # call do_notify_resume |
331 | 328 | ||
332 | |||
333 | # | ||
334 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
335 | # | ||
336 | sysc_restart: | ||
337 | ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
338 | l %r7,SP_R2(%r15) # load new svc number | ||
339 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
340 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
341 | sth %r7,SP_SVCNR(%r15) | ||
342 | b BASED(sysc_nr_ok) # restart svc | ||
343 | |||
344 | # | 329 | # |
345 | # _TIF_PER_TRAP is set, call do_per_trap | 330 | # _TIF_PER_TRAP is set, call do_per_trap |
346 | # | 331 | # |
347 | sysc_singlestep: | 332 | sysc_singlestep: |
348 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
349 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
350 | la %r2,SP_PTREGS(%r15) # address of register-save area | 334 | la %r2,SP_PTREGS(%r15) # address of register-save area |
351 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 335 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
352 | la %r14,BASED(sysc_return) # load adr. of system return | 336 | la %r14,BASED(sysc_return) # load adr. of system return |
@@ -361,7 +345,7 @@ sysc_tracesys: | |||
361 | la %r2,SP_PTREGS(%r15) # load pt_regs | 345 | la %r2,SP_PTREGS(%r15) # load pt_regs |
362 | la %r3,0 | 346 | la %r3,0 |
363 | xr %r0,%r0 | 347 | xr %r0,%r0 |
364 | icm %r0,3,SP_SVCNR(%r15) | 348 | icm %r0,3,SP_SVC_CODE(%r15) |
365 | st %r0,SP_R2(%r15) | 349 | st %r0,SP_R2(%r15) |
366 | basr %r14,%r1 | 350 | basr %r14,%r1 |
367 | cl %r2,BASED(.Lnr_syscalls) | 351 | cl %r2,BASED(.Lnr_syscalls) |
@@ -376,7 +360,7 @@ sysc_tracego: | |||
376 | basr %r14,%r8 # call sys_xxx | 360 | basr %r14,%r8 # call sys_xxx |
377 | st %r2,SP_R2(%r15) # store return value | 361 | st %r2,SP_R2(%r15) # store return value |
378 | sysc_tracenogo: | 362 | sysc_tracenogo: |
379 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 363 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
380 | bz BASED(sysc_return) | 364 | bz BASED(sysc_return) |
381 | l %r1,BASED(.Ltrace_exit) | 365 | l %r1,BASED(.Ltrace_exit) |
382 | la %r2,SP_PTREGS(%r15) # load pt_regs | 366 | la %r2,SP_PTREGS(%r15) # load pt_regs |
@@ -454,7 +438,6 @@ ENTRY(pgm_check_handler) | |||
454 | bnz BASED(pgm_per) # got per exception -> special case | 438 | bnz BASED(pgm_per) # got per exception -> special case |
455 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 439 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
456 | CREATE_STACK_FRAME __LC_SAVE_AREA | 440 | CREATE_STACK_FRAME __LC_SAVE_AREA |
457 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
458 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | 441 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW |
459 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 442 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
460 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 443 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -530,9 +513,10 @@ pgm_exit2: | |||
530 | pgm_svcper: | 513 | pgm_svcper: |
531 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 514 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
532 | CREATE_STACK_FRAME __LC_SAVE_AREA | 515 | CREATE_STACK_FRAME __LC_SAVE_AREA |
533 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
534 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
535 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 516 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
517 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
518 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
519 | oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
536 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 520 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
537 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 521 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
538 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 522 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -540,7 +524,6 @@ pgm_svcper: | |||
540 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 524 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
541 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS | 525 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS |
542 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 526 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
543 | oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
544 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 527 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
545 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 528 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
546 | b BASED(sysc_do_svc) | 529 | b BASED(sysc_do_svc) |
@@ -550,7 +533,6 @@ pgm_svcper: | |||
550 | # | 533 | # |
551 | kernel_per: | 534 | kernel_per: |
552 | REENABLE_IRQS | 535 | REENABLE_IRQS |
553 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) | ||
554 | la %r2,SP_PTREGS(%r15) # address of register-save area | 536 | la %r2,SP_PTREGS(%r15) # address of register-save area |
555 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 537 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
556 | basr %r14,%r1 # branch to do_single_step | 538 | basr %r14,%r1 # branch to do_single_step |
@@ -853,13 +835,13 @@ restart_go: | |||
853 | # PSW restart interrupt handler | 835 | # PSW restart interrupt handler |
854 | # | 836 | # |
855 | ENTRY(psw_restart_int_handler) | 837 | ENTRY(psw_restart_int_handler) |
856 | st %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 838 | st %r15,__LC_SAVE_AREA+48(%r0) # save r15 |
857 | basr %r15,0 | 839 | basr %r15,0 |
858 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack | 840 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack |
859 | l %r15,0(%r15) | 841 | l %r15,0(%r15) |
860 | ahi %r15,-SP_SIZE # make room for pt_regs | 842 | ahi %r15,-SP_SIZE # make room for pt_regs |
861 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 843 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
862 | mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 844 | mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack |
863 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw | 845 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw |
864 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 846 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
865 | basr %r14,0 | 847 | basr %r14,0 |
@@ -965,9 +947,11 @@ cleanup_system_call: | |||
965 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 947 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
966 | st %r15,12(%r12) | 948 | st %r15,12(%r12) |
967 | CREATE_STACK_FRAME __LC_SAVE_AREA | 949 | CREATE_STACK_FRAME __LC_SAVE_AREA |
968 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
969 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
970 | mvc 0(4,%r12),__LC_THREAD_INFO | 950 | mvc 0(4,%r12),__LC_THREAD_INFO |
951 | l %r12,__LC_THREAD_INFO | ||
952 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
953 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
954 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
971 | cleanup_vtime: | 955 | cleanup_vtime: |
972 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) |
973 | bhe BASED(cleanup_stime) | 957 | bhe BASED(cleanup_stime) |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 66729eb7bbc5..ef8fb1d6e8d7 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,24 +5,33 @@ | |||
5 | #include <linux/signal.h> | 5 | #include <linux/signal.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | 7 | ||
8 | |||
9 | extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); | ||
10 | extern void *restart_stack; | ||
11 | |||
12 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); | ||
13 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); | ||
14 | |||
8 | void do_protection_exception(struct pt_regs *, long, unsigned long); | 15 | void do_protection_exception(struct pt_regs *, long, unsigned long); |
9 | void do_dat_exception(struct pt_regs *, long, unsigned long); | 16 | void do_dat_exception(struct pt_regs *, long, unsigned long); |
10 | void do_asce_exception(struct pt_regs *, long, unsigned long); | 17 | void do_asce_exception(struct pt_regs *, long, unsigned long); |
11 | 18 | ||
12 | extern int sysctl_userprocess_debug; | ||
13 | |||
14 | void do_per_trap(struct pt_regs *regs); | 19 | void do_per_trap(struct pt_regs *regs); |
15 | void syscall_trace(struct pt_regs *regs, int entryexit); | 20 | void syscall_trace(struct pt_regs *regs, int entryexit); |
16 | void kernel_stack_overflow(struct pt_regs * regs); | 21 | void kernel_stack_overflow(struct pt_regs * regs); |
17 | void do_signal(struct pt_regs *regs); | 22 | void do_signal(struct pt_regs *regs); |
18 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 23 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
19 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | 24 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); |
25 | void do_notify_resume(struct pt_regs *regs); | ||
20 | 26 | ||
21 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); | 27 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); |
28 | void do_restart(void); | ||
22 | int __cpuinit start_secondary(void *cpuvoid); | 29 | int __cpuinit start_secondary(void *cpuvoid); |
23 | void __init startup_init(void); | 30 | void __init startup_init(void); |
24 | void die(const char * str, struct pt_regs * regs, long err); | 31 | void die(const char * str, struct pt_regs * regs, long err); |
25 | 32 | ||
33 | void __init time_init(void); | ||
34 | |||
26 | struct s390_mmap_arg_struct; | 35 | struct s390_mmap_arg_struct; |
27 | struct fadvise64_64_args; | 36 | struct fadvise64_64_args; |
28 | struct old_sigaction; | 37 | struct old_sigaction; |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 713da0760538..83a93747e2fd 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -43,19 +43,18 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 49 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
51 | STACK_SIZE = 1 << STACK_SHIFT | 50 | STACK_SIZE = 1 << STACK_SHIFT |
52 | 51 | ||
53 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 52 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
54 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 53 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
55 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
56 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
57 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
58 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 57 | _TIF_SYSCALL_TRACEPOINT) |
59 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) | 58 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) |
60 | 59 | ||
61 | #define BASED(name) name-system_call(%r13) | 60 | #define BASED(name) name-system_call(%r13) |
@@ -249,9 +248,10 @@ ENTRY(system_call) | |||
249 | sysc_saveall: | 248 | sysc_saveall: |
250 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 249 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
251 | CREATE_STACK_FRAME __LC_SAVE_AREA | 250 | CREATE_STACK_FRAME __LC_SAVE_AREA |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
254 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 251 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
254 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
255 | sysc_vtime: | 255 | sysc_vtime: |
256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
257 | sysc_stime: | 257 | sysc_stime: |
@@ -260,14 +260,14 @@ sysc_update: | |||
260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
261 | LAST_BREAK | 261 | LAST_BREAK |
262 | sysc_do_svc: | 262 | sysc_do_svc: |
263 | llgh %r7,SP_SVCNR(%r15) | 263 | llgh %r7,SP_SVC_CODE+2(%r15) |
264 | slag %r7,%r7,2 # shift and test for svc 0 | 264 | slag %r7,%r7,2 # shift and test for svc 0 |
265 | jnz sysc_nr_ok | 265 | jnz sysc_nr_ok |
266 | # svc 0: system call number in %r1 | 266 | # svc 0: system call number in %r1 |
267 | llgfr %r1,%r1 # clear high word in r1 | 267 | llgfr %r1,%r1 # clear high word in r1 |
268 | cghi %r1,NR_syscalls | 268 | cghi %r1,NR_syscalls |
269 | jnl sysc_nr_ok | 269 | jnl sysc_nr_ok |
270 | sth %r1,SP_SVCNR(%r15) | 270 | sth %r1,SP_SVC_CODE+2(%r15) |
271 | slag %r7,%r1,2 # shift and test for svc 0 | 271 | slag %r7,%r1,2 # shift and test for svc 0 |
272 | sysc_nr_ok: | 272 | sysc_nr_ok: |
273 | larl %r10,sys_call_table | 273 | larl %r10,sys_call_table |
@@ -277,7 +277,7 @@ sysc_nr_ok: | |||
277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | 277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls |
278 | sysc_noemu: | 278 | sysc_noemu: |
279 | #endif | 279 | #endif |
280 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 280 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | 281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) |
282 | lgf %r8,0(%r7,%r10) # load address of system call routine | 282 | lgf %r8,0(%r7,%r10) # load address of system call routine |
283 | jnz sysc_tracesys | 283 | jnz sysc_tracesys |
@@ -287,23 +287,19 @@ sysc_noemu: | |||
287 | sysc_return: | 287 | sysc_return: |
288 | LOCKDEP_SYS_EXIT | 288 | LOCKDEP_SYS_EXIT |
289 | sysc_tif: | 289 | sysc_tif: |
290 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
291 | jno sysc_restore | ||
290 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | 292 | tm __TI_flags+7(%r12),_TIF_WORK_SVC |
291 | jnz sysc_work # there is work to do (signals etc.) | 293 | jnz sysc_work # there is work to do (signals etc.) |
294 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL | ||
292 | sysc_restore: | 295 | sysc_restore: |
293 | RESTORE_ALL __LC_RETURN_PSW,1 | 296 | RESTORE_ALL __LC_RETURN_PSW,1 |
294 | sysc_done: | 297 | sysc_done: |
295 | 298 | ||
296 | # | 299 | # |
297 | # There is work to do, but first we need to check if we return to userspace. | ||
298 | # | ||
299 | sysc_work: | ||
300 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
301 | jno sysc_restore | ||
302 | |||
303 | # | ||
304 | # One of the work bits is on. Find out which one. | 300 | # One of the work bits is on. Find out which one. |
305 | # | 301 | # |
306 | sysc_work_tif: | 302 | sysc_work: |
307 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 303 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
308 | jo sysc_mcck_pending | 304 | jo sysc_mcck_pending |
309 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 305 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
@@ -312,8 +308,6 @@ sysc_work_tif: | |||
312 | jo sysc_sigpending | 308 | jo sysc_sigpending |
313 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 309 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
314 | jo sysc_notify_resume | 310 | jo sysc_notify_resume |
315 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | ||
316 | jo sysc_restart | ||
317 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 311 | tm __TI_flags+7(%r12),_TIF_PER_TRAP |
318 | jo sysc_singlestep | 312 | jo sysc_singlestep |
319 | j sysc_return # beware of critical section cleanup | 313 | j sysc_return # beware of critical section cleanup |
@@ -339,11 +333,15 @@ sysc_sigpending: | |||
339 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
340 | la %r2,SP_PTREGS(%r15) # load pt_regs | 334 | la %r2,SP_PTREGS(%r15) # load pt_regs |
341 | brasl %r14,do_signal # call do_signal | 335 | brasl %r14,do_signal # call do_signal |
342 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | 336 | tm __TI_flags+7(%r12),_TIF_SYSCALL |
343 | jo sysc_restart | 337 | jno sysc_return |
344 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 338 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
345 | jo sysc_singlestep | 339 | lghi %r7,0 # svc 0 returns -ENOSYS |
346 | j sysc_return | 340 | lh %r1,SP_SVC_CODE+2(%r15) # load new svc number |
341 | cghi %r1,NR_syscalls | ||
342 | jnl sysc_nr_ok # invalid svc number -> do svc 0 | ||
343 | slag %r7,%r1,2 | ||
344 | j sysc_nr_ok # restart svc | ||
347 | 345 | ||
348 | # | 346 | # |
349 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 347 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -354,23 +352,10 @@ sysc_notify_resume: | |||
354 | jg do_notify_resume # call do_notify_resume | 352 | jg do_notify_resume # call do_notify_resume |
355 | 353 | ||
356 | # | 354 | # |
357 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
358 | # | ||
359 | sysc_restart: | ||
360 | ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
361 | lg %r7,SP_R2(%r15) # load new svc number | ||
362 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
363 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
364 | sth %r7,SP_SVCNR(%r15) | ||
365 | slag %r7,%r7,2 | ||
366 | j sysc_nr_ok # restart svc | ||
367 | |||
368 | # | ||
369 | # _TIF_PER_TRAP is set, call do_per_trap | 355 | # _TIF_PER_TRAP is set, call do_per_trap |
370 | # | 356 | # |
371 | sysc_singlestep: | 357 | sysc_singlestep: |
372 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 358 | ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
373 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
374 | la %r2,SP_PTREGS(%r15) # address of register-save area | 359 | la %r2,SP_PTREGS(%r15) # address of register-save area |
375 | larl %r14,sysc_return # load adr. of system return | 360 | larl %r14,sysc_return # load adr. of system return |
376 | jg do_per_trap | 361 | jg do_per_trap |
@@ -382,7 +367,7 @@ sysc_singlestep: | |||
382 | sysc_tracesys: | 367 | sysc_tracesys: |
383 | la %r2,SP_PTREGS(%r15) # load pt_regs | 368 | la %r2,SP_PTREGS(%r15) # load pt_regs |
384 | la %r3,0 | 369 | la %r3,0 |
385 | llgh %r0,SP_SVCNR(%r15) | 370 | llgh %r0,SP_SVC_CODE+2(%r15) |
386 | stg %r0,SP_R2(%r15) | 371 | stg %r0,SP_R2(%r15) |
387 | brasl %r14,do_syscall_trace_enter | 372 | brasl %r14,do_syscall_trace_enter |
388 | lghi %r0,NR_syscalls | 373 | lghi %r0,NR_syscalls |
@@ -397,7 +382,7 @@ sysc_tracego: | |||
397 | basr %r14,%r8 # call sys_xxx | 382 | basr %r14,%r8 # call sys_xxx |
398 | stg %r2,SP_R2(%r15) # store return value | 383 | stg %r2,SP_R2(%r15) # store return value |
399 | sysc_tracenogo: | 384 | sysc_tracenogo: |
400 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 385 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
401 | jz sysc_return | 386 | jz sysc_return |
402 | la %r2,SP_PTREGS(%r15) # load pt_regs | 387 | la %r2,SP_PTREGS(%r15) # load pt_regs |
403 | larl %r14,sysc_return # return point is sysc_return | 388 | larl %r14,sysc_return # return point is sysc_return |
@@ -470,7 +455,6 @@ ENTRY(pgm_check_handler) | |||
470 | jnz pgm_per # got per exception -> special case | 455 | jnz pgm_per # got per exception -> special case |
471 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 456 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
472 | CREATE_STACK_FRAME __LC_SAVE_AREA | 457 | CREATE_STACK_FRAME __LC_SAVE_AREA |
473 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
474 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | 458 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW |
475 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 459 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
476 | HANDLE_SIE_INTERCEPT | 460 | HANDLE_SIE_INTERCEPT |
@@ -550,9 +534,10 @@ pgm_exit2: | |||
550 | pgm_svcper: | 534 | pgm_svcper: |
551 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 535 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
552 | CREATE_STACK_FRAME __LC_SAVE_AREA | 536 | CREATE_STACK_FRAME __LC_SAVE_AREA |
553 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
554 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
555 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 537 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
538 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
539 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
540 | oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
556 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 541 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
557 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 542 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
558 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 543 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -561,7 +546,6 @@ pgm_svcper: | |||
561 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 546 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
562 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS | 547 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS |
563 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 548 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
564 | oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
565 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 549 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
566 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 550 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
567 | j sysc_do_svc | 551 | j sysc_do_svc |
@@ -571,7 +555,6 @@ pgm_svcper: | |||
571 | # | 555 | # |
572 | kernel_per: | 556 | kernel_per: |
573 | REENABLE_IRQS | 557 | REENABLE_IRQS |
574 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
575 | la %r2,SP_PTREGS(%r15) # address of register-save area | 558 | la %r2,SP_PTREGS(%r15) # address of register-save area |
576 | brasl %r14,do_per_trap | 559 | brasl %r14,do_per_trap |
577 | j pgm_exit | 560 | j pgm_exit |
@@ -869,12 +852,12 @@ restart_go: | |||
869 | # PSW restart interrupt handler | 852 | # PSW restart interrupt handler |
870 | # | 853 | # |
871 | ENTRY(psw_restart_int_handler) | 854 | ENTRY(psw_restart_int_handler) |
872 | stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 855 | stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 |
873 | larl %r15,restart_stack # load restart stack | 856 | larl %r15,restart_stack # load restart stack |
874 | lg %r15,0(%r15) | 857 | lg %r15,0(%r15) |
875 | aghi %r15,-SP_SIZE # make room for pt_regs | 858 | aghi %r15,-SP_SIZE # make room for pt_regs |
876 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 859 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
877 | mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 860 | mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack |
878 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw | 861 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw |
879 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 862 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
880 | brasl %r14,do_restart | 863 | brasl %r14,do_restart |
@@ -972,9 +955,11 @@ cleanup_system_call: | |||
972 | stg %r15,32(%r12) | 955 | stg %r15,32(%r12) |
973 | stg %r11,0(%r12) | 956 | stg %r11,0(%r12) |
974 | CREATE_STACK_FRAME __LC_SAVE_AREA | 957 | CREATE_STACK_FRAME __LC_SAVE_AREA |
975 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
976 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
977 | mvc 8(8,%r12),__LC_THREAD_INFO | 958 | mvc 8(8,%r12),__LC_THREAD_INFO |
959 | lg %r12,__LC_THREAD_INFO | ||
960 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
961 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
962 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
978 | cleanup_vtime: | 963 | cleanup_vtime: |
979 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 964 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
980 | jhe cleanup_stime | 965 | jhe cleanup_stime |
@@ -1096,6 +1081,7 @@ sie_exit: | |||
1096 | lghi %r2,0 | 1081 | lghi %r2,0 |
1097 | br %r14 | 1082 | br %r14 |
1098 | sie_fault: | 1083 | sie_fault: |
1084 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1099 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct | 1085 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct |
1100 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) | 1086 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) |
1101 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | 1087 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area |
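Note on the sysc_sigpending hunk above: it replaces the old _TIF_RESTART_SVC mechanism. After do_signal() returns, the system call is restarted only while _TIF_SYSCALL is still set, using the svc number left in the saved svc_code; an out-of-range number falls back to svc 0, which returns -ENOSYS. The stand-alone C model below only illustrates that decision and is not kernel code; the constants and the struct are simplified stand-ins.

#include <stdio.h>

#define NR_SYSCALLS 400                 /* stand-in for NR_syscalls */
#define TIF_SYSCALL (1U << 0)           /* stand-in for _TIF_SYSCALL */

struct regs_model { unsigned short svc_code; };

/* Returns the svc number to restart, 0 for "do svc 0 -> -ENOSYS",
 * or -1 if the interrupted system call is not restarted at all. */
static int pick_restart(unsigned int tif_flags, const struct regs_model *regs)
{
	if (!(tif_flags & TIF_SYSCALL))
		return -1;                      /* just return to user space */
	if (regs->svc_code >= NR_SYSCALLS)
		return 0;                       /* invalid number -> svc 0 */
	return regs->svc_code;                  /* restart this system call */
}

int main(void)
{
	struct regs_model r = { .svc_code = 114 };
	printf("restart decision: %d\n", pick_restart(TIF_SYSCALL, &r));
	return 0;
}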
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 2d781bab37bb..900068d2bf92 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -449,10 +449,28 @@ ENTRY(start) | |||
449 | # | 449 | # |
450 | .org 0x10000 | 450 | .org 0x10000 |
451 | ENTRY(startup) | 451 | ENTRY(startup) |
452 | j .Lep_startup_normal | ||
453 | .org 0x10008 | ||
454 | # | ||
455 | # This is a list of s390 kernel entry points. At address 0x1000f the number of | ||
456 | # valid entry points is stored. | ||
457 | # | ||
458 | # IMPORTANT: Do not change this table, it is s390 kernel ABI! | ||
459 | # | ||
460 | .ascii "S390EP" | ||
461 | .byte 0x00,0x01 | ||
462 | # | ||
463 | # kdump startup-code at 0x10010, running in 64 bit absolute addressing mode | ||
464 | # | ||
465 | .org 0x10010 | ||
466 | ENTRY(startup_kdump) | ||
467 | j .Lep_startup_kdump | ||
468 | .Lep_startup_normal: | ||
452 | basr %r13,0 # get base | 469 | basr %r13,0 # get base |
453 | .LPG0: | 470 | .LPG0: |
454 | xc 0x200(256),0x200 # partially clear lowcore | 471 | xc 0x200(256),0x200 # partially clear lowcore |
455 | xc 0x300(256),0x300 | 472 | xc 0x300(256),0x300 |
473 | xc 0xe00(256),0xe00 | ||
456 | stck __LC_LAST_UPDATE_CLOCK | 474 | stck __LC_LAST_UPDATE_CLOCK |
457 | spt 5f-.LPG0(%r13) | 475 | spt 5f-.LPG0(%r13) |
458 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) | 476 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) |
@@ -534,6 +552,8 @@ ENTRY(startup) | |||
534 | .align 8 | 552 | .align 8 |
535 | 5: .long 0x7fffffff,0xffffffff | 553 | 5: .long 0x7fffffff,0xffffffff |
536 | 554 | ||
555 | #include "head_kdump.S" | ||
556 | |||
537 | # | 557 | # |
538 | # params at 10400 (setup.h) | 558 | # params at 10400 (setup.h) |
539 | # | 559 | # |
@@ -541,6 +561,8 @@ ENTRY(startup) | |||
541 | .long 0,0 # IPL_DEVICE | 561 | .long 0,0 # IPL_DEVICE |
542 | .long 0,0 # INITRD_START | 562 | .long 0,0 # INITRD_START |
543 | .long 0,0 # INITRD_SIZE | 563 | .long 0,0 # INITRD_SIZE |
564 | .long 0,0 # OLDMEM_BASE | ||
565 | .long 0,0 # OLDMEM_SIZE | ||
544 | 566 | ||
545 | .org COMMAND_LINE | 567 | .org COMMAND_LINE |
546 | .byte "root=/dev/ram0 ro" | 568 | .byte "root=/dev/ram0 ro" |
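Note on the entry point table added to head.S above, which is declared kernel ABI: the string "S390EP" sits at image offset 0x10008 and the byte at 0x1000f holds the number of valid entry points (startup at 0x10000, startup_kdump at 0x10010). A small user-space check of a raw kernel image might look like the sketch below; the default file name is only an example and error handling is minimal.

#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
	unsigned char buf[8];
	const char *name = argc > 1 ? argv[1] : "image";   /* raw kernel image */
	FILE *f = fopen(name, "rb");

	if (!f || fseek(f, 0x10008, SEEK_SET) || fread(buf, 1, 8, f) != 8) {
		perror(name);
		return 1;
	}
	fclose(f);
	if (memcmp(buf, "S390EP", 6) == 0)
		printf("S390EP marker found, %u valid entry point(s)\n", buf[7]);
	else
		printf("no S390EP marker at 0x10008\n");
	return 0;
}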
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index f21954b44dc1..d3f1ab7d90ad 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -92,7 +92,7 @@ ENTRY(_stext) | |||
92 | .LPG3: | 92 | .LPG3: |
93 | # check control registers | 93 | # check control registers |
94 | stctl %c0,%c15,0(%r15) | 94 | stctl %c0,%c15,0(%r15) |
95 | oi 2(%r15),0x40 # enable sigp emergency signal | 95 | oi 2(%r15),0x60 # enable sigp emergency & external call |
96 | oi 0(%r15),0x10 # switch on low address protection | 96 | oi 0(%r15),0x10 # switch on low address protection |
97 | lctl %c0,%c15,0(%r15) | 97 | lctl %c0,%c15,0(%r15) |
98 | 98 | ||
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index ae5d492b069e..99348c0eaa41 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -90,7 +90,7 @@ ENTRY(_stext) | |||
90 | .LPG3: | 90 | .LPG3: |
91 | # check control registers | 91 | # check control registers |
92 | stctg %c0,%c15,0(%r15) | 92 | stctg %c0,%c15,0(%r15) |
93 | oi 6(%r15),0x40 # enable sigp emergency signal | 93 | oi 6(%r15),0x60 # enable sigp emergency & external call |
94 | oi 4(%r15),0x10 # switch on low address protection | 94 | oi 4(%r15),0x10 # switch on low address protection |
95 | lctlg %c0,%c15,0(%r15) | 95 | lctlg %c0,%c15,0(%r15) |
96 | 96 | ||
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S new file mode 100644 index 000000000000..e1ac3893e972 --- /dev/null +++ b/arch/s390/kernel/head_kdump.S | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * S390 kdump lowlevel functions (new kernel) | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #define DATAMOVER_ADDR 0x4000 | ||
9 | #define COPY_PAGE_ADDR 0x6000 | ||
10 | |||
11 | #ifdef CONFIG_CRASH_DUMP | ||
12 | |||
13 | # | ||
14 | # kdump entry (new kernel - not yet relocated) | ||
15 | # | ||
16 | # Note: This code has to be position independent | ||
17 | # | ||
18 | |||
19 | .align 2 | ||
20 | .Lep_startup_kdump: | ||
21 | lhi %r1,2 # mode 2 = esame (dump) | ||
22 | sigp %r1,%r0,0x12 # Switch to esame mode | ||
23 | sam64 # Switch to 64 bit addressing | ||
24 | basr %r13,0 | ||
25 | .Lbase: | ||
26 | larl %r2,.Lbase_addr # Check if we have been | ||

27 | lg %r2,0(%r2) # already relocated: | ||
28 | clgr %r2,%r13 # | ||
29 | jne .Lrelocate # No : Start data mover | ||
30 | lghi %r2,0 # Yes: Start kdump kernel | ||
31 | brasl %r14,startup_kdump_relocated | ||
32 | |||
33 | .Lrelocate: | ||
34 | larl %r4,startup | ||
35 | lg %r2,0x418(%r4) # Get kdump base | ||
36 | lg %r3,0x420(%r4) # Get kdump size | ||
37 | |||
38 | larl %r10,.Lcopy_start # Source of data mover | ||
39 | lghi %r8,DATAMOVER_ADDR # Target of data mover | ||
40 | mvc 0(256,%r8),0(%r10) # Copy data mover code | ||
41 | |||
42 | agr %r8,%r2 # Copy data mover to | ||
43 | mvc 0(256,%r8),0(%r10) # reserved mem | ||
44 | |||
45 | lghi %r14,DATAMOVER_ADDR # Jump to copied data mover | ||
46 | basr %r14,%r14 | ||
47 | .Lbase_addr: | ||
48 | .quad .Lbase | ||
49 | |||
50 | # | ||
51 | # kdump data mover code (runs at address DATAMOVER_ADDR) | ||
52 | # | ||
53 | # r2: kdump base address | ||
54 | # r3: kdump size | ||
55 | # | ||
56 | .Lcopy_start: | ||
57 | basr %r13,0 # Base | ||
58 | 0: | ||
59 | lgr %r11,%r2 # Save kdump base address | ||
60 | lgr %r12,%r2 | ||
61 | agr %r12,%r3 # Compute kdump end address | ||
62 | |||
63 | lghi %r5,0 | ||
64 | lghi %r10,COPY_PAGE_ADDR # Load copy page address | ||
65 | 1: | ||
66 | mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp | ||
67 | mvc 0(256,%r5),0(%r11) # Copy new kernel to old | ||
68 | mvc 0(256,%r11),0(%r10) # Copy tmp to new | ||
69 | aghi %r11,256 | ||
70 | aghi %r5,256 | ||
71 | clgr %r11,%r12 | ||
72 | jl 1b | ||
73 | |||
74 | lg %r14,.Lstartup_kdump-0b(%r13) | ||
75 | basr %r14,%r14 # Start relocated kernel | ||
76 | .Lstartup_kdump: | ||
77 | .long 0x00000000,0x00000000 + startup_kdump_relocated | ||
78 | .Lcopy_end: | ||
79 | |||
80 | # | ||
81 | # Startup of kdump (relocated new kernel) | ||
82 | # | ||
83 | .align 2 | ||
84 | startup_kdump_relocated: | ||
85 | basr %r13,0 | ||
86 | 0: | ||
87 | mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW | ||
88 | mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW | ||
89 | lhi %r1,1 # Start new kernel | ||
90 | diag %r1,%r1,0x308 # with diag 308 | ||
91 | |||
92 | .Lno_diag308: # No diag 308 | ||
93 | sam31 # Switch to 31 bit addr mode | ||
94 | sr %r1,%r1 # Erase register r1 | ||
95 | sr %r2,%r2 # Erase register r2 | ||
96 | sigp %r1,%r2,0x12 # Switch to 31 bit arch mode | ||
97 | lpsw 0 # Start new kernel... | ||
98 | .align 8 | ||
99 | .Lrestart_psw: | ||
100 | .long 0x00080000,0x80000000 + startup | ||
101 | .Lpgm_psw: | ||
102 | .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308 | ||
103 | #else | ||
104 | .align 2 | ||
105 | .Lep_startup_kdump: | ||
106 | #ifdef CONFIG_64BIT | ||
107 | larl %r13,startup_kdump_crash | ||
108 | lpswe 0(%r13) | ||
109 | .align 8 | ||
110 | startup_kdump_crash: | ||
111 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash | ||
112 | #else | ||
113 | basr %r13,0 | ||
114 | 0: lpsw startup_kdump_crash-0b(%r13) | ||
115 | .align 8 | ||
116 | startup_kdump_crash: | ||
117 | .long 0x000a0000,0x00000000 + startup_kdump_crash | ||
118 | #endif /* CONFIG_64BIT */ | ||
119 | #endif /* CONFIG_CRASH_DUMP */ | ||
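Note on the data mover in .Lcopy_start above: it exchanges the lowest <kdump size> bytes of memory (the old kernel) with the relocated kdump kernel, 256 bytes at a time, through the page at COPY_PAGE_ADDR, so the overwritten old-kernel memory survives at the kdump load address. The plain C model below only illustrates that swap; it has no relation to the real, position-independent assembler.

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define BLOCK 256                       /* the data mover copies 256-byte blocks */

static void swap_regions(unsigned char *low, unsigned char *kdump, size_t size)
{
	unsigned char tmp[BLOCK];               /* plays the role of COPY_PAGE_ADDR */
	size_t off;

	for (off = 0; off < size; off += BLOCK) {
		memcpy(tmp, low + off, BLOCK);          /* old kernel -> tmp       */
		memcpy(low + off, kdump + off, BLOCK);  /* kdump kernel -> low mem */
		memcpy(kdump + off, tmp, BLOCK);        /* old kernel -> kdump mem */
	}
}

int main(void)
{
	size_t size = 4 * BLOCK;
	unsigned char *low = calloc(1, size), *kdump = malloc(size);

	memset(kdump, 0xff, size);
	swap_regions(low, kdump, size);
	printf("low[0]=%#x kdump[0]=%#x\n", low[0], kdump[0]);  /* 0xff and 0 */
	free(low);
	free(kdump);
	return 0;
}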
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 48c710206366..affa8e68124a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/gfp.h> | 18 | #include <linux/gfp.h> |
19 | #include <linux/crash_dump.h> | ||
19 | #include <asm/ipl.h> | 20 | #include <asm/ipl.h> |
20 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
21 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
@@ -26,6 +27,7 @@ | |||
26 | #include <asm/sclp.h> | 27 | #include <asm/sclp.h> |
27 | #include <asm/sigp.h> | 28 | #include <asm/sigp.h> |
28 | #include <asm/checksum.h> | 29 | #include <asm/checksum.h> |
30 | #include "entry.h" | ||
29 | 31 | ||
30 | #define IPL_PARM_BLOCK_VERSION 0 | 32 | #define IPL_PARM_BLOCK_VERSION 0 |
31 | 33 | ||
@@ -275,8 +277,8 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
275 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 277 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
276 | 278 | ||
277 | /* VM IPL PARM routines */ | 279 | /* VM IPL PARM routines */ |
278 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, | 280 | static size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
279 | const struct ipl_parameter_block *ipb) | 281 | const struct ipl_parameter_block *ipb) |
280 | { | 282 | { |
281 | int i; | 283 | int i; |
282 | size_t len; | 284 | size_t len; |
@@ -338,8 +340,8 @@ static size_t scpdata_length(const char* buf, size_t count) | |||
338 | return count; | 340 | return count; |
339 | } | 341 | } |
340 | 342 | ||
341 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | 343 | static size_t reipl_append_ascii_scpdata(char *dest, size_t size, |
342 | const struct ipl_parameter_block *ipb) | 344 | const struct ipl_parameter_block *ipb) |
343 | { | 345 | { |
344 | size_t count; | 346 | size_t count; |
345 | size_t i; | 347 | size_t i; |
@@ -1738,7 +1740,11 @@ static struct kobj_attribute on_restart_attr = | |||
1738 | 1740 | ||
1739 | void do_restart(void) | 1741 | void do_restart(void) |
1740 | { | 1742 | { |
1743 | smp_restart_with_online_cpu(); | ||
1741 | smp_send_stop(); | 1744 | smp_send_stop(); |
1745 | #ifdef CONFIG_CRASH_DUMP | ||
1746 | crash_kexec(NULL); | ||
1747 | #endif | ||
1742 | on_restart_trigger.action->fn(&on_restart_trigger); | 1748 | on_restart_trigger.action->fn(&on_restart_trigger); |
1743 | stop_run(&on_restart_trigger); | 1749 | stop_run(&on_restart_trigger); |
1744 | } | 1750 | } |
@@ -2009,7 +2015,7 @@ static void do_reset_calls(void) | |||
2009 | 2015 | ||
2010 | u32 dump_prefix_page; | 2016 | u32 dump_prefix_page; |
2011 | 2017 | ||
2012 | void s390_reset_system(void) | 2018 | void s390_reset_system(void (*func)(void *), void *data) |
2013 | { | 2019 | { |
2014 | struct _lowcore *lc; | 2020 | struct _lowcore *lc; |
2015 | 2021 | ||
@@ -2028,15 +2034,19 @@ void s390_reset_system(void) | |||
2028 | __ctl_clear_bit(0,28); | 2034 | __ctl_clear_bit(0,28); |
2029 | 2035 | ||
2030 | /* Set new machine check handler */ | 2036 | /* Set new machine check handler */ |
2031 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2037 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2032 | S390_lowcore.mcck_new_psw.addr = | 2038 | S390_lowcore.mcck_new_psw.addr = |
2033 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; | 2039 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; |
2034 | 2040 | ||
2035 | /* Set new program check handler */ | 2041 | /* Set new program check handler */ |
2036 | S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2042 | S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2037 | S390_lowcore.program_new_psw.addr = | 2043 | S390_lowcore.program_new_psw.addr = |
2038 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 2044 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
2039 | 2045 | ||
2046 | /* Store status at absolute zero */ | ||
2047 | store_status(); | ||
2048 | |||
2040 | do_reset_calls(); | 2049 | do_reset_calls(); |
2050 | if (func) | ||
2051 | func(data); | ||
2041 | } | 2052 | } |
2042 | |||
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1f4050d45f78..b9a7fdd9c814 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -33,7 +33,8 @@ static const struct irq_class intrclass_names[] = { | |||
33 | {.name = "EXT" }, | 33 | {.name = "EXT" }, |
34 | {.name = "I/O" }, | 34 | {.name = "I/O" }, |
35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, | 35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, |
36 | {.name = "IPI", .desc = "[EXT] Signal Processor" }, | 36 | {.name = "EXC", .desc = "[EXT] External Call" }, |
37 | {.name = "EMS", .desc = "[EXT] Emergency Signal" }, | ||
37 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, | 38 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, |
38 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, | 39 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, |
39 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, | 40 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, |
@@ -42,8 +43,8 @@ static const struct irq_class intrclass_names[] = { | |||
42 | {.name = "SCP", .desc = "[EXT] Service Call" }, | 43 | {.name = "SCP", .desc = "[EXT] Service Call" }, |
43 | {.name = "IUC", .desc = "[EXT] IUCV" }, | 44 | {.name = "IUC", .desc = "[EXT] IUCV" }, |
44 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, | 45 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, |
46 | {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, | ||
45 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, | 47 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, |
46 | {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, | ||
47 | {.name = "DAS", .desc = "[I/O] DASD" }, | 48 | {.name = "DAS", .desc = "[I/O] DASD" }, |
48 | {.name = "C15", .desc = "[I/O] 3215" }, | 49 | {.name = "C15", .desc = "[I/O] 3215" }, |
49 | {.name = "C70", .desc = "[I/O] 3270" }, | 50 | {.name = "C70", .desc = "[I/O] 3270" }, |
@@ -53,6 +54,7 @@ static const struct irq_class intrclass_names[] = { | |||
53 | {.name = "CLW", .desc = "[I/O] CLAW" }, | 54 | {.name = "CLW", .desc = "[I/O] CLAW" }, |
54 | {.name = "CTC", .desc = "[I/O] CTC" }, | 55 | {.name = "CTC", .desc = "[I/O] CTC" }, |
55 | {.name = "APB", .desc = "[I/O] AP Bus" }, | 56 | {.name = "APB", .desc = "[I/O] AP Bus" }, |
57 | {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, | ||
56 | {.name = "NMI", .desc = "[NMI] Machine Check" }, | 58 | {.name = "NMI", .desc = "[NMI] Machine Check" }, |
57 | }; | 59 | }; |
58 | 60 | ||
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 44cc06bedf77..b987ab2c1541 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -18,26 +18,15 @@ struct insn { | |||
18 | } __packed; | 18 | } __packed; |
19 | 19 | ||
20 | struct insn_args { | 20 | struct insn_args { |
21 | unsigned long *target; | 21 | struct jump_entry *entry; |
22 | struct insn *insn; | 22 | enum jump_label_type type; |
23 | ssize_t size; | ||
24 | }; | 23 | }; |
25 | 24 | ||
26 | static int __arch_jump_label_transform(void *data) | 25 | static void __jump_label_transform(struct jump_entry *entry, |
26 | enum jump_label_type type) | ||
27 | { | 27 | { |
28 | struct insn_args *args = data; | ||
29 | int rc; | ||
30 | |||
31 | rc = probe_kernel_write(args->target, args->insn, args->size); | ||
32 | WARN_ON_ONCE(rc < 0); | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void arch_jump_label_transform(struct jump_entry *entry, | ||
37 | enum jump_label_type type) | ||
38 | { | ||
39 | struct insn_args args; | ||
40 | struct insn insn; | 28 | struct insn insn; |
29 | int rc; | ||
41 | 30 | ||
42 | if (type == JUMP_LABEL_ENABLE) { | 31 | if (type == JUMP_LABEL_ENABLE) { |
43 | /* brcl 15,offset */ | 32 | /* brcl 15,offset */ |
@@ -49,11 +38,33 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
49 | insn.offset = 0; | 38 | insn.offset = 0; |
50 | } | 39 | } |
51 | 40 | ||
52 | args.target = (void *) entry->code; | 41 | rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); |
53 | args.insn = &insn; | 42 | WARN_ON_ONCE(rc < 0); |
54 | args.size = JUMP_LABEL_NOP_SIZE; | 43 | } |
55 | 44 | ||
56 | stop_machine(__arch_jump_label_transform, &args, NULL); | 45 | static int __sm_arch_jump_label_transform(void *data) |
46 | { | ||
47 | struct insn_args *args = data; | ||
48 | |||
49 | __jump_label_transform(args->entry, args->type); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | void arch_jump_label_transform(struct jump_entry *entry, | ||
54 | enum jump_label_type type) | ||
55 | { | ||
56 | struct insn_args args; | ||
57 | |||
58 | args.entry = entry; | ||
59 | args.type = type; | ||
60 | |||
61 | stop_machine(__sm_arch_jump_label_transform, &args, NULL); | ||
62 | } | ||
63 | |||
64 | void arch_jump_label_transform_static(struct jump_entry *entry, | ||
65 | enum jump_label_type type) | ||
66 | { | ||
67 | __jump_label_transform(entry, type); | ||
57 | } | 68 | } |
58 | 69 | ||
59 | #endif | 70 | #endif |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 1d05d669107c..64b761aef004 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -635,7 +635,7 @@ void __kprobes jprobe_return(void) | |||
635 | asm volatile(".word 0x0002"); | 635 | asm volatile(".word 0x0002"); |
636 | } | 636 | } |
637 | 637 | ||
638 | void __kprobes jprobe_return_end(void) | 638 | static void __used __kprobes jprobe_return_end(void) |
639 | { | 639 | { |
640 | asm volatile("bcr 0,0"); | 640 | asm volatile("bcr 0,0"); |
641 | } | 641 | } |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b09b9c62573e..3cd0f25ab015 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -1,10 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/machine_kexec.c | 2 | * arch/s390/kernel/machine_kexec.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2005,2006 | 4 | * Copyright IBM Corp. 2005,2011 |
5 | * | 5 | * |
6 | * Author(s): Rolf Adelsberger, | 6 | * Author(s): Rolf Adelsberger, |
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
8 | * Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <linux/device.h> | 11 | #include <linux/device.h> |
@@ -21,12 +22,162 @@ | |||
21 | #include <asm/smp.h> | 22 | #include <asm/smp.h> |
22 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
23 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/diag.h> | ||
26 | #include <asm/asm-offsets.h> | ||
24 | 27 | ||
25 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); | 28 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); |
26 | 29 | ||
27 | extern const unsigned char relocate_kernel[]; | 30 | extern const unsigned char relocate_kernel[]; |
28 | extern const unsigned long long relocate_kernel_len; | 31 | extern const unsigned long long relocate_kernel_len; |
29 | 32 | ||
33 | #ifdef CONFIG_CRASH_DUMP | ||
34 | |||
35 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); | ||
36 | |||
37 | /* | ||
38 | * Create ELF notes for one CPU | ||
39 | */ | ||
40 | static void add_elf_notes(int cpu) | ||
41 | { | ||
42 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
43 | void *ptr; | ||
44 | |||
45 | memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); | ||
46 | ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); | ||
47 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
48 | memset(ptr, 0, sizeof(struct elf_note)); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Store status of next available physical CPU | ||
53 | */ | ||
54 | static int store_status_next(int start_cpu, int this_cpu) | ||
55 | { | ||
56 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
57 | int cpu, rc; | ||
58 | |||
59 | for (cpu = start_cpu; cpu < 65536; cpu++) { | ||
60 | if (cpu == this_cpu) | ||
61 | continue; | ||
62 | do { | ||
63 | rc = raw_sigp(cpu, sigp_stop_and_store_status); | ||
64 | } while (rc == sigp_busy); | ||
65 | if (rc != sigp_order_code_accepted) | ||
66 | continue; | ||
67 | if (sa->pref_reg) | ||
68 | return cpu; | ||
69 | } | ||
70 | return -1; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Initialize CPU ELF notes | ||
75 | */ | ||
76 | void setup_regs(void) | ||
77 | { | ||
78 | unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; | ||
79 | int cpu, this_cpu, phys_cpu = 0, first = 1; | ||
80 | |||
81 | this_cpu = stap(); | ||
82 | |||
83 | if (!S390_lowcore.prefixreg_save_area) | ||
84 | first = 0; | ||
85 | for_each_online_cpu(cpu) { | ||
86 | if (first) { | ||
87 | add_elf_notes(cpu); | ||
88 | first = 0; | ||
89 | continue; | ||
90 | } | ||
91 | phys_cpu = store_status_next(phys_cpu, this_cpu); | ||
92 | if (phys_cpu == -1) | ||
93 | break; | ||
94 | add_elf_notes(cpu); | ||
95 | phys_cpu++; | ||
96 | } | ||
97 | /* Copy dump CPU store status info to absolute zero */ | ||
98 | memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); | ||
99 | } | ||
100 | |||
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * Start kdump: We expect here that a store status has been done on our CPU | ||
105 | */ | ||
106 | static void __do_machine_kdump(void *image) | ||
107 | { | ||
108 | #ifdef CONFIG_CRASH_DUMP | ||
109 | int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; | ||
110 | |||
111 | __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); | ||
112 | setup_regs(); | ||
113 | start_kdump(1); | ||
114 | #endif | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Check if kdump checksums are valid: We call purgatory with parameter "0" | ||
119 | */ | ||
120 | static int kdump_csum_valid(struct kimage *image) | ||
121 | { | ||
122 | #ifdef CONFIG_CRASH_DUMP | ||
123 | int (*start_kdump)(int) = (void *)image->start; | ||
124 | int rc; | ||
125 | |||
126 | __arch_local_irq_stnsm(0xfb); /* disable DAT */ | ||
127 | rc = start_kdump(0); | ||
128 | __arch_local_irq_stosm(0x04); /* enable DAT */ | ||
129 | return rc ? 0 : -EINVAL; | ||
130 | #else | ||
131 | return -EINVAL; | ||
132 | #endif | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Map or unmap crashkernel memory | ||
137 | */ | ||
138 | static void crash_map_pages(int enable) | ||
139 | { | ||
140 | unsigned long size = resource_size(&crashk_res); | ||
141 | |||
142 | BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN || | ||
143 | size % KEXEC_CRASH_MEM_ALIGN); | ||
144 | if (enable) | ||
145 | vmem_add_mapping(crashk_res.start, size); | ||
146 | else | ||
147 | vmem_remove_mapping(crashk_res.start, size); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Map crashkernel memory | ||
152 | */ | ||
153 | void crash_map_reserved_pages(void) | ||
154 | { | ||
155 | crash_map_pages(1); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Unmap crashkernel memory | ||
160 | */ | ||
161 | void crash_unmap_reserved_pages(void) | ||
162 | { | ||
163 | crash_map_pages(0); | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * Give back memory to hypervisor before new kdump is loaded | ||
168 | */ | ||
169 | static int machine_kexec_prepare_kdump(void) | ||
170 | { | ||
171 | #ifdef CONFIG_CRASH_DUMP | ||
172 | if (MACHINE_IS_VM) | ||
173 | diag10_range(PFN_DOWN(crashk_res.start), | ||
174 | PFN_DOWN(crashk_res.end - crashk_res.start + 1)); | ||
175 | return 0; | ||
176 | #else | ||
177 | return -EINVAL; | ||
178 | #endif | ||
179 | } | ||
180 | |||
30 | int machine_kexec_prepare(struct kimage *image) | 181 | int machine_kexec_prepare(struct kimage *image) |
31 | { | 182 | { |
32 | void *reboot_code_buffer; | 183 | void *reboot_code_buffer; |
@@ -35,6 +186,9 @@ int machine_kexec_prepare(struct kimage *image) | |||
35 | if (ipl_flags & IPL_NSS_VALID) | 186 | if (ipl_flags & IPL_NSS_VALID) |
36 | return -ENOSYS; | 187 | return -ENOSYS; |
37 | 188 | ||
189 | if (image->type == KEXEC_TYPE_CRASH) | ||
190 | return machine_kexec_prepare_kdump(); | ||
191 | |||
38 | /* We don't support anything but the default image type for now. */ | 192 | /* We don't support anything but the default image type for now. */ |
39 | if (image->type != KEXEC_TYPE_DEFAULT) | 193 | if (image->type != KEXEC_TYPE_DEFAULT) |
40 | return -EINVAL; | 194 | return -EINVAL; |
@@ -51,27 +205,53 @@ void machine_kexec_cleanup(struct kimage *image) | |||
51 | { | 205 | { |
52 | } | 206 | } |
53 | 207 | ||
208 | void arch_crash_save_vmcoreinfo(void) | ||
209 | { | ||
210 | VMCOREINFO_SYMBOL(lowcore_ptr); | ||
211 | VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); | ||
212 | } | ||
213 | |||
54 | void machine_shutdown(void) | 214 | void machine_shutdown(void) |
55 | { | 215 | { |
56 | } | 216 | } |
57 | 217 | ||
58 | static void __machine_kexec(void *data) | 218 | /* |
219 | * Do normal kexec | ||
220 | */ | ||
221 | static void __do_machine_kexec(void *data) | ||
59 | { | 222 | { |
60 | relocate_kernel_t data_mover; | 223 | relocate_kernel_t data_mover; |
61 | struct kimage *image = data; | 224 | struct kimage *image = data; |
62 | 225 | ||
63 | pfault_fini(); | ||
64 | s390_reset_system(); | ||
65 | |||
66 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); | 226 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); |
67 | 227 | ||
68 | /* Call the moving routine */ | 228 | /* Call the moving routine */ |
69 | (*data_mover)(&image->head, image->start); | 229 | (*data_mover)(&image->head, image->start); |
70 | for (;;); | ||
71 | } | 230 | } |
72 | 231 | ||
232 | /* | ||
233 | * Reset system and call either kdump or normal kexec | ||
234 | */ | ||
235 | static void __machine_kexec(void *data) | ||
236 | { | ||
237 | struct kimage *image = data; | ||
238 | |||
239 | pfault_fini(); | ||
240 | if (image->type == KEXEC_TYPE_CRASH) | ||
241 | s390_reset_system(__do_machine_kdump, data); | ||
242 | else | ||
243 | s390_reset_system(__do_machine_kexec, data); | ||
244 | disabled_wait((unsigned long) __builtin_return_address(0)); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Do either kdump or normal kexec. In case of kdump we first ask | ||
249 | * purgatory whether the kdump checksums are valid. | ||
250 | */ | ||
73 | void machine_kexec(struct kimage *image) | 251 | void machine_kexec(struct kimage *image) |
74 | { | 252 | { |
253 | if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image)) | ||
254 | return; | ||
75 | tracer_disable(); | 255 | tracer_disable(); |
76 | smp_send_stop(); | 256 | smp_send_stop(); |
77 | smp_switch_to_ipl_cpu(__machine_kexec, image); | 257 | smp_switch_to_ipl_cpu(__machine_kexec, image); |
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 0fbe4e32f7ba..19b4568f4cee 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c | |||
@@ -62,3 +62,72 @@ void detect_memory_layout(struct mem_chunk chunk[]) | |||
62 | arch_local_irq_restore(flags); | 62 | arch_local_irq_restore(flags); |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(detect_memory_layout); | 64 | EXPORT_SYMBOL(detect_memory_layout); |
65 | |||
66 | /* | ||
67 | * Create memory hole with given address, size, and type | ||
68 | */ | ||
69 | void create_mem_hole(struct mem_chunk chunks[], unsigned long addr, | ||
70 | unsigned long size, int type) | ||
71 | { | ||
72 | unsigned long start, end, new_size; | ||
73 | int i; | ||
74 | |||
75 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
76 | if (chunks[i].size == 0) | ||
77 | continue; | ||
78 | if (addr + size < chunks[i].addr) | ||
79 | continue; | ||
80 | if (addr >= chunks[i].addr + chunks[i].size) | ||
81 | continue; | ||
82 | start = max(addr, chunks[i].addr); | ||
83 | end = min(addr + size, chunks[i].addr + chunks[i].size); | ||
84 | new_size = end - start; | ||
85 | if (new_size == 0) | ||
86 | continue; | ||
87 | if (start == chunks[i].addr && | ||
88 | end == chunks[i].addr + chunks[i].size) { | ||
89 | /* Remove chunk */ | ||
90 | chunks[i].type = type; | ||
91 | } else if (start == chunks[i].addr) { | ||
92 | /* Make chunk smaller at start */ | ||
93 | if (i >= MEMORY_CHUNKS - 1) | ||
94 | panic("Unable to create memory hole"); | ||
95 | memmove(&chunks[i + 1], &chunks[i], | ||
96 | sizeof(struct mem_chunk) * | ||
97 | (MEMORY_CHUNKS - (i + 1))); | ||
98 | chunks[i + 1].addr = chunks[i].addr + new_size; | ||
99 | chunks[i + 1].size = chunks[i].size - new_size; | ||
100 | chunks[i].size = new_size; | ||
101 | chunks[i].type = type; | ||
102 | i += 1; | ||
103 | } else if (end == chunks[i].addr + chunks[i].size) { | ||
104 | /* Make chunk smaller at end */ | ||
105 | if (i >= MEMORY_CHUNKS - 1) | ||
106 | panic("Unable to create memory hole"); | ||
107 | memmove(&chunks[i + 1], &chunks[i], | ||
108 | sizeof(struct mem_chunk) * | ||
109 | (MEMORY_CHUNKS - (i + 1))); | ||
110 | chunks[i + 1].addr = start; | ||
111 | chunks[i + 1].size = new_size; | ||
112 | chunks[i + 1].type = type; | ||
113 | chunks[i].size -= new_size; | ||
114 | i += 1; | ||
115 | } else { | ||
116 | /* Create memory hole */ | ||
117 | if (i >= MEMORY_CHUNKS - 2) | ||
118 | panic("Unable to create memory hole"); | ||
119 | memmove(&chunks[i + 2], &chunks[i], | ||
120 | sizeof(struct mem_chunk) * | ||
121 | (MEMORY_CHUNKS - (i + 2))); | ||
122 | chunks[i + 1].addr = addr; | ||
123 | chunks[i + 1].size = size; | ||
124 | chunks[i + 1].type = type; | ||
125 | chunks[i + 2].addr = addr + size; | ||
126 | chunks[i + 2].size = | ||
127 | chunks[i].addr + chunks[i].size - (addr + size); | ||
128 | chunks[i + 2].type = chunks[i].type; | ||
129 | chunks[i].size = addr - chunks[i].addr; | ||
130 | i += 2; | ||
131 | } | ||
132 | } | ||
133 | } | ||
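Note on create_mem_hole() above: it rewrites the detected chunk array so that a given address range becomes its own chunk of the requested type, splitting an enclosing chunk into up to three pieces. The self-contained model below shows only the "hole strictly inside one chunk" case with simplified types; it is illustrative and not the kernel function.

#include <stdio.h>

struct chunk { unsigned long addr, size; int type; };

/* Split c[0] into three chunks; assumes [addr, addr + size) lies strictly
 * inside c[0] and the array has room for the two extra entries. */
static int punch_hole(struct chunk c[], unsigned long addr,
		      unsigned long size, int type)
{
	c[2].addr = addr + size;
	c[2].size = c[0].addr + c[0].size - (addr + size);
	c[2].type = c[0].type;
	c[1].addr = addr;
	c[1].size = size;
	c[1].type = type;
	c[0].size = addr - c[0].addr;
	return 3;
}

int main(void)
{
	struct chunk c[3] = { { 0, 1UL << 30, 0 } };        /* one 1 GiB chunk */
	int i, n = punch_hole(c, 128UL << 20, 256UL << 20, 1);

	for (i = 0; i < n; i++)
		printf("chunk %d: addr=%#lx size=%#lx type=%d\n",
		       i, c[i].addr, c[i].size, c[i].type);
	return 0;
}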
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 541a7509faeb..9451b210a1b4 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/elfcore.h> | ||
15 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
@@ -117,7 +118,8 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
117 | struct pt_regs regs; | 118 | struct pt_regs regs; |
118 | 119 | ||
119 | memset(®s, 0, sizeof(regs)); | 120 | memset(®s, 0, sizeof(regs)); |
120 | regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 121 | regs.psw.mask = psw_kernel_bits | |
122 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
121 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | 123 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; |
122 | regs.gprs[9] = (unsigned long) fn; | 124 | regs.gprs[9] = (unsigned long) fn; |
123 | regs.gprs[10] = (unsigned long) arg; | 125 | regs.gprs[10] = (unsigned long) arg; |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 311e9d712888..6e0073e43f54 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -74,7 +74,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
74 | 74 | ||
75 | static void *c_start(struct seq_file *m, loff_t *pos) | 75 | static void *c_start(struct seq_file *m, loff_t *pos) |
76 | { | 76 | { |
77 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | 77 | return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; |
78 | } | 78 | } |
79 | 79 | ||
80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ef86ad243986..450931a45b68 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -42,34 +42,37 @@ enum s390_regset { | |||
42 | REGSET_GENERAL, | 42 | REGSET_GENERAL, |
43 | REGSET_FP, | 43 | REGSET_FP, |
44 | REGSET_LAST_BREAK, | 44 | REGSET_LAST_BREAK, |
45 | REGSET_SYSTEM_CALL, | ||
45 | REGSET_GENERAL_EXTENDED, | 46 | REGSET_GENERAL_EXTENDED, |
46 | }; | 47 | }; |
47 | 48 | ||
48 | void update_per_regs(struct task_struct *task) | 49 | void update_per_regs(struct task_struct *task) |
49 | { | 50 | { |
50 | static const struct per_regs per_single_step = { | ||
51 | .control = PER_EVENT_IFETCH, | ||
52 | .start = 0, | ||
53 | .end = PSW_ADDR_INSN, | ||
54 | }; | ||
55 | struct pt_regs *regs = task_pt_regs(task); | 51 | struct pt_regs *regs = task_pt_regs(task); |
56 | struct thread_struct *thread = &task->thread; | 52 | struct thread_struct *thread = &task->thread; |
57 | const struct per_regs *new; | 53 | struct per_regs old, new; |
58 | struct per_regs old; | 54 | |
59 | 55 | /* Copy user specified PER registers */ | |
60 | /* TIF_SINGLE_STEP overrides the user specified PER registers. */ | 56 | new.control = thread->per_user.control; |
61 | new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ? | 57 | new.start = thread->per_user.start; |
62 | &per_single_step : &thread->per_user; | 58 | new.end = thread->per_user.end; |
59 | |||
60 | /* merge TIF_SINGLE_STEP into user specified PER registers. */ | ||
61 | if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { | ||
62 | new.control |= PER_EVENT_IFETCH; | ||
63 | new.start = 0; | ||
64 | new.end = PSW_ADDR_INSN; | ||
65 | } | ||
63 | 66 | ||
64 | /* Take care of the PER enablement bit in the PSW. */ | 67 | /* Take care of the PER enablement bit in the PSW. */ |
65 | if (!(new->control & PER_EVENT_MASK)) { | 68 | if (!(new.control & PER_EVENT_MASK)) { |
66 | regs->psw.mask &= ~PSW_MASK_PER; | 69 | regs->psw.mask &= ~PSW_MASK_PER; |
67 | return; | 70 | return; |
68 | } | 71 | } |
69 | regs->psw.mask |= PSW_MASK_PER; | 72 | regs->psw.mask |= PSW_MASK_PER; |
70 | __ctl_store(old, 9, 11); | 73 | __ctl_store(old, 9, 11); |
71 | if (memcmp(new, &old, sizeof(struct per_regs)) != 0) | 74 | if (memcmp(&new, &old, sizeof(struct per_regs)) != 0) |
72 | __ctl_load(*new, 9, 11); | 75 | __ctl_load(new, 9, 11); |
73 | } | 76 | } |
74 | 77 | ||
75 | void user_enable_single_step(struct task_struct *task) | 78 | void user_enable_single_step(struct task_struct *task) |
@@ -166,8 +169,8 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
166 | */ | 169 | */ |
167 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); | 170 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); |
168 | if (addr == (addr_t) &dummy->regs.psw.mask) | 171 | if (addr == (addr_t) &dummy->regs.psw.mask) |
169 | /* Remove per bit from user psw. */ | 172 | /* Return a clean psw mask. */ |
170 | tmp &= ~PSW_MASK_PER; | 173 | tmp = psw_user_bits | (tmp & PSW_MASK_USER); |
171 | 174 | ||
172 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { | 175 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { |
173 | /* | 176 | /* |
@@ -289,18 +292,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
289 | * psw and gprs are stored on the stack | 292 | * psw and gprs are stored on the stack |
290 | */ | 293 | */ |
291 | if (addr == (addr_t) &dummy->regs.psw.mask && | 294 | if (addr == (addr_t) &dummy->regs.psw.mask && |
292 | #ifdef CONFIG_COMPAT | 295 | ((data & ~PSW_MASK_USER) != psw_user_bits || |
293 | data != PSW_MASK_MERGE(psw_user32_bits, data) && | 296 | ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) |
294 | #endif | ||
295 | data != PSW_MASK_MERGE(psw_user_bits, data)) | ||
296 | /* Invalid psw mask. */ | 297 | /* Invalid psw mask. */ |
297 | return -EINVAL; | 298 | return -EINVAL; |
298 | #ifndef CONFIG_64BIT | ||
299 | if (addr == (addr_t) &dummy->regs.psw.addr) | 299 | if (addr == (addr_t) &dummy->regs.psw.addr) |
300 | /* I'd like to reject addresses without the | 300 | /* |
301 | high order bit but older gdb's rely on it */ | 301 | * The debugger changed the instruction address, |
302 | data |= PSW_ADDR_AMODE; | 302 | * reset system call restart, see signal.c:do_signal |
303 | #endif | 303 | */ |
304 | task_thread_info(child)->system_call = 0; | ||
305 | |||
304 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; | 306 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; |
305 | 307 | ||
306 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { | 308 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { |
@@ -495,21 +497,21 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
495 | __u32 tmp; | 497 | __u32 tmp; |
496 | 498 | ||
497 | if (addr < (addr_t) &dummy32->regs.acrs) { | 499 | if (addr < (addr_t) &dummy32->regs.acrs) { |
500 | struct pt_regs *regs = task_pt_regs(child); | ||
498 | /* | 501 | /* |
499 | * psw and gprs are stored on the stack | 502 | * psw and gprs are stored on the stack |
500 | */ | 503 | */ |
501 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 504 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
502 | /* Fake a 31 bit psw mask. */ | 505 | /* Fake a 31 bit psw mask. */ |
503 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); | 506 | tmp = (__u32)(regs->psw.mask >> 32); |
504 | tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); | 507 | tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); |
505 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 508 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
506 | /* Fake a 31 bit psw address. */ | 509 | /* Fake a 31 bit psw address. */ |
507 | tmp = (__u32) task_pt_regs(child)->psw.addr | | 510 | tmp = (__u32) regs->psw.addr | |
508 | PSW32_ADDR_AMODE31; | 511 | (__u32)(regs->psw.mask & PSW_MASK_BA); |
509 | } else { | 512 | } else { |
510 | /* gpr 0-15 */ | 513 | /* gpr 0-15 */ |
511 | tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + | 514 | tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4); |
512 | addr*2 + 4); | ||
513 | } | 515 | } |
514 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 516 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
515 | /* | 517 | /* |
@@ -594,24 +596,32 @@ static int __poke_user_compat(struct task_struct *child, | |||
594 | addr_t offset; | 596 | addr_t offset; |
595 | 597 | ||
596 | if (addr < (addr_t) &dummy32->regs.acrs) { | 598 | if (addr < (addr_t) &dummy32->regs.acrs) { |
599 | struct pt_regs *regs = task_pt_regs(child); | ||
597 | /* | 600 | /* |
598 | * psw, gprs, acrs and orig_gpr2 are stored on the stack | 601 | * psw, gprs, acrs and orig_gpr2 are stored on the stack |
599 | */ | 602 | */ |
600 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 603 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
601 | /* Build a 64 bit psw mask from 31 bit mask. */ | 604 | /* Build a 64 bit psw mask from 31 bit mask. */ |
602 | if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) | 605 | if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) |
603 | /* Invalid psw mask. */ | 606 | /* Invalid psw mask. */ |
604 | return -EINVAL; | 607 | return -EINVAL; |
605 | task_pt_regs(child)->psw.mask = | 608 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
606 | PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); | 609 | (regs->psw.mask & PSW_MASK_BA) | |
610 | (__u64)(tmp & PSW32_MASK_USER) << 32; | ||
607 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 611 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
608 | /* Build a 64 bit psw address from 31 bit address. */ | 612 | /* Build a 64 bit psw address from 31 bit address. */ |
609 | task_pt_regs(child)->psw.addr = | 613 | regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; |
610 | (__u64) tmp & PSW32_ADDR_INSN; | 614 | /* Transfer 31 bit amode bit to psw mask. */ |
615 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | | ||
616 | (__u64)(tmp & PSW32_ADDR_AMODE); | ||
617 | /* | ||
618 | * The debugger changed the instruction address, | ||
619 | * reset system call restart, see signal.c:do_signal | ||
620 | */ | ||
621 | task_thread_info(child)->system_call = 0; | ||
611 | } else { | 622 | } else { |
612 | /* gpr 0-15 */ | 623 | /* gpr 0-15 */ |
613 | *(__u32*)((addr_t) &task_pt_regs(child)->psw | 624 | *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; |
614 | + addr*2 + 4) = tmp; | ||
615 | } | 625 | } |
616 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 626 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
617 | /* | 627 | /* |
@@ -735,7 +745,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
735 | * debugger stored an invalid system call number. Skip | 745 | * debugger stored an invalid system call number. Skip |
736 | * the system call and the system call restart handling. | 746 | * the system call and the system call restart handling. |
737 | */ | 747 | */ |
738 | regs->svcnr = 0; | 748 | clear_thread_flag(TIF_SYSCALL); |
739 | ret = -1; | 749 | ret = -1; |
740 | } | 750 | } |
741 | 751 | ||
@@ -897,6 +907,26 @@ static int s390_last_break_get(struct task_struct *target, | |||
897 | 907 | ||
898 | #endif | 908 | #endif |
899 | 909 | ||
910 | static int s390_system_call_get(struct task_struct *target, | ||
911 | const struct user_regset *regset, | ||
912 | unsigned int pos, unsigned int count, | ||
913 | void *kbuf, void __user *ubuf) | ||
914 | { | ||
915 | unsigned int *data = &task_thread_info(target)->system_call; | ||
916 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
917 | data, 0, sizeof(unsigned int)); | ||
918 | } | ||
919 | |||
920 | static int s390_system_call_set(struct task_struct *target, | ||
921 | const struct user_regset *regset, | ||
922 | unsigned int pos, unsigned int count, | ||
923 | const void *kbuf, const void __user *ubuf) | ||
924 | { | ||
925 | unsigned int *data = &task_thread_info(target)->system_call; | ||
926 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
927 | data, 0, sizeof(unsigned int)); | ||
928 | } | ||
929 | |||
900 | static const struct user_regset s390_regsets[] = { | 930 | static const struct user_regset s390_regsets[] = { |
901 | [REGSET_GENERAL] = { | 931 | [REGSET_GENERAL] = { |
902 | .core_note_type = NT_PRSTATUS, | 932 | .core_note_type = NT_PRSTATUS, |
@@ -923,6 +953,14 @@ static const struct user_regset s390_regsets[] = { | |||
923 | .get = s390_last_break_get, | 953 | .get = s390_last_break_get, |
924 | }, | 954 | }, |
925 | #endif | 955 | #endif |
956 | [REGSET_SYSTEM_CALL] = { | ||
957 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
958 | .n = 1, | ||
959 | .size = sizeof(unsigned int), | ||
960 | .align = sizeof(unsigned int), | ||
961 | .get = s390_system_call_get, | ||
962 | .set = s390_system_call_set, | ||
963 | }, | ||
926 | }; | 964 | }; |
927 | 965 | ||
928 | static const struct user_regset_view user_s390_view = { | 966 | static const struct user_regset_view user_s390_view = { |
@@ -1102,6 +1140,14 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1102 | .align = sizeof(long), | 1140 | .align = sizeof(long), |
1103 | .get = s390_compat_last_break_get, | 1141 | .get = s390_compat_last_break_get, |
1104 | }, | 1142 | }, |
1143 | [REGSET_SYSTEM_CALL] = { | ||
1144 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
1145 | .n = 1, | ||
1146 | .size = sizeof(compat_uint_t), | ||
1147 | .align = sizeof(compat_uint_t), | ||
1148 | .get = s390_system_call_get, | ||
1149 | .set = s390_system_call_set, | ||
1150 | }, | ||
1105 | [REGSET_GENERAL_EXTENDED] = { | 1151 | [REGSET_GENERAL_EXTENDED] = { |
1106 | .core_note_type = NT_S390_HIGH_GPRS, | 1152 | .core_note_type = NT_S390_HIGH_GPRS, |
1107 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), | 1153 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), |
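Note on the new REGSET_SYSTEM_CALL regset above: it exports thread_info->system_call as the NT_S390_SYSTEM_CALL ELF note, so a debugger can read and rewrite the number of the system call to be restarted. A minimal user-space read via PTRACE_GETREGSET could look like the sketch below; it assumes a libc that exposes PTRACE_GETREGSET, assumes the target pid is already ptrace-stopped, defines the note type locally in case the installed elf.h lacks it, and does only minimal error handling.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/types.h>

#ifndef NT_S390_SYSTEM_CALL
#define NT_S390_SYSTEM_CALL 0x307
#endif

static long read_system_call(pid_t pid, unsigned int *nr)
{
	struct iovec iov = { .iov_base = nr, .iov_len = sizeof(*nr) };

	return ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
}

int main(int argc, char *argv[])
{
	unsigned int nr = 0;
	pid_t pid = argc > 1 ? (pid_t) atoi(argv[1]) : 0;

	if (read_system_call(pid, &nr) == 0)
		printf("system_call = %u\n", nr);
	else
		perror("PTRACE_GETREGSET");
	return 0;
}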
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 303d961c3bb5..ad67c214be04 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -10,6 +10,12 @@ | |||
10 | #include <asm/asm-offsets.h> | 10 | #include <asm/asm-offsets.h> |
11 | 11 | ||
12 | # | 12 | # |
13 | # store_status: Empty implementation until kdump is supported on 31 bit | ||
14 | # | ||
15 | ENTRY(store_status) | ||
16 | br %r14 | ||
17 | |||
18 | # | ||
13 | # do_reipl_asm | 19 | # do_reipl_asm |
14 | # Parameter: r2 = schid of reipl device | 20 | # Parameter: r2 = schid of reipl device |
15 | # | 21 | # |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index e690975403f4..732a793ec53a 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -17,11 +17,11 @@ | |||
17 | # | 17 | # |
18 | ENTRY(store_status) | 18 | ENTRY(store_status) |
19 | /* Save register one and load save area base */ | 19 | /* Save register one and load save area base */ |
20 | stg %r1,__LC_SAVE_AREA_64(%r0) | 20 | stg %r1,__LC_SAVE_AREA+120(%r0) |
21 | lghi %r1,SAVE_AREA_BASE | 21 | lghi %r1,SAVE_AREA_BASE |
22 | /* General purpose registers */ | 22 | /* General purpose registers */ |
23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
24 | lg %r2,__LC_SAVE_AREA_64(%r0) | 24 | lg %r2,__LC_SAVE_AREA+120(%r0) |
25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | 25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) |
26 | /* Control registers */ | 26 | /* Control registers */ |
27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
@@ -62,8 +62,11 @@ ENTRY(store_status) | |||
62 | larl %r2,store_status | 62 | larl %r2,store_status |
63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | 63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) |
64 | br %r14 | 64 | br %r14 |
65 | .align 8 | 65 | |
66 | .section .bss | ||
67 | .align 8 | ||
66 | .Lclkcmp: .quad 0x0000000000000000 | 68 | .Lclkcmp: .quad 0x0000000000000000 |
69 | .previous | ||
67 | 70 | ||
68 | # | 71 | # |
69 | # do_reipl_asm | 72 | # do_reipl_asm |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7b371c37061d..8ac6bfa2786c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
43 | #include <linux/topology.h> | 43 | #include <linux/topology.h> |
44 | #include <linux/ftrace.h> | 44 | #include <linux/ftrace.h> |
45 | #include <linux/kexec.h> | ||
46 | #include <linux/crash_dump.h> | ||
47 | #include <linux/memory.h> | ||
45 | 48 | ||
46 | #include <asm/ipl.h> | 49 | #include <asm/ipl.h> |
47 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
@@ -57,12 +60,13 @@ | |||
57 | #include <asm/ebcdic.h> | 60 | #include <asm/ebcdic.h> |
58 | #include <asm/compat.h> | 61 | #include <asm/compat.h> |
59 | #include <asm/kvm_virtio.h> | 62 | #include <asm/kvm_virtio.h> |
63 | #include <asm/diag.h> | ||
60 | 64 | ||
61 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | 65 | long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | |
62 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | 66 | PSW_MASK_EA | PSW_MASK_BA; |
63 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 67 | long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | |
64 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 68 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | |
65 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 69 | PSW_MASK_PSTATE | PSW_ASC_HOME; |
66 | 70 | ||
67 | /* | 71 | /* |
68 | * User copy operations. | 72 | * User copy operations. |
@@ -274,22 +278,14 @@ early_param("mem", early_parse_mem); | |||
274 | unsigned int user_mode = HOME_SPACE_MODE; | 278 | unsigned int user_mode = HOME_SPACE_MODE; |
275 | EXPORT_SYMBOL_GPL(user_mode); | 279 | EXPORT_SYMBOL_GPL(user_mode); |
276 | 280 | ||
277 | static int set_amode_and_uaccess(unsigned long user_amode, | 281 | static int set_amode_primary(void) |
278 | unsigned long user32_amode) | ||
279 | { | 282 | { |
280 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 283 | psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; |
281 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 284 | psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; |
282 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
283 | #ifdef CONFIG_COMPAT | 285 | #ifdef CONFIG_COMPAT |
284 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | 286 | psw32_user_bits = |
285 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 287 | (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; |
286 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
287 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | ||
288 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
289 | PSW32_MASK_PSTATE; | ||
290 | #endif | 288 | #endif |
291 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
292 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | ||
293 | 289 | ||
294 | if (MACHINE_HAS_MVCOS) { | 290 | if (MACHINE_HAS_MVCOS) { |
295 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 291 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
@@ -325,7 +321,7 @@ early_param("user_mode", early_parse_user_mode); | |||
325 | static void setup_addressing_mode(void) | 321 | static void setup_addressing_mode(void) |
326 | { | 322 | { |
327 | if (user_mode == PRIMARY_SPACE_MODE) { | 323 | if (user_mode == PRIMARY_SPACE_MODE) { |
328 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) | 324 | if (set_amode_primary()) |
329 | pr_info("Address spaces switched, " | 325 | pr_info("Address spaces switched, " |
330 | "mvcos available\n"); | 326 | "mvcos available\n"); |
331 | else | 327 | else |
@@ -344,24 +340,25 @@ setup_lowcore(void) | |||
344 | */ | 340 | */ |
345 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); | 341 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); |
346 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); | 342 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
347 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 343 | lc->restart_psw.mask = psw_kernel_bits; |
348 | lc->restart_psw.addr = | 344 | lc->restart_psw.addr = |
349 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 345 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
350 | if (user_mode != HOME_SPACE_MODE) | 346 | lc->external_new_psw.mask = psw_kernel_bits | |
351 | lc->restart_psw.mask |= PSW_ASC_HOME; | 347 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
352 | lc->external_new_psw.mask = psw_kernel_bits; | ||
353 | lc->external_new_psw.addr = | 348 | lc->external_new_psw.addr = |
354 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 349 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
355 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 350 | lc->svc_new_psw.mask = psw_kernel_bits | |
351 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
356 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 352 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
357 | lc->program_new_psw.mask = psw_kernel_bits; | 353 | lc->program_new_psw.mask = psw_kernel_bits | |
354 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
358 | lc->program_new_psw.addr = | 355 | lc->program_new_psw.addr = |
359 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 356 | PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; |
360 | lc->mcck_new_psw.mask = | 357 | lc->mcck_new_psw.mask = psw_kernel_bits; |
361 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | ||
362 | lc->mcck_new_psw.addr = | 358 | lc->mcck_new_psw.addr = |
363 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 359 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
364 | lc->io_new_psw.mask = psw_kernel_bits; | 360 | lc->io_new_psw.mask = psw_kernel_bits | |
361 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
365 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 362 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
366 | lc->clock_comparator = -1ULL; | 363 | lc->clock_comparator = -1ULL; |
367 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 364 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
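The lowcore hunk above illustrates the new PSW convention used throughout this series: psw_kernel_bits now carries only the bits that are always set for the kernel (storage key, addressing mode and address-space control), while situational bits such as DAT, machine-check, I/O or external-interrupt enablement are ORed in wherever a new-PSW is assembled. A minimal sketch of the pattern, assuming the PSW_* constants from this tree's <asm/ptrace.h> and the psw_kernel_bits declaration from <asm/processor.h>; the handler pointer is a placeholder:

	/* Sketch: build an I/O new-PSW the way setup_lowcore() now does it */
	static void example_io_new_psw(struct _lowcore *lc, void *handler)
	{
		lc->io_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
		lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) handler;
	}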
@@ -435,10 +432,14 @@ static void __init setup_resources(void) | |||
435 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 432 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
436 | if (!memory_chunk[i].size) | 433 | if (!memory_chunk[i].size) |
437 | continue; | 434 | continue; |
435 | if (memory_chunk[i].type == CHUNK_OLDMEM || | ||
436 | memory_chunk[i].type == CHUNK_CRASHK) | ||
437 | continue; | ||
438 | res = alloc_bootmem_low(sizeof(*res)); | 438 | res = alloc_bootmem_low(sizeof(*res)); |
439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
440 | switch (memory_chunk[i].type) { | 440 | switch (memory_chunk[i].type) { |
441 | case CHUNK_READ_WRITE: | 441 | case CHUNK_READ_WRITE: |
442 | case CHUNK_CRASHK: | ||
442 | res->name = "System RAM"; | 443 | res->name = "System RAM"; |
443 | break; | 444 | break; |
444 | case CHUNK_READ_ONLY: | 445 | case CHUNK_READ_ONLY: |
@@ -479,6 +480,7 @@ static void __init setup_memory_end(void) | |||
479 | unsigned long max_mem; | 480 | unsigned long max_mem; |
480 | int i; | 481 | int i; |
481 | 482 | ||
483 | |||
482 | #ifdef CONFIG_ZFCPDUMP | 484 | #ifdef CONFIG_ZFCPDUMP |
483 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { | 485 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { |
484 | memory_end = ZFCPDUMP_HSA_SIZE; | 486 | memory_end = ZFCPDUMP_HSA_SIZE; |
@@ -545,11 +547,201 @@ static void __init setup_restart_psw(void) | |||
545 | * Setup restart PSW for absolute zero lowcore. This is necessary | 547 | * Setup restart PSW for absolute zero lowcore. This is necessary |
546 | * if PSW restart is done on an offline CPU that has lowcore zero | 548 | * if PSW restart is done on an offline CPU that has lowcore zero |
547 | */ | 549 | */ |
548 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 550 | psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; |
549 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 551 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
550 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); | 552 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); |
551 | } | 553 | } |
552 | 554 | ||
555 | static void __init setup_vmcoreinfo(void) | ||
556 | { | ||
557 | #ifdef CONFIG_KEXEC | ||
558 | unsigned long ptr = paddr_vmcoreinfo_note(); | ||
559 | |||
560 | copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); | ||
561 | #endif | ||
562 | } | ||
563 | |||
564 | #ifdef CONFIG_CRASH_DUMP | ||
565 | |||
566 | /* | ||
567 | * Find suitable location for crashkernel memory | ||
568 | */ | ||
569 | static unsigned long __init find_crash_base(unsigned long crash_size, | ||
570 | char **msg) | ||
571 | { | ||
572 | unsigned long crash_base; | ||
573 | struct mem_chunk *chunk; | ||
574 | int i; | ||
575 | |||
576 | if (memory_chunk[0].size < crash_size) { | ||
577 | *msg = "first memory chunk must be at least crashkernel size"; | ||
578 | return 0; | ||
579 | } | ||
580 | if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) | ||
581 | return OLDMEM_BASE; | ||
582 | |||
583 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { | ||
584 | chunk = &memory_chunk[i]; | ||
585 | if (chunk->size == 0) | ||
586 | continue; | ||
587 | if (chunk->type != CHUNK_READ_WRITE) | ||
588 | continue; | ||
589 | if (chunk->size < crash_size) | ||
590 | continue; | ||
591 | crash_base = (chunk->addr + chunk->size) - crash_size; | ||
592 | if (crash_base < crash_size) | ||
593 | continue; | ||
594 | if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) | ||
595 | continue; | ||
596 | if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) | ||
597 | continue; | ||
598 | return crash_base; | ||
599 | } | ||
600 | *msg = "no suitable area found"; | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Check if crash_base and crash_size is valid | ||
606 | */ | ||
607 | static int __init verify_crash_base(unsigned long crash_base, | ||
608 | unsigned long crash_size, | ||
609 | char **msg) | ||
610 | { | ||
611 | struct mem_chunk *chunk; | ||
612 | int i; | ||
613 | |||
614 | /* | ||
615 | * Because we do the swap to zero, we must have at least 'crash_size' | ||
616 | * bytes of free space before crash_base | ||
617 | */ | ||
618 | if (crash_size > crash_base) { | ||
619 | *msg = "crashkernel offset must be greater than size"; | ||
620 | return -EINVAL; | ||
621 | } | ||
622 | |||
623 | /* First memory chunk must be at least crash_size */ | ||
624 | if (memory_chunk[0].size < crash_size) { | ||
625 | *msg = "first memory chunk must be at least crashkernel size"; | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | /* Check if we fit into the respective memory chunk */ | ||
629 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
630 | chunk = &memory_chunk[i]; | ||
631 | if (chunk->size == 0) | ||
632 | continue; | ||
633 | if (crash_base < chunk->addr) | ||
634 | continue; | ||
635 | if (crash_base >= chunk->addr + chunk->size) | ||
636 | continue; | ||
637 | /* we have found the memory chunk */ | ||
638 | if (crash_base + crash_size > chunk->addr + chunk->size) { | ||
639 | *msg = "selected memory chunk is too small for " | ||
640 | "crashkernel memory"; | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | return 0; | ||
644 | } | ||
645 | *msg = "invalid memory range specified"; | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * Reserve kdump memory by creating a memory hole in the mem_chunk array | ||
651 | */ | ||
652 | static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, | ||
653 | int type) | ||
654 | { | ||
655 | |||
656 | create_mem_hole(memory_chunk, addr, size, type); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * When kdump is enabled, we have to ensure that no memory from | ||
661 | * the area [0 - crashkernel memory size] and | ||
662 | * [crashk_res.start - crashk_res.end] is set offline. | ||
663 | */ | ||
664 | static int kdump_mem_notifier(struct notifier_block *nb, | ||
665 | unsigned long action, void *data) | ||
666 | { | ||
667 | struct memory_notify *arg = data; | ||
668 | |||
669 | if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) | ||
670 | return NOTIFY_BAD; | ||
671 | if (arg->start_pfn > PFN_DOWN(crashk_res.end)) | ||
672 | return NOTIFY_OK; | ||
673 | if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) | ||
674 | return NOTIFY_OK; | ||
675 | return NOTIFY_BAD; | ||
676 | } | ||
677 | |||
678 | static struct notifier_block kdump_mem_nb = { | ||
679 | .notifier_call = kdump_mem_notifier, | ||
680 | }; | ||
681 | |||
682 | #endif | ||
683 | |||
684 | /* | ||
685 | * Make sure that oldmem, where the dump is stored, is protected | ||
686 | */ | ||
687 | static void reserve_oldmem(void) | ||
688 | { | ||
689 | #ifdef CONFIG_CRASH_DUMP | ||
690 | if (!OLDMEM_BASE) | ||
691 | return; | ||
692 | |||
693 | reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); | ||
694 | reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, | ||
695 | CHUNK_OLDMEM); | ||
696 | if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) | ||
697 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; | ||
698 | else | ||
699 | saved_max_pfn = PFN_DOWN(real_memory_size) - 1; | ||
700 | #endif | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * Reserve memory for kdump kernel to be loaded with kexec | ||
705 | */ | ||
706 | static void __init reserve_crashkernel(void) | ||
707 | { | ||
708 | #ifdef CONFIG_CRASH_DUMP | ||
709 | unsigned long long crash_base, crash_size; | ||
710 | char *msg; | ||
711 | int rc; | ||
712 | |||
713 | rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, | ||
714 | &crash_base); | ||
715 | if (rc || crash_size == 0) | ||
716 | return; | ||
717 | crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); | ||
718 | crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); | ||
719 | if (register_memory_notifier(&kdump_mem_nb)) | ||
720 | return; | ||
721 | if (!crash_base) | ||
722 | crash_base = find_crash_base(crash_size, &msg); | ||
723 | if (!crash_base) { | ||
724 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
725 | unregister_memory_notifier(&kdump_mem_nb); | ||
726 | return; | ||
727 | } | ||
728 | if (verify_crash_base(crash_base, crash_size, &msg)) { | ||
729 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
730 | unregister_memory_notifier(&kdump_mem_nb); | ||
731 | return; | ||
732 | } | ||
733 | if (!OLDMEM_BASE && MACHINE_IS_VM) | ||
734 | diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); | ||
735 | crashk_res.start = crash_base; | ||
736 | crashk_res.end = crash_base + crash_size - 1; | ||
737 | insert_resource(&iomem_resource, &crashk_res); | ||
738 | reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); | ||
739 | pr_info("Reserving %lluMB of memory at %lluMB " | ||
740 | "for crashkernel (System RAM: %luMB)\n", | ||
741 | crash_size >> 20, crash_base >> 20, memory_end >> 20); | ||
742 | #endif | ||
743 | } | ||
744 | |||
553 | static void __init | 745 | static void __init |
554 | setup_memory(void) | 746 | setup_memory(void) |
555 | { | 747 | { |
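Taken together, the functions above wire s390 into the generic crashkernel handling: parse_crashkernel() digests the crashkernel= boot parameter, find_crash_base() and verify_crash_base() choose and validate a region, reserve_kdump_bootmem() punches the matching CHUNK_CRASHK/CHUNK_OLDMEM hole into the mem_chunk array, and the memory notifier keeps those ranges from being set offline later. A hedged sketch of the parse step only, assuming the generic parse_crashkernel() from <linux/kexec.h> and the memory_end/boot_command_line symbols already visible in setup.c; the pr_info text is illustrative:

	/* Sketch: what reserve_crashkernel() gets back from the command line.
	 * "crashkernel=128M" requests a 128 MiB region anywhere suitable,
	 * "crashkernel=128M@288M" additionally pins its start address. */
	static int __init crashkernel_parse_example(void)
	{
		unsigned long long base = 0, size = 0;

		if (parse_crashkernel(boot_command_line, memory_end, &size, &base))
			return -EINVAL;
		pr_info("crashkernel request: %lluM at %lluM\n",
			size >> 20, base >> 20);
		return 0;
	}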
@@ -580,6 +772,14 @@ setup_memory(void) | |||
580 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { | 772 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { |
581 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 773 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
582 | 774 | ||
775 | #ifdef CONFIG_CRASH_DUMP | ||
776 | if (OLDMEM_BASE) { | ||
777 | /* Move initrd behind kdump oldmem */ | ||
778 | if (start + INITRD_SIZE > OLDMEM_BASE && | ||
779 | start < OLDMEM_BASE + OLDMEM_SIZE) | ||
780 | start = OLDMEM_BASE + OLDMEM_SIZE; | ||
781 | } | ||
782 | #endif | ||
583 | if (start + INITRD_SIZE > memory_end) { | 783 | if (start + INITRD_SIZE > memory_end) { |
584 | pr_err("initrd extends beyond end of " | 784 | pr_err("initrd extends beyond end of " |
585 | "memory (0x%08lx > 0x%08lx) " | 785 | "memory (0x%08lx > 0x%08lx) " |
@@ -610,7 +810,8 @@ setup_memory(void) | |||
610 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 810 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
611 | unsigned long start_chunk, end_chunk, pfn; | 811 | unsigned long start_chunk, end_chunk, pfn; |
612 | 812 | ||
613 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 813 | if (memory_chunk[i].type != CHUNK_READ_WRITE && |
814 | memory_chunk[i].type != CHUNK_CRASHK) | ||
614 | continue; | 815 | continue; |
615 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 816 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
616 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | 817 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
@@ -644,6 +845,15 @@ setup_memory(void) | |||
644 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, | 845 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, |
645 | BOOTMEM_DEFAULT); | 846 | BOOTMEM_DEFAULT); |
646 | 847 | ||
848 | #ifdef CONFIG_CRASH_DUMP | ||
849 | if (crashk_res.start) | ||
850 | reserve_bootmem(crashk_res.start, | ||
851 | crashk_res.end - crashk_res.start + 1, | ||
852 | BOOTMEM_DEFAULT); | ||
853 | if (is_kdump_kernel()) | ||
854 | reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, | ||
855 | PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); | ||
856 | #endif | ||
647 | #ifdef CONFIG_BLK_DEV_INITRD | 857 | #ifdef CONFIG_BLK_DEV_INITRD |
648 | if (INITRD_START && INITRD_SIZE) { | 858 | if (INITRD_START && INITRD_SIZE) { |
649 | if (INITRD_START + INITRD_SIZE <= memory_end) { | 859 | if (INITRD_START + INITRD_SIZE <= memory_end) { |
@@ -812,8 +1022,11 @@ setup_arch(char **cmdline_p) | |||
812 | setup_ipl(); | 1022 | setup_ipl(); |
813 | setup_memory_end(); | 1023 | setup_memory_end(); |
814 | setup_addressing_mode(); | 1024 | setup_addressing_mode(); |
1025 | reserve_oldmem(); | ||
1026 | reserve_crashkernel(); | ||
815 | setup_memory(); | 1027 | setup_memory(); |
816 | setup_resources(); | 1028 | setup_resources(); |
1029 | setup_vmcoreinfo(); | ||
817 | setup_restart_psw(); | 1030 | setup_restart_psw(); |
818 | setup_lowcore(); | 1031 | setup_lowcore(); |
819 | 1032 | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9a40e1cc5ec3..05a85bc14c98 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/ucontext.h> | 30 | #include <asm/ucontext.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/lowcore.h> | 32 | #include <asm/lowcore.h> |
33 | #include <asm/compat.h> | ||
33 | #include "entry.h" | 34 | #include "entry.h" |
34 | 35 | ||
35 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 36 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -116,7 +117,8 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
116 | 117 | ||
117 | /* Copy a 'clean' PSW mask to the user to avoid leaking | 118 | /* Copy a 'clean' PSW mask to the user to avoid leaking |
118 | information about whether PER is currently on. */ | 119 | information about whether PER is currently on. */ |
119 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); | 120 | user_sregs.regs.psw.mask = psw_user_bits | |
121 | (regs->psw.mask & PSW_MASK_USER); | ||
120 | user_sregs.regs.psw.addr = regs->psw.addr; | 122 | user_sregs.regs.psw.addr = regs->psw.addr; |
121 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); | 123 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); |
122 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, | 124 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, |
@@ -143,9 +145,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
143 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); | 145 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); |
144 | if (err) | 146 | if (err) |
145 | return err; | 147 | return err; |
146 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 148 | /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ |
147 | user_sregs.regs.psw.mask); | 149 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
148 | regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr; | 150 | (user_sregs.regs.psw.mask & PSW_MASK_USER); |
151 | /* Check for invalid amode */ | ||
152 | if (regs->psw.mask & PSW_MASK_EA) | ||
153 | regs->psw.mask |= PSW_MASK_BA; | ||
154 | regs->psw.addr = user_sregs.regs.psw.addr; | ||
149 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); | 155 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); |
150 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, | 156 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, |
151 | sizeof(sregs->regs.acrs)); | 157 | sizeof(sregs->regs.acrs)); |
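Both directions of the sigcontext copy now split the PSW mask explicitly instead of going through the old PSW_MASK_MERGE macro: the bits collected in PSW_MASK_USER travel to and from user space, everything else stays under kernel control, and a user-supplied mask that requests the 64-bit extended-addressing mode without the base-addressing bit is repaired. A compact restatement of the merge as a sketch, assuming the PSW_MASK_* constants from this tree's <asm/ptrace.h>:

	/* Sketch: keep kernel-owned bits from "kmask", user-visible bits from "umask" */
	static inline unsigned long psw_merge_user_bits(unsigned long kmask,
							unsigned long umask)
	{
		unsigned long mask = (kmask & ~PSW_MASK_USER) | (umask & PSW_MASK_USER);

		if (mask & PSW_MASK_EA)		/* EA without BA is invalid */
			mask |= PSW_MASK_BA;
		return mask;
	}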
@@ -156,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
156 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; | 162 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; |
157 | 163 | ||
158 | restore_fp_regs(¤t->thread.fp_regs); | 164 | restore_fp_regs(¤t->thread.fp_regs); |
159 | regs->svcnr = 0; /* disable syscall checks */ | 165 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
160 | return 0; | 166 | return 0; |
161 | } | 167 | } |
162 | 168 | ||
@@ -288,6 +294,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
288 | 294 | ||
289 | /* Set up registers for signal handler */ | 295 | /* Set up registers for signal handler */ |
290 | regs->gprs[15] = (unsigned long) frame; | 296 | regs->gprs[15] = (unsigned long) frame; |
297 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
291 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 298 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
292 | 299 | ||
293 | regs->gprs[2] = map_signal(sig); | 300 | regs->gprs[2] = map_signal(sig); |
@@ -356,6 +363,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
356 | 363 | ||
357 | /* Set up registers for signal handler */ | 364 | /* Set up registers for signal handler */ |
358 | regs->gprs[15] = (unsigned long) frame; | 365 | regs->gprs[15] = (unsigned long) frame; |
366 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
359 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 367 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
360 | 368 | ||
361 | regs->gprs[2] = map_signal(sig); | 369 | regs->gprs[2] = map_signal(sig); |
@@ -401,7 +409,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
401 | */ | 409 | */ |
402 | void do_signal(struct pt_regs *regs) | 410 | void do_signal(struct pt_regs *regs) |
403 | { | 411 | { |
404 | unsigned long retval = 0, continue_addr = 0, restart_addr = 0; | ||
405 | siginfo_t info; | 412 | siginfo_t info; |
406 | int signr; | 413 | int signr; |
407 | struct k_sigaction ka; | 414 | struct k_sigaction ka; |
@@ -421,54 +428,45 @@ void do_signal(struct pt_regs *regs) | |||
421 | else | 428 | else |
422 | oldset = ¤t->blocked; | 429 | oldset = ¤t->blocked; |
423 | 430 | ||
424 | /* Are we from a system call? */ | 431 | /* |
425 | if (regs->svcnr) { | 432 | * Get signal to deliver. When running under ptrace, at this point |
426 | continue_addr = regs->psw.addr; | 433 | * the debugger may change all our registers, including the system |
427 | restart_addr = continue_addr - regs->ilc; | 434 | * call information. |
428 | retval = regs->gprs[2]; | 435 | */ |
429 | 436 | current_thread_info()->system_call = | |
430 | /* Prepare for system call restart. We do this here so that a | 437 | test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0; |
431 | debugger will see the already changed PSW. */ | ||
432 | switch (retval) { | ||
433 | case -ERESTARTNOHAND: | ||
434 | case -ERESTARTSYS: | ||
435 | case -ERESTARTNOINTR: | ||
436 | regs->gprs[2] = regs->orig_gpr2; | ||
437 | regs->psw.addr = restart_addr; | ||
438 | break; | ||
439 | case -ERESTART_RESTARTBLOCK: | ||
440 | regs->gprs[2] = -EINTR; | ||
441 | } | ||
442 | regs->svcnr = 0; /* Don't deal with this again. */ | ||
443 | } | ||
444 | |||
445 | /* Get signal to deliver. When running under ptrace, at this point | ||
446 | the debugger may change all our registers ... */ | ||
447 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 438 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
448 | 439 | ||
449 | /* Depending on the signal settings we may need to revert the | ||
450 | decision to restart the system call. */ | ||
451 | if (signr > 0 && regs->psw.addr == restart_addr) { | ||
452 | if (retval == -ERESTARTNOHAND | ||
453 | || (retval == -ERESTARTSYS | ||
454 | && !(current->sighand->action[signr-1].sa.sa_flags | ||
455 | & SA_RESTART))) { | ||
456 | regs->gprs[2] = -EINTR; | ||
457 | regs->psw.addr = continue_addr; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | if (signr > 0) { | 440 | if (signr > 0) { |
462 | /* Whee! Actually deliver the signal. */ | 441 | /* Whee! Actually deliver the signal. */ |
463 | int ret; | 442 | if (current_thread_info()->system_call) { |
464 | #ifdef CONFIG_COMPAT | 443 | regs->svc_code = current_thread_info()->system_call; |
465 | if (is_compat_task()) { | 444 | /* Check for system call restarting. */ |
466 | ret = handle_signal32(signr, &ka, &info, oldset, regs); | 445 | switch (regs->gprs[2]) { |
467 | } | 446 | case -ERESTART_RESTARTBLOCK: |
468 | else | 447 | case -ERESTARTNOHAND: |
469 | #endif | 448 | regs->gprs[2] = -EINTR; |
470 | ret = handle_signal(signr, &ka, &info, oldset, regs); | 449 | break; |
471 | if (!ret) { | 450 | case -ERESTARTSYS: |
451 | if (!(ka.sa.sa_flags & SA_RESTART)) { | ||
452 | regs->gprs[2] = -EINTR; | ||
453 | break; | ||
454 | } | ||
455 | /* fallthrough */ | ||
456 | case -ERESTARTNOINTR: | ||
457 | regs->gprs[2] = regs->orig_gpr2; | ||
458 | regs->psw.addr = | ||
459 | __rewind_psw(regs->psw, | ||
460 | regs->svc_code >> 16); | ||
461 | break; | ||
462 | } | ||
463 | /* No longer in a system call */ | ||
464 | clear_thread_flag(TIF_SYSCALL); | ||
465 | } | ||
466 | |||
467 | if ((is_compat_task() ? | ||
468 | handle_signal32(signr, &ka, &info, oldset, regs) : | ||
469 | handle_signal(signr, &ka, &info, oldset, regs)) == 0) { | ||
472 | /* | 470 | /* |
473 | * A signal was successfully delivered; the saved | 471 | * A signal was successfully delivered; the saved |
474 | * sigmask will have been stored in the signal frame, | 472 | * sigmask will have been stored in the signal frame, |
@@ -482,11 +480,32 @@ void do_signal(struct pt_regs *regs) | |||
482 | * Let tracing know that we've done the handler setup. | 480 | * Let tracing know that we've done the handler setup. |
483 | */ | 481 | */ |
484 | tracehook_signal_handler(signr, &info, &ka, regs, | 482 | tracehook_signal_handler(signr, &info, &ka, regs, |
485 | test_thread_flag(TIF_SINGLE_STEP)); | 483 | test_thread_flag(TIF_SINGLE_STEP)); |
486 | } | 484 | } |
487 | return; | 485 | return; |
488 | } | 486 | } |
489 | 487 | ||
488 | /* No handlers present - check for system call restart */ | ||
489 | if (current_thread_info()->system_call) { | ||
490 | regs->svc_code = current_thread_info()->system_call; | ||
491 | switch (regs->gprs[2]) { | ||
492 | case -ERESTART_RESTARTBLOCK: | ||
493 | /* Restart with sys_restart_syscall */ | ||
494 | regs->svc_code = __NR_restart_syscall; | ||
495 | /* fallthrough */ | ||
496 | case -ERESTARTNOHAND: | ||
497 | case -ERESTARTSYS: | ||
498 | case -ERESTARTNOINTR: | ||
499 | /* Restart system call with magic TIF bit. */ | ||
500 | regs->gprs[2] = regs->orig_gpr2; | ||
501 | set_thread_flag(TIF_SYSCALL); | ||
502 | break; | ||
503 | default: | ||
504 | clear_thread_flag(TIF_SYSCALL); | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | |||
490 | /* | 509 | /* |
491 | * If there's no signal to deliver, we just put the saved sigmask back. | 510 | * If there's no signal to deliver, we just put the saved sigmask back. |
492 | */ | 511 | */ |
@@ -494,13 +513,6 @@ void do_signal(struct pt_regs *regs) | |||
494 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 513 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
495 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 514 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
496 | } | 515 | } |
497 | |||
498 | /* Restart a different system call. */ | ||
499 | if (retval == -ERESTART_RESTARTBLOCK | ||
500 | && regs->psw.addr == continue_addr) { | ||
501 | regs->gprs[2] = __NR_restart_syscall; | ||
502 | set_thread_flag(TIF_RESTART_SVC); | ||
503 | } | ||
504 | } | 516 | } |
505 | 517 | ||
506 | void do_notify_resume(struct pt_regs *regs) | 518 | void do_notify_resume(struct pt_regs *regs) |
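The do_signal() rework above goes hand in hand with the entry-code change that replaced regs->svcnr/regs->ilc by regs->svc_code and the TIF_SYSCALL flag (see the asm-offsets.c hunk at the top of this series): svc_code describes the interrupted SVC, with the instruction length in its upper half, and do_signal() snapshots it into thread_info->system_call before ptrace gets a chance to rewrite the registers. When a handler is delivered and the call must be restarted, the PSW is simply rewound over the SVC instruction; when no handler is present, TIF_SYSCALL stays set so the system-call exit path re-issues it. A sketch of the rewind step only, assuming the __rewind_psw() helper this series introduces:

	/* Sketch: point the PSW back at the SVC instruction before the signal
	 * frame is set up, so the call is re-executed after the handler returns. */
	static void example_restart_before_handler(struct pt_regs *regs)
	{
		regs->gprs[2] = regs->orig_gpr2;	/* undo the -ERESTART* return value */
		regs->psw.addr = __rewind_psw(regs->psw, regs->svc_code >> 16);
	}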
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6ab16ac64d29..3ea872890da2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/timex.h> | 38 | #include <linux/timex.h> |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/crash_dump.h> | ||
41 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
42 | #include <asm/ipl.h> | 43 | #include <asm/ipl.h> |
43 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
@@ -97,6 +98,29 @@ static inline int cpu_stopped(int cpu) | |||
97 | return raw_cpu_stopped(cpu_logical_map(cpu)); | 98 | return raw_cpu_stopped(cpu_logical_map(cpu)); |
98 | } | 99 | } |
99 | 100 | ||
101 | /* | ||
102 | * Ensure that PSW restart is done on an online CPU | ||
103 | */ | ||
104 | void smp_restart_with_online_cpu(void) | ||
105 | { | ||
106 | int cpu; | ||
107 | |||
108 | for_each_online_cpu(cpu) { | ||
109 | if (stap() == __cpu_logical_map[cpu]) { | ||
110 | /* We are online: Enable DAT again and return */ | ||
111 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | ||
112 | return; | ||
113 | } | ||
114 | } | ||
115 | /* We are not online: Do PSW restart on an online CPU */ | ||
116 | while (sigp(cpu, sigp_restart) == sigp_busy) | ||
117 | cpu_relax(); | ||
118 | /* And stop ourself */ | ||
119 | while (raw_sigp(stap(), sigp_stop) == sigp_busy) | ||
120 | cpu_relax(); | ||
121 | for (;;); | ||
122 | } | ||
123 | |||
100 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 124 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) |
101 | { | 125 | { |
102 | struct _lowcore *lc, *current_lc; | 126 | struct _lowcore *lc, *current_lc; |
@@ -106,14 +130,16 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | |||
106 | 130 | ||
107 | if (smp_processor_id() == 0) | 131 | if (smp_processor_id() == 0) |
108 | func(data); | 132 | func(data); |
109 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); | 133 | __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | |
134 | PSW_MASK_EA | PSW_MASK_BA); | ||
110 | /* Disable lowcore protection */ | 135 | /* Disable lowcore protection */ |
111 | __ctl_clear_bit(0, 28); | 136 | __ctl_clear_bit(0, 28); |
112 | current_lc = lowcore_ptr[smp_processor_id()]; | 137 | current_lc = lowcore_ptr[smp_processor_id()]; |
113 | lc = lowcore_ptr[0]; | 138 | lc = lowcore_ptr[0]; |
114 | if (!lc) | 139 | if (!lc) |
115 | lc = current_lc; | 140 | lc = current_lc; |
116 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 141 | lc->restart_psw.mask = |
142 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
117 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | 143 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; |
118 | if (!cpu_online(0)) | 144 | if (!cpu_online(0)) |
119 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | 145 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); |
@@ -135,7 +161,7 @@ void smp_send_stop(void) | |||
135 | int cpu, rc; | 161 | int cpu, rc; |
136 | 162 | ||
137 | /* Disable all interrupts/machine checks */ | 163 | /* Disable all interrupts/machine checks */ |
138 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); | 164 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
139 | trace_hardirqs_off(); | 165 | trace_hardirqs_off(); |
140 | 166 | ||
141 | /* stop all processors */ | 167 | /* stop all processors */ |
@@ -161,7 +187,10 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
161 | { | 187 | { |
162 | unsigned long bits; | 188 | unsigned long bits; |
163 | 189 | ||
164 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; | 190 | if (ext_int_code == 0x1202) |
191 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; | ||
192 | else | ||
193 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; | ||
165 | /* | 194 | /* |
166 | * handle bit signal external calls | 195 | * handle bit signal external calls |
167 | */ | 196 | */ |
@@ -183,12 +212,19 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
183 | */ | 212 | */ |
184 | static void smp_ext_bitcall(int cpu, int sig) | 213 | static void smp_ext_bitcall(int cpu, int sig) |
185 | { | 214 | { |
215 | int order; | ||
216 | |||
186 | /* | 217 | /* |
187 | * Set signaling bit in lowcore of target cpu and kick it | 218 | * Set signaling bit in lowcore of target cpu and kick it |
188 | */ | 219 | */ |
189 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 220 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
190 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) | 221 | while (1) { |
222 | order = smp_vcpu_scheduled(cpu) ? | ||
223 | sigp_external_call : sigp_emergency_signal; | ||
224 | if (sigp(cpu, order) != sigp_busy) | ||
225 | break; | ||
191 | udelay(10); | 226 | udelay(10); |
227 | } | ||
192 | } | 228 | } |
193 | 229 | ||
194 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 230 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
@@ -281,11 +317,13 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
281 | } | 317 | } |
282 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 318 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
283 | 319 | ||
284 | #ifdef CONFIG_ZFCPDUMP | 320 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) |
285 | 321 | ||
286 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 322 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
287 | { | 323 | { |
288 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 324 | if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) |
325 | return; | ||
326 | if (is_kdump_kernel()) | ||
289 | return; | 327 | return; |
290 | if (cpu >= NR_CPUS) { | 328 | if (cpu >= NR_CPUS) { |
291 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 329 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
@@ -403,6 +441,18 @@ static void __init smp_detect_cpus(void) | |||
403 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 441 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
404 | if (!info) | 442 | if (!info) |
405 | panic("smp_detect_cpus failed to allocate memory\n"); | 443 | panic("smp_detect_cpus failed to allocate memory\n"); |
444 | #ifdef CONFIG_CRASH_DUMP | ||
445 | if (OLDMEM_BASE && !is_kdump_kernel()) { | ||
446 | struct save_area *save_area; | ||
447 | |||
448 | save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); | ||
449 | if (!save_area) | ||
450 | panic("could not allocate memory for save area\n"); | ||
451 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | ||
452 | 0x200, 0); | ||
453 | zfcpdump_save_areas[0] = save_area; | ||
454 | } | ||
455 | #endif | ||
406 | /* Use sigp detection algorithm if sclp doesn't work. */ | 456 | /* Use sigp detection algorithm if sclp doesn't work. */ |
407 | if (sclp_get_cpu_info(info)) { | 457 | if (sclp_get_cpu_info(info)) { |
408 | smp_use_sigp_detection = 1; | 458 | smp_use_sigp_detection = 1; |
@@ -463,7 +513,8 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
463 | set_cpu_online(smp_processor_id(), true); | 513 | set_cpu_online(smp_processor_id(), true); |
464 | ipi_call_unlock(); | 514 | ipi_call_unlock(); |
465 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ | 515 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ |
466 | S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 516 | S390_lowcore.restart_psw.mask = |
517 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
467 | S390_lowcore.restart_psw.addr = | 518 | S390_lowcore.restart_psw.addr = |
468 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 519 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
469 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ | 520 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ |
@@ -511,7 +562,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
511 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | 562 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
512 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 563 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
513 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 564 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
514 | lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 565 | lowcore->restart_psw.mask = |
566 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
515 | lowcore->restart_psw.addr = | 567 | lowcore->restart_psw.addr = |
516 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 568 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
517 | if (user_mode != HOME_SPACE_MODE) | 569 | if (user_mode != HOME_SPACE_MODE) |
@@ -712,6 +764,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
712 | /* request the 0x1201 emergency signal external interrupt */ | 764 | /* request the 0x1201 emergency signal external interrupt */ |
713 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 765 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
714 | panic("Couldn't request external interrupt 0x1201"); | 766 | panic("Couldn't request external interrupt 0x1201"); |
767 | /* request the 0x1202 external call external interrupt */ | ||
768 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | ||
769 | panic("Couldn't request external interrupt 0x1202"); | ||
715 | 770 | ||
716 | /* Reallocate current lowcore, but keep its contents. */ | 771 | /* Reallocate current lowcore, but keep its contents. */ |
717 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 772 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index cf9e5c6d5527..47df775c844d 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -7,6 +7,8 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
10 | #include <linux/suspend.h> | ||
11 | #include <linux/mm.h> | ||
10 | #include <asm/system.h> | 12 | #include <asm/system.h> |
11 | 13 | ||
12 | /* | 14 | /* |
@@ -14,6 +16,123 @@ | |||
14 | */ | 16 | */ |
15 | extern const void __nosave_begin, __nosave_end; | 17 | extern const void __nosave_begin, __nosave_end; |
16 | 18 | ||
19 | /* | ||
20 | * The restore of the saved pages in a hibernation image will set | ||
21 | * the change and referenced bits in the storage key for each page. | ||
22 | * Overindication of the referenced bits after a hibernation cycle | ||
23 | * does not cause any harm but the overindication of the change bits | ||
24 | * would cause trouble. | ||
25 | * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each | ||
26 | * page to the most significant byte of the associated page frame | ||
27 | * number in the hibernation image. | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * Key storage is allocated as a linked list of pages. | ||
32 | * The size of the keys array is (PAGE_SIZE - sizeof(long)) | ||
33 | */ | ||
34 | struct page_key_data { | ||
35 | struct page_key_data *next; | ||
36 | unsigned char data[]; | ||
37 | }; | ||
38 | |||
39 | #define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *)) | ||
40 | |||
41 | static struct page_key_data *page_key_data; | ||
42 | static struct page_key_data *page_key_rp, *page_key_wp; | ||
43 | static unsigned long page_key_rx, page_key_wx; | ||
44 | |||
45 | /* | ||
46 | * For each page in the hibernation image one additional byte is | ||
47 | * stored in the most significant byte of the page frame number. | ||
48 | * On suspend no additional memory is required but on resume the | ||
49 | * keys need to be memorized until the page data has been restored. | ||
50 | * Only then can the storage keys be set to their old state. | ||
51 | */ | ||
52 | unsigned long page_key_additional_pages(unsigned long pages) | ||
53 | { | ||
54 | return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * Free page_key_data list of arrays. | ||
59 | */ | ||
60 | void page_key_free(void) | ||
61 | { | ||
62 | struct page_key_data *pkd; | ||
63 | |||
64 | while (page_key_data) { | ||
65 | pkd = page_key_data; | ||
66 | page_key_data = pkd->next; | ||
67 | free_page((unsigned long) pkd); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Allocate page_key_data list of arrays with enough room to store | ||
73 | * one byte for each page in the hibernation image. | ||
74 | */ | ||
75 | int page_key_alloc(unsigned long pages) | ||
76 | { | ||
77 | struct page_key_data *pk; | ||
78 | unsigned long size; | ||
79 | |||
80 | size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
81 | while (size--) { | ||
82 | pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL); | ||
83 | if (!pk) { | ||
84 | page_key_free(); | ||
85 | return -ENOMEM; | ||
86 | } | ||
87 | pk->next = page_key_data; | ||
88 | page_key_data = pk; | ||
89 | } | ||
90 | page_key_rp = page_key_wp = page_key_data; | ||
91 | page_key_rx = page_key_wx = 0; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Save the storage key into the upper 8 bits of the page frame number. | ||
97 | */ | ||
98 | void page_key_read(unsigned long *pfn) | ||
99 | { | ||
100 | unsigned long addr; | ||
101 | |||
102 | addr = (unsigned long) page_address(pfn_to_page(*pfn)); | ||
103 | *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Extract the storage key from the upper 8 bits of the page frame number | ||
108 | * and store it in the page_key_data list of arrays. | ||
109 | */ | ||
110 | void page_key_memorize(unsigned long *pfn) | ||
111 | { | ||
112 | page_key_wp->data[page_key_wx] = *(unsigned char *) pfn; | ||
113 | *(unsigned char *) pfn = 0; | ||
114 | if (++page_key_wx < PAGE_KEY_DATA_SIZE) | ||
115 | return; | ||
116 | page_key_wp = page_key_wp->next; | ||
117 | page_key_wx = 0; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Get the next key from the page_key_data list of arrays and set the | ||
122 | * storage key of the page referred by @address. If @address refers to | ||
123 | * a "safe" page the swsusp_arch_resume code will transfer the storage | ||
124 | * key from the buffer page to the original page. | ||
125 | */ | ||
126 | void page_key_write(void *address) | ||
127 | { | ||
128 | page_set_storage_key((unsigned long) address, | ||
129 | page_key_rp->data[page_key_rx], 0); | ||
130 | if (++page_key_rx < PAGE_KEY_DATA_SIZE) | ||
131 | return; | ||
132 | page_key_rp = page_key_rp->next; | ||
133 | page_key_rx = 0; | ||
134 | } | ||
135 | |||
17 | int pfn_is_nosave(unsigned long pfn) | 136 | int pfn_is_nosave(unsigned long pfn) |
18 | { | 137 | { |
19 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); | 138 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
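The page_key_* helpers implement the ARCH_SAVE_PAGE_KEYS hooks referred to in the comment block above; the calling side lives in the generic hibernation code, so the order sketched here is an assumption about how kernel/power/snapshot.c drives them: page_key_additional_pages() sizes the extra buffer, page_key_read() tucks each page's key into the pfn as the image is written, and on resume page_key_alloc(), page_key_memorize(), page_key_write() and page_key_free() run in that order. A resume-side sketch with hypothetical iterators standing in for the snapshot code:

	/* Sketch only: the two hypothetical for_each_* iterators stand in for
	 * the generic snapshot code that actually calls these hooks. */
	static int example_restore_storage_keys(unsigned long nr_image_pages)
	{
		unsigned long pfn;
		void *addr;
		int rc;

		rc = page_key_alloc(nr_image_pages);
		if (rc)
			return rc;
		for_each_image_pfn(pfn)		/* pfns read back from the image */
			page_key_memorize(&pfn);	/* strip and remember the key byte */
		for_each_restored_page(addr)	/* pages whose data is back in place */
			page_key_write(addr);	/* re-apply the remembered key */
		page_key_free();
		return 0;
	}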
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 51bcdb50a230..acb78cdee896 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -136,11 +136,14 @@ ENTRY(swsusp_arch_resume) | |||
136 | 0: | 136 | 0: |
137 | lg %r2,8(%r1) | 137 | lg %r2,8(%r1) |
138 | lg %r4,0(%r1) | 138 | lg %r4,0(%r1) |
139 | iske %r0,%r4 | ||
139 | lghi %r3,PAGE_SIZE | 140 | lghi %r3,PAGE_SIZE |
140 | lghi %r5,PAGE_SIZE | 141 | lghi %r5,PAGE_SIZE |
141 | 1: | 142 | 1: |
142 | mvcle %r2,%r4,0 | 143 | mvcle %r2,%r4,0 |
143 | jo 1b | 144 | jo 1b |
145 | lg %r2,8(%r1) | ||
146 | sske %r0,%r2 | ||
144 | lg %r1,16(%r1) | 147 | lg %r1,16(%r1) |
145 | ltgr %r1,%r1 | 148 | ltgr %r1,%r1 |
146 | jnz 0b | 149 | jnz 0b |
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 5c9e439bf3f6..2a94b774695c 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c | |||
@@ -442,7 +442,7 @@ void s390_adjust_jiffies(void) | |||
442 | */ | 442 | */ |
443 | FP_UNPACK_SP(SA, &fmil); | 443 | FP_UNPACK_SP(SA, &fmil); |
444 | if ((info->capability >> 23) == 0) | 444 | if ((info->capability >> 23) == 0) |
445 | FP_FROM_INT_S(SB, info->capability, 32, int); | 445 | FP_FROM_INT_S(SB, (long) info->capability, 64, long); |
446 | else | 446 | else |
447 | FP_UNPACK_SP(SB, &info->capability); | 447 | FP_UNPACK_SP(SB, &info->capability); |
448 | FP_DIV_S(SR, SA, SB); | 448 | FP_DIV_S(SR, SA, SB); |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index dff933065ab6..ebbfab3c6e5a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <asm/timer.h> | 48 | #include <asm/timer.h> |
49 | #include <asm/etr.h> | 49 | #include <asm/etr.h> |
50 | #include <asm/cio.h> | 50 | #include <asm/cio.h> |
51 | #include "entry.h" | ||
51 | 52 | ||
52 | /* change this if you have some constant time drift */ | 53 | /* change this if you have some constant time drift */ |
53 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 54 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
@@ -109,10 +110,14 @@ static void fixup_clock_comparator(unsigned long long delta) | |||
109 | set_clock_comparator(S390_lowcore.clock_comparator); | 110 | set_clock_comparator(S390_lowcore.clock_comparator); |
110 | } | 111 | } |
111 | 112 | ||
112 | static int s390_next_event(unsigned long delta, | 113 | static int s390_next_ktime(ktime_t expires, |
113 | struct clock_event_device *evt) | 114 | struct clock_event_device *evt) |
114 | { | 115 | { |
115 | S390_lowcore.clock_comparator = get_clock() + delta; | 116 | u64 nsecs; |
117 | |||
118 | nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset())); | ||
119 | do_div(nsecs, 125); | ||
120 | S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9); | ||
116 | set_clock_comparator(S390_lowcore.clock_comparator); | 121 | set_clock_comparator(S390_lowcore.clock_comparator); |
117 | return 0; | 122 | return 0; |
118 | } | 123 | } |
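The arithmetic in s390_next_ktime() follows from the TOD clock format: bit 51 of a TOD value ticks once per microsecond, so one TOD unit is 2^-12 microseconds and one nanosecond corresponds to 4096/1000 = 512/125 units. Dividing the nanosecond count by 125 and shifting left by 9 is exactly that multiplication by 4.096 (doing the divide first loses at most 124 ns, harmless for a wakeup comparator), and adding TOD_UNIX_EPOCH rebases the result to the absolute TOD clock, whose epoch is 1900-01-01. As a sketch:

	/* Sketch: nanoseconds -> TOD clock units (1 ns = 512/125 units) */
	static inline u64 ns_to_tod(u64 nsecs)
	{
		do_div(nsecs, 125);	/* do_div() divides in place ... */
		return nsecs << 9;	/* ... then multiply by 512 */
	}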
@@ -137,14 +142,15 @@ void init_cpu_timer(void) | |||
137 | cpu = smp_processor_id(); | 142 | cpu = smp_processor_id(); |
138 | cd = &per_cpu(comparators, cpu); | 143 | cd = &per_cpu(comparators, cpu); |
139 | cd->name = "comparator"; | 144 | cd->name = "comparator"; |
140 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | 145 | cd->features = CLOCK_EVT_FEAT_ONESHOT | |
146 | CLOCK_EVT_FEAT_KTIME; | ||
141 | cd->mult = 16777; | 147 | cd->mult = 16777; |
142 | cd->shift = 12; | 148 | cd->shift = 12; |
143 | cd->min_delta_ns = 1; | 149 | cd->min_delta_ns = 1; |
144 | cd->max_delta_ns = LONG_MAX; | 150 | cd->max_delta_ns = LONG_MAX; |
145 | cd->rating = 400; | 151 | cd->rating = 400; |
146 | cd->cpumask = cpumask_of(cpu); | 152 | cd->cpumask = cpumask_of(cpu); |
147 | cd->set_next_event = s390_next_event; | 153 | cd->set_next_ktime = s390_next_ktime; |
148 | cd->set_mode = s390_set_mode; | 154 | cd->set_mode = s390_set_mode; |
149 | 155 | ||
150 | clockevents_register_device(cd); | 156 | clockevents_register_device(cd); |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 0cd340b72632..77b8942b9a15 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -299,8 +299,8 @@ out: | |||
299 | } | 299 | } |
300 | __initcall(init_topology_update); | 300 | __initcall(init_topology_update); |
301 | 301 | ||
302 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, | 302 | static void __init alloc_masks(struct sysinfo_15_1_x *info, |
303 | int offset) | 303 | struct mask_info *mask, int offset) |
304 | { | 304 | { |
305 | int i, nr_masks; | 305 | int i, nr_masks; |
306 | 306 | ||
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index ffabcd9d3363..a9807dd86276 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -200,7 +200,7 @@ void show_registers(struct pt_regs *regs) | |||
200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
202 | #ifdef CONFIG_64BIT | 202 | #ifdef CONFIG_64BIT |
203 | printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); | 203 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); |
204 | #endif | 204 | #endif |
205 | printk("\n%s GPRS: " FOURLONG, mode, | 205 | printk("\n%s GPRS: " FOURLONG, mode, |
206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
@@ -334,7 +334,8 @@ void __kprobes do_per_trap(struct pt_regs *regs) | |||
334 | info.si_signo = SIGTRAP; | 334 | info.si_signo = SIGTRAP; |
335 | info.si_errno = 0; | 335 | info.si_errno = 0; |
336 | info.si_code = TRAP_HWBKPT; | 336 | info.si_code = TRAP_HWBKPT; |
337 | info.si_addr = (void *) current->thread.per_event.address; | 337 | info.si_addr = |
338 | (void __force __user *) current->thread.per_event.address; | ||
338 | force_sig_info(SIGTRAP, &info, current); | 339 | force_sig_info(SIGTRAP, &info, current); |
339 | } | 340 | } |
340 | 341 | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 2d6228f60cd6..bb48977f5469 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -170,7 +170,8 @@ void __kprobes vtime_stop_cpu(void) | |||
170 | psw_t psw; | 170 | psw_t psw; |
171 | 171 | ||
172 | /* Wait for external, I/O or machine check interrupt. */ | 172 | /* Wait for external, I/O or machine check interrupt. */ |
173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; | 173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | |
174 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
174 | 175 | ||
175 | idle->nohz_delay = 0; | 176 | idle->nohz_delay = 0; |
176 | 177 | ||
@@ -183,7 +184,8 @@ void __kprobes vtime_stop_cpu(void) | |||
183 | * set_cpu_timer(VTIMER_MAX_SLICE); | 184 | * set_cpu_timer(VTIMER_MAX_SLICE); |
184 | * idle->idle_enter = get_clock(); | 185 | * idle->idle_enter = get_clock(); |
185 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 186 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
186 | * PSW_MASK_IO | PSW_MASK_EXT); | 187 | * PSW_MASK_DAT | PSW_MASK_IO | |
188 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
187 | * The difference is that the inline assembly makes sure that | 189 | * The difference is that the inline assembly makes sure that |
188 | * the last three instruction are stpt, stck and lpsw in that | 190 | * the last three instruction are stpt, stck and lpsw in that |
189 | * order. This is done to increase the precision. | 191 | * order. This is done to increase the precision. |
@@ -216,7 +218,8 @@ void __kprobes vtime_stop_cpu(void) | |||
216 | * vq->idle = get_cpu_timer(); | 218 | * vq->idle = get_cpu_timer(); |
217 | * idle->idle_enter = get_clock(); | 219 | * idle->idle_enter = get_clock(); |
218 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 220 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
219 | * PSW_MASK_IO | PSW_MASK_EXT); | 221 | * PSW_MASK_DAT | PSW_MASK_IO | |
222 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
220 | * The difference is that the inline assembly makes sure that | 223 | * The difference is that the inline assembly makes sure that |
221 | * the last three instruction are stpt, stck and lpsw in that | 224 | * the last three instruction are stpt, stck and lpsw in that |
222 | * order. This is done to increase the precision. | 225 | * order. This is done to increase the precision. |
@@ -458,7 +461,7 @@ void add_virt_timer_periodic(void *new) | |||
458 | } | 461 | } |
459 | EXPORT_SYMBOL(add_virt_timer_periodic); | 462 | EXPORT_SYMBOL(add_virt_timer_periodic); |
460 | 463 | ||
461 | int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) | 464 | static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) |
462 | { | 465 | { |
463 | struct vtimer_queue *vq; | 466 | struct vtimer_queue *vq; |
464 | unsigned long flags; | 467 | unsigned long flags; |