author		Jiri Kosina <jkosina@suse.cz>	2011-09-15 09:08:05 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2011-09-15 09:08:18 -0400
commit		e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree		407361230bf6733f63d8e788e4b5e6566ee04818 /arch/s390
parent		10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent		cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on a more recent version of the tree.
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                  |  9
-rw-r--r--  arch/s390/include/asm/atomic.h     |  7
-rw-r--r--  arch/s390/include/asm/bitops.h     |  5
-rw-r--r--  arch/s390/include/asm/ipl.h        |  1
-rw-r--r--  arch/s390/include/asm/lowcore.h    | 11
-rw-r--r--  arch/s390/include/asm/processor.h  |  2
-rw-r--r--  arch/s390/include/asm/ptrace.h     |  1
-rw-r--r--  arch/s390/include/asm/system.h     |  1
-rw-r--r--  arch/s390/kernel/asm-offsets.c     | 10
-rw-r--r--  arch/s390/kernel/base.S            | 36
-rw-r--r--  arch/s390/kernel/compat_signal.c   | 43
-rw-r--r--  arch/s390/kernel/compat_wrapper.S  |  6
-rw-r--r--  arch/s390/kernel/dis.c             |  2
-rw-r--r--  arch/s390/kernel/early.c           | 14
-rw-r--r--  arch/s390/kernel/entry.S           | 28
-rw-r--r--  arch/s390/kernel/entry64.S         | 20
-rw-r--r--  arch/s390/kernel/ipl.c             | 50
-rw-r--r--  arch/s390/kernel/reipl64.S         | 80
-rw-r--r--  arch/s390/kernel/setup.c           | 25
-rw-r--r--  arch/s390/kernel/signal.c          | 61
-rw-r--r--  arch/s390/kernel/smp.c             | 24
-rw-r--r--  arch/s390/kernel/syscalls.S        |  2
-rw-r--r--  arch/s390/kernel/traps.c           |  2
-rw-r--r--  arch/s390/mm/maccess.c             | 16
-rw-r--r--  arch/s390/mm/pgtable.c             |  1
25 files changed, 313 insertions(+), 144 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c03fef7a9c22..ed5cb5af5281 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -81,6 +81,7 @@ config S390
 	select INIT_ALL_POSSIBLE
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
@@ -273,11 +274,11 @@ config MARCH_Z10
 	  on older machines.
 
 config MARCH_Z196
-	bool "IBM zEnterprise 196"
+	bool "IBM zEnterprise 114 and 196"
 	help
-	  Select this to enable optimizations for IBM zEnterprise 196
-	  (2817 series). The kernel will be slightly faster but will not work
-	  on older machines.
+	  Select this to enable optimizations for IBM zEnterprise 114 and 196
+	  (2818 and 2817 series). The kernel will be slightly faster but will
+	  not work on older machines.
 
 endchoice
 
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d9db13810d15..8517d2ae3b5c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,7 +93,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -105,10 +105,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != u;
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #undef __CS_LOOP
 
@@ -332,6 +331,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
-#include <asm-generic/atomic-long.h>
-
 #endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 667c6e9f6a34..e5beb490959b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -832,10 +832,7 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
 
 #include <asm-generic/bitops/le.h>
 
-#define ext2_set_bit_atomic(lock, nr, addr)	\
-	test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)	\
-	test_and_clear_bit_le(nr, addr)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 
 #endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5e95d95450b3..97cc4403fabf 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -167,5 +167,6 @@ enum diag308_rc {
 };
 
 extern int diag308(unsigned long subcode, void *addr);
+extern void diag308_reset(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f26280d9e88d..e85c911aabf0 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -18,6 +18,7 @@ void system_call(void);
 void pgm_check_handler(void);
 void mcck_int_handler(void);
 void io_int_handler(void);
+void psw_restart_int_handler(void);
 
 #ifdef CONFIG_32BIT
 
@@ -150,7 +151,10 @@ struct _lowcore {
 	 */
 	__u32	ipib;			/* 0x0e00 */
 	__u32	ipib_checksum;		/* 0x0e04 */
-	__u8	pad_0x0e08[0x0f00-0x0e08];	/* 0x0e08 */
+
+	/* 64 bit save area */
+	__u64	save_area_64;		/* 0x0e08 */
+	__u8	pad_0x0e10[0x0f00-0x0e10];	/* 0x0e10 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];	/* 0x0f00 */
@@ -286,7 +290,10 @@ struct _lowcore {
 	 */
 	__u64	ipib;			/* 0x0e00 */
 	__u32	ipib_checksum;		/* 0x0e08 */
-	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+
+	/* 64 bit save area */
+	__u64	save_area_64;		/* 0x0e0c */
+	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];	/* 0x0f00 */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 55dfcc8bdc0d..a4b6229e5d4b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -119,14 +119,12 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user32_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 9ad628a8574a..62fd80c9e98c 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -539,7 +539,6 @@ struct user_regs_struct
  * These are defined as per linux/ptrace.h, which see.
  */
 #define arch_has_single_step()	(1)
-extern void show_regs(struct pt_regs * regs);
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index d382629a0172..6582f69f2389 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -113,6 +113,7 @@ extern void pfault_fini(void);
 
 extern void cmma_init(void);
 extern int memcpy_real(void *, void *, size_t);
+extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
 
 #define finish_arch_switch(prev) do {				     \
 	set_fs(current->thread.mm_segment);			     \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 05d8f38734ec..532fd4322156 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -27,12 +27,9 @@ int main(void)
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause,
-	       offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address,
-	       offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid,
-	       offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -142,6 +139,7 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 209938c1dfc8..255435663bf8 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -76,6 +76,42 @@ s390_base_pgm_handler_fn:
 	.quad	0
 	.previous
 
+#
+# Calls diag 308 subcode 1 and continues execution
+#
+# The following conditions must be ensured before calling this function:
+# * Prefix register = 0
+# * Lowcore protection is disabled
+#
+ENTRY(diag308_reset)
+	larl	%r4,.Lctlregs		# Save control registers
+	stctg	%c0,%c15,0(%r4)
+	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
+	lghi	%r3,0
+	lg	%r4,0(%r4)		# Save PSW
+	sturg	%r4,%r3			# Use sturg, because of large pages
+	lghi	%r1,1
+	diag	%r1,%r1,0x308
+.Lrestart_part2:
+	lhi	%r0,0			# Load r0 with zero
+	lhi	%r1,2			# Use mode 2 = ESAME (dump)
+	sigp	%r1,%r0,0x12		# Switch to ESAME mode
+	sam64				# Switch to 64 bit addressing mode
+	larl	%r4,.Lctlregs		# Restore control registers
+	lctlg	%c0,%c15,0(%r4)
+	br	%r14
+.align 16
+.Lrestart_psw:
+	.long	0x00080000,0x80000000 + .Lrestart_part2
+
+	.section .bss
+.align 8
+.Lctlregs:
+	.rept	16
+	.quad	0
+	.endr
+	.previous
+
 #else /* CONFIG_64BIT */
 
 ENTRY(s390_base_mcck_handler)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eee999853a7c..a9a285b8c4ad 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
 	st.ss_sp = compat_ptr(ss_sp);
 	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
 	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
 	if (err)
 		goto badframe;
-
 	set_fs (KERNEL_DS);
 	do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
 	set_fs (old_fs);
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -605,10 +589,10 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-int
-handle_signal32(unsigned long sig, struct k_sigaction *ka,
-		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_rt_frame32(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame32(sig, ka, oldset, regs);
-
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-	return ret;
+	if (ret)
+		return ret;
+	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&blocked, sig);
+	set_current_blocked(&blocked);
+	return 0;
 }
 
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 08ab9aa6a0d5..7526db6bf501 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -665,12 +665,6 @@ ENTRY(sys32_poll_wrapper)
 	lgfr	%r4,%r4			# long
 	jg	sys_poll		# branch to system call
 
-ENTRY(compat_sys_nfsservctl_wrapper)
-	lgfr	%r2,%r2			# int
-	llgtr	%r3,%r3			# struct compat_nfsctl_arg*
-	llgtr	%r4,%r4			# union compat_nfsctl_res*
-	jg	compat_sys_nfsservctl	# branch to system call
-
 ENTRY(sys32_setresgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 1ca3d1d6a86c..45df6d456aa1 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -27,7 +27,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/mathemu.h>
 #include <asm/cpcmd.h>
 #include <asm/lowcore.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 068f8465c4ee..f297456dba7a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -396,17 +396,19 @@ static __init void detect_machine_facilities(void)
 static __init void rescue_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
+	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
 	/*
-	 * Move the initrd right behind the bss section in case it starts
-	 * within the bss section. So we don't overwrite it when the bss
-	 * section gets cleared.
+	 * Just like in case of IPL from VM reader we make sure there is a
+	 * gap of 4MB between end of kernel and start of initrd.
+	 * That way we can also be sure that saving an NSS will succeed,
+	 * which however only requires different segments.
 	 */
 	if (!INITRD_START || !INITRD_SIZE)
 		return;
-	if (INITRD_START >= (unsigned long) __bss_stop)
+	if (INITRD_START >= min_initrd_addr)
 		return;
-	memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
-	INITRD_START = (unsigned long) __bss_stop;
+	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+	INITRD_START = min_initrd_addr;
 #endif
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3eab7cfab07c..02ec8fe7d03f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -849,6 +849,34 @@ restart_crash:
 restart_go:
 #endif
 
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+	st	%r15,__LC_SAVE_AREA_64(%r0)	# save r15
+	basr	%r15,0
+0:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
+	l	%r15,0(%r15)
+	ahi	%r15,-SP_SIZE			# make room for pt_regs
+	stm	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
+	mvc	SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+	mvc	SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	basr	%r14,0
+1:	l	%r14,.Ldo_restart-1b(%r14)
+	basr	%r14,%r14
+
+	basr	%r14,0				# load disabled wait PSW if
+2:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
+	.align 4
+.Ldo_restart:
+	.long	do_restart
+.Lrestart_stack:
+	.long	restart_stack
+	.align 8
+restart_psw_crash:
+	.long	0x000a0000,0x00000000 + restart_psw_crash
+
 	.section .kprobes.text, "ax"
 
 #ifdef CONFIG_CHECK_STACK
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7a0fd426ca92..5f729d627cef 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -865,6 +865,26 @@ restart_crash:
 restart_go:
 #endif
 
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+	stg	%r15,__LC_SAVE_AREA_64(%r0)	# save r15
+	larl	%r15,restart_stack		# load restart stack
+	lg	%r15,0(%r15)
+	aghi	%r15,-SP_SIZE			# make room for pt_regs
+	stmg	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
+	mvc	SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+	mvc	SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	brasl	%r14,do_restart
+
+	larl	%r14,restart_psw_crash		# load disabled wait PSW if
+	lpswe	0(%r14)				# do_restart returns
+	.align 8
+restart_psw_crash:
+	.quad	0x0002000080000000,0x0000000000000000 + restart_psw_crash
+
 	.section .kprobes.text, "ax"
 
 #ifdef CONFIG_CHECK_STACK
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index a689070be287..48c710206366 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -45,11 +45,13 @@
  * - halt
  * - power off
  * - reipl
+ * - restart
  */
 #define ON_PANIC_STR		"on_panic"
 #define ON_HALT_STR		"on_halt"
 #define ON_POFF_STR		"on_poff"
 #define ON_REIPL_STR		"on_reboot"
+#define ON_RESTART_STR		"on_restart"
 
 struct shutdown_action;
 struct shutdown_trigger {
@@ -1218,7 +1220,7 @@ static int __init reipl_fcp_init(void)
 	/* sysfs: create fcp kset for mixing attr group and bin attrs */
 	reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
 					     &reipl_kset->kobj);
-	if (!reipl_kset) {
+	if (!reipl_fcp_kset) {
 		free_page((unsigned long) reipl_block_fcp);
 		return -ENOMEM;
 	}
@@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128];
 static char vmcmd_on_panic[128];
 static char vmcmd_on_halt[128];
 static char vmcmd_on_poff[128];
+static char vmcmd_on_restart[128];
 
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
 
 static struct attribute *vmcmd_attrs[] = {
 	&sys_vmcmd_on_reboot_attr.attr,
 	&sys_vmcmd_on_panic_attr.attr,
 	&sys_vmcmd_on_halt_attr.attr,
 	&sys_vmcmd_on_poff_attr.attr,
+	&sys_vmcmd_on_restart_attr.attr,
 	NULL,
 };
 
@@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
 		cmd = vmcmd_on_halt;
 	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
 		cmd = vmcmd_on_poff;
+	else if (strcmp(trigger->name, ON_RESTART_STR) == 0)
+		cmd = vmcmd_on_restart;
 	else
 		return;
 
@@ -1611,7 +1618,8 @@ static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
 
 static void stop_run(struct shutdown_trigger *trigger)
 {
-	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
+	if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
+	    strcmp(trigger->name, ON_RESTART_STR) == 0)
 		disabled_wait((unsigned long) __builtin_return_address(0));
 	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
 		cpu_relax();
@@ -1707,6 +1715,34 @@ static void do_panic(void)
 	stop_run(&on_panic_trigger);
 }
 
+/* on restart */
+
+static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
+	&stop_action};
+
+static ssize_t on_restart_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_restart_trigger.action->name);
+}
+
+static ssize_t on_restart_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_restart_trigger, len);
+}
+
+static struct kobj_attribute on_restart_attr =
+	__ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+
+void do_restart(void)
+{
+	smp_send_stop();
+	on_restart_trigger.action->fn(&on_restart_trigger);
+	stop_run(&on_restart_trigger);
+}
+
 /* on halt */
 
 static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
@@ -1783,7 +1819,9 @@ static void __init shutdown_triggers_init(void)
 	if (sysfs_create_file(&shutdown_actions_kset->kobj,
 			      &on_poff_attr.attr))
 		goto fail;
-
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_restart_attr.attr))
+		goto fail;
 	return;
 fail:
 	panic("shutdown_triggers_init failed\n");
@@ -1959,6 +1997,12 @@ static void do_reset_calls(void)
 {
 	struct reset_call *reset;
 
+#ifdef CONFIG_64BIT
+	if (diag308_set_works) {
+		diag308_reset();
+		return;
+	}
+#endif
 	list_for_each_entry(reset, &rcall, list)
 		reset->fn();
 }
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 78eb7cfbd3d1..e690975403f4 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp 2000,2009
+ * Copyright IBM Corp 2000,2011
  * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
  *	       Denis Joseph Barrow,
  */
@@ -8,6 +8,64 @@
 #include <asm/asm-offsets.h>
 
 #
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
+#
+ENTRY(store_status)
+	/* Save register one and load save area base */
+	stg	%r1,__LC_SAVE_AREA_64(%r0)
+	lghi	%r1,SAVE_AREA_BASE
+	/* General purpose registers */
+	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lg	%r2,__LC_SAVE_AREA_64(%r0)
+	stg	%r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+	/* Control registers */
+	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Access registers */
+	stam	%a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Floating point registers */
+	std	%f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Floating point control register */
+	stfpc	__LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* CPU timer */
+	stpt	__LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Saved prefix register */
+	larl	%r2,dump_prefix_page
+	mvc	__LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+	/* Clock comparator - seven bytes */
+	larl	%r2,.Lclkcmp
+	stckc	0(%r2)
+	mvc	__LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+	/* Program status word */
+	epsw	%r2,%r3
+	st	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+	st	%r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+	larl	%r2,store_status
+	stg	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+	br	%r14
+.align	8
+.Lclkcmp:	.quad	0x0000000000000000
+
+#
 # do_reipl_asm
 # Parameter: r2 = schid of reipl device
 #
@@ -15,22 +73,7 @@
 ENTRY(do_reipl_asm)
 	basr	%r13,0
 .Lpg0:	lpswe	.Lnewpsw-.Lpg0(%r13)
-.Lpg1:	# do store status of all registers
-
-	stg	%r1,.Lregsave-.Lpg0(%r13)
-	lghi	%r1,0x1000
-	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
-	lg	%r0,.Lregsave-.Lpg0(%r13)
-	stg	%r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
-	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
-	stam	%a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
-	lg	%r10,.Ldump_pfx-.Lpg0(%r13)
-	mvc	__LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
-	stfpc	__LC_FP_CREG_SAVE_AREA-0x1000(%r1)
-	stckc	.Lclkcmp-.Lpg0(%r13)
-	mvc	__LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13)
-	stpt	__LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
-	stg	%r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
+.Lpg1:	brasl	%r14,store_status
 
 	lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
 	lgr	%r1,%r2
@@ -67,10 +110,7 @@ ENTRY(do_reipl_asm)
 	st	%r14,.Ldispsw+12-.Lpg0(%r13)
 	lpswe	.Ldispsw-.Lpg0(%r13)
 	.align	8
-.Lclkcmp:	.quad	0x0000000000000000
 .Lall:	.quad	0x00000000ff000000
-.Ldump_pfx:	.quad	dump_prefix_page
-.Lregsave:	.quad	0x0000000000000000
 	.align	16
 /*
  * These addresses have to be 31 bit otherwise
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0c35dee10b00..7b371c37061d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -346,7 +346,7 @@ setup_lowcore(void)
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
@@ -529,6 +529,27 @@ static void __init setup_memory_end(void)
 	memory_end = memory_size;
 }
 
+void *restart_stack __attribute__((__section__(".data")));
+
+/*
+ * Setup new PSW and allocate stack for PSW restart interrupt
+ */
+static void __init setup_restart_psw(void)
+{
+	psw_t psw;
+
+	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack += ASYNC_SIZE;
+
+	/*
+	 * Setup restart PSW for absolute zero lowcore. This is necesary
+	 * if PSW restart is done on an offline CPU that has lowcore zero
+	 */
+	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
+}
+
 static void __init
 setup_memory(void)
 {
@@ -731,6 +752,7 @@ static void __init setup_hwcaps(void)
 		strcpy(elf_platform, "z10");
 		break;
 	case 0x2817:
+	case 0x2818:
 		strcpy(elf_platform, "z196");
 		break;
 	}
@@ -792,6 +814,7 @@ setup_arch(char **cmdline_p)
 	setup_addressing_mode();
 	setup_memory();
 	setup_resources();
+	setup_restart_psw();
 	setup_lowcore();
 
 	cpu_init();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index abbb3c3c7aab..9a40e1cc5ec3 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,17 +57,15 @@ typedef struct
  */
 SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
 {
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	sigset_t blocked;
 
+	current->saved_sigmask = current->blocked;
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
 
@@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
-
 	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
 		       regs->gprs[15]) == -EFAULT)
 		goto badframe;
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -385,14 +369,11 @@ give_sigsegv:
 	return -EFAULT;
 }
 
-/*
- * OK, we're invoking a handler
- */
-
-static int
-handle_signal(unsigned long sig, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+			 siginfo_t *info, sigset_t *oldset,
+			 struct pt_regs *regs)
 {
+	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_rt_frame(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
-
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-
-	return ret;
+	if (ret)
+		return ret;
+	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&blocked, sig);
+	set_current_blocked(&blocked);
+	return 0;
 }
 
 /*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a6d85c0a7f20..6ab16ac64d29 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -452,23 +452,27 @@ out:
  */
 int __cpuinit start_secondary(void *cpuvoid)
 {
-	/* Setup the cpu */
 	cpu_init();
 	preempt_disable();
-	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
-	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
-	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
 
-	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
-	/* Mark this cpu as online */
 	ipi_call_lock();
 	set_cpu_online(smp_processor_id(), true);
 	ipi_call_unlock();
-	/* Switch on interrupts */
+	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
+	S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	S390_lowcore.restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+	__ctl_set_bit(0, 28); /* Enable lowcore protection */
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * active before enabling interrupts.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
 	local_irq_enable();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
@@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
 	lowcore->async_stack = async_stack + ASYNC_SIZE;
 	lowcore->panic_stack = panic_stack + PAGE_SIZE;
-
+	lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lowcore->restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	if (user_mode != HOME_SPACE_MODE)
+		lowcore->restart_psw.mask |= PSW_ASC_HOME;
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE) {
 		unsigned long save_area;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 6ee39ef8fe4a..73eb08c874fb 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -177,7 +177,7 @@ SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old get
 NI_SYSCALL			/* for vm86 */
 NI_SYSCALL			/* old sys_query_module */
 SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
-SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
+NI_SYSCALL			/* old nfsservctl */
 SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper)	/* 170 old setresgid16 syscall */
 SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper)	/* old getresgid16 syscall */
 SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index e9372c77cced..ffabcd9d3363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -36,7 +36,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/mathemu.h>
 #include <asm/cpcmd.h>
 #include <asm/lowcore.h>
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 51e5cd9b906a..5dbbaa6e594c 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count)
 	arch_local_irq_restore(flags);
 	return rc;
 }
+
+/*
+ * Copy memory to absolute zero
+ */
+void copy_to_absolute_zero(void *dest, void *src, size_t count)
+{
+	unsigned long cr0;
+
+	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
+	preempt_disable();
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	memcpy_real(dest + store_prefix(), src, count);
+	__ctl_load(cr0, 0, 0);
+	preempt_enable();
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2adb23938a7f..4d1f2bce87b3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -528,6 +528,7 @@ static inline void page_table_free_pgste(unsigned long *table)
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 					unsigned long vmaddr)
 {
+	return NULL;
 }
 
 static inline void page_table_free_pgste(unsigned long *table)