Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig | 23
-rw-r--r--  arch/s390/include/asm/atomic.h | 2
-rw-r--r--  arch/s390/include/asm/bitsperlong.h | 13
-rw-r--r--  arch/s390/include/asm/compat.h | 19
-rw-r--r--  arch/s390/include/asm/cpu.h | 32
-rw-r--r--  arch/s390/include/asm/cputime.h | 19
-rw-r--r--  arch/s390/include/asm/ftrace.h | 21
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 5
-rw-r--r--  arch/s390/include/asm/lowcore.h | 9
-rw-r--r--  arch/s390/include/asm/mman.h | 2
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 7
-rw-r--r--  arch/s390/include/asm/seccomp.h | 16
-rw-r--r--  arch/s390/include/asm/signal.h | 2
-rw-r--r--  arch/s390/include/asm/spinlock.h | 19
-rw-r--r--  arch/s390/include/asm/suspend.h | 5
-rw-r--r--  arch/s390/include/asm/syscall.h | 1
-rw-r--r--  arch/s390/include/asm/termios.h | 2
-rw-r--r--  arch/s390/include/asm/thread_info.h | 12
-rw-r--r--  arch/s390/include/asm/types.h | 6
-rw-r--r--  arch/s390/include/asm/uaccess.h | 16
-rw-r--r--  arch/s390/include/asm/unistd.h | 4
-rw-r--r--  arch/s390/kernel/Makefile | 7
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 17
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/entry.S | 7
-rw-r--r--  arch/s390/kernel/entry64.S | 7
-rw-r--r--  arch/s390/kernel/ftrace.c | 260
-rw-r--r--  arch/s390/kernel/head.S | 67
-rw-r--r--  arch/s390/kernel/kprobes.c | 31
-rw-r--r--  arch/s390/kernel/mcount.S | 212
-rw-r--r--  arch/s390/kernel/module.c | 2
-rw-r--r--  arch/s390/kernel/nmi.c | 2
-rw-r--r--  arch/s390/kernel/process.c | 3
-rw-r--r--  arch/s390/kernel/ptrace.c | 23
-rw-r--r--  arch/s390/kernel/s390_ext.c | 5
-rw-r--r--  arch/s390/kernel/sclp.S | 327
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/kernel/signal.c | 3
-rw-r--r--  arch/s390/kernel/smp.c | 3
-rw-r--r--  arch/s390/kernel/syscalls.S | 2
-rw-r--r--  arch/s390/kernel/time.c | 9
-rw-r--r--  arch/s390/kernel/vdso.c | 19
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/s390/kernel/vtime.c | 2
-rw-r--r--  arch/s390/kvm/intercept.c | 28
-rw-r--r--  arch/s390/kvm/interrupt.c | 59
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 65
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 4
-rw-r--r--  arch/s390/kvm/priv.c | 4
-rw-r--r--  arch/s390/kvm/sigp.c | 16
-rw-r--r--  arch/s390/lib/spinlock.c | 40
-rw-r--r--  arch/s390/mm/Makefile | 2
-rw-r--r--  arch/s390/mm/fault.c | 3
-rw-r--r--  arch/s390/mm/maccess.c | 61
-rw-r--r--  arch/s390/mm/mmap.c | 11
-rw-r--r--  arch/s390/mm/pgtable.c | 16
57 files changed, 1318 insertions(+), 243 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2eca5fe0e75b..99dc3ded6b49 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -82,6 +82,11 @@ config S390
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FTRACE_SYSCALLS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
@@ -567,6 +572,24 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
 	  the KVM hypervisor. This will add detection for KVM as well as a
 	  virtio transport. If KVM is detected, the virtio console will be
 	  the default console.
+
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	default y
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y.
+
 endmenu
 
 source "net/Kconfig"
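
The SECCOMP help text above describes classic strict mode. A minimal user-space sketch of the feature it enables, assuming the prctl(PR_SET_SECCOMP) interface that had superseded the /proc/<pid>/seccomp file by this kernel generation:

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP	22	/* from <linux/prctl.h> */
#endif
#define SECCOMP_MODE_STRICT	1

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0)) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}
	/* From here on only read(), write(), _exit() and sigreturn()
	 * succeed; any other system call kills the task with SIGKILL. */
	write(1, "sandboxed\n", 10);
	_exit(0);
}
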
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index de432f2de2d2..fca9dffcc669 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -275,6 +275,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/bitsperlong.h b/arch/s390/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..6b235aea9c66
--- /dev/null
+++ b/arch/s390/include/asm/bitsperlong.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_S390_BITSPERLONG_H
+#define __ASM_S390_BITSPERLONG_H
+
+#ifndef __s390x__
+#define __BITS_PER_LONG 32
+#else
+#define __BITS_PER_LONG 64
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_S390_BITSPERLONG_H */
+
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index de065b32381a..01a08020bc0e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -5,6 +5,7 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/thread_info.h>
 
 #define PSW32_MASK_PER		0x40000000UL
 #define PSW32_MASK_DAT		0x04000000UL
@@ -163,12 +164,28 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
+#ifdef CONFIG_COMPAT
+
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_31BIT);
+}
+
+#else
+
+static inline int is_compat_task(void)
+{
+	return 0;
+}
+
+#endif
+
 static inline void __user *compat_alloc_user_space(long len)
 {
 	unsigned long stack;
 
 	stack = KSTK_ESP(current);
-	if (test_thread_flag(TIF_31BIT))
+	if (is_compat_task())
 		stack &= 0x7fffffffUL;
 	return (void __user *) (stack - len);
 }
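
A hypothetical caller illustrating what the new helper buys: the !CONFIG_COMPAT stub returns a constant 0, so the compiler can discard the 31-bit branch entirely, and call sites (see process.c and ptrace.c later in this diff) no longer spell out TIF_31BIT. The function below is illustrative only, not part of the patch:

static unsigned long addr31_limit(void)
{
	if (is_compat_task())		/* 31-bit task: 2 GB address space */
		return 0x7fffffffUL;
	return TASK_SIZE;		/* full 64-bit address space */
}
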
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
deleted file mode 100644
index d60a2eefb17b..000000000000
--- a/arch/s390/include/asm/cpu.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *  include/asm-s390/cpu.h
- *
- *    Copyright IBM Corp. 2007
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#ifndef _ASM_S390_CPU_H_
-#define _ASM_S390_CPU_H_
-
-#include <linux/types.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
-
-struct s390_idle_data {
-	spinlock_t lock;
-	unsigned long long idle_count;
-	unsigned long long idle_enter;
-	unsigned long long idle_time;
-};
-
-DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-
-void vtime_start_cpu(void);
-
-static inline void s390_idle_check(void)
-{
-	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
-		vtime_start_cpu();
-}
-
-#endif /* _ASM_S390_CPU_H_ */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 941384fbd39c..ec917d42ee6d 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -9,6 +9,9 @@
 #ifndef _S390_CPUTIME_H
 #define _S390_CPUTIME_H
 
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 #include <asm/div64.h>
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
@@ -174,8 +177,24 @@ cputime64_to_clock_t(cputime64_t cputime)
 	return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
+struct s390_idle_data {
+	spinlock_t lock;
+	unsigned long long idle_count;
+	unsigned long long idle_enter;
+	unsigned long long idle_time;
+};
+
+DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+
+void vtime_start_cpu(void);
 cputime64_t s390_get_idle_time(int cpu);
 
 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
 
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
+		vtime_start_cpu();
+}
+
 #endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 5a5bc75e19d4..96c14a9102b8 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -2,7 +2,28 @@
 #define _ASM_S390_FTRACE_H
 
 #ifndef __ASSEMBLY__
+
 extern void _mcount(void);
+extern unsigned long ftrace_dyn_func;
+
+struct dyn_arch_ftrace { };
+
+#define MCOUNT_ADDR ((long)_mcount)
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET	18
+#define MCOUNT_INSN_SIZE	24
+#define MCOUNT_OFFSET		14
+#else
+#define MCOUNT_OFFSET_RET	26
+#define MCOUNT_INSN_SIZE	30
+#define MCOUNT_OFFSET		8
 #endif
 
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET;
+}
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
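
Worked reading of the adjustment (partly an assumption; the mcount.S hunks later in this diff define the actual code blocks): the address recorded at a call site points MCOUNT_OFFSET bytes into the MCOUNT_INSN_SIZE-byte block that dynamic ftrace rewrites, so subtracting MCOUNT_OFFSET recovers the block's start. A hypothetical sanity check under that assumption:

static int is_known_call_site(unsigned long recorded_addr,
			      unsigned long site_start)
{
	/* 64-bit case: recorded_addr - 14 == start of the 24-byte block */
	return ftrace_call_adjust(recorded_addr) == site_start;
}
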
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 54ea39f96ecd..a27d0d5a6f86 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -13,6 +13,8 @@
 
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <asm/debug.h>
 #include <asm/cpuid.h>
@@ -210,7 +212,8 @@ struct kvm_vcpu_arch
 	s390_fp_regs	guest_fpregs;
 	unsigned int	guest_acrs[NUM_ACRS];
 	struct kvm_s390_local_interrupt local_int;
-	struct timer_list ckc_timer;
+	struct hrtimer	ckc_timer;
+	struct tasklet_struct tasklet;
 	union  {
 		cpuid_t	cpu_id;
 		u64	stidp_data;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 3aeca492b147..5046ad6b7a63 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -30,6 +30,7 @@
 #define __LC_SUBCHANNEL_NR		0x00ba
 #define __LC_IO_INT_PARM		0x00bc
 #define __LC_IO_INT_WORD		0x00c0
+#define __LC_STFL_FAC_LIST		0x00c8
 #define __LC_MCCK_CODE			0x00e8
 
 #define __LC_DUMP_REIPL			0x0e00
@@ -67,6 +68,7 @@
 #define __LC_CPUID			0x02b0
 #define __LC_INT_CLOCK			0x02c8
 #define __LC_MACHINE_FLAGS		0x02d8
+#define __LC_FTRACE_FUNC		0x02dc
 #define __LC_IRB			0x0300
 #define __LC_PFAULT_INTPARM		0x0080
 #define __LC_CPU_TIMER_SAVE_AREA	0x00d8
@@ -112,6 +114,7 @@
 #define __LC_INT_CLOCK			0x0340
 #define __LC_VDSO_PER_CPU		0x0350
 #define __LC_MACHINE_FLAGS		0x0358
+#define __LC_FTRACE_FUNC		0x0360
 #define __LC_IRB			0x0380
 #define __LC_PASTE			0x03c0
 #define __LC_PFAULT_INTPARM		0x11b8
@@ -280,7 +283,8 @@ struct _lowcore
 	__u64	int_clock;			/* 0x02c8 */
 	__u64	clock_comparator;		/* 0x02d0 */
 	__u32	machine_flags;			/* 0x02d8 */
-	__u8	pad_0x02dc[0x0300-0x02dc];	/* 0x02dc */
+	__u32	ftrace_func;			/* 0x02dc */
+	__u8	pad_0x02e0[0x0300-0x02e0];	/* 0x02e0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -385,7 +389,8 @@ struct _lowcore
 	__u64	clock_comparator;		/* 0x0348 */
 	__u64	vdso_per_cpu_data;		/* 0x0350 */
 	__u64	machine_flags;			/* 0x0358 */
-	__u8	pad_0x0360[0x0380-0x0360];	/* 0x0360 */
+	__u64	ftrace_func;			/* 0x0360 */
+	__u8	pad_0x0368[0x0380-0x0368];	/* 0x0368 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0380 */
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index da01432e8f44..f63fe7b431ed 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,7 +9,7 @@
 #ifndef __S390_MMAN_H__
 #define __S390_MMAN_H__
 
-#include <asm-generic/mman.h>
+#include <asm-generic/mman-common.h>
 
 #define MAP_GROWSDOWN	0x0100		/* stack-like segment */
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 32e8f6aa4384..3e3594d01f83 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -150,7 +150,7 @@ void arch_alloc_page(struct page *page, int order);
 		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
 
 #define __HAVE_ARCH_GATE_AREA 1
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5caddd4f7bed..60a7b1a1702f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -112,12 +112,15 @@ extern char empty_zero_page[PAGE_SIZE];
  * effect, this also makes sure that 64 bit module code cannot be used
  * as system call address.
  */
+
+extern unsigned long VMALLOC_START;
+
 #ifndef __s390x__
-#define VMALLOC_START	0x78000000UL
+#define VMALLOC_SIZE	(96UL << 20)
 #define VMALLOC_END	0x7e000000UL
 #define VMEM_MAP_END	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_START	0x3e000000000UL
+#define VMALLOC_SIZE	(1UL << 30)
 #define VMALLOC_END	0x3e040000000UL
 #define VMEM_MAP_END	0x40000000000UL
 #endif /* __s390x__ */
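
VMALLOC_START changes from a compile-time constant into a variable, presumably computed during boot (the computation itself is not shown in this diff); the new VMALLOC_SIZE keeps the default layout identical to the old constants:

/* Arithmetic implied by the hunk above; values verifiable by hand:
 *   31 bit: 0x7e000000UL    - (96UL << 20) = 0x78000000UL    (old constant)
 *   64 bit: 0x3e040000000UL - (1UL << 30)  = 0x3e000000000UL (old constant)
 */
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
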
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
new file mode 100644
index 000000000000..781a9cf9b002
--- /dev/null
+++ b/arch/s390/include/asm/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_S390_SECCOMP_H
+#define _ASM_S390_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read	__NR_read
+#define __NR_seccomp_write	__NR_write
+#define __NR_seccomp_exit	__NR_exit
+#define __NR_seccomp_sigreturn	__NR_sigreturn
+
+#define __NR_seccomp_read_32	__NR_read
+#define __NR_seccomp_write_32	__NR_write
+#define __NR_seccomp_exit_32	__NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
index f6cfddb278cb..cdf5cb2fe03f 100644
--- a/arch/s390/include/asm/signal.h
+++ b/arch/s390/include/asm/signal.h
@@ -115,7 +115,7 @@ typedef unsigned long sigset_t;
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
-#include <asm-generic/signal.h>
+#include <asm-generic/signal-defs.h>
 
 #ifdef __KERNEL__
 struct old_sigaction {
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f3861b09ebb0..c9af0d19c7ab 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -122,8 +122,10 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 #define __raw_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
 extern void _raw_write_lock_wait(raw_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -134,6 +136,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		_raw_read_lock_wait(rw);
 }
 
+static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	old = rw->lock & 0x7fffffffU;
+	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+		_raw_read_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int old, cmp;
@@ -151,6 +161,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		_raw_write_lock_wait(rw);
 }
 
+static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+		_raw_write_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
@@ -172,9 +188,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
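
The out-of-line wait routines live in arch/s390/lib/spinlock.c (40 lines changed per the diffstat, not shown in this section). A hedged sketch of the pattern the new _flags variants presumably enable: spinning with the caller's saved interrupt state restored, then retaking the lock with interrupts off. The body below is illustrative, not the real implementation:

void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
{
	int count;

	while (1) {
		local_irq_restore(flags);	/* spin with caller's irq state */
		for (count = 1000; count > 0; count--) {
			if (rw->lock == 0)
				break;
			cpu_relax();
		}
		local_irq_disable();		/* the lock is taken irqs-off */
		if (rw->lock == 0 &&
		    _raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
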
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index 1f34580e67a7..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifndef __ASM_S390_SUSPEND_H
-#define __ASM_S390_SUSPEND_H
-
-#endif
-
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 2429b87eb28d..e0a73d3eb837 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
 #ifndef _ASM_SYSCALL_H
 #define _ASM_SYSCALL_H	1
 
+#include <linux/sched.h>
 #include <asm/ptrace.h>
 
 static inline long syscall_get_nr(struct task_struct *task,
diff --git a/arch/s390/include/asm/termios.h b/arch/s390/include/asm/termios.h
index 67f66278f533..bc3a35cefc96 100644
--- a/arch/s390/include/asm/termios.h
+++ b/arch/s390/include/asm/termios.h
@@ -60,7 +60,7 @@ struct termio {
 #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
 #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
 
-#include <asm-generic/termios.h>
+#include <asm-generic/termios-base.h>
 
 #endif	/* __KERNEL__ */
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 461f2abd2e6f..925bcc649035 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -83,14 +83,16 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags bit numbers
  */
-#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
-#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
+#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
+#define TIF_SECCOMP		10	/* secure computing */
+#define TIF_SYSCALL_FTRACE	11	/* ftrace syscall instrumentation */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
@@ -99,15 +101,17 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
 #define TIF_FREEZE		21	/* thread is freezing for suspend */
 
-#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
-#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_SYSCALL_FTRACE	(1<<TIF_SYSCALL_FTRACE)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 3dc3fc228812..04d6b95a89c6 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -28,12 +28,6 @@ typedef __signed__ long saddr_t;
  */
 #ifdef __KERNEL__
 
-#ifndef __s390x__
-#define BITS_PER_LONG 32
-#else
-#define BITS_PER_LONG 64
-#endif
-
 #ifndef __ASSEMBLY__
 
 typedef u64 dma64_addr_t;
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 0235970278f0..8377e91533d2 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -131,7 +131,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 
 #define put_user(x, ptr)					\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__put_user(x, ptr);					\
 })
 
@@ -180,7 +180,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 
 #define get_user(x, ptr)					\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__get_user(x, ptr);					\
 })
 
@@ -231,7 +231,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = __copy_to_user(to, from, n);
 	return n;
@@ -282,7 +282,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, from, n))
 		n = __copy_from_user(to, from, n);
 	else
@@ -299,7 +299,7 @@ __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__access_ok(from,n) && __access_ok(to,n))
 		n = __copy_in_user(to, from, n);
 	return n;
@@ -312,7 +312,7 @@ static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, src, 1))
 		res = uaccess.strncpy_from_user(count, src, dst);
 	return res;
@@ -321,7 +321,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
 static inline unsigned long
 strnlen_user(const char __user * src, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return uaccess.strnlen_user(n, src);
 }
 
@@ -354,7 +354,7 @@ __clear_user(void __user *to, unsigned long n)
 static inline unsigned long __must_check
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = uaccess.clear_user(n, to);
 	return n;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index f0f19e6ace6c..c80602d7c880 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -267,7 +267,9 @@
 #define __NR_epoll_create1	327
 #define __NR_preadv		328
 #define __NR_pwritev		329
-#define NR_syscalls 330
+#define __NR_rt_tgsigqueueinfo	330
+#define __NR_perf_counter_open	331
+#define NR_syscalls 332
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 228e3105ded7..c75ed43b1a18 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -3,8 +3,9 @@
 #
 
 ifdef CONFIG_FUNCTION_TRACER
-# Do not trace early boot code
+# Don't trace early setup code and tracing code
 CFLAGS_REMOVE_early.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
 #
@@ -22,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o \
 	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
 	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
-	    vdso.o vtime.o sysinfo.o nmi.o
+	    vdso.o vtime.o sysinfo.o nmi.o sclp.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -41,6 +42,8 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index fb38af6316bb..88a83366819f 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1823,3 +1823,20 @@ compat_sys_pwritev_wrapper:
 	llgfr	%r5,%r5			# u32
 	llgfr	%r6,%r6			# u32
 	jg	compat_sys_pwritev	# branch to system call
+
+	.globl	compat_sys_rt_tgsigqueueinfo_wrapper
+compat_sys_rt_tgsigqueueinfo_wrapper:
+	lgfr	%r2,%r2			# compat_pid_t
+	lgfr	%r3,%r3			# compat_pid_t
+	lgfr	%r4,%r4			# int
+	llgtr	%r5,%r5			# struct compat_siginfo *
+	jg	compat_sys_rt_tgsigqueueinfo	# branch to system call
+
+	.globl	sys_perf_counter_open_wrapper
+sys_perf_counter_open_wrapper:
+	llgtr	%r2,%r2			# const struct perf_counter_attr *
+	lgfr	%r3,%r3			# pid_t
+	lgfr	%r4,%r4			# int
+	lgfr	%r5,%r5			# int
+	llgfr	%r6,%r6			# unsigned long
+	jg	sys_perf_counter_open	# branch to system call
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cf09948faad6..fb263736826c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -410,5 +411,8 @@ void __init startup_init(void)
 	sclp_facilities_detect();
 	detect_memory_layout(memory_chunk);
 	S390_lowcore.machine_flags = machine_flags;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
+#endif
 	lockdep_on();
 }
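
Seeding S390_lowcore.ftrace_func this early matters because the code patched into every traced function (ftrace_disable_code, in kernel/ftrace.c later in this diff) fetches the tracer entry point from the absolute lowcore slot __LC_FTRACE_FUNC, reachable from any context without a base register. A hedged C equivalent of that patched call site:

/* Sketch only, not part of the patch: what "lg %r1,__LC_FTRACE_FUNC;
 * basr %r14,%r1" amounts to. */
static inline void call_lowcore_ftrace_func(void)
{
	void (*func)(void);

	func = (void (*)(void)) S390_lowcore.ftrace_func;
	func();
}
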
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f3e275934213..c4c80a22bc1f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,6 +53,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
+_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
+		_TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -265,7 +267,7 @@ sysc_do_restart:
 	sth	%r7,SP_SVCNR(%r15)
 	sll	%r7,2			# svc number *4
 	l	%r8,BASED(.Lsysc_table)
-	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+2(%r9),_TIF_SYSCALL
 	l	%r8,0(%r7,%r8)		# get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8		# call sys_xxxx
@@ -405,7 +407,7 @@ sysc_tracego:
 	basr	%r14,%r8		# call sys_xxx
 	st	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+2(%r9),_TIF_SYSCALL
 	bz	BASED(sysc_return)
 	l	%r1,BASED(.Ltrace_exit)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
@@ -1107,6 +1109,7 @@ cleanup_io_leave_insn:
 
 	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
+	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
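
Grouping the four syscall-work flags at bits 8-11 (see the thread_info.h hunk earlier) is what makes the new one-instruction test possible: on big-endian s390, byte 2 of the 32-bit flags word (byte 6 of the 64-bit word in entry64.S) holds bits 8-15, so a single TEST UNDER MASK with the shifted _TIF_SYSCALL mask covers trace, audit, seccomp and syscall-ftrace at once. Illustrative C, assuming a 32-bit big-endian flags word:

#include <stdint.h>

#define TIF_SYSCALL_TRACE	8
#define TIF_SYSCALL_AUDIT	9
#define TIF_SECCOMP		10
#define TIF_SYSCALL_FTRACE	11

static int syscall_work_pending(uint32_t ti_flags)
{
	/* on big-endian, the byte at __TI_flags+2 holds bits 8..15 */
	uint8_t byte = (ti_flags >> 8) & 0xff;
	uint8_t mask = (1 << (TIF_SYSCALL_TRACE - 8)) |
		       (1 << (TIF_SYSCALL_AUDIT - 8)) |
		       (1 << (TIF_SECCOMP - 8)) |
		       (1 << (TIF_SYSCALL_FTRACE - 8));

	return (byte & mask) != 0;	/* tm __TI_flags+2(%r9),_TIF_SYSCALL */
}
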
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 84a105838e03..f6618e9e15ef 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -56,6 +56,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
+_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
+		_TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -260,7 +262,7 @@ sysc_do_restart:
 	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
 sysc_noemu:
 #endif
-	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+6(%r9),_TIF_SYSCALL
 	lgf	%r8,0(%r7,%r10)		# load address of system call routine
 	jnz	sysc_tracesys
 	basr	%r14,%r8		# call sys_xxxx
@@ -391,7 +393,7 @@ sysc_tracego:
 	basr	%r14,%r8		# call sys_xxx
 	stg	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+6(%r9),_TIF_SYSCALL
 	jz	sysc_return
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	larl	%r14,sysc_return	# return point is sysc_return
@@ -1058,6 +1060,7 @@ cleanup_io_leave_insn:
 
 	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
+	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
new file mode 100644
index 000000000000..82ddfd3a75af
--- /dev/null
+++ b/arch/s390/kernel/ftrace.c
@@ -0,0 +1,260 @@
+/*
+ * Dynamic function tracer architecture backend.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <trace/syscall.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+void ftrace_disable_code(void);
+void ftrace_disable_return(void);
+void ftrace_call_code(void);
+void ftrace_nop_code(void);
+
+#define FTRACE_INSN_SIZE 4
+
+#ifdef CONFIG_64BIT
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	.word	0x0024\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	lg	%r14,8(15)\n"
+	"	lgr	%r0,%r0\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	stg	%r14,8(%r15)\n");
+
+#else /* CONFIG_64BIT */
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	l	%r14,4(%r15)\n"
+	"	j	0f\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	st	%r14,4(%r15)\n");
+
+#endif /* CONFIG_64BIT */
+
+static int ftrace_modify_code(unsigned long ip,
+			      void *old_code, int old_size,
+			      void *new_code, int new_size)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+
+	/*
+	 * Note: Due to modules code can disappear and change.
+	 * We need to protect against faulting as well as code
+	 * changing. We do this by using the probe_kernel_*
+	 * functions.
+	 * This however is just a simple sanity check.
+	 */
+	if (probe_kernel_read(replaced, (void *)ip, old_size))
+		return -EFAULT;
+	if (memcmp(replaced, old_code, old_size) != 0)
+		return -EINVAL;
+	if (probe_kernel_write((void *)ip, new_code, new_size))
+		return -EPERM;
+	return 0;
+}
+
+static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
+				   unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_disable_code, MCOUNT_INSN_SIZE);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	if (addr == MCOUNT_ADDR)
+		return ftrace_make_initial_nop(mod, rec, addr);
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_nop_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_nop_code, FTRACE_INSN_SIZE,
+				  ftrace_call_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	ftrace_dyn_func = (unsigned long)func;
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	*(unsigned long *)data = 0;
+	return 0;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location:
+ * The instruction there is branch relative on condition. The condition mask
+ * is either all ones (always branch aka disable ftrace_graph_caller) or all
+ * zeroes (nop aka enable ftrace_graph_caller).
+ * Instruction format for brc is a7m4xxxx where m is the condition mask.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa704;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa7f4;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - (ftrace_disable_return - ftrace_disable_code);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET_RET;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addresses
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+{
+	struct ftrace_graph_ent trace;
+
+	/* Nmi's are currently unsupported. */
+	if (unlikely(in_nmi()))
+		goto out;
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		goto out;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+		goto out;
+	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	/* Only trace if the calling function expects to. */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		goto out;
+	}
+	parent = (unsigned long)return_to_handler;
+out:
+	return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned int sys_call_table[];
+
+static struct syscall_metadata **syscalls_metadata;
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name + 3, str + 3))
+			return start;
+	}
+	return NULL;
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	struct syscall_metadata *meta;
+	int i;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto out;
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
+				    GFP_KERNEL);
+	if (!syscalls_metadata)
+		goto out;
+	for (i = 0; i < NR_syscalls; i++) {
+		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+out:
+	atomic_dec(&refs);
+}
+#endif
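
Decoding the two opcodes used by the graph-caller patching above (the a7m4xxxx format is stated in the kernel's own comment; the mask readings below follow from it):

/* 0xa7f4 = brc 15,...  mask 0xf, branch always taken -> jumps over the
 *          prepare_ftrace_return call: graph caller disabled.
 * 0xa704 = brc 0,...   mask 0x0, branch never taken (a nop) -> falls
 *          through into the call: graph caller enabled. */
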
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 22596d70fc2e..ec6882348520 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/head.S
- *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2009
  *
  * Author(s): Hartmut Penner <hp@de.ibm.com>
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -64,7 +62,7 @@ __HEAD
 	.org	0x100
 #
 # subroutine for loading from tape
-# Paramters:
+# Parameters:
 #  R1 = device number
 #  R2 = load address
 .Lloader:
@@ -479,27 +477,58 @@ startup:basr %r13,0			# get base
 	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
 	mvc	__LC_EXIT_TIMER(8),5f-.LPG0(%r13)
 #ifndef CONFIG_MARCH_G5
-	# check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
-	stidp	__LC_CPUID		# store cpuid
-	lhi	%r0,(3f-2f) / 2
-	la	%r1,2f-.LPG0(%r13)
-0:	clc	__LC_CPUID+4(2),0(%r1)
-	jne	3f
-	lpsw	1f-.LPG0(13)		# machine type not good enough, crash
+	# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
+	xc	__LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
+	stfl	__LC_STFL_FAC_LIST	# store facility list
+	tm	__LC_STFL_FAC_LIST,0x01	# stfle available ?
+	jz	0f
+	la	%r0,0
+	.insn	s,0xb2b00000,__LC_STFL_FAC_LIST	# store facility list extended
+0:	l	%r0,__LC_STFL_FAC_LIST
+	n	%r0,2f+8-.LPG0(%r13)
+	cl	%r0,2f+8-.LPG0(%r13)
+	jne	1f
+	l	%r0,__LC_STFL_FAC_LIST+4
+	n	%r0,2f+12-.LPG0(%r13)
+	cl	%r0,2f+12-.LPG0(%r13)
+	je	3f
+1:	l	%r15,.Lstack-.LPG0(%r13)
+	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
+	ahi	%r15,-96
+	la	%r2,.Lals_string-.LPG0(%r13)
+	l	%r3,.Lsclp_print-.LPG0(%r13)
+	basr	%r14,%r3
+	lpsw	2f-.LPG0(%r13)		# machine type not good enough, crash
+.Lals_string:
+	.asciz	"The Linux kernel requires more recent processor hardware"
+.Lsclp_print:
+	.long	_sclp_print_early
+.Lstack:
+	.long	init_thread_union
 	.align 16
-1:	.long	0x000a0000,0x00000000
-2:
+2:	.long	0x000a0000,0x8badcccc
+#if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_Z10)
-	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096
+	.long 0xc100efe3, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
-	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086
+	.long 0xc100efc3, 0x00000000
 #elif defined(CONFIG_MARCH_Z990)
-	.short 0x9672, 0x2064, 0x2066
+	.long 0xc0002000, 0x00000000
 #elif defined(CONFIG_MARCH_Z900)
-	.short 0x9672
+	.long 0xc0000000, 0x00000000
+#endif
+#else
+#if defined(CONFIG_MARCH_Z10)
+	.long 0x8100c880, 0x00000000
+#elif defined(CONFIG_MARCH_Z9_109)
+	.long 0x8100c880, 0x00000000
+#elif defined(CONFIG_MARCH_Z990)
+	.long 0x80002000, 0x00000000
+#elif defined(CONFIG_MARCH_Z900)
+	.long 0x80000000, 0x00000000
+#endif
 #endif
-3:	la	%r1,2(%r1)
-	brct	%r0,0b
+3:
 #endif
 
 	l	%r13,4f-.LPG0(%r13)
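
The rewritten startup check compares stored facility bits instead of CPU model numbers: the masks after label 2 encode, per CONFIG_MARCH level, which facilities must all be present. A hedged C rendering of the n/cl sequence above:

/* Sketch of the check: AND the stored facility list with the required
 * mask and verify no required bit was lost; on failure the code prints
 * via _sclp_print_early and loads a disabled-wait PSW whose address
 * part is the recognisable marker 0x8badcccc. */
static int facilities_sufficient(const unsigned int fac[2],
				 const unsigned int required[2])
{
	return (fac[0] & required[0]) == required[0] &&
	       (fac[1] & required[1]) == required[1];
}
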
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index a01cf0284db2..9bb2f6241d9f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -25,9 +25,9 @@
 #include <linux/preempt.h>
 #include <linux/stop_machine.h>
 #include <linux/kdebug.h>
+#include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
-#include <asm/uaccess.h>
 #include <linux/module.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -155,35 +155,8 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
 static int __kprobes swap_instruction(void *aref)
 {
 	struct ins_replace_args *args = aref;
-	u32 *addr;
-	u32 instr;
-	int err = -EFAULT;
 
-	/*
-	 * Text segment is read-only, hence we use stura to bypass dynamic
-	 * address translation to exchange the instruction. Since stura
-	 * always operates on four bytes, but we only want to exchange two
-	 * bytes do some calculations to get things right. In addition we
-	 * shall not cross any page boundaries (vmalloc area!) when writing
-	 * the new instruction.
-	 */
-	addr = (u32 *)((unsigned long)args->ptr & -4UL);
-	if ((unsigned long)args->ptr & 2)
-		instr = ((*addr) & 0xffff0000) | args->new;
-	else
-		instr = ((*addr) & 0x0000ffff) | args->new << 16;
-
-	asm volatile(
-		"	lra	%1,0(%1)\n"
-		"0:	stura	%2,%1\n"
-		"1:	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b,2b)
-		: "+d" (err)
-		: "a" (addr), "d" (instr)
-		: "memory", "cc");
-
-	return err;
+	return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
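
The removed stura sequence does not vanish from the tree: the diffstat lists 61 new lines in arch/s390/mm/maccess.c, presumably where the DAT-bypassing write now lives behind the generic probe_kernel_write() interface, so kprobes and the new ftrace backend share one primitive. Typical call pattern (patch_halfword() is illustrative, not in the patch):

static int patch_halfword(void *text_addr, unsigned short new_insn)
{
	/* pagefault-safe write into (read-only) kernel text;
	 * returns 0 on success, negative on failure */
	return probe_kernel_write(text_addr, &new_insn, sizeof(new_insn));
}
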
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 80641224a095..2a0a5e97ba8c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
  *
@@ -7,36 +7,64 @@
 
 #include <asm/asm-offsets.h>
 
-#ifndef CONFIG_64BIT
-.globl _mcount
+	.globl ftrace_stub
+ftrace_stub:
+	br	%r14
+
+#ifdef CONFIG_64BIT
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+	.globl _mcount
 _mcount:
-	stm	%r0,%r5,8(%r15)
-	st	%r14,56(%r15)
-	lr	%r1,%r15
-	ahi	%r15,-96
-	l	%r3,100(%r15)
-	la	%r2,0(%r14)
-	st	%r1,__SF_BACKCHAIN(%r15)
-	la	%r3,0(%r3)
-	bras	%r14,0f
-	.long	ftrace_trace_function
-0:	l	%r14,0(%r14)
-	l	%r14,0(%r14)
-	basr	%r14,%r14
-	ahi	%r15,96
-	lm	%r0,%r5,8(%r15)
-	l	%r14,56(%r15)
 	br	%r14
 
-.globl ftrace_stub
-ftrace_stub:
+	.globl ftrace_caller
+ftrace_caller:
+	larl	%r1,function_trace_stop
+	icm	%r1,0xf,0(%r1)
+	bnzr	%r14
+	stmg	%r2,%r5,32(%r15)
+	stg	%r14,112(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r14
+	lg	%r3,168(%r15)
+	larl	%r14,ftrace_dyn_func
+	lg	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_graph_caller().
+	j	0f
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+0:
+#endif
+	aghi	%r15,160
+	lmg	%r2,%r5,32(%r15)
+	lg	%r14,112(%r15)
 	br	%r14
 
-#else /* CONFIG_64BIT */
+	.data
+	.globl	ftrace_dyn_func
+ftrace_dyn_func:
+	.quad	ftrace_stub
+	.previous
+
+#else /* CONFIG_DYNAMIC_FTRACE */
 
-.globl _mcount
+	.globl _mcount
 _mcount:
-	stmg	%r0,%r5,16(%r15)
+	larl	%r1,function_trace_stop
+	icm	%r1,0xf,0(%r1)
+	bnzr	%r14
+	stmg	%r2,%r5,32(%r15)
 	stg	%r14,112(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-160
@@ -46,13 +74,143 @@ _mcount:
 	larl	%r14,ftrace_trace_function
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+#endif
 	aghi	%r15,160
-	lmg	%r0,%r5,16(%r15)
+	lmg	%r2,%r5,32(%r15)
 	lg	%r14,112(%r15)
 	br	%r14
 
-.globl ftrace_stub
-ftrace_stub:
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,160
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#else /* CONFIG_64BIT */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+	.globl _mcount
+_mcount:
+	br	%r14
+
+	.globl ftrace_caller
+ftrace_caller:
+	stm	%r2,%r5,16(%r15)
+	bras	%r1,2f
+0:	.long	ftrace_trace_function
+1:	.long	function_trace_stop
+2:	l	%r2,1b-0b(%r1)
+	icm	%r2,0xf,0(%r2)
+	jnz	3f
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	l	%r3,100(%r15)
+	la	%r2,0(%r14)
+	st	%r0,__SF_BACKCHAIN(%r15)
+	la	%r3,0(%r3)
+	l	%r14,0b-0b(%r1)
+	l	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_graph_caller().
+	j	1f
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+1:
+#endif
+	ahi	%r15,96
+	l	%r14,56(%r15)
+3:	lm	%r2,%r5,16(%r15)
 	br	%r14
 
+	.data
+	.globl	ftrace_dyn_func
+ftrace_dyn_func:
+	.long	ftrace_stub
+	.previous
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+	.globl _mcount
+_mcount:
+	stm	%r2,%r5,16(%r15)
+	bras	%r1,2f
+0:	.long	ftrace_trace_function
+1:	.long	function_trace_stop
+2:	l	%r2,1b-0b(%r1)
+	icm	%r2,0xf,0(%r2)
+	jnz	3f
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	l	%r3,100(%r15)
+	la	%r2,0(%r14)
+	st	%r0,__SF_BACKCHAIN(%r15)
+	la	%r3,0(%r3)
+	l	%r14,0b-0b(%r1)
+	l	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+#endif
+	ahi	%r15,96
+	l	%r14,56(%r15)
+3:	lm	%r2,%r5,16(%r15)
+	br	%r14
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stm	%r2,%r5,16(%r15)
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	st	%r0,__SF_BACKCHAIN(%r15)
+	bras	%r1,0f
+	.long	ftrace_return_to_handler
+0:	l	%r2,0b-0b(%r1)
+	basr	%r14,%r2
+	lr	%r14,%r2
+	ahi	%r15,96
+	lm	%r2,%r5,16(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif /* CONFIG_64BIT */
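
Every new _mcount/ftrace_caller variant above opens with the same three instructions (larl/icm/bnzr): test function_trace_stop and return straight to the traced function when tracing is frozen — the contract behind HAVE_FUNCTION_TRACE_MCOUNT_TEST selected in the Kconfig hunk. In hedged C terms:

extern int function_trace_stop;	/* set by the core tracer to freeze tracing */

/* Sketch only: bail out of the tracing stub immediately, registers
 * untouched, whenever tracing is frozen. */
void mcount_stub(void (*do_trace)(void))
{
	if (function_trace_stop)
		return;
	do_trace();	/* call the registered tracer / graph hooks */
}
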
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index eed4a00cb676..ab2e3ed28abc 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -56,8 +56,6 @@ void *module_alloc(unsigned long size)
 void module_free(struct module *mod, void *module_region)
 {
 	vfree(module_region);
-	/* FIXME: If module_region == mod->init_region, trim exception
-	   table entries. */
 }
 
 static void
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 28cf196ba775..015e27da40eb 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -16,7 +16,7 @@
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17#include <asm/smp.h> 17#include <asm/smp.h>
18#include <asm/etr.h> 18#include <asm/etr.h>
19#include <asm/cpu.h> 19#include <asm/cputime.h>
20#include <asm/nmi.h> 20#include <asm/nmi.h>
21#include <asm/crw.h> 21#include <asm/crw.h>
22 22
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a3acd8e60aff..355f7a30c3f1 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -32,6 +32,7 @@
32#include <linux/elfcore.h> 32#include <linux/elfcore.h>
33#include <linux/kernel_stat.h> 33#include <linux/kernel_stat.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <asm/compat.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/system.h> 38#include <asm/system.h>
@@ -204,7 +205,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
204 save_fp_regs(&p->thread.fp_regs); 205 save_fp_regs(&p->thread.fp_regs);
205 /* Set a new TLS ? */ 206 /* Set a new TLS ? */
206 if (clone_flags & CLONE_SETTLS) { 207 if (clone_flags & CLONE_SETTLS) {
207 if (test_thread_flag(TIF_31BIT)) { 208 if (is_compat_task()) {
208 p->thread.acrs[0] = (unsigned int) regs->gprs[6]; 209 p->thread.acrs[0] = (unsigned int) regs->gprs[6];
209 } else { 210 } else {
210 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32); 211 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 75c496f4f16d..490b39934d65 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -36,7 +36,9 @@
36#include <linux/elf.h> 36#include <linux/elf.h>
37#include <linux/regset.h> 37#include <linux/regset.h>
38#include <linux/tracehook.h> 38#include <linux/tracehook.h>
39 39#include <linux/seccomp.h>
40#include <trace/syscall.h>
41#include <asm/compat.h>
40#include <asm/segment.h> 42#include <asm/segment.h>
41#include <asm/page.h> 43#include <asm/page.h>
42#include <asm/pgtable.h> 44#include <asm/pgtable.h>
@@ -69,7 +71,7 @@ FixPerRegisters(struct task_struct *task)
69 if (per_info->single_step) { 71 if (per_info->single_step) {
70 per_info->control_regs.bits.starting_addr = 0; 72 per_info->control_regs.bits.starting_addr = 0;
71#ifdef CONFIG_COMPAT 73#ifdef CONFIG_COMPAT
72 if (test_thread_flag(TIF_31BIT)) 74 if (is_compat_task())
73 per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 75 per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
74 else 76 else
75#endif 77#endif
@@ -482,8 +484,7 @@ static int peek_user_compat(struct task_struct *child,
482{ 484{
483 __u32 tmp; 485 __u32 tmp;
484 486
485 if (!test_thread_flag(TIF_31BIT) || 487 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
486 (addr & 3) || addr > sizeof(struct user) - 3)
487 return -EIO; 488 return -EIO;
488 489
489 tmp = __peek_user_compat(child, addr); 490 tmp = __peek_user_compat(child, addr);
@@ -584,8 +585,7 @@ static int __poke_user_compat(struct task_struct *child,
584static int poke_user_compat(struct task_struct *child, 585static int poke_user_compat(struct task_struct *child,
585 addr_t addr, addr_t data) 586 addr_t addr, addr_t data)
586{ 587{
587 if (!test_thread_flag(TIF_31BIT) || 588 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
588 (addr & 3) || addr > sizeof(struct user32) - 3)
589 return -EIO; 589 return -EIO;
590 590
591 return __poke_user_compat(child, addr, data); 591 return __poke_user_compat(child, addr, data);
@@ -642,6 +642,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
642{ 642{
643 long ret; 643 long ret;
644 644
645 /* Do the secure computing check first. */
646 secure_computing(regs->gprs[2]);
647
645 /* 648 /*
646 * The sysc_tracesys code in entry.S stored the system 649 * The sysc_tracesys code in entry.S stored the system
647 * call number to gprs[2]. 650 * call number to gprs[2].
@@ -659,8 +662,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
659 ret = -1; 662 ret = -1;
660 } 663 }
661 664
665 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
666 ftrace_syscall_enter(regs);
667
662 if (unlikely(current->audit_context)) 668 if (unlikely(current->audit_context))
663 audit_syscall_entry(test_thread_flag(TIF_31BIT) ? 669 audit_syscall_entry(is_compat_task() ?
664 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, 670 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
665 regs->gprs[2], regs->orig_gpr2, 671 regs->gprs[2], regs->orig_gpr2,
666 regs->gprs[3], regs->gprs[4], 672 regs->gprs[3], regs->gprs[4],
@@ -674,6 +680,9 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
674 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), 680 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
675 regs->gprs[2]); 681 regs->gprs[2]);
676 682
683 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
684 ftrace_syscall_exit(regs);
685
677 if (test_thread_flag(TIF_SYSCALL_TRACE)) 686 if (test_thread_flag(TIF_SYSCALL_TRACE))
678 tracehook_report_syscall_exit(regs, 0); 687 tracehook_report_syscall_exit(regs, 0);
679} 688}
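Placing secure_computing() ahead of the tracehook, ftrace and audit calls means a seccomp violation terminates the task before the rejected syscall is ever reported to a tracer or written to the audit log.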
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index a0d2d55d7fb3..0de305b598ce 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -10,10 +10,11 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/ftrace.h>
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <linux/kernel_stat.h> 15#include <linux/kernel_stat.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
16#include <asm/cpu.h> 17#include <asm/cputime.h>
17#include <asm/lowcore.h> 18#include <asm/lowcore.h>
18#include <asm/s390_ext.h> 19#include <asm/s390_ext.h>
19#include <asm/irq_regs.h> 20#include <asm/irq_regs.h>
@@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
112 return 0; 113 return 0;
113} 114}
114 115
115void do_extint(struct pt_regs *regs, unsigned short code) 116void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
116{ 117{
117 ext_int_info_t *p; 118 ext_int_info_t *p;
118 int index; 119 int index;
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
new file mode 100644
index 000000000000..20639dfe0c42
--- /dev/null
+++ b/arch/s390/kernel/sclp.S
@@ -0,0 +1,327 @@
1/*
2 * Mini SCLP driver.
3 *
4 * Copyright IBM Corp. 2004,2009
5 *
6 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 */
10
11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
12LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
13LC_EXT_INT_CODE = 0x86 # addr of ext int code
14
15#
16# Subroutine which waits synchronously until either an external interruption
17# or a timeout occurs.
18#
19# Parameters:
20# R2 = 0 for no timeout, non-zero for timeout in (approximate) seconds
21#
22# Returns:
23# R2 = 0 on interrupt, 2 on timeout
24# R3 = external interruption parameter if R2=0
25#
26
27.section ".init.text","ax"
28
29_sclp_wait_int:
30 stm %r6,%r15,24(%r15) # save registers
31 basr %r13,0 # get base register
32.LbaseS1:
33 ahi %r15,-96 # create stack frame
34 la %r8,LC_EXT_NEW_PSW # register int handler
35 mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8)
36 mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13)
37 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
38 ltr %r2,%r2
39 jz .LsetctS1
40 ahi %r6,0x0800 # cr mask for clock int (cr0.52)
41 stck .LtimeS1-.LbaseS1(%r13) # initiate timeout
42 al %r2,.LtimeS1-.LbaseS1(%r13)
43 st %r2,.LtimeS1-.LbaseS1(%r13)
44 sckc .LtimeS1-.LbaseS1(%r13)
45
46.LsetctS1:
47 stctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # enable required interrupts
48 l %r0,.LctlS1-.LbaseS1(%r13)
49 lhi %r1,~(0x200 | 0x800) # clear old values
50 nr %r1,%r0
51 or %r1,%r6 # set new value
52 st %r1,.LctlS1-.LbaseS1(%r13)
53 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13)
54 st %r0,.LctlS1-.LbaseS1(%r13)
55 lhi %r2,2 # return code for timeout
56.LloopS1:
57 lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt
58.LwaitS1:
59 lh %r7,LC_EXT_INT_CODE
60 chi %r7,0x1004 # timeout?
61 je .LtimeoutS1
62 chi %r7,0x2401 # service int?
63 jne .LloopS1
64 sr %r2,%r2
65 l %r3,LC_EXT_INT_PARAM
66.LtimeoutS1:
67 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting
68 # restore old handler
69 mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13)
70 lm %r6,%r15,120(%r15) # restore registers
71 br %r14 # return to caller
72
73 .align 8
74.LoldpswS1:
75 .long 0, 0 # old ext int PSW
76.LextpswS1:
77 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
78.LwaitpswS1:
79 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
80.LtimeS1:
81 .quad 0 # current time
82.LctlS1:
83 .long 0 # CR0 contents
84
85#
86# Subroutine to synchronously issue a service call.
87#
88# Parameters:
89# R2 = command word
90# R3 = sccb address
91#
92# Returns:
93# R2 = 0 on success, 1 on failure
94# R3 = sccb response code if R2 = 0
95#
96
97_sclp_servc:
98 stm %r6,%r15,24(%r15) # save registers
99 ahi %r15,-96 # create stack frame
100 lr %r6,%r2 # save command word
101 lr %r7,%r3 # save sccb address
102.LretryS2:
103 lhi %r2,1 # error return code
104 .insn rre,0xb2200000,%r6,%r7 # servc
105 brc 1,.LendS2 # exit if not operational
106 brc 8,.LnotbusyS2 # go on if not busy
107 sr %r2,%r2 # wait until no longer busy
108 bras %r14,_sclp_wait_int
109 j .LretryS2 # retry
110.LnotbusyS2:
111 sr %r2,%r2 # wait until result
112 bras %r14,_sclp_wait_int
113 sr %r2,%r2
114 lh %r3,6(%r7)
115.LendS2:
116 lm %r6,%r15,120(%r15) # restore registers
117 br %r14
118
119#
120# Subroutine to set up the SCLP interface.
121#
122# Parameters:
123# R2 = 0 to activate, non-zero to deactivate
124#
125# Returns:
126# R2 = 0 on success, non-zero on failure
127#
128
129_sclp_setup:
130 stm %r6,%r15,24(%r15) # save registers
131 ahi %r15,-96 # create stack frame
132 basr %r13,0 # get base register
133.LbaseS3:
134 l %r6,.LsccbS0-.LbaseS3(%r13) # prepare init mask sccb
135 mvc 0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13)
136 ltr %r2,%r2 # initialization?
137 jz .LdoinitS3 # go ahead
138 # clear masks
139 xc .LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6)
140.LdoinitS3:
141 l %r2,.LwritemaskS3-.LbaseS3(%r13)# get command word
142 lr %r3,%r6 # get sccb address
143 bras %r14,_sclp_servc # issue service call
144 ltr %r2,%r2 # servc successful?
145 jnz .LerrorS3
146 chi %r3,0x20 # write mask successful?
147 jne .LerrorS3
148 # check masks
149 la %r2,.LinitmaskS3-.LinitsccbS3(%r6)
150 l %r1,0(%r2) # receive mask ok?
151 n %r1,12(%r2)
152 cl %r1,0(%r2)
153 jne .LerrorS3
154 l %r1,4(%r2) # send mask ok?
155 n %r1,8(%r2)
156 cl %r1,4(%r2)
157 sr %r2,%r2
158 je .LendS3
159.LerrorS3:
160 lhi %r2,1 # error return code
161.LendS3:
162 lm %r6,%r15,120(%r15) # restore registers
163 br %r14
164.LwritemaskS3:
165 .long 0x00780005 # SCLP command for write mask
166.LinitsccbS3:
167 .word .LinitendS3-.LinitsccbS3
168 .byte 0,0,0,0
169 .word 0
170 .word 0
171 .word 4
172.LinitmaskS3:
173 .long 0x80000000
174 .long 0x40000000
175 .long 0
176 .long 0
177.LinitendS3:
178
179#
180# Subroutine which prints a given text to the SCLP console.
181#
182# Parameters:
183# R2 = address of null-terminated ASCII text
184#
185# Returns:
186# R2 = 0 on success, 1 on failure
187#
188
189_sclp_print:
190 stm %r6,%r15,24(%r15) # save registers
191 ahi %r15,-96 # create stack frame
192 basr %r13,0 # get base register
193.LbaseS4:
194 l %r8,.LsccbS0-.LbaseS4(%r13) # prepare write data sccb
195 mvc 0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13)
196 la %r7,.LmtoS4-.LwritesccbS4(%r8) # current mto addr
197 sr %r0,%r0
198 l %r10,.Lascebc-.LbaseS4(%r13) # address of translation table
199.LinitmtoS4:
200 # initialize mto
201 mvc 0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13)
202 lhi %r6,.LmtoendS4-.LmtoS4 # current mto length
203.LloopS4:
204 ic %r0,0(%r2) # get character
205 ahi %r2,1
206 ltr %r0,%r0 # end of string?
207 jz .LfinalizemtoS4
208 chi %r0,0x15 # end of line (NL)?
209 jz .LfinalizemtoS4
210 stc %r0,0(%r6,%r7) # copy to mto
211 la %r11,0(%r6,%r7)
212 tr 0(1,%r11),0(%r10) # translate to EBCDIC
213 ahi %r6,1
214 j .LloopS4
215.LfinalizemtoS4:
216 sth %r6,0(%r7) # update mto length
217 lh %r9,.LmdbS4-.LwritesccbS4(%r8) # update mdb length
218 ar %r9,%r6
219 sth %r9,.LmdbS4-.LwritesccbS4(%r8)
220 lh %r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length
221 ar %r9,%r6
222 sth %r9,.LevbufS4-.LwritesccbS4(%r8)
223 lh %r9,0(%r8) # update sccb length
224 ar %r9,%r6
225 sth %r9,0(%r8)
226 ar %r7,%r6 # update current mto address
227 ltr %r0,%r0 # more characters?
228 jnz .LinitmtoS4
229 l %r2,.LwritedataS4-.LbaseS4(%r13)# write data
230 lr %r3,%r8
231 bras %r14,_sclp_servc
232 ltr %r2,%r2 # servc successful?
233 jnz .LendS4
234 chi %r3,0x20 # write data successful?
235 je .LendS4
236 lhi %r2,1 # error return code
237.LendS4:
238 lm %r6,%r15,120(%r15) # restore registers
239 br %r14
240
241#
242# Function which prints a given text to the SCLP console.
243#
244# Parameters:
245# R2 = address of null-terminated ASCII text
246#
247# Returns:
248# R2 = 0 on success, 1 on failure
249#
250
251 .globl _sclp_print_early
252_sclp_print_early:
253 stm %r6,%r15,24(%r15) # save registers
254 ahi %r15,-96 # create stack frame
255 lr %r10,%r2 # save string pointer
256 lhi %r2,0
257 bras %r14,_sclp_setup # enable console
258 ltr %r2,%r2
259 jnz .LendS5
260 lr %r2,%r10
261 bras %r14,_sclp_print # print string
262 ltr %r2,%r2
263 jnz .LendS5
264 lhi %r2,1
265 bras %r14,_sclp_setup # disable console
266.LendS5:
267 lm %r6,%r15,120(%r15) # restore registers
268 br %r14
269
270.LwritedataS4:
271 .long 0x00760005 # SCLP command for write data
272.LwritesccbS4:
273 # sccb
274 .word .LmtoS4-.LwritesccbS4
275 .byte 0
276 .byte 0,0,0
277 .word 0
278
279 # evbuf
280.LevbufS4:
281 .word .LmtoS4-.LevbufS4
282 .byte 0x02
283 .byte 0
284 .word 0
285
286.LmdbS4:
287 # mdb
288 .word .LmtoS4-.LmdbS4
289 .word 1
290 .long 0xd4c4c240
291 .long 1
292
293 # go
294.LgoS4:
295 .word .LmtoS4-.LgoS4
296 .word 1
297 .long 0
298 .byte 0,0,0,0,0,0,0,0
299 .byte 0,0,0
300 .byte 0
301 .byte 0,0,0,0,0,0,0
302 .byte 0
303 .word 0
304 .byte 0,0,0,0,0,0,0,0,0,0
305 .byte 0,0,0,0,0,0,0,0
306 .byte 0,0,0,0,0,0,0,0
307
308.LmtoS4:
309 .word .LmtoendS4-.LmtoS4
310 .word 4
311 .word 0x1000
312 .byte 0
313 .byte 0,0,0
314.LmtoendS4:
315
316 # Global constants
317.LsccbS0:
318 .long _sclp_work_area
319.Lascebc:
320 .long _ascebc
321.previous
322
323.section ".init.data","a"
324 .balign 4096
325_sclp_work_area:
326 .fill 4096
327.previous
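All of these routines follow the normal s390 C linkage (first argument in %r2, result in %r2), so the one exported entry point can be driven from early C code. A hedged usage sketch follows; the prototype and wrapper are assumptions for illustration, not declarations made by this patch.

	/* assumed C prototype for the assembly entry point above */
	extern int _sclp_print_early(const char *ascii_text);

	static int __init sketch_early_warning(void)
	{
		/* enables the console, prints, disables it again;
		 * returns 0 on success, 1 on failure */
		return _sclp_print_early("early boot: something went wrong");
	}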
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7402b6a39ead..9717717c6fea 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,7 @@
42#include <linux/ctype.h> 42#include <linux/ctype.h>
43#include <linux/reboot.h> 43#include <linux/reboot.h>
44#include <linux/topology.h> 44#include <linux/topology.h>
45#include <linux/ftrace.h>
45 46
46#include <asm/ipl.h> 47#include <asm/ipl.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
@@ -442,6 +443,7 @@ setup_lowcore(void)
442 lc->steal_timer = S390_lowcore.steal_timer; 443 lc->steal_timer = S390_lowcore.steal_timer;
443 lc->last_update_timer = S390_lowcore.last_update_timer; 444 lc->last_update_timer = S390_lowcore.last_update_timer;
444 lc->last_update_clock = S390_lowcore.last_update_clock; 445 lc->last_update_clock = S390_lowcore.last_update_clock;
446 lc->ftrace_func = S390_lowcore.ftrace_func;
445 set_prefix((u32)(unsigned long) lc); 447 set_prefix((u32)(unsigned long) lc);
446 lowcore_ptr[0] = lc; 448 lowcore_ptr[0] = lc;
447} 449}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 3cf74c3ccb69..062bd64e65fa 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -26,6 +26,7 @@
26#include <linux/binfmts.h> 26#include <linux/binfmts.h>
27#include <linux/tracehook.h> 27#include <linux/tracehook.h>
28#include <linux/syscalls.h> 28#include <linux/syscalls.h>
29#include <linux/compat.h>
29#include <asm/ucontext.h> 30#include <asm/ucontext.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/lowcore.h> 32#include <asm/lowcore.h>
@@ -482,7 +483,7 @@ void do_signal(struct pt_regs *regs)
482 /* Whee! Actually deliver the signal. */ 483 /* Whee! Actually deliver the signal. */
483 int ret; 484 int ret;
484#ifdef CONFIG_COMPAT 485#ifdef CONFIG_COMPAT
485 if (test_thread_flag(TIF_31BIT)) { 486 if (is_compat_task()) {
486 ret = handle_signal32(signr, &ka, &info, oldset, regs); 487 ret = handle_signal32(signr, &ka, &info, oldset, regs);
487 } 488 }
488 else 489 else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a985a3ba4401..cc8c484984e3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -47,7 +47,7 @@
47#include <asm/timer.h> 47#include <asm/timer.h>
48#include <asm/lowcore.h> 48#include <asm/lowcore.h>
49#include <asm/sclp.h> 49#include <asm/sclp.h>
50#include <asm/cpu.h> 50#include <asm/cputime.h>
51#include <asm/vdso.h> 51#include <asm/vdso.h>
52#include "entry.h" 52#include "entry.h"
53 53
@@ -572,6 +572,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
572 cpu_lowcore->cpu_nr = cpu; 572 cpu_lowcore->cpu_nr = cpu;
573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
575 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
575 eieio(); 576 eieio();
576 577
577 while (signal_processor(cpu, sigp_restart) == sigp_busy) 578 while (signal_processor(cpu, sigp_restart) == sigp_busy)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 2c7739fe70b1..ad1acd200385 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -338,3 +338,5 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
338SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) 338SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
339SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) 339SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
340SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 340SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
341SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
342SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ef596d020573..215330a2c128 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
70/* 70/*
71 * Scheduler clock - returns current time in nanosec units. 71 * Scheduler clock - returns current time in nanosec units.
72 */ 72 */
73unsigned long long sched_clock(void) 73unsigned long long notrace sched_clock(void)
74{ 74{
75 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; 75 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
76} 76}
@@ -95,12 +95,6 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
95 xtime->tv_nsec = ((todval * 1000) >> 12); 95 xtime->tv_nsec = ((todval * 1000) >> 12);
96} 96}
97 97
98#ifdef CONFIG_PROFILING
99#define s390_do_profile() profile_tick(CPU_PROFILING)
100#else
101#define s390_do_profile() do { ; } while(0)
102#endif /* CONFIG_PROFILING */
103
104void clock_comparator_work(void) 98void clock_comparator_work(void)
105{ 99{
106 struct clock_event_device *cd; 100 struct clock_event_device *cd;
@@ -109,7 +103,6 @@ void clock_comparator_work(void)
109 set_clock_comparator(S390_lowcore.clock_comparator); 103 set_clock_comparator(S390_lowcore.clock_comparator);
110 cd = &__get_cpu_var(comparators); 104 cd = &__get_cpu_var(comparators);
111 cd->event_handler(cd); 105 cd->event_handler(cd);
112 s390_do_profile();
113} 106}
114 107
115/* 108/*
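A note on the conversion used in sched_clock() above (and in the KVM sltime calculation later in this series): the s390 TOD clock ticks 4096 times per microsecond, so one TOD unit is 1000/4096 ns, and nanoseconds = tod * 1000 / 4096 = tod * 125 / 512, which is exactly the (tod * 125) >> 9 in the code.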
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 89b2e7f1b7a9..45e1708b70fd 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -22,7 +22,7 @@
22#include <linux/elf.h> 22#include <linux/elf.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25 25#include <linux/compat.h>
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/processor.h> 28#include <asm/processor.h>
@@ -53,8 +53,19 @@ unsigned int __read_mostly vdso_enabled = 1;
53 53
54static int __init vdso_setup(char *s) 54static int __init vdso_setup(char *s)
55{ 55{
56 vdso_enabled = simple_strtoul(s, NULL, 0); 56 unsigned long val;
57 return 1; 57 int rc;
58
59 rc = 0;
60 if (strncmp(s, "on", 3) == 0)
61 vdso_enabled = 1;
62 else if (strncmp(s, "off", 4) == 0)
63 vdso_enabled = 0;
64 else {
65 rc = strict_strtoul(s, 0, &val);
66 vdso_enabled = rc ? 0 : !!val;
67 }
68 return !rc;
58} 69}
59__setup("vdso=", vdso_setup); 70__setup("vdso=", vdso_setup);
60 71
@@ -203,7 +214,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
203 vdso_pagelist = vdso64_pagelist; 214 vdso_pagelist = vdso64_pagelist;
204 vdso_pages = vdso64_pages; 215 vdso_pages = vdso64_pages;
205#ifdef CONFIG_COMPAT 216#ifdef CONFIG_COMPAT
206 if (test_thread_flag(TIF_31BIT)) { 217 if (is_compat_task()) {
207 vdso_pagelist = vdso32_pagelist; 218 vdso_pagelist = vdso32_pagelist;
208 vdso_pages = vdso32_pages; 219 vdso_pages = vdso32_pages;
209 } 220 }
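The reworked vdso_setup() accepts keywords as well as a number: vdso=on and vdso=1 enable the vdso, vdso=off and vdso=0 disable it. If strict_strtoul() cannot parse the value, the vdso is left disabled and the handler returns 0, flagging the parameter as unrecognized.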
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89399b8756c2..a53db23ee092 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -34,6 +34,7 @@ SECTIONS
34 SCHED_TEXT 34 SCHED_TEXT
35 LOCK_TEXT 35 LOCK_TEXT
36 KPROBES_TEXT 36 KPROBES_TEXT
37 IRQENTRY_TEXT
37 *(.fixup) 38 *(.fixup)
38 *(.gnu.warning) 39 *(.gnu.warning)
39 } :text = 0x0700 40 } :text = 0x0700
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c87f59bd8246..c8eb7255332b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -23,7 +23,7 @@
23#include <asm/s390_ext.h> 23#include <asm/s390_ext.h>
24#include <asm/timer.h> 24#include <asm/timer.h>
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26#include <asm/cpu.h> 26#include <asm/cputime.h>
27 27
28static ext_int_info_t ext_int_info_timer; 28static ext_int_info_t ext_int_info_timer;
29 29
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9d19803111ba..98997ccba501 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -154,17 +154,25 @@ static int handle_stop(struct kvm_vcpu *vcpu)
154static int handle_validity(struct kvm_vcpu *vcpu) 154static int handle_validity(struct kvm_vcpu *vcpu)
155{ 155{
156 int viwhy = vcpu->arch.sie_block->ipb >> 16; 156 int viwhy = vcpu->arch.sie_block->ipb >> 16;
157 int rc;
158
157 vcpu->stat.exit_validity++; 159 vcpu->stat.exit_validity++;
158 if (viwhy == 0x37) { 160 if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
159 fault_in_pages_writeable((char __user *) 161 <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
160 vcpu->kvm->arch.guest_origin + 162 rc = fault_in_pages_writeable((char __user *)
161 vcpu->arch.sie_block->prefix, 163 vcpu->kvm->arch.guest_origin +
162 PAGE_SIZE); 164 vcpu->arch.sie_block->prefix,
163 return 0; 165 2*PAGE_SIZE);
164 } 166 if (rc)
165 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", 167 /* user will receive sigsegv, exit to user */
166 viwhy); 168 rc = -ENOTSUPP;
167 return -ENOTSUPP; 169 } else
170 rc = -ENOTSUPP;
171
172 if (rc)
173 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
174 viwhy);
175 return rc;
168} 176}
169 177
170static int handle_instruction(struct kvm_vcpu *vcpu) 178static int handle_instruction(struct kvm_vcpu *vcpu)
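Validity intercept code 0x37 means the guest's prefix pages are not resident. The reworked handler first checks that the prefix register still points inside registered guest memory, then faults in both pages of the 8K prefix area (hence 2*PAGE_SIZE) instead of one, and reports any failure as an unhandled validity intercept so the exit reaches userspace.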
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0189356fe209..f04f5301b1b4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -12,6 +12,8 @@
12 12
13#include <asm/lowcore.h> 13#include <asm/lowcore.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15#include <linux/hrtimer.h>
16#include <linux/interrupt.h>
15#include <linux/kvm_host.h> 17#include <linux/kvm_host.h>
16#include <linux/signal.h> 18#include <linux/signal.h>
17#include "kvm-s390.h" 19#include "kvm-s390.h"
@@ -299,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
299 } 301 }
300 302
301 if ((!rc) && atomic_read(&fi->active)) { 303 if ((!rc) && atomic_read(&fi->active)) {
302 spin_lock_bh(&fi->lock); 304 spin_lock(&fi->lock);
303 list_for_each_entry(inti, &fi->list, list) 305 list_for_each_entry(inti, &fi->list, list)
304 if (__interrupt_is_deliverable(vcpu, inti)) { 306 if (__interrupt_is_deliverable(vcpu, inti)) {
305 rc = 1; 307 rc = 1;
306 break; 308 break;
307 } 309 }
308 spin_unlock_bh(&fi->lock); 310 spin_unlock(&fi->lock);
309 } 311 }
310 312
311 if ((!rc) && (vcpu->arch.sie_block->ckc < 313 if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -318,6 +320,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
318 return rc; 320 return rc;
319} 321}
320 322
323int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
324{
325 /* do real check here */
326 return 1;
327}
328
321int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 329int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
322{ 330{
323 return 0; 331 return 0;
@@ -355,14 +363,12 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
355 return 0; 363 return 0;
356 } 364 }
357 365
358 sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1; 366 sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
359 367
360 vcpu->arch.ckc_timer.expires = jiffies + sltime; 368 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
361 369 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
362 add_timer(&vcpu->arch.ckc_timer);
363 VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
364no_timer: 370no_timer:
365 spin_lock_bh(&vcpu->arch.local_int.float_int->lock); 371 spin_lock(&vcpu->arch.local_int.float_int->lock);
366 spin_lock_bh(&vcpu->arch.local_int.lock); 372 spin_lock_bh(&vcpu->arch.local_int.lock);
367 add_wait_queue(&vcpu->arch.local_int.wq, &wait); 373 add_wait_queue(&vcpu->arch.local_int.wq, &wait);
368 while (list_empty(&vcpu->arch.local_int.list) && 374 while (list_empty(&vcpu->arch.local_int.list) &&
@@ -371,33 +377,46 @@ no_timer:
371 !signal_pending(current)) { 377 !signal_pending(current)) {
372 set_current_state(TASK_INTERRUPTIBLE); 378 set_current_state(TASK_INTERRUPTIBLE);
373 spin_unlock_bh(&vcpu->arch.local_int.lock); 379 spin_unlock_bh(&vcpu->arch.local_int.lock);
374 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock); 380 spin_unlock(&vcpu->arch.local_int.float_int->lock);
375 vcpu_put(vcpu); 381 vcpu_put(vcpu);
376 schedule(); 382 schedule();
377 vcpu_load(vcpu); 383 vcpu_load(vcpu);
378 spin_lock_bh(&vcpu->arch.local_int.float_int->lock); 384 spin_lock(&vcpu->arch.local_int.float_int->lock);
379 spin_lock_bh(&vcpu->arch.local_int.lock); 385 spin_lock_bh(&vcpu->arch.local_int.lock);
380 } 386 }
381 __unset_cpu_idle(vcpu); 387 __unset_cpu_idle(vcpu);
382 __set_current_state(TASK_RUNNING); 388 __set_current_state(TASK_RUNNING);
383 remove_wait_queue(&vcpu->wq, &wait); 389 remove_wait_queue(&vcpu->wq, &wait);
384 spin_unlock_bh(&vcpu->arch.local_int.lock); 390 spin_unlock_bh(&vcpu->arch.local_int.lock);
385 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock); 391 spin_unlock(&vcpu->arch.local_int.float_int->lock);
386 del_timer(&vcpu->arch.ckc_timer); 392 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
387 return 0; 393 return 0;
388} 394}
389 395
390void kvm_s390_idle_wakeup(unsigned long data) 396void kvm_s390_tasklet(unsigned long parm)
391{ 397{
392 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; 398 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
393 399
394 spin_lock_bh(&vcpu->arch.local_int.lock); 400 spin_lock(&vcpu->arch.local_int.lock);
395 vcpu->arch.local_int.timer_due = 1; 401 vcpu->arch.local_int.timer_due = 1;
396 if (waitqueue_active(&vcpu->arch.local_int.wq)) 402 if (waitqueue_active(&vcpu->arch.local_int.wq))
397 wake_up_interruptible(&vcpu->arch.local_int.wq); 403 wake_up_interruptible(&vcpu->arch.local_int.wq);
398 spin_unlock_bh(&vcpu->arch.local_int.lock); 404 spin_unlock(&vcpu->arch.local_int.lock);
399} 405}
400 406
407/*
408 * low level hrtimer wake routine. Because this runs in hardirq context
409 * we schedule a tasklet to do the real work.
410 */
411enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
412{
413 struct kvm_vcpu *vcpu;
414
415 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
416 tasklet_schedule(&vcpu->arch.tasklet);
417
418 return HRTIMER_NORESTART;
419}
401 420
402void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 421void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
403{ 422{
@@ -436,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
436 if (atomic_read(&fi->active)) { 455 if (atomic_read(&fi->active)) {
437 do { 456 do {
438 deliver = 0; 457 deliver = 0;
439 spin_lock_bh(&fi->lock); 458 spin_lock(&fi->lock);
440 list_for_each_entry_safe(inti, n, &fi->list, list) { 459 list_for_each_entry_safe(inti, n, &fi->list, list) {
441 if (__interrupt_is_deliverable(vcpu, inti)) { 460 if (__interrupt_is_deliverable(vcpu, inti)) {
442 list_del(&inti->list); 461 list_del(&inti->list);
@@ -447,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
447 } 466 }
448 if (list_empty(&fi->list)) 467 if (list_empty(&fi->list))
449 atomic_set(&fi->active, 0); 468 atomic_set(&fi->active, 0);
450 spin_unlock_bh(&fi->lock); 469 spin_unlock(&fi->lock);
451 if (deliver) { 470 if (deliver) {
452 __do_deliver_interrupt(vcpu, inti); 471 __do_deliver_interrupt(vcpu, inti);
453 kfree(inti); 472 kfree(inti);
@@ -512,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
512 531
513 mutex_lock(&kvm->lock); 532 mutex_lock(&kvm->lock);
514 fi = &kvm->arch.float_int; 533 fi = &kvm->arch.float_int;
515 spin_lock_bh(&fi->lock); 534 spin_lock(&fi->lock);
516 list_add_tail(&inti->list, &fi->list); 535 list_add_tail(&inti->list, &fi->list);
517 atomic_set(&fi->active, 1); 536 atomic_set(&fi->active, 1);
518 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); 537 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -529,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
529 if (waitqueue_active(&li->wq)) 548 if (waitqueue_active(&li->wq))
530 wake_up_interruptible(&li->wq); 549 wake_up_interruptible(&li->wq);
531 spin_unlock_bh(&li->lock); 550 spin_unlock_bh(&li->lock);
532 spin_unlock_bh(&fi->lock); 551 spin_unlock(&fi->lock);
533 mutex_unlock(&kvm->lock); 552 mutex_unlock(&kvm->lock);
534 return 0; 553 return 0;
535} 554}
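The two wakeup functions above implement a common split: the hrtimer callback fires in hardirq context, so it only schedules a tasklet, and the tasklet performs the actual lock-taking wakeup in softirq context. A self-contained sketch of the pattern, with illustrative names rather than the patch's own:

	#include <linux/hrtimer.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>

	struct sketch_waiter {
		struct hrtimer timer;
		struct tasklet_struct tasklet;
	};

	/* softirq context: safe to take wait-queue locks and wake sleepers */
	static void sketch_do_wakeup(unsigned long data)
	{
		/* struct sketch_waiter *w = (struct sketch_waiter *) data; ... */
	}

	/* hardirq context: defer everything to the tasklet */
	static enum hrtimer_restart sketch_timer_fn(struct hrtimer *t)
	{
		struct sketch_waiter *w = container_of(t, struct sketch_waiter, timer);

		tasklet_schedule(&w->tasklet);
		return HRTIMER_NORESTART;
	}

	static void sketch_waiter_init(struct sketch_waiter *w)
	{
		hrtimer_init(&w->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
		w->timer.function = sketch_timer_fn;
		tasklet_init(&w->tasklet, sketch_do_wakeup, (unsigned long) w);
	}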
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c9..c18b21d6991c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -15,6 +15,7 @@
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/hrtimer.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/kvm.h> 20#include <linux/kvm.h>
20#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
@@ -195,6 +196,10 @@ out_nokvm:
195void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 196void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
196{ 197{
197 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 198 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
199 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
200 (__u64) vcpu->arch.sie_block)
201 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
202 smp_mb();
198 free_page((unsigned long)(vcpu->arch.sie_block)); 203 free_page((unsigned long)(vcpu->arch.sie_block));
199 kvm_vcpu_uninit(vcpu); 204 kvm_vcpu_uninit(vcpu);
200 kfree(vcpu); 205 kfree(vcpu);
@@ -283,8 +288,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
283 vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin; 288 vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
284 vcpu->arch.sie_block->ecb = 2; 289 vcpu->arch.sie_block->ecb = 2;
285 vcpu->arch.sie_block->eca = 0xC1002001U; 290 vcpu->arch.sie_block->eca = 0xC1002001U;
286 setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, 291 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
287 (unsigned long) vcpu); 292 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
293 (unsigned long) vcpu);
294 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
288 get_cpu_id(&vcpu->arch.cpu_id); 295 get_cpu_id(&vcpu->arch.cpu_id);
289 vcpu->arch.cpu_id.version = 0xff; 296 vcpu->arch.cpu_id.version = 0xff;
290 return 0; 297 return 0;
@@ -307,19 +314,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
307 314
308 vcpu->arch.sie_block->icpua = id; 315 vcpu->arch.sie_block->icpua = id;
309 BUG_ON(!kvm->arch.sca); 316 BUG_ON(!kvm->arch.sca);
310 BUG_ON(kvm->arch.sca->cpu[id].sda); 317 if (!kvm->arch.sca->cpu[id].sda)
311 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; 318 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
319 else
320 BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
312 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); 321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
313 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 322 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
314 323
315 spin_lock_init(&vcpu->arch.local_int.lock); 324 spin_lock_init(&vcpu->arch.local_int.lock);
316 INIT_LIST_HEAD(&vcpu->arch.local_int.list); 325 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
317 vcpu->arch.local_int.float_int = &kvm->arch.float_int; 326 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
318 spin_lock_bh(&kvm->arch.float_int.lock); 327 spin_lock(&kvm->arch.float_int.lock);
319 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int; 328 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
320 init_waitqueue_head(&vcpu->arch.local_int.wq); 329 init_waitqueue_head(&vcpu->arch.local_int.wq);
321 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 330 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
322 spin_unlock_bh(&kvm->arch.float_int.lock); 331 spin_unlock(&kvm->arch.float_int.lock);
323 332
324 rc = kvm_vcpu_init(vcpu, kvm, id); 333 rc = kvm_vcpu_init(vcpu, kvm, id);
325 if (rc) 334 if (rc)
@@ -478,6 +487,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
478 487
479 vcpu_load(vcpu); 488 vcpu_load(vcpu);
480 489
490 /* verify that memory has been registered */
491 if (!vcpu->kvm->arch.guest_memsize) {
492 vcpu_put(vcpu);
493 return -EINVAL;
494 }
495
481 if (vcpu->sigset_active) 496 if (vcpu->sigset_active)
482 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 497 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
483 498
@@ -497,7 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
497 BUG(); 512 BUG();
498 } 513 }
499 514
500 might_sleep(); 515 might_fault();
501 516
502 do { 517 do {
503 __vcpu_run(vcpu); 518 __vcpu_run(vcpu);
@@ -657,6 +672,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
657 struct kvm_memory_slot old, 672 struct kvm_memory_slot old,
658 int user_alloc) 673 int user_alloc)
659{ 674{
675 int i;
676
660 /* A few sanity checks. We can have exactly one memory slot which has 677 /* A few sanity checks. We can have exactly one memory slot which has
661 to start at guest virtual zero and which has to be located at a 678 to start at guest virtual zero and which has to be located at a
662 page boundary in userland and which has to end at a page boundary. 679 page boundary in userland and which has to end at a page boundary.
@@ -664,7 +681,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
664 vmas. It is okay to mmap() and munmap() stuff in this slot after 681 vmas. It is okay to mmap() and munmap() stuff in this slot after
665 doing this call at any time */ 682 doing this call at any time */
666 683
667 if (mem->slot) 684 if (mem->slot || kvm->arch.guest_memsize)
668 return -EINVAL; 685 return -EINVAL;
669 686
670 if (mem->guest_phys_addr) 687 if (mem->guest_phys_addr)
@@ -676,15 +693,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
676 if (mem->memory_size & (PAGE_SIZE - 1)) 693 if (mem->memory_size & (PAGE_SIZE - 1))
677 return -EINVAL; 694 return -EINVAL;
678 695
696 if (!user_alloc)
697 return -EINVAL;
698
699 /* lock all vcpus */
700 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
701 if (!kvm->vcpus[i])
702 continue;
703 if (!mutex_trylock(&kvm->vcpus[i]->mutex))
704 goto fail_out;
705 }
706
679 kvm->arch.guest_origin = mem->userspace_addr; 707 kvm->arch.guest_origin = mem->userspace_addr;
680 kvm->arch.guest_memsize = mem->memory_size; 708 kvm->arch.guest_memsize = mem->memory_size;
681 709
682 /* FIXME: we do want to interrupt running CPUs and update their memory 710 /* update sie control blocks, and unlock all vcpus */
683 configuration now to avoid race conditions. But hey, changing the 711 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
684 memory layout while virtual CPUs are running is usually bad 712 if (kvm->vcpus[i]) {
685 programming practice. */ 713 kvm->vcpus[i]->arch.sie_block->gmsor =
714 kvm->arch.guest_origin;
715 kvm->vcpus[i]->arch.sie_block->gmslm =
716 kvm->arch.guest_memsize +
717 kvm->arch.guest_origin +
718 VIRTIODESCSPACE - 1ul;
719 mutex_unlock(&kvm->vcpus[i]->mutex);
720 }
721 }
686 722
687 return 0; 723 return 0;
724
725fail_out:
726 for (; i >= 0; i--)
727 mutex_unlock(&kvm->vcpus[i]->mutex);
728 return -EINVAL;
688} 729}
689 730
690void kvm_arch_flush_shadow(struct kvm *kvm) 731void kvm_arch_flush_shadow(struct kvm *kvm)
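Taken together, these kvm-s390.c changes replace the deleted FIXME with real synchronization: guest_memsize doubles as a registered-once latch, so a second KVM_SET_USER_MEMORY_REGION fails with -EINVAL; every present vcpu mutex is trylocked before gmsor and gmslm are rewritten in the sie control blocks; and kvm_arch_vcpu_ioctl_run refuses to enter the guest until memory has been registered.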
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 00bbe69b78da..748fee872323 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -14,6 +14,7 @@
14#ifndef ARCH_S390_KVM_S390_H 14#ifndef ARCH_S390_KVM_S390_H
15#define ARCH_S390_KVM_S390_H 15#define ARCH_S390_KVM_S390_H
16 16
17#include <linux/hrtimer.h>
17#include <linux/kvm.h> 18#include <linux/kvm.h>
18#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
19 20
@@ -41,7 +42,8 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
41} 42}
42 43
43int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); 44int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
44void kvm_s390_idle_wakeup(unsigned long data); 45enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
46void kvm_s390_tasklet(unsigned long parm);
45void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); 47void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
46int kvm_s390_inject_vm(struct kvm *kvm, 48int kvm_s390_inject_vm(struct kvm *kvm,
47 struct kvm_s390_interrupt *s390int); 49 struct kvm_s390_interrupt *s390int);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4b88834b8dd8..93ecd06e1a74 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -204,11 +204,11 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
204 int cpus = 0; 204 int cpus = 0;
205 int n; 205 int n;
206 206
207 spin_lock_bh(&fi->lock); 207 spin_lock(&fi->lock);
208 for (n = 0; n < KVM_MAX_VCPUS; n++) 208 for (n = 0; n < KVM_MAX_VCPUS; n++)
209 if (fi->local_int[n]) 209 if (fi->local_int[n])
210 cpus++; 210 cpus++;
211 spin_unlock_bh(&fi->lock); 211 spin_unlock(&fi->lock);
212 212
213 /* deal with other level 3 hypervisors */ 213 /* deal with other level 3 hypervisors */
214 if (stsi(mem, 3, 2, 2) == -ENOSYS) 214 if (stsi(mem, 3, 2, 2) == -ENOSYS)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f27dbedf0866..36678835034d 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -52,7 +52,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
52 if (cpu_addr >= KVM_MAX_VCPUS) 52 if (cpu_addr >= KVM_MAX_VCPUS)
53 return 3; /* not operational */ 53 return 3; /* not operational */
54 54
55 spin_lock_bh(&fi->lock); 55 spin_lock(&fi->lock);
56 if (fi->local_int[cpu_addr] == NULL) 56 if (fi->local_int[cpu_addr] == NULL)
57 rc = 3; /* not operational */ 57 rc = 3; /* not operational */
58 else if (atomic_read(fi->local_int[cpu_addr]->cpuflags) 58 else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
@@ -64,7 +64,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
64 *reg |= SIGP_STAT_STOPPED; 64 *reg |= SIGP_STAT_STOPPED;
65 rc = 1; /* status stored */ 65 rc = 1; /* status stored */
66 } 66 }
67 spin_unlock_bh(&fi->lock); 67 spin_unlock(&fi->lock);
68 68
69 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); 69 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
70 return rc; 70 return rc;
@@ -86,7 +86,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
86 86
87 inti->type = KVM_S390_INT_EMERGENCY; 87 inti->type = KVM_S390_INT_EMERGENCY;
88 88
89 spin_lock_bh(&fi->lock); 89 spin_lock(&fi->lock);
90 li = fi->local_int[cpu_addr]; 90 li = fi->local_int[cpu_addr];
91 if (li == NULL) { 91 if (li == NULL) {
92 rc = 3; /* not operational */ 92 rc = 3; /* not operational */
@@ -102,7 +102,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
102 spin_unlock_bh(&li->lock); 102 spin_unlock_bh(&li->lock);
103 rc = 0; /* order accepted */ 103 rc = 0; /* order accepted */
104unlock: 104unlock:
105 spin_unlock_bh(&fi->lock); 105 spin_unlock(&fi->lock);
106 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 106 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
107 return rc; 107 return rc;
108} 108}
@@ -123,7 +123,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
123 123
124 inti->type = KVM_S390_SIGP_STOP; 124 inti->type = KVM_S390_SIGP_STOP;
125 125
126 spin_lock_bh(&fi->lock); 126 spin_lock(&fi->lock);
127 li = fi->local_int[cpu_addr]; 127 li = fi->local_int[cpu_addr];
128 if (li == NULL) { 128 if (li == NULL) {
129 rc = 3; /* not operational */ 129 rc = 3; /* not operational */
@@ -142,7 +142,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
142 spin_unlock_bh(&li->lock); 142 spin_unlock_bh(&li->lock);
143 rc = 0; /* order accepted */ 143 rc = 0; /* order accepted */
144unlock: 144unlock:
145 spin_unlock_bh(&fi->lock); 145 spin_unlock(&fi->lock);
146 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); 146 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
147 return rc; 147 return rc;
148} 148}
@@ -188,7 +188,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
188 if (!inti) 188 if (!inti)
189 return 2; /* busy */ 189 return 2; /* busy */
190 190
191 spin_lock_bh(&fi->lock); 191 spin_lock(&fi->lock);
192 li = fi->local_int[cpu_addr]; 192 li = fi->local_int[cpu_addr];
193 193
194 if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) { 194 if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
@@ -220,7 +220,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
220out_li: 220out_li:
221 spin_unlock_bh(&li->lock); 221 spin_unlock_bh(&li->lock);
222out_fi: 222out_fi:
223 spin_unlock_bh(&fi->lock); 223 spin_unlock(&fi->lock);
224 return rc; 224 return rc;
225} 225}
226 226
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e41f4008afc5..f7e0d30250b7 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -124,6 +124,27 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
124} 124}
125EXPORT_SYMBOL(_raw_read_lock_wait); 125EXPORT_SYMBOL(_raw_read_lock_wait);
126 126
127void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
128{
129 unsigned int old;
130 int count = spin_retry;
131
132 local_irq_restore(flags);
133 while (1) {
134 if (count-- <= 0) {
135 _raw_yield();
136 count = spin_retry;
137 }
138 if (!__raw_read_can_lock(rw))
139 continue;
140 old = rw->lock & 0x7fffffffU;
141 local_irq_disable();
142 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
143 return;
144 }
145}
146EXPORT_SYMBOL(_raw_read_lock_wait_flags);
147
127int _raw_read_trylock_retry(raw_rwlock_t *rw) 148int _raw_read_trylock_retry(raw_rwlock_t *rw)
128{ 149{
129 unsigned int old; 150 unsigned int old;
@@ -157,6 +178,25 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
157} 178}
158EXPORT_SYMBOL(_raw_write_lock_wait); 179EXPORT_SYMBOL(_raw_write_lock_wait);
159 180
181void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
182{
183 int count = spin_retry;
184
185 local_irq_restore(flags);
186 while (1) {
187 if (count-- <= 0) {
188 _raw_yield();
189 count = spin_retry;
190 }
191 if (!__raw_write_can_lock(rw))
192 continue;
193 local_irq_disable();
194 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
195 return;
196 }
197}
198EXPORT_SYMBOL(_raw_write_lock_wait_flags);
199
160int _raw_write_trylock_retry(raw_rwlock_t *rw) 200int _raw_write_trylock_retry(raw_rwlock_t *rw)
161{ 201{
162 int count = spin_retry; 202 int count = spin_retry;
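The new _flags variants are the contended slow paths for the irqsave lock operations: they spin with the caller's saved interrupt state restored and disable interrupts again only just before each compare-and-swap attempt. The lock is therefore always acquired with interrupts off, but interrupts are not kept off for the whole spin.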
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 2a7458134544..db05661ac895 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux s390-specific parts of the memory manager. 2# Makefile for the linux s390-specific parts of the memory manager.
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
6obj-$(CONFIG_CMM) += cmm.o 6obj-$(CONFIG_CMM) += cmm.o
7obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 7obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
8obj-$(CONFIG_PAGE_STATES) += page-states.o 8obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 833e8366c351..220a152c836c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -19,6 +19,7 @@
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/mman.h> 20#include <linux/mman.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/compat.h>
22#include <linux/smp.h> 23#include <linux/smp.h>
23#include <linux/kdebug.h> 24#include <linux/kdebug.h>
24#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
@@ -239,7 +240,7 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
239 up_read(&mm->mmap_sem); 240 up_read(&mm->mmap_sem);
240 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 241 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
241#ifdef CONFIG_COMPAT 242#ifdef CONFIG_COMPAT
242 compat = test_tsk_thread_flag(current, TIF_31BIT); 243 compat = is_compat_task();
243 if (compat && instruction == 0x0a77) 244 if (compat && instruction == 0x0a77)
244 sys32_sigreturn(); 245 sys32_sigreturn();
245 else if (compat && instruction == 0x0aad) 246 else if (compat && instruction == 0x0aad)
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
new file mode 100644
index 000000000000..81756271dc44
--- /dev/null
+++ b/arch/s390/mm/maccess.c
@@ -0,0 +1,61 @@
1/*
2 * Access kernel memory without faulting -- s390 specific implementation.
3 *
4 * Copyright IBM Corp. 2009
5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 *
8 */
9
10#include <linux/uaccess.h>
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/errno.h>
14#include <asm/system.h>
15
16/*
17 * This function writes to kernel memory bypassing DAT and possible
18 * write protection. It copies one to four bytes from src to dst
19 * using the stura instruction.
20 * Returns the number of bytes copied or -EFAULT.
21 */
22static long probe_kernel_write_odd(void *dst, void *src, size_t size)
23{
24 unsigned long count, aligned;
25 int offset, mask;
26 int rc = -EFAULT;
27
28 aligned = (unsigned long) dst & ~3UL;
29 offset = (unsigned long) dst & 3;
30 count = min_t(unsigned long, 4 - offset, size);
31 mask = (0xf << (4 - count)) & 0xf;
32 mask >>= offset;
33 asm volatile(
34 " bras 1,0f\n"
35 " icm 0,0,0(%3)\n"
36 "0: l 0,0(%1)\n"
37 " lra %1,0(%1)\n"
38 "1: ex %2,0(1)\n"
39 "2: stura 0,%1\n"
40 " la %0,0\n"
41 "3:\n"
42 EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
43 : "+d" (rc), "+a" (aligned)
44 : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
45 return rc ? rc : count;
46}
47
48long probe_kernel_write(void *dst, void *src, size_t size)
49{
50 long copied = 0;
51
52 while (size) {
53 copied = probe_kernel_write_odd(dst, src, size);
54 if (copied < 0)
55 break;
56 dst += copied;
57 src += copied;
58 size -= copied;
59 }
60 return copied < 0 ? -EFAULT : 0;
61}
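A hedged usage sketch of the new interface; the wrapper below is illustrative, not part of the patch. Callers get back either 0 (everything was written, even through write-protected or DAT-off mappings) or -EFAULT:

	/* assumes the probe_kernel_write() defined above */
	static int sketch_poke_text(void *addr, unsigned char val)
	{
		/* 0 on success, -EFAULT if addr is not valid kernel memory */
		return probe_kernel_write(addr, &val, sizeof(val));
	}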
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index e008d236cc15..f4558ccf02b9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,6 +28,7 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <asm/pgalloc.h> 30#include <asm/pgalloc.h>
31#include <asm/compat.h>
31 32
32/* 33/*
33 * Top of mmap area (just below the process stack). 34 * Top of mmap area (just below the process stack).
@@ -55,7 +56,7 @@ static inline int mmap_is_legacy(void)
55 /* 56 /*
56 * Force standard allocation for 64 bit programs. 57 * Force standard allocation for 64 bit programs.
57 */ 58 */
58 if (!test_thread_flag(TIF_31BIT)) 59 if (!is_compat_task())
59 return 1; 60 return 1;
60#endif 61#endif
61 return sysctl_legacy_va_layout || 62 return sysctl_legacy_va_layout ||
@@ -91,7 +92,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
91 92
92int s390_mmap_check(unsigned long addr, unsigned long len) 93int s390_mmap_check(unsigned long addr, unsigned long len)
93{ 94{
94 if (!test_thread_flag(TIF_31BIT) && 95 if (!is_compat_task() &&
95 len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) 96 len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
96 return crst_table_upgrade(current->mm, 1UL << 53); 97 return crst_table_upgrade(current->mm, 1UL << 53);
97 return 0; 98 return 0;
@@ -108,8 +109,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
108 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 109 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
109 if (!(area & ~PAGE_MASK)) 110 if (!(area & ~PAGE_MASK))
110 return area; 111 return area;
111 if (area == -ENOMEM && 112 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
112 !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
113 /* Upgrade the page table to 4 levels and retry. */ 113 /* Upgrade the page table to 4 levels and retry. */
114 rc = crst_table_upgrade(mm, 1UL << 53); 114 rc = crst_table_upgrade(mm, 1UL << 53);
115 if (rc) 115 if (rc)
@@ -131,8 +131,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
131 area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 131 area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
132 if (!(area & ~PAGE_MASK)) 132 if (!(area & ~PAGE_MASK))
133 return area; 133 return area;
134 if (area == -ENOMEM && 134 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
135 !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
136 /* Upgrade the page table to 4 levels and retry. */ 135 /* Upgrade the page table to 4 levels and retry. */
137 rc = crst_table_upgrade(mm, 1UL << 53); 136 rc = crst_table_upgrade(mm, 1UL << 53);
138 if (rc) 137 if (rc)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be6c1cf4ad5a..4ca8e826bf30 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * arch/s390/mm/pgtable.c 2 * Copyright IBM Corp. 2007,2009
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */ 4 */
7 5
@@ -53,6 +51,18 @@ void clear_table_pgstes(unsigned long *table)
53 51
54#endif 52#endif
55 53
54unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
55EXPORT_SYMBOL(VMALLOC_START);
56
57static int __init parse_vmalloc(char *arg)
58{
59 if (!arg)
60 return -EINVAL;
61 VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
62 return 0;
63}
64early_param("vmalloc", parse_vmalloc);
65
56unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) 66unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
57{ 67{
58 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); 68 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);