Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    |    1
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c         |    3
-rw-r--r--  arch/s390/include/asm/Kbuild         |    1
-rw-r--r--  arch/s390/include/asm/barrier.h      |    7
-rw-r--r--  arch/s390/include/asm/cmpxchg.h      |  240
-rw-r--r--  arch/s390/include/asm/cputime.h      |   46
-rw-r--r--  arch/s390/include/asm/debug.h        |   29
-rw-r--r--  arch/s390/include/asm/ftrace.h       |   54
-rw-r--r--  arch/s390/include/asm/idle.h         |    3
-rw-r--r--  arch/s390/include/asm/io.h           |   19
-rw-r--r--  arch/s390/include/asm/irq.h          |   11
-rw-r--r--  arch/s390/include/asm/kprobes.h      |    1
-rw-r--r--  arch/s390/include/asm/lowcore.h      |    4
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |   11
-rw-r--r--  arch/s390/include/asm/pci.h          |    5
-rw-r--r--  arch/s390/include/asm/pci_io.h       |    6
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |    2
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   33
-rw-r--r--  arch/s390/include/asm/processor.h    |    2
-rw-r--r--  arch/s390/include/asm/spinlock.h     |    9
-rw-r--r--  arch/s390/include/asm/tlb.h          |    1
-rw-r--r--  arch/s390/include/uapi/asm/socket.h  |    5
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h  |    4
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |    5
-rw-r--r--  arch/s390/kernel/compat_signal.c     |    2
-rw-r--r--  arch/s390/kernel/compat_wrapper.c    |    2
-rw-r--r--  arch/s390/kernel/debug.c             |   12
-rw-r--r--  arch/s390/kernel/dumpstack.c         |    3
-rw-r--r--  arch/s390/kernel/early.c             |    4
-rw-r--r--  arch/s390/kernel/entry.S             |  424
-rw-r--r--  arch/s390/kernel/entry.h             |    2
-rw-r--r--  arch/s390/kernel/entry64.S           |  372
-rw-r--r--  arch/s390/kernel/ftrace.c            |  136
-rw-r--r--  arch/s390/kernel/idle.c              |   29
-rw-r--r--  arch/s390/kernel/irq.c               |    5
-rw-r--r--  arch/s390/kernel/kprobes.c           |  178
-rw-r--r--  arch/s390/kernel/mcount.S            |    1
-rw-r--r--  arch/s390/kernel/nmi.c               |    8
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c      |    1
-rw-r--r--  arch/s390/kernel/process.c           |    3
-rw-r--r--  arch/s390/kernel/ptrace.c            |  115
-rw-r--r--  arch/s390/kernel/setup.c             |    2
-rw-r--r--  arch/s390/kernel/signal.c            |    2
-rw-r--r--  arch/s390/kernel/smp.c               |    1
-rw-r--r--  arch/s390/kernel/syscalls.S          |    2
-rw-r--r--  arch/s390/kernel/time.c              |    3
-rw-r--r--  arch/s390/kernel/traps.c             |   25
-rw-r--r--  arch/s390/kvm/kvm-s390.c             |    2
-rw-r--r--  arch/s390/kvm/priv.c                 |   17
-rw-r--r--  arch/s390/mm/fault.c                 |   10
-rw-r--r--  arch/s390/mm/maccess.c               |    4
-rw-r--r--  arch/s390/mm/pageattr.c              |    2
-rw-r--r--  arch/s390/mm/pgtable.c               |  185
-rw-r--r--  arch/s390/pci/Makefile               |    2
-rw-r--r--  arch/s390/pci/pci.c                  |   19
-rw-r--r--  arch/s390/pci/pci_clp.c              |    1
-rw-r--r--  arch/s390/pci/pci_debug.c            |    7
-rw-r--r--  arch/s390/pci/pci_mmio.c             |  115
58 files changed, 1176 insertions(+), 1022 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f2cf1f90295b..68b68d755fdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -65,6 +65,7 @@ config S390
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 2badf2bf9cd7..47fe1055c714 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -83,10 +83,9 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
 
 static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct hypfs_dbfs_file *df;
+	struct hypfs_dbfs_file *df = file_inode(file)->i_private;
 	long rc;
 
-	df = file->f_path.dentry->d_inode->i_private;
 	mutex_lock(&df->lock);
 	if (df->unlocked_ioctl)
 		rc = df->unlocked_ioctl(file, cmd, arg);
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 773f86676588..c631f98fd524 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,7 +1,6 @@
 
 
 generic-y += clkdev.h
-generic-y += hash.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index b5dce6544d76..8d724718ec21 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -24,11 +24,14 @@
 
 #define rmb()				mb()
 #define wmb()				mb()
-#define read_barrier_depends()		do { } while(0)
+#define dma_rmb()			rmb()
+#define dma_wmb()			wmb()
 #define smp_mb()			mb()
 #define smp_rmb()			rmb()
 #define smp_wmb()			wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
+
+#define read_barrier_depends()		do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 #define smp_mb__before_atomic()		smp_mb()
 #define smp_mb__after_atomic()		smp_mb()
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..6259895fcd97 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
-{
-	unsigned long addr, old;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%3\n"
-			"0:	cs	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) ptr)
-			: "d" (x), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return old;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	lg	%0,%3\n"
-			"0:	csg	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=m" (*(long *) ptr)
-			: "d" (x), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return old;
-#endif /* CONFIG_64BIT */
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-#define xchg(ptr, x)						\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__ret = (__typeof__(*(ptr)))				\
-		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
-	__ret;							\
-})
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long addr, prev, tmp;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xff) << shift),
-			  "d" ((new & 0xff) << shift),
-			  "d" (~(0xff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xffff) << shift),
-			  "d" ((new & 0xffff) << shift),
-			  "d" (~(0xffff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 4:
-		asm volatile(
-			"	cs	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(int *) ptr)
-			: "0" (old), "d" (new), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	csg	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(long *) ptr)
-			: "0" (old), "d" (new), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return prev;
-#endif /* CONFIG_64BIT */
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)					\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__ret = (__typeof__(*(ptr)))				\
-		__cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n),\
-			  sizeof(*(ptr)));			\
-	__ret;							\
-})
-
-#ifdef CONFIG_64BIT
-#define cmpxchg64(ptr, o, n)					\
-({								\
-	cmpxchg((ptr), (o), (n));				\
-})
-#else /* CONFIG_64BIT */
-static inline unsigned long long __cmpxchg64(void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register_pair rp_old = {.pair = old};
-	register_pair rp_new = {.pair = new};
-	unsigned long long *ullptr = ptr;
-
-	asm volatile(
-		"	cds	%0,%2,%1"
-		: "+d" (rp_old), "+Q" (*ullptr)
-		: "d" (rp_new)
-		: "memory", "cc");
-	return rp_old.pair;
-}
-
-#define cmpxchg64(ptr, o, n)				\
-({							\
-	__typeof__(*(ptr)) __ret;			\
-	__ret = (__typeof__(*(ptr)))			\
-		__cmpxchg64((ptr),			\
-			    (unsigned long long)(o),	\
-			    (unsigned long long)(n));	\
-	__ret;						\
-})
-#endif /* CONFIG_64BIT */
-
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) __o = (o);					\
+	__typeof__(*(ptr)) __n = (n);					\
+	(__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
+})
+
+#define cmpxchg64		cmpxchg
+#define cmpxchg_local		cmpxchg
+#define cmpxchg64_local		cmpxchg
+
+#define xchg(ptr, x)						\
+({								\
+	__typeof__(ptr) __ptr = (ptr);				\
+	__typeof__(*(ptr)) __old;				\
+	do {							\
+		__old = *__ptr;					\
+	} while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
+	__old;							\
+})
+
+#define __HAVE_ARCH_CMPXCHG
+
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn)	\
 ({								\
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
 
 #define system_has_cmpxchg_double()	1
 
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-	case 4:
-#ifdef CONFIG_64BIT
-	case 8:
-#endif
-		return __cmpxchg(ptr, old, new, size);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg_local((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	__ret;								\
-})
-
-#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
-
 #endif /* __ASM_CMPXCHG_H */
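
The new header above drops all hand-written compare-and-swap assembler and leans on GCC's legacy __sync builtins instead. A minimal userspace sketch of the two primitives it now relies on (illustrative only, not kernel code):

/* cmpxchg: __sync_val_compare_and_swap returns the value found at the
 * location; the store happened only if that value equals the expected
 * one. xchg is emulated as a compare-and-swap retry loop, exactly as
 * in the new xchg() macro. */
#include <assert.h>

static int v = 5;

int main(void)
{
	int old = __sync_val_compare_and_swap(&v, 5, 7);
	assert(old == 5 && v == 7);	/* swap succeeded */

	int prev;
	do {
		prev = v;
	} while (!__sync_bool_compare_and_swap(&v, prev, 9));
	assert(prev == 7 && v == 9);	/* unconditional exchange */
	return 0;
}
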
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f8c196984853..b91e960e4045 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -10,6 +10,8 @@
 #include <linux/types.h>
 #include <asm/div64.h>
 
+#define CPUTIME_PER_USEC 4096ULL
+#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
@@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
  */
 static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
 {
-	return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
 }
 
 static inline cputime_t jiffies_to_cputime(const unsigned int jif)
 {
-	return (__force cputime_t)(jif * (4096000000ULL / HZ));
+	return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
 }
 
 static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
 {
 	unsigned long long jif = (__force unsigned long long) cputime;
-	do_div(jif, 4096000000ULL / HZ);
+	do_div(jif, CPUTIME_PER_SEC / HZ);
 	return jif;
 }
 
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
-	return (__force cputime64_t)(jif * (4096000000ULL / HZ));
+	return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
 }
 
 /*
@@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 
 static inline cputime_t usecs_to_cputime(const unsigned int m)
 {
-	return (__force cputime_t)(m * 4096ULL);
+	return (__force cputime_t)(m * CPUTIME_PER_USEC);
 }
 
 #define usecs_to_cputime64(m)		usecs_to_cputime(m)
@@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
  */
 static inline unsigned int cputime_to_secs(const cputime_t cputime)
 {
-	return __div((__force unsigned long long) cputime, 2048000000) >> 1;
+	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
 }
 
 static inline cputime_t secs_to_cputime(const unsigned int s)
 {
-	return (__force cputime_t)(s * 4096000000ULL);
+	return (__force cputime_t)(s * CPUTIME_PER_SEC);
 }
 
 /*
@@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *value)
 {
-	unsigned long long ret = value->tv_sec * 4096000000ULL;
-	return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
+	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+	return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
 }
 
 static inline void cputime_to_timespec(const cputime_t cputime,
@@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime,
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
-	value->tv_nsec = rp.subreg.even * 1000 / 4096;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
+	value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
-	value->tv_sec = __cputime / 4096000000ULL;
+	value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
+	value->tv_sec = __cputime / CPUTIME_PER_SEC;
 #endif
 }
 
@@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
  */
 static inline cputime_t timeval_to_cputime(const struct timeval *value)
 {
-	unsigned long long ret = value->tv_sec * 4096000000ULL;
-	return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
+	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+	return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
 }
 
 static inline void cputime_to_timeval(const cputime_t cputime,
@@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime,
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
-	value->tv_usec = rp.subreg.even / 4096;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
+	value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_usec = (__cputime % 4096000000ULL) / 4096;
-	value->tv_sec = __cputime / 4096000000ULL;
+	value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
+	value->tv_sec = __cputime / CPUTIME_PER_SEC;
 #endif
 }
 
@@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
 static inline clock_t cputime_to_clock_t(cputime_t cputime)
 {
 	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, 4096000000ULL / USER_HZ);
+	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
 	return clock;
 }
 
 static inline cputime_t clock_t_to_cputime(unsigned long x)
 {
-	return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
+	return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
 }
 
 /*
@@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
 static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
 {
 	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, 4096000000ULL / USER_HZ);
+	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
 	return clock;
 }
 
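
All of the conversions above encode one fact: the s390 CPU timer ticks in units of 2**-12 microseconds, so CPUTIME_PER_USEC is 4096 and CPUTIME_PER_SEC works out to the old magic constant. A small standalone self-check (illustrative only; USEC_PER_SEC is spelled out here since <linux/time.h> is not available in userspace):

#include <assert.h>

#define CPUTIME_PER_USEC 4096ULL
#define USEC_PER_SEC 1000000ULL
#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)

int main(void)
{
	/* the literal that used to be scattered through cputime.h */
	assert(CPUTIME_PER_SEC == 4096000000ULL);
	/* round-trip: one second of cputime is one million microseconds */
	assert(CPUTIME_PER_SEC / CPUTIME_PER_USEC == USEC_PER_SEC);
	return 0;
}
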
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
  * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
  */
 extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 	__attribute__ ((format(printf, 3, 4)));
 
+#define debug_sprintf_event(_id, _level, _fmt, ...)			\
+({									\
+	debug_entry_t *__ret;						\
+	debug_info_t *__id = _id;					\
+	int __level = _level;						\
+	if ((!__id) || (__level > __id->level))				\
+		__ret = NULL;						\
+	else								\
+		__ret = __debug_sprintf_event(__id, __level,		\
+					      _fmt, ## __VA_ARGS__);	\
+	__ret;								\
+})
 
 static inline debug_entry_t*
 debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
  * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
  */
 extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 	__attribute__ ((format(printf, 3, 4)));
 
+#define debug_sprintf_exception(_id, _level, _fmt, ...)			\
+({									\
+	debug_entry_t *__ret;						\
+	debug_info_t *__id = _id;					\
+	int __level = _level;						\
+	if ((!__id) || (__level > __id->level))				\
+		__ret = NULL;						\
+	else								\
+		__ret = __debug_sprintf_exception(__id, __level,	\
+						  _fmt, ## __VA_ARGS__);\
+	__ret;								\
+})
+
 int debug_register_view(debug_info_t* id, struct debug_view* view);
 int debug_unregister_view(debug_info_t* id, struct debug_view* view);
 
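
The point of wrapping __debug_sprintf_event()/__debug_sprintf_exception() in these macros is that the id/level check now happens at the call site, so disabled events never make the varargs function call at all. A hypothetical userspace analogue of the pattern (names invented for illustration):

#include <stdio.h>
#include <stdarg.h>

struct dbg { int level; };

static void __dbg_event(struct dbg *d, int level, const char *fmt, ...)
{
	va_list ap;

	(void)d; (void)level;
	va_start(ap, fmt);
	vprintf(fmt, ap);	/* stands in for the s390dbf entry write */
	va_end(ap);
}

#define dbg_event(id, lvl, fmt, ...)				\
do {								\
	struct dbg *__id = (id);				\
	int __lvl = (lvl);					\
	if (__id && __lvl <= __id->level)			\
		__dbg_event(__id, __lvl, fmt, ## __VA_ARGS__);	\
} while (0)

int main(void)
{
	struct dbg d = { .level = 3 };

	dbg_event(&d, 5, "dropped: %d\n", 1);	/* level too high: no call */
	dbg_event(&d, 1, "logged: %d\n", 2);	/* call is made */
	return 0;
}
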
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..abb618f1ead2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,69 @@
 #ifndef _ASM_S390_FTRACE_H
 #define _ASM_S390_FTRACE_H
 
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE	24
+#define MCOUNT_RETURN_FIXUP	18
+
 #ifndef __ASSEMBLY__
 
-extern void _mcount(void);
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
 extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
 
 struct dyn_arch_ftrace { };
 
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
 
+#define KPROBE_ON_FTRACE_NOP	0
+#define KPROBE_ON_FTRACE_CALL	1
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	return addr;
 }
 
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	/* jg .+24 */
+	insn->opc = 0xc0f4;
+	insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
 
-#define MCOUNT_INSN_SIZE  18
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	if (insn->disp == MCOUNT_INSN_SIZE / 2)
+		return 1;
+#endif
+	return 0;
+}
 
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+					     unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	unsigned long target;
+
+	/* brasl r0,ftrace_caller */
+	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+	insn->opc = 0xc005;
+	insn->disp = (target - ip) / 2;
+#endif
+}
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
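
The disp field in struct ftrace_insn is a halfword count: s390 relative-branch instructions such as jg and brasl encode their target as a signed offset in 2-byte units from the instruction address, which is why both generators above divide by 2. A quick check of that arithmetic (addresses are made up for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t ip = 0x1000;		/* hypothetical patched call site */
	uint64_t target = 0x1100;	/* hypothetical ftrace_caller */
	int32_t disp = (int32_t)((target - ip) / 2);

	assert(disp == 0x80);				/* 256 bytes = 128 halfwords */
	assert(ip + 2 * (uint64_t)disp == target);	/* the decoder's view */
	return 0;
}
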
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6af037f574b8..113cd963dbbe 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -9,9 +9,10 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/seqlock.h>
 
 struct s390_idle_data {
-	unsigned int sequence;
+	seqcount_t seqcount;
 	unsigned long long idle_count;
 	unsigned long long idle_time;
 	unsigned long long clock_idle_enter;
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index cd6b9ee7b69c..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -13,9 +13,10 @@
 #include <asm/page.h>
 #include <asm/pci_io.h>
 
-void *xlate_dev_mem_ptr(unsigned long phys);
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
-void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
+void *xlate_dev_mem_ptr(phys_addr_t phys);
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 /*
  * Convert a virtual cached pointer to an uncached pointer
@@ -38,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
 {
 }
 
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
 /*
  * s390 needs a private implementation of pci_iomap since ioremap with its
  * offset parameter isn't sufficient. That's because BAR spaces are not
@@ -60,11 +70,6 @@ static inline void iounmap(volatile void __iomem *addr)
 #define __raw_writel	zpci_write_u32
 #define __raw_writeq	zpci_write_u64
 
-#define readb_relaxed	readb
-#define readw_relaxed	readw
-#define readl_relaxed	readl
-#define readq_relaxed	readq
-
 #endif /* CONFIG_PCI */
 
 #include <asm-generic/io.h>
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index b0d5f0a97a01..343ea7c987aa 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#define EXT_INTERRUPT	1
-#define IO_INTERRUPT	2
-#define THIN_INTERRUPT	3
+#define EXT_INTERRUPT	0
+#define IO_INTERRUPT	1
+#define THIN_INTERRUPT	2
 
-#define NR_IRQS_BASE	4
+#define NR_IRQS_BASE	3
 
 #ifdef CONFIG_PCI_NR_MSI
 # define NR_IRQS	(NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
 # define NR_IRQS	NR_IRQS_BASE
 #endif
 
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ		0
-
 /* External interruption codes */
 #define EXT_IRQ_INTERRUPT_KEY	0x0040
 #define EXT_IRQ_CLK_COMP	0x1004
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
+	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
 	__u32	softirq_pending;	/* 0x02ec */
 	__u32	percpu_offset;		/* 0x02f0 */
 	__u32	machine_flags;		/* 0x02f4 */
-	__u32	ftrace_func;		/* 0x02f8 */
+	__u8	pad_0x02f8[0x02fc-0x02f8];	/* 0x02f8 */
 	__u32	spinlock_lockval;	/* 0x02fc */
 
 	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
 	__u64	percpu_offset;		/* 0x0378 */
 	__u64	vdso_per_cpu_data;	/* 0x0380 */
 	__u64	machine_flags;		/* 0x0388 */
-	__u64	ftrace_func;		/* 0x0390 */
+	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
 	__u64	gmap;			/* 0x0398 */
 	__u32	spinlock_lockval;	/* 0x03a0 */
 	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 3815bfea1b2d..f49b71954654 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -120,4 +120,15 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }
 
+static inline void arch_unmap(struct mm_struct *mm,
+			struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..ef803c202d42 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -50,10 +50,6 @@ struct zpci_fmb {
 	atomic64_t unmapped_pages;
 } __packed __aligned(16);
 
-#define ZPCI_MSI_VEC_BITS	11
-#define ZPCI_MSI_VEC_MAX	(1 << ZPCI_MSI_VEC_BITS)
-#define ZPCI_MSI_VEC_MASK	(ZPCI_MSI_VEC_MAX - 1)
-
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
 	ZPCI_FN_STATE_STANDBY,
@@ -90,6 +86,7 @@ struct zpci_dev {
 
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
+	unsigned int	max_msi;	/* maximum number of MSI's */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
 	unsigned int	aisb;		/* number of the summary bit */
 
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..f664e96f48c7 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst,
 	int size, rc = 0;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+		size = zpci_get_max_write_size((u64 __force) src,
+					       (u64) dst, n, 8);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 		rc = zpci_read_single(req, dst, offset, size);
 		if (rc)
@@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 		return -EINVAL;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+		size = zpci_get_max_write_size((u64 __force) dst,
+					       (u64) src, n, 128);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 
 		if (size > 8) /* main path */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d39a31c3cdf2..e510b9460efa 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
-			    bool init_skey);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57c882761dea..5e102422c9ab 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
 #define MODULES_LEN	(1UL << 31)
 #endif
 
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
@@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 	return 0;
 }
 
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
 static inline int mm_use_skey(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	return pmd;
 }
 
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address,
+					    pmd_t *pmdp, int full)
+{
+	pmd_t pmd = *pmdp;
+
+	if (!full)
+		pmdp_flush_lazy(mm, address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
 #define __HAVE_ARCH_PMDP_CLEAR_FLUSH
 static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pmd_t *pmdp)
@@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
-extern void s390_enable_skey(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
 
 /*
  * No page table caches to initialise
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d559bdb03d18..bed05ea7ec27 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,8 +217,6 @@ static inline unsigned short stap(void)
  */
 static inline void cpu_relax(void)
 {
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,68");
 	barrier();
 }
 
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d6bdf906caa5..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
 static inline int
 _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
-	unsigned int old_expected = old;
-
-	asm volatile(
-		"	cs	%0,%3,%1"
-		: "=d" (old), "=Q" (*lock)
-		: "0" (old), "d" (new), "Q" (*lock)
-		: "cc", "memory" );
-	return old == old_expected;
+	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
 /*
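
_raw_compare_and_swap() now gets its CS instruction from __sync_bool_compare_and_swap, which also acts as a full memory barrier and reports success as a boolean. A userspace sketch of the resulting try-lock semantics (illustrative only, not the kernel's spinlock code):

#include <assert.h>

static unsigned int lock;	/* 0 = free, otherwise owner id */

static int try_lock(unsigned int owner)
{
	return __sync_bool_compare_and_swap(&lock, 0, owner);
}

int main(void)
{
	assert(try_lock(1));	/* 0 -> 1: acquired */
	assert(!try_lock(2));	/* already held: CAS fails */
	lock = 0;		/* release */
	assert(try_lock(2));
	return 0;
}
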
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 572c59949004..06d8741ad6f4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	tlb_remove_table(tlb, pmd);
 #endif
 }
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index e031332096d7..296942d56e6a 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -86,4 +86,9 @@
 
 #define SO_BPF_EXTENSIONS	48
 
+#define SO_INCOMING_CPU		49
+
+#define SO_ATTACH_BPF		50
+#define SO_DETACH_BPF		SO_DETACH_FILTER
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4197c89c52d4..2b446cf0cc65 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -287,7 +287,9 @@
 #define __NR_getrandom		349
 #define __NR_memfd_create	350
 #define __NR_bpf		351
-#define NR_syscalls 352
+#define __NR_s390_pci_mmio_write	352
+#define __NR_s390_pci_mmio_read		353
+#define NR_syscalls 354
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ef279a136801..e07e91605353 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -17,8 +17,8 @@
  * Make sure that the compiler is new enough. We want a compiler that
  * is known to work with the "Q" assembler constraint.
  */
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
 #endif
 
 int main(void)
@@ -156,7 +156,6 @@ int main(void)
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
-	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 009f5eb11125..34d5fa7b01b5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
 		/* Signal frames without vectors registers are short ! */
-		__u16 __user *svc = (void *) frame + frame_size - 2;
+		__u16 __user *svc = (void __user *) frame + frame_size - 2;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 			return -EFAULT;
 		restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index c4f7a3d655b8..d7fa2f0f1425 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char
 COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
 COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ee8390da6ea7..c1f21aca76e7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1019,7 +1019,7 @@ debug_count_numargs(char *string)
  */
 
 debug_entry_t*
-debug_sprintf_event(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 {
 	va_list   ap;
 	int numargs,idx;
@@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 	numargs=debug_count_numargs(string);
@@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_event);
+EXPORT_SYMBOL(__debug_sprintf_event);
 
 /*
  * debug_sprintf_exception:
  */
 
 debug_entry_t*
-debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 {
 	va_list   ap;
 	int numargs,idx;
@@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 
@@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(__debug_sprintf_exception);
 
 /*
  * debug_register_view:
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index acb412442e5e..a99852e96a77 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str)
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
-	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+	       regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
 #endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cef2879edff3..302ac1f7f8e7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -490,8 +489,5 @@ void __init startup_init(void)
 	detect_machine_facilities();
 	setup_topology();
 	sclp_early_detect();
-#ifdef CONFIG_DYNAMIC_FTRACE
-	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
-#endif
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 70203265196f..398329b2b518 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_on)
+	l	%r1,BASED(.Lc_hardirqs_on)
 	basr	%r14,%r1		# call trace_hardirqs_on_caller
 #endif
 	.endm
@@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_OFF
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_off)
+	l	%r1,BASED(.Lc_hardirqs_off)
 	basr	%r14,%r1		# call trace_hardirqs_off_caller
 #endif
 	.endm
@@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 #ifdef CONFIG_LOCKDEP
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 	jz	.+10
-	l	%r1,BASED(.Llockdep_sys_exit)
+	l	%r1,BASED(.Lc_lockdep_sys_exit)
 	basr	%r14,%r1		# call lockdep_sys_exit
 #endif
 	.endm
@@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	tmh	%r8,0x0001		# interrupting from user ?
 	jnz	1f
 	lr	%r14,%r9
-	sl	%r14,BASED(.Lcritical_start)
-	cl	%r14,BASED(.Lcritical_length)
+	sl	%r14,BASED(.Lc_critical_start)
+	cl	%r14,BASED(.Lc_critical_length)
 	jhe	0f
 	la	%r11,\savearea		# inside critical section, do cleanup
 	bras	%r14,cleanup_critical
@@ -162,7 +162,7 @@ ENTRY(__switch_to)
 	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
-__critical_start:
+.L__critical_start:
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
  * are executed with interrupts enabled.
@@ -170,145 +170,145 @@ __critical_start:
 
 ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_stm:
+.Lsysc_stm:
 	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lhi	%r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
 	l	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	st	%r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	lh	%r8,__PT_INT_CODE+2(%r11)
 	sla	%r8,2				# shift and test for svc0
-	jnz	sysc_nr_ok
+	jnz	.Lsysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
-	jnl	sysc_nr_ok
+	jnl	.Lsysc_nr_ok
 	sth	%r1,__PT_INT_CODE+2(%r11)
 	lr	%r8,%r1
 	sla	%r8,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	st	%r2,__PT_ORIG_GPR2(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r9,0(%r8,%r10)			# get system call addr.
 	tm	__TI_flags+3(%r12),_TIF_TRACE
-	jnz	sysc_tracesys
+	jnz	.Lsysc_tracesys
 	basr	%r14,%r9			# call sys_xxxx
 	st	%r2,__PT_R2(%r11)		# store return value
 
-sysc_return:
+.Lsysc_return:
 	LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
 	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
-	jno	sysc_restore
+	jno	.Lsysc_restore
 	tm	__PT_FLAGS+3(%r11),_PIF_WORK
-	jnz	sysc_work
+	jnz	.Lsysc_work
 	tm	__TI_flags+3(%r12),_TIF_WORK
-	jnz	sysc_work			# check for thread work
+	jnz	.Lsysc_work			# check for thread work
 	tm	__LC_CPU_FLAGS+3,_CIF_WORK
-	jnz	sysc_work
-sysc_restore:
+	jnz	.Lsysc_work
+.Lsysc_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:
 
 #
 # One of the work bits is on. Find out which one.
 #
-sysc_work:
+.Lsysc_work:
 	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-	jo	sysc_mcck_pending
+	jo	.Lsysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jo	sysc_reschedule
+	jo	.Lsysc_reschedule
 	tm	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
-	jo	sysc_singlestep
+	jo	.Lsysc_singlestep
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	jo	sysc_sigpending
+	jo	.Lsysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	jo	sysc_notify_resume
+	jo	.Lsysc_notify_resume
 	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
-	jo	sysc_uaccess
-	j	sysc_return		# beware of critical section cleanup
+	jo	.Lsysc_uaccess
+	j	.Lsysc_return		# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-sysc_reschedule:
-	l	%r1,BASED(.Lschedule)
-	la	%r14,BASED(sysc_return)
+.Lsysc_reschedule:
+	l	%r1,BASED(.Lc_schedule)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call schedule
 
 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-sysc_mcck_pending:
-	l	%r1,BASED(.Lhandle_mcck)
-	la	%r14,BASED(sysc_return)
+.Lsysc_mcck_pending:
+	l	%r1,BASED(.Lc_handle_mcck)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
 #
 # _CIF_ASCE is set, load user space asce
 #
-sysc_uaccess:
+.Lsysc_uaccess:
 	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-sysc_sigpending:
+.Lsysc_sigpending:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_signal)
+	l	%r1,BASED(.Lc_do_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__PT_FLAGS+3(%r11),_PIF_SYSCALL
-	jno	sysc_return
+	jno	.Lsysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
-	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
+	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
 	sla	%r8,2
-	j	sysc_nr_ok		# restart svc
+	j	.Lsysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
-sysc_notify_resume:
+.Lsysc_notify_resume:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_notify_resume)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_notify_resume)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_notify_resume
 
 #
 # _PIF_PER_TRAP is set, call do_per_trap
 #
-sysc_singlestep:
+.Lsysc_singlestep:
 	ni	__PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_per_trap)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_per_trap)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
-sysc_tracesys:
-	l	%r1,BASED(.Ltrace_enter)
+.Lsysc_tracesys:
+	l	%r1,BASED(.Lc_trace_enter)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
 	xr	%r0,%r0
@@ -316,22 +316,22 @@ sysc_tracesys:
316 st %r0,__PT_R2(%r11) 316 st %r0,__PT_R2(%r11)
317 basr %r14,%r1 # call do_syscall_trace_enter 317 basr %r14,%r1 # call do_syscall_trace_enter
318 cl %r2,BASED(.Lnr_syscalls) 318 cl %r2,BASED(.Lnr_syscalls)
319 jnl sysc_tracenogo 319 jnl .Lsysc_tracenogo
320 lr %r8,%r2 320 lr %r8,%r2
321 sll %r8,2 321 sll %r8,2
322 l %r9,0(%r8,%r10) 322 l %r9,0(%r8,%r10)
323sysc_tracego: 323.Lsysc_tracego:
324 lm %r3,%r7,__PT_R3(%r11) 324 lm %r3,%r7,__PT_R3(%r11)
325 st %r7,STACK_FRAME_OVERHEAD(%r15) 325 st %r7,STACK_FRAME_OVERHEAD(%r15)
326 l %r2,__PT_ORIG_GPR2(%r11) 326 l %r2,__PT_ORIG_GPR2(%r11)
327 basr %r14,%r9 # call sys_xxx 327 basr %r14,%r9 # call sys_xxx
328 st %r2,__PT_R2(%r11) # store return value 328 st %r2,__PT_R2(%r11) # store return value
329sysc_tracenogo: 329.Lsysc_tracenogo:
330 tm __TI_flags+3(%r12),_TIF_TRACE 330 tm __TI_flags+3(%r12),_TIF_TRACE
331 jz sysc_return 331 jz .Lsysc_return
332 l %r1,BASED(.Ltrace_exit) 332 l %r1,BASED(.Lc_trace_exit)
333 lr %r2,%r11 # pass pointer to pt_regs 333 lr %r2,%r11 # pass pointer to pt_regs
334 la %r14,BASED(sysc_return) 334 la %r14,BASED(.Lsysc_return)
335 br %r1 # call do_syscall_trace_exit 335 br %r1 # call do_syscall_trace_exit
336 336
337# 337#
@@ -341,18 +341,18 @@ ENTRY(ret_from_fork)
341 la %r11,STACK_FRAME_OVERHEAD(%r15) 341 la %r11,STACK_FRAME_OVERHEAD(%r15)
342 l %r12,__LC_THREAD_INFO 342 l %r12,__LC_THREAD_INFO
343 l %r13,__LC_SVC_NEW_PSW+4 343 l %r13,__LC_SVC_NEW_PSW+4
344 l %r1,BASED(.Lschedule_tail) 344 l %r1,BASED(.Lc_schedule_tail)
345 basr %r14,%r1 # call schedule_tail 345 basr %r14,%r1 # call schedule_tail
346 TRACE_IRQS_ON 346 TRACE_IRQS_ON
347 ssm __LC_SVC_NEW_PSW # reenable interrupts 347 ssm __LC_SVC_NEW_PSW # reenable interrupts
348 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 348 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
349 jne sysc_tracenogo 349 jne .Lsysc_tracenogo
350 # it's a kernel thread 350 # it's a kernel thread
351 lm %r9,%r10,__PT_R9(%r11) # load gprs 351 lm %r9,%r10,__PT_R9(%r11) # load gprs
352ENTRY(kernel_thread_starter) 352ENTRY(kernel_thread_starter)
353 la %r2,0(%r10) 353 la %r2,0(%r10)
354 basr %r14,%r9 354 basr %r14,%r9
355 j sysc_tracenogo 355 j .Lsysc_tracenogo
356 356
357/* 357/*
358 * Program check handler routine 358 * Program check handler routine
@@ -369,7 +369,7 @@ ENTRY(pgm_check_handler)
369 tmh %r8,0x4000 # PER bit set in old PSW ? 369 tmh %r8,0x4000 # PER bit set in old PSW ?
370 jnz 0f # -> enabled, can't be a double fault 370 jnz 0f # -> enabled, can't be a double fault
371 tm __LC_PGM_ILC+3,0x80 # check for per exception 371 tm __LC_PGM_ILC+3,0x80 # check for per exception
372 jnz pgm_svcper # -> single stepped svc 372 jnz .Lpgm_svcper # -> single stepped svc
3730: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 3730: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
374 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 374 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
375 j 2f 375 j 2f
@@ -386,42 +386,42 @@ ENTRY(pgm_check_handler)
386 jz 0f 386 jz 0f
387 l %r1,__TI_task(%r12) 387 l %r1,__TI_task(%r12)
388 tmh %r8,0x0001 # kernel per event ? 388 tmh %r8,0x0001 # kernel per event ?
389 jz pgm_kprobe 389 jz .Lpgm_kprobe
390 oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP 390 oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
391 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS 391 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
392 mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE 392 mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
393 mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID 393 mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
3940: REENABLE_IRQS 3940: REENABLE_IRQS
395 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 395 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
396 l %r1,BASED(.Ljump_table) 396 l %r1,BASED(.Lc_jump_table)
397 la %r10,0x7f 397 la %r10,0x7f
398 n %r10,__PT_INT_CODE(%r11) 398 n %r10,__PT_INT_CODE(%r11)
399 je sysc_return 399 je .Lsysc_return
400 sll %r10,2 400 sll %r10,2
401 l %r1,0(%r10,%r1) # load address of handler routine 401 l %r1,0(%r10,%r1) # load address of handler routine
402 lr %r2,%r11 # pass pointer to pt_regs 402 lr %r2,%r11 # pass pointer to pt_regs
403 basr %r14,%r1 # branch to interrupt-handler 403 basr %r14,%r1 # branch to interrupt-handler
404 j sysc_return 404 j .Lsysc_return
405 405
406# 406#
407# PER event in supervisor state, must be kprobes 407# PER event in supervisor state, must be kprobes
408# 408#
409pgm_kprobe: 409.Lpgm_kprobe:
410 REENABLE_IRQS 410 REENABLE_IRQS
411 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 411 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
412 l %r1,BASED(.Ldo_per_trap) 412 l %r1,BASED(.Lc_do_per_trap)
413 lr %r2,%r11 # pass pointer to pt_regs 413 lr %r2,%r11 # pass pointer to pt_regs
414 basr %r14,%r1 # call do_per_trap 414 basr %r14,%r1 # call do_per_trap
415 j sysc_return 415 j .Lsysc_return
416 416
417# 417#
418# single stepped system call 418# single stepped system call
419# 419#
420pgm_svcper: 420.Lpgm_svcper:
421 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW 421 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
422 mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) 422 mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
423 lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 423 lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
424 lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs 424 lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
425 425
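Note how .Lpgm_svcper above leaves the handler: a single lpsw installs the interrupt mask and the instruction address together, so interrupts become enabled and control arrives at .Lsysc_per atomically — there is no window in which one has happened without the other. The pattern in isolation, with a hypothetical target label (the + 0x80000000 sets the addressing-mode bit in the PSW's address word, i.e. 31-bit mode):

	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW     # mask word: irqs enabled
	mvc	__LC_RETURN_PSW+4(4),BASED(.Lc_target)  # address word, AM bit set
	lpsw	__LC_RETURN_PSW                         # mask and branch, atomically
	...
.Lc_target:	.long	.Lsomewhere + 0x80000000        # hypothetical target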
426/* 426/*
427 * IO interrupt handler routine 427 * IO interrupt handler routine
@@ -435,9 +435,9 @@ ENTRY(io_int_handler)
435 l %r13,__LC_SVC_NEW_PSW+4 435 l %r13,__LC_SVC_NEW_PSW+4
436 lm %r8,%r9,__LC_IO_OLD_PSW 436 lm %r8,%r9,__LC_IO_OLD_PSW
437 tmh %r8,0x0001 # interrupting from user ? 437 tmh %r8,0x0001 # interrupting from user ?
438 jz io_skip 438 jz .Lio_skip
439 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 439 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
440io_skip: 440.Lio_skip:
441 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 441 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
442 stm %r0,%r7,__PT_R0(%r11) 442 stm %r0,%r7,__PT_R0(%r11)
443 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC 443 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -446,35 +446,35 @@ io_skip:
446 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 446 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
447 TRACE_IRQS_OFF 447 TRACE_IRQS_OFF
448 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 448 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
449io_loop: 449.Lio_loop:
450 l %r1,BASED(.Ldo_IRQ) 450 l %r1,BASED(.Lc_do_IRQ)
451 lr %r2,%r11 # pass pointer to pt_regs 451 lr %r2,%r11 # pass pointer to pt_regs
452 lhi %r3,IO_INTERRUPT 452 lhi %r3,IO_INTERRUPT
453 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 453 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
454 jz io_call 454 jz .Lio_call
455 lhi %r3,THIN_INTERRUPT 455 lhi %r3,THIN_INTERRUPT
456io_call: 456.Lio_call:
457 basr %r14,%r1 # call do_IRQ 457 basr %r14,%r1 # call do_IRQ
458 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR 458 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
459 jz io_return 459 jz .Lio_return
460 tpi 0 460 tpi 0
461 jz io_return 461 jz .Lio_return
462 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 462 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
463 j io_loop 463 j .Lio_loop
464io_return: 464.Lio_return:
465 LOCKDEP_SYS_EXIT 465 LOCKDEP_SYS_EXIT
466 TRACE_IRQS_ON 466 TRACE_IRQS_ON
467io_tif: 467.Lio_tif:
468 tm __TI_flags+3(%r12),_TIF_WORK 468 tm __TI_flags+3(%r12),_TIF_WORK
469 jnz io_work # there is work to do (signals etc.) 469 jnz .Lio_work # there is work to do (signals etc.)
470 tm __LC_CPU_FLAGS+3,_CIF_WORK 470 tm __LC_CPU_FLAGS+3,_CIF_WORK
471 jnz io_work 471 jnz .Lio_work
472io_restore: 472.Lio_restore:
473 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 473 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
474 stpt __LC_EXIT_TIMER 474 stpt __LC_EXIT_TIMER
475 lm %r0,%r15,__PT_R0(%r11) 475 lm %r0,%r15,__PT_R0(%r11)
476 lpsw __LC_RETURN_PSW 476 lpsw __LC_RETURN_PSW
477io_done: 477.Lio_done:
478 478
479# 479#
480# There is work to do, find out in which context we have been interrupted: 480# There is work to do, find out in which context we have been interrupted:
@@ -483,15 +483,15 @@ io_done:
483# the preemption counter and if it is zero call preempt_schedule_irq 483# the preemption counter and if it is zero call preempt_schedule_irq
484# Before any work can be done, a switch to the kernel stack is required. 484# Before any work can be done, a switch to the kernel stack is required.
485# 485#
486io_work: 486.Lio_work:
487 tm __PT_PSW+1(%r11),0x01 # returning to user ? 487 tm __PT_PSW+1(%r11),0x01 # returning to user ?
488 jo io_work_user # yes -> do resched & signal 488 jo .Lio_work_user # yes -> do resched & signal
489#ifdef CONFIG_PREEMPT 489#ifdef CONFIG_PREEMPT
490 # check for preemptive scheduling 490 # check for preemptive scheduling
491 icm %r0,15,__TI_precount(%r12) 491 icm %r0,15,__TI_precount(%r12)
492 jnz io_restore # preemption disabled 492 jnz .Lio_restore # preemption disabled
493 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 493 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
494 jno io_restore 494 jno .Lio_restore
495 # switch to kernel stack 495 # switch to kernel stack
496 l %r1,__PT_R15(%r11) 496 l %r1,__PT_R15(%r11)
497 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 497 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -499,20 +499,20 @@ io_work:
499 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 499 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
500 la %r11,STACK_FRAME_OVERHEAD(%r1) 500 la %r11,STACK_FRAME_OVERHEAD(%r1)
501 lr %r15,%r1 501 lr %r15,%r1
502 # TRACE_IRQS_ON already done at io_return, call 502 # TRACE_IRQS_ON already done at .Lio_return, call
503 # TRACE_IRQS_OFF to keep things symmetrical 503 # TRACE_IRQS_OFF to keep things symmetrical
504 TRACE_IRQS_OFF 504 TRACE_IRQS_OFF
505 l %r1,BASED(.Lpreempt_irq) 505 l %r1,BASED(.Lc_preempt_irq)
506 basr %r14,%r1 # call preempt_schedule_irq 506 basr %r14,%r1 # call preempt_schedule_irq
507 j io_return 507 j .Lio_return
508#else 508#else
509 j io_restore 509 j .Lio_restore
510#endif 510#endif
511 511
512# 512#
513# Need to do work before returning to userspace, switch to kernel stack 513# Need to do work before returning to userspace, switch to kernel stack
514# 514#
515io_work_user: 515.Lio_work_user:
516 l %r1,__LC_KERNEL_STACK 516 l %r1,__LC_KERNEL_STACK
517 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 517 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
518 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 518 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -522,74 +522,74 @@ io_work_user:
522# 522#
523# One of the work bits is on. Find out which one. 523# One of the work bits is on. Find out which one.
524# 524#
525io_work_tif: 525.Lio_work_tif:
526 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING 526 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
527 jo io_mcck_pending 527 jo .Lio_mcck_pending
528 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 528 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
529 jo io_reschedule 529 jo .Lio_reschedule
530 tm __TI_flags+3(%r12),_TIF_SIGPENDING 530 tm __TI_flags+3(%r12),_TIF_SIGPENDING
531 jo io_sigpending 531 jo .Lio_sigpending
532 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 532 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
533 jo io_notify_resume 533 jo .Lio_notify_resume
534 tm __LC_CPU_FLAGS+3,_CIF_ASCE 534 tm __LC_CPU_FLAGS+3,_CIF_ASCE
535 jo io_uaccess 535 jo .Lio_uaccess
536 j io_return # beware of critical section cleanup 536 j .Lio_return # beware of critical section cleanup
537 537
538# 538#
539# _CIF_MCCK_PENDING is set, call handler 539# _CIF_MCCK_PENDING is set, call handler
540# 540#
541io_mcck_pending: 541.Lio_mcck_pending:
542 # TRACE_IRQS_ON already done at io_return 542 # TRACE_IRQS_ON already done at .Lio_return
543 l %r1,BASED(.Lhandle_mcck) 543 l %r1,BASED(.Lc_handle_mcck)
544 basr %r14,%r1 # TIF bit will be cleared by handler 544 basr %r14,%r1 # TIF bit will be cleared by handler
545 TRACE_IRQS_OFF 545 TRACE_IRQS_OFF
546 j io_return 546 j .Lio_return
547 547
548# 548#
549# _CIF_ASCE is set, load user space asce 549# _CIF_ASCE is set, load user space asce
550# 550#
551io_uaccess: 551.Lio_uaccess:
552 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE 552 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
553 lctl %c1,%c1,__LC_USER_ASCE # load primary asce 553 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
554 j io_return 554 j .Lio_return
555 555
556# 556#
557# _TIF_NEED_RESCHED is set, call schedule 557# _TIF_NEED_RESCHED is set, call schedule
558# 558#
559io_reschedule: 559.Lio_reschedule:
560 # TRACE_IRQS_ON already done at io_return 560 # TRACE_IRQS_ON already done at .Lio_return
561 l %r1,BASED(.Lschedule) 561 l %r1,BASED(.Lc_schedule)
562 ssm __LC_SVC_NEW_PSW # reenable interrupts 562 ssm __LC_SVC_NEW_PSW # reenable interrupts
563 basr %r14,%r1 # call scheduler 563 basr %r14,%r1 # call scheduler
564 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 564 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
565 TRACE_IRQS_OFF 565 TRACE_IRQS_OFF
566 j io_return 566 j .Lio_return
567 567
568# 568#
569# _TIF_SIGPENDING is set, call do_signal 569# _TIF_SIGPENDING is set, call do_signal
570# 570#
571io_sigpending: 571.Lio_sigpending:
572 # TRACE_IRQS_ON already done at io_return 572 # TRACE_IRQS_ON already done at .Lio_return
573 l %r1,BASED(.Ldo_signal) 573 l %r1,BASED(.Lc_do_signal)
574 ssm __LC_SVC_NEW_PSW # reenable interrupts 574 ssm __LC_SVC_NEW_PSW # reenable interrupts
575 lr %r2,%r11 # pass pointer to pt_regs 575 lr %r2,%r11 # pass pointer to pt_regs
576 basr %r14,%r1 # call do_signal 576 basr %r14,%r1 # call do_signal
577 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 577 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
578 TRACE_IRQS_OFF 578 TRACE_IRQS_OFF
579 j io_return 579 j .Lio_return
580 580
581# 581#
582# _TIF_NOTIFY_RESUME is set, call do_notify_resume 582# _TIF_NOTIFY_RESUME is set, call do_notify_resume
583# 583#
584io_notify_resume: 584.Lio_notify_resume:
585 # TRACE_IRQS_ON already done at io_return 585 # TRACE_IRQS_ON already done at .Lio_return
586 l %r1,BASED(.Ldo_notify_resume) 586 l %r1,BASED(.Lc_do_notify_resume)
587 ssm __LC_SVC_NEW_PSW # reenable interrupts 587 ssm __LC_SVC_NEW_PSW # reenable interrupts
588 lr %r2,%r11 # pass pointer to pt_regs 588 lr %r2,%r11 # pass pointer to pt_regs
589 basr %r14,%r1 # call do_notify_resume 589 basr %r14,%r1 # call do_notify_resume
590 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 590 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
591 TRACE_IRQS_OFF 591 TRACE_IRQS_OFF
592 j io_return 592 j .Lio_return
593 593
594/* 594/*
595 * External interrupt handler routine 595 * External interrupt handler routine
@@ -603,9 +603,9 @@ ENTRY(ext_int_handler)
603 l %r13,__LC_SVC_NEW_PSW+4 603 l %r13,__LC_SVC_NEW_PSW+4
604 lm %r8,%r9,__LC_EXT_OLD_PSW 604 lm %r8,%r9,__LC_EXT_OLD_PSW
605 tmh %r8,0x0001 # interrupting from user ? 605 tmh %r8,0x0001 # interrupting from user ?
606 jz ext_skip 606 jz .Lext_skip
607 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 607 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
608ext_skip: 608.Lext_skip:
609 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 609 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
610 stm %r0,%r7,__PT_R0(%r11) 610 stm %r0,%r7,__PT_R0(%r11)
611 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC 611 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -614,29 +614,29 @@ ext_skip:
614 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 614 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
615 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 615 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
616 TRACE_IRQS_OFF 616 TRACE_IRQS_OFF
617 l %r1,BASED(.Ldo_IRQ) 617 l %r1,BASED(.Lc_do_IRQ)
618 lr %r2,%r11 # pass pointer to pt_regs 618 lr %r2,%r11 # pass pointer to pt_regs
619 lhi %r3,EXT_INTERRUPT 619 lhi %r3,EXT_INTERRUPT
620 basr %r14,%r1 # call do_IRQ 620 basr %r14,%r1 # call do_IRQ
621 j io_return 621 j .Lio_return
622 622
623/* 623/*
624 * Load idle PSW. The second "half" of this function is in cleanup_idle. 624 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
625 */ 625 */
626ENTRY(psw_idle) 626ENTRY(psw_idle)
627 st %r3,__SF_EMPTY(%r15) 627 st %r3,__SF_EMPTY(%r15)
628 basr %r1,0 628 basr %r1,0
629 la %r1,psw_idle_lpsw+4-.(%r1) 629 la %r1,.Lpsw_idle_lpsw+4-.(%r1)
630 st %r1,__SF_EMPTY+4(%r15) 630 st %r1,__SF_EMPTY+4(%r15)
631 oi __SF_EMPTY+4(%r15),0x80 631 oi __SF_EMPTY+4(%r15),0x80
632 stck __CLOCK_IDLE_ENTER(%r2) 632 stck __CLOCK_IDLE_ENTER(%r2)
633 stpt __TIMER_IDLE_ENTER(%r2) 633 stpt __TIMER_IDLE_ENTER(%r2)
634psw_idle_lpsw: 634.Lpsw_idle_lpsw:
635 lpsw __SF_EMPTY(%r15) 635 lpsw __SF_EMPTY(%r15)
636 br %r14 636 br %r14
637psw_idle_end: 637.Lpsw_idle_end:
638 638
639__critical_end: 639.L__critical_end:
640 640
641/* 641/*
642 * Machine check handler routines 642 * Machine check handler routines
@@ -650,7 +650,7 @@ ENTRY(mcck_int_handler)
650 l %r13,__LC_SVC_NEW_PSW+4 650 l %r13,__LC_SVC_NEW_PSW+4
651 lm %r8,%r9,__LC_MCK_OLD_PSW 651 lm %r8,%r9,__LC_MCK_OLD_PSW
652 tm __LC_MCCK_CODE,0x80 # system damage? 652 tm __LC_MCCK_CODE,0x80 # system damage?
653 jo mcck_panic # yes -> rest of mcck code invalid 653 jo .Lmcck_panic # yes -> rest of mcck code invalid
654 la %r14,__LC_CPU_TIMER_SAVE_AREA 654 la %r14,__LC_CPU_TIMER_SAVE_AREA
655 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 655 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
656 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 656 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -668,22 +668,22 @@ ENTRY(mcck_int_handler)
6682: spt 0(%r14) 6682: spt 0(%r14)
669 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 669 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6703: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 6703: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
671 jno mcck_panic # no -> skip cleanup critical 671 jno .Lmcck_panic # no -> skip cleanup critical
672 tm %r8,0x0001 # interrupting from user ? 672 tm %r8,0x0001 # interrupting from user ?
673 jz mcck_skip 673 jz .Lmcck_skip
674 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 674 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
675mcck_skip: 675.Lmcck_skip:
676 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 676 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
677 stm %r0,%r7,__PT_R0(%r11) 677 stm %r0,%r7,__PT_R0(%r11)
678 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 678 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
679 stm %r8,%r9,__PT_PSW(%r11) 679 stm %r8,%r9,__PT_PSW(%r11)
680 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 680 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
681 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 681 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
682 l %r1,BASED(.Ldo_machine_check) 682 l %r1,BASED(.Lc_do_machine_check)
683 lr %r2,%r11 # pass pointer to pt_regs 683 lr %r2,%r11 # pass pointer to pt_regs
684 basr %r14,%r1 # call s390_do_machine_check 684 basr %r14,%r1 # call s390_do_machine_check
685 tm __PT_PSW+1(%r11),0x01 # returning to user ? 685 tm __PT_PSW+1(%r11),0x01 # returning to user ?
686 jno mcck_return 686 jno .Lmcck_return
687 l %r1,__LC_KERNEL_STACK # switch to kernel stack 687 l %r1,__LC_KERNEL_STACK # switch to kernel stack
688 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 688 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
689 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 689 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -691,12 +691,12 @@ mcck_skip:
691 lr %r15,%r1 691 lr %r15,%r1
692 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 692 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
693 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING 693 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
694 jno mcck_return 694 jno .Lmcck_return
695 TRACE_IRQS_OFF 695 TRACE_IRQS_OFF
696 l %r1,BASED(.Lhandle_mcck) 696 l %r1,BASED(.Lc_handle_mcck)
697 basr %r14,%r1 # call s390_handle_mcck 697 basr %r14,%r1 # call s390_handle_mcck
698 TRACE_IRQS_ON 698 TRACE_IRQS_ON
699mcck_return: 699.Lmcck_return:
700 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW 700 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
701 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 701 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
702 jno 0f 702 jno 0f
@@ -706,15 +706,15 @@ mcck_return:
7060: lm %r0,%r15,__PT_R0(%r11) 7060: lm %r0,%r15,__PT_R0(%r11)
707 lpsw __LC_RETURN_MCCK_PSW 707 lpsw __LC_RETURN_MCCK_PSW
708 708
709mcck_panic: 709.Lmcck_panic:
710 l %r14,__LC_PANIC_STACK 710 l %r14,__LC_PANIC_STACK
711 slr %r14,%r15 711 slr %r14,%r15
712 sra %r14,PAGE_SHIFT 712 sra %r14,PAGE_SHIFT
713 jz 0f 713 jz 0f
714 l %r15,__LC_PANIC_STACK 714 l %r15,__LC_PANIC_STACK
715 j mcck_skip 715 j .Lmcck_skip
7160: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 7160: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
717 j mcck_skip 717 j .Lmcck_skip
718 718
719# 719#
720# PSW restart interrupt handler 720# PSW restart interrupt handler
@@ -764,58 +764,58 @@ stack_overflow:
7641: .long kernel_stack_overflow 7641: .long kernel_stack_overflow
765#endif 765#endif
766 766
767cleanup_table: 767.Lcleanup_table:
768 .long system_call + 0x80000000 768 .long system_call + 0x80000000
769 .long sysc_do_svc + 0x80000000 769 .long .Lsysc_do_svc + 0x80000000
770 .long sysc_tif + 0x80000000 770 .long .Lsysc_tif + 0x80000000
771 .long sysc_restore + 0x80000000 771 .long .Lsysc_restore + 0x80000000
772 .long sysc_done + 0x80000000 772 .long .Lsysc_done + 0x80000000
773 .long io_tif + 0x80000000 773 .long .Lio_tif + 0x80000000
774 .long io_restore + 0x80000000 774 .long .Lio_restore + 0x80000000
775 .long io_done + 0x80000000 775 .long .Lio_done + 0x80000000
776 .long psw_idle + 0x80000000 776 .long psw_idle + 0x80000000
777 .long psw_idle_end + 0x80000000 777 .long .Lpsw_idle_end + 0x80000000
778 778
779cleanup_critical: 779cleanup_critical:
780 cl %r9,BASED(cleanup_table) # system_call 780 cl %r9,BASED(.Lcleanup_table) # system_call
781 jl 0f 781 jl 0f
782 cl %r9,BASED(cleanup_table+4) # sysc_do_svc 782 cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
783 jl cleanup_system_call 783 jl .Lcleanup_system_call
784 cl %r9,BASED(cleanup_table+8) # sysc_tif 784 cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
785 jl 0f 785 jl 0f
786 cl %r9,BASED(cleanup_table+12) # sysc_restore 786 cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
787 jl cleanup_sysc_tif 787 jl .Lcleanup_sysc_tif
788 cl %r9,BASED(cleanup_table+16) # sysc_done 788 cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
789 jl cleanup_sysc_restore 789 jl .Lcleanup_sysc_restore
790 cl %r9,BASED(cleanup_table+20) # io_tif 790 cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
791 jl 0f 791 jl 0f
792 cl %r9,BASED(cleanup_table+24) # io_restore 792 cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
793 jl cleanup_io_tif 793 jl .Lcleanup_io_tif
794 cl %r9,BASED(cleanup_table+28) # io_done 794 cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
795 jl cleanup_io_restore 795 jl .Lcleanup_io_restore
796 cl %r9,BASED(cleanup_table+32) # psw_idle 796 cl %r9,BASED(.Lcleanup_table+32) # psw_idle
797 jl 0f 797 jl 0f
798 cl %r9,BASED(cleanup_table+36) # psw_idle_end 798 cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
799 jl cleanup_idle 799 jl .Lcleanup_idle
8000: br %r14 8000: br %r14
801 801
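cleanup_critical above is the repair half of the .L__critical_start/.L__critical_end bracket: a machine check that interrupts this range passes the interrupted address in %r9, and the cl/jl ladder classifies it against .Lcleanup_table to pick the fixup that redoes whatever half-finished state was left behind. Reduced to its skeleton, with hypothetical labels not taken from the patch:

	cl	%r9,BASED(.Lc_lo)          # below the guarded range? nothing to do
	jl	0f
	cl	%r9,BASED(.Lc_hi)          # inside it? run the fixup
	jl	.Ldo_fixup
0:	br	%r14
.Lc_lo:	.long	.Lrange_start + 0x80000000
.Lc_hi:	.long	.Lrange_end + 0x80000000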
802cleanup_system_call: 802.Lcleanup_system_call:
803 # check if stpt has been executed 803 # check if stpt has been executed
804 cl %r9,BASED(cleanup_system_call_insn) 804 cl %r9,BASED(.Lcleanup_system_call_insn)
805 jh 0f 805 jh 0f
806 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 806 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
807 chi %r11,__LC_SAVE_AREA_ASYNC 807 chi %r11,__LC_SAVE_AREA_ASYNC
808 je 0f 808 je 0f
809 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 809 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8100: # check if stm has been executed 8100: # check if stm has been executed
811 cl %r9,BASED(cleanup_system_call_insn+4) 811 cl %r9,BASED(.Lcleanup_system_call_insn+4)
812 jh 0f 812 jh 0f
813 mvc __LC_SAVE_AREA_SYNC(32),0(%r11) 813 mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
8140: # set up saved registers r12, and r13 8140: # set up saved registers r12, and r13
815 st %r12,16(%r11) # r12 thread-info pointer 815 st %r12,16(%r11) # r12 thread-info pointer
816 st %r13,20(%r11) # r13 literal-pool pointer 816 st %r13,20(%r11) # r13 literal-pool pointer
817 # check if the user time calculation has been done 817 # check if the user time calculation has been done
818 cl %r9,BASED(cleanup_system_call_insn+8) 818 cl %r9,BASED(.Lcleanup_system_call_insn+8)
819 jh 0f 819 jh 0f
820 l %r10,__LC_EXIT_TIMER 820 l %r10,__LC_EXIT_TIMER
821 l %r15,__LC_EXIT_TIMER+4 821 l %r15,__LC_EXIT_TIMER+4
@@ -824,7 +824,7 @@ cleanup_system_call:
824 st %r10,__LC_USER_TIMER 824 st %r10,__LC_USER_TIMER
825 st %r15,__LC_USER_TIMER+4 825 st %r15,__LC_USER_TIMER+4
8260: # check if the system time calculation has been done 8260: # check if the system time calculation has been done
827 cl %r9,BASED(cleanup_system_call_insn+12) 827 cl %r9,BASED(.Lcleanup_system_call_insn+12)
828 jh 0f 828 jh 0f
829 l %r10,__LC_LAST_UPDATE_TIMER 829 l %r10,__LC_LAST_UPDATE_TIMER
830 l %r15,__LC_LAST_UPDATE_TIMER+4 830 l %r15,__LC_LAST_UPDATE_TIMER+4
@@ -848,20 +848,20 @@ cleanup_system_call:
848 # setup saved register 15 848 # setup saved register 15
849 st %r15,28(%r11) # r15 stack pointer 849 st %r15,28(%r11) # r15 stack pointer
850 # set new psw address and exit 850 # set new psw address and exit
851 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 851 l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
852 br %r14 852 br %r14
853cleanup_system_call_insn: 853.Lcleanup_system_call_insn:
854 .long system_call + 0x80000000 854 .long system_call + 0x80000000
855 .long sysc_stm + 0x80000000 855 .long .Lsysc_stm + 0x80000000
856 .long sysc_vtime + 0x80000000 + 36 856 .long .Lsysc_vtime + 0x80000000 + 36
857 .long sysc_vtime + 0x80000000 + 76 857 .long .Lsysc_vtime + 0x80000000 + 76
858 858
859cleanup_sysc_tif: 859.Lcleanup_sysc_tif:
860 l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 860 l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
861 br %r14 861 br %r14
862 862
863cleanup_sysc_restore: 863.Lcleanup_sysc_restore:
864 cl %r9,BASED(cleanup_sysc_restore_insn) 864 cl %r9,BASED(.Lcleanup_sysc_restore_insn)
865 jhe 0f 865 jhe 0f
866 l %r9,12(%r11) # get saved pointer to pt_regs 866 l %r9,12(%r11) # get saved pointer to pt_regs
867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -869,15 +869,15 @@ cleanup_sysc_restore:
869 lm %r0,%r7,__PT_R0(%r9) 869 lm %r0,%r7,__PT_R0(%r9)
8700: lm %r8,%r9,__LC_RETURN_PSW 8700: lm %r8,%r9,__LC_RETURN_PSW
871 br %r14 871 br %r14
872cleanup_sysc_restore_insn: 872.Lcleanup_sysc_restore_insn:
873 .long sysc_done - 4 + 0x80000000 873 .long .Lsysc_done - 4 + 0x80000000
874 874
875cleanup_io_tif: 875.Lcleanup_io_tif:
876 l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 876 l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
877 br %r14 877 br %r14
878 878
879cleanup_io_restore: 879.Lcleanup_io_restore:
880 cl %r9,BASED(cleanup_io_restore_insn) 880 cl %r9,BASED(.Lcleanup_io_restore_insn)
881 jhe 0f 881 jhe 0f
882 l %r9,12(%r11) # get saved r11 pointer to pt_regs 882 l %r9,12(%r11) # get saved r11 pointer to pt_regs
883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -885,10 +885,10 @@ cleanup_io_restore:
885 lm %r0,%r7,__PT_R0(%r9) 885 lm %r0,%r7,__PT_R0(%r9)
8860: lm %r8,%r9,__LC_RETURN_PSW 8860: lm %r8,%r9,__LC_RETURN_PSW
887 br %r14 887 br %r14
888cleanup_io_restore_insn: 888.Lcleanup_io_restore_insn:
889 .long io_done - 4 + 0x80000000 889 .long .Lio_done - 4 + 0x80000000
890 890
891cleanup_idle: 891.Lcleanup_idle:
892 # copy interrupt clock & cpu timer 892 # copy interrupt clock & cpu timer
893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -897,7 +897,7 @@ cleanup_idle:
897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8990: # check if stck has been executed 8990: # check if stck has been executed
900 cl %r9,BASED(cleanup_idle_insn) 900 cl %r9,BASED(.Lcleanup_idle_insn)
901 jhe 1f 901 jhe 1f
902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) 903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
@@ -913,12 +913,12 @@ cleanup_idle:
913 stm %r9,%r10,__LC_SYSTEM_TIMER 913 stm %r9,%r10,__LC_SYSTEM_TIMER
914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
915 # prepare return psw 915 # prepare return psw
916 n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits 916 n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
917 l %r9,24(%r11) # return from psw_idle 917 l %r9,24(%r11) # return from psw_idle
918 br %r14 918 br %r14
919cleanup_idle_insn: 919.Lcleanup_idle_insn:
920 .long psw_idle_lpsw + 0x80000000 920 .long .Lpsw_idle_lpsw + 0x80000000
921cleanup_idle_wait: 921.Lcleanup_idle_wait:
922 .long 0xfcfdffff 922 .long 0xfcfdffff
923 923
924/* 924/*
@@ -933,30 +933,30 @@ cleanup_idle_wait:
933/* 933/*
934 * Symbol constants 934 * Symbol constants
935 */ 935 */
936.Ldo_machine_check: .long s390_do_machine_check 936.Lc_do_machine_check: .long s390_do_machine_check
937.Lhandle_mcck: .long s390_handle_mcck 937.Lc_handle_mcck: .long s390_handle_mcck
938.Ldo_IRQ: .long do_IRQ 938.Lc_do_IRQ: .long do_IRQ
939.Ldo_signal: .long do_signal 939.Lc_do_signal: .long do_signal
940.Ldo_notify_resume: .long do_notify_resume 940.Lc_do_notify_resume: .long do_notify_resume
941.Ldo_per_trap: .long do_per_trap 941.Lc_do_per_trap: .long do_per_trap
942.Ljump_table: .long pgm_check_table 942.Lc_jump_table: .long pgm_check_table
943.Lschedule: .long schedule 943.Lc_schedule: .long schedule
944#ifdef CONFIG_PREEMPT 944#ifdef CONFIG_PREEMPT
945.Lpreempt_irq: .long preempt_schedule_irq 945.Lc_preempt_irq: .long preempt_schedule_irq
946#endif 946#endif
947.Ltrace_enter: .long do_syscall_trace_enter 947.Lc_trace_enter: .long do_syscall_trace_enter
948.Ltrace_exit: .long do_syscall_trace_exit 948.Lc_trace_exit: .long do_syscall_trace_exit
949.Lschedule_tail: .long schedule_tail 949.Lc_schedule_tail: .long schedule_tail
950.Lsysc_per: .long sysc_per + 0x80000000 950.Lc_sysc_per: .long .Lsysc_per + 0x80000000
951#ifdef CONFIG_TRACE_IRQFLAGS 951#ifdef CONFIG_TRACE_IRQFLAGS
952.Lhardirqs_on: .long trace_hardirqs_on_caller 952.Lc_hardirqs_on: .long trace_hardirqs_on_caller
953.Lhardirqs_off: .long trace_hardirqs_off_caller 953.Lc_hardirqs_off: .long trace_hardirqs_off_caller
954#endif 954#endif
955#ifdef CONFIG_LOCKDEP 955#ifdef CONFIG_LOCKDEP
956.Llockdep_sys_exit: .long lockdep_sys_exit 956.Lc_lockdep_sys_exit: .long lockdep_sys_exit
957#endif 957#endif
958.Lcritical_start: .long __critical_start + 0x80000000 958.Lc_critical_start: .long .L__critical_start + 0x80000000
959.Lcritical_length: .long __critical_end - __critical_start 959.Lc_critical_length: .long .L__critical_end - .L__critical_start
960 960
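The .Lc_ entries above form this file's literal pool. 31-bit code keeps %r13 pointing at a known base inside the text, and the BASED() macro turns a pool label into a %r13-relative displacement, so calling an out-of-reach function is always a load followed by basr; only values destined for a PSW carry + 0x80000000, which sets the addressing-mode bit. The idiom in isolation, with a hypothetical callee:

	l	%r1,BASED(.Lc_helper)      # fetch the address from the pool
	basr	%r14,%r1                   # call it, return address in %r14
	...
.Lc_helper:	.long	helper             # plain code address, no AM bit needed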
961 .section .rodata, "a" 961 .section .rodata, "a"
962#define SYSCALL(esa,esame,emu) .long esa 962#define SYSCALL(esa,esame,emu) .long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 0554b9771c9f..8e61393c8275 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -74,4 +74,6 @@ struct old_sigaction;
74long sys_s390_personality(unsigned int personality); 74long sys_s390_personality(unsigned int personality);
75long sys_s390_runtime_instr(int command, int signum); 75long sys_s390_runtime_instr(int command, int signum);
76 76
77long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
78long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
77#endif /* _ENTRY_H */ 79#endif /* _ENTRY_H */
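These two prototypes back the new PCI MMIO system calls added elsewhere in this series (syscalls.S and the uapi unistd.h grow matching entries, and the implementation lands in arch/s390/pci/pci_mmio.c per the diffstat). Since the numbers lie above 255, userspace reaches them through the svc 0 convention the entry code handles: call number in %r1, arguments in %r2-%r4, result back in %r2. A hypothetical direct invocation — the syscall number here is an assumption and must be taken from this series' asm/unistd.h:

	lghi	%r1,352              # assumed __NR_s390_pci_mmio_write; verify
	lgr	%r2,%r7              # unsigned long mmio address (prepared in %r7)
	lgr	%r3,%r8              # const void __user * source buffer (in %r8)
	lghi	%r4,8                # size_t length
	svc	0                    # on return, %r2 holds 0 or -errno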
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7b2e03afd017..c329446a951d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
91 .if \reason==1 91 .if \reason==1
92 # Some program interrupts are suppressing (e.g. protection). 92 # Some program interrupts are suppressing (e.g. protection).
93 # We must also check the instruction after SIE in that case. 93 # We must also check the instruction after SIE in that case.
94 # do_protection_exception will rewind to rewind_pad 94 # do_protection_exception will rewind to .Lrewind_pad
95 jh .+42 95 jh .+42
96 .else 96 .else
97 jhe .+42 97 jhe .+42
@@ -192,7 +192,7 @@ ENTRY(__switch_to)
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 193 br %r14
194 194
195__critical_start: 195.L__critical_start:
196/* 196/*
197 * SVC interrupt handler routine. System calls are synchronous events and 197 * SVC interrupt handler routine. System calls are synchronous events and
198 * are executed with interrupts enabled. 198 * are executed with interrupts enabled.
@@ -200,15 +200,15 @@ __critical_start:
200 200
201ENTRY(system_call) 201ENTRY(system_call)
202 stpt __LC_SYNC_ENTER_TIMER 202 stpt __LC_SYNC_ENTER_TIMER
203sysc_stmg: 203.Lsysc_stmg:
204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
205 lg %r10,__LC_LAST_BREAK 205 lg %r10,__LC_LAST_BREAK
206 lg %r12,__LC_THREAD_INFO 206 lg %r12,__LC_THREAD_INFO
207 lghi %r14,_PIF_SYSCALL 207 lghi %r14,_PIF_SYSCALL
208sysc_per: 208.Lsysc_per:
209 lg %r15,__LC_KERNEL_STACK 209 lg %r15,__LC_KERNEL_STACK
210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
211sysc_vtime: 211.Lsysc_vtime:
212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER 212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
213 LAST_BREAK %r13 213 LAST_BREAK %r13
214 stmg %r0,%r7,__PT_R0(%r11) 214 stmg %r0,%r7,__PT_R0(%r11)
@@ -216,39 +216,39 @@ sysc_vtime:
216 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 216 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
218 stg %r14,__PT_FLAGS(%r11) 218 stg %r14,__PT_FLAGS(%r11)
219sysc_do_svc: 219.Lsysc_do_svc:
220 lg %r10,__TI_sysc_table(%r12) # address of system call table 220 lg %r10,__TI_sysc_table(%r12) # address of system call table
221 llgh %r8,__PT_INT_CODE+2(%r11) 221 llgh %r8,__PT_INT_CODE+2(%r11)
222 slag %r8,%r8,2 # shift and test for svc 0 222 slag %r8,%r8,2 # shift and test for svc 0
223 jnz sysc_nr_ok 223 jnz .Lsysc_nr_ok
224 # svc 0: system call number in %r1 224 # svc 0: system call number in %r1
225 llgfr %r1,%r1 # clear high word in r1 225 llgfr %r1,%r1 # clear high word in r1
226 cghi %r1,NR_syscalls 226 cghi %r1,NR_syscalls
227 jnl sysc_nr_ok 227 jnl .Lsysc_nr_ok
228 sth %r1,__PT_INT_CODE+2(%r11) 228 sth %r1,__PT_INT_CODE+2(%r11)
229 slag %r8,%r1,2 229 slag %r8,%r1,2
230sysc_nr_ok: 230.Lsysc_nr_ok:
231 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 231 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
232 stg %r2,__PT_ORIG_GPR2(%r11) 232 stg %r2,__PT_ORIG_GPR2(%r11)
233 stg %r7,STACK_FRAME_OVERHEAD(%r15) 233 stg %r7,STACK_FRAME_OVERHEAD(%r15)
234 lgf %r9,0(%r8,%r10) # get system call add. 234 lgf %r9,0(%r8,%r10) # get system call add.
235 tm __TI_flags+7(%r12),_TIF_TRACE 235 tm __TI_flags+7(%r12),_TIF_TRACE
236 jnz sysc_tracesys 236 jnz .Lsysc_tracesys
237 basr %r14,%r9 # call sys_xxxx 237 basr %r14,%r9 # call sys_xxxx
238 stg %r2,__PT_R2(%r11) # store return value 238 stg %r2,__PT_R2(%r11) # store return value
239 239
240sysc_return: 240.Lsysc_return:
241 LOCKDEP_SYS_EXIT 241 LOCKDEP_SYS_EXIT
242sysc_tif: 242.Lsysc_tif:
243 tm __PT_PSW+1(%r11),0x01 # returning to user ? 243 tm __PT_PSW+1(%r11),0x01 # returning to user ?
244 jno sysc_restore 244 jno .Lsysc_restore
245 tm __PT_FLAGS+7(%r11),_PIF_WORK 245 tm __PT_FLAGS+7(%r11),_PIF_WORK
246 jnz sysc_work 246 jnz .Lsysc_work
247 tm __TI_flags+7(%r12),_TIF_WORK 247 tm __TI_flags+7(%r12),_TIF_WORK
248 jnz sysc_work # check for work 248 jnz .Lsysc_work # check for work
249 tm __LC_CPU_FLAGS+7,_CIF_WORK 249 tm __LC_CPU_FLAGS+7,_CIF_WORK
250 jnz sysc_work 250 jnz .Lsysc_work
251sysc_restore: 251.Lsysc_restore:
252 lg %r14,__LC_VDSO_PER_CPU 252 lg %r14,__LC_VDSO_PER_CPU
253 lmg %r0,%r10,__PT_R0(%r11) 253 lmg %r0,%r10,__PT_R0(%r11)
254 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 254 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -256,101 +256,101 @@ sysc_restore:
256 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 256 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
257 lmg %r11,%r15,__PT_R11(%r11) 257 lmg %r11,%r15,__PT_R11(%r11)
258 lpswe __LC_RETURN_PSW 258 lpswe __LC_RETURN_PSW
259sysc_done: 259.Lsysc_done:
260 260
261# 261#
262# One of the work bits is on. Find out which one. 262# One of the work bits is on. Find out which one.
263# 263#
264sysc_work: 264.Lsysc_work:
265 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 265 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
266 jo sysc_mcck_pending 266 jo .Lsysc_mcck_pending
267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
268 jo sysc_reschedule 268 jo .Lsysc_reschedule
269#ifdef CONFIG_UPROBES 269#ifdef CONFIG_UPROBES
270 tm __TI_flags+7(%r12),_TIF_UPROBE 270 tm __TI_flags+7(%r12),_TIF_UPROBE
271 jo sysc_uprobe_notify 271 jo .Lsysc_uprobe_notify
272#endif 272#endif
273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
274 jo sysc_singlestep 274 jo .Lsysc_singlestep
275 tm __TI_flags+7(%r12),_TIF_SIGPENDING 275 tm __TI_flags+7(%r12),_TIF_SIGPENDING
276 jo sysc_sigpending 276 jo .Lsysc_sigpending
277 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 277 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
278 jo sysc_notify_resume 278 jo .Lsysc_notify_resume
279 tm __LC_CPU_FLAGS+7,_CIF_ASCE 279 tm __LC_CPU_FLAGS+7,_CIF_ASCE
280 jo sysc_uaccess 280 jo .Lsysc_uaccess
281 j sysc_return # beware of critical section cleanup 281 j .Lsysc_return # beware of critical section cleanup
282 282
283# 283#
284# _TIF_NEED_RESCHED is set, call schedule 284# _TIF_NEED_RESCHED is set, call schedule
285# 285#
286sysc_reschedule: 286.Lsysc_reschedule:
287 larl %r14,sysc_return 287 larl %r14,.Lsysc_return
288 jg schedule 288 jg schedule
289 289
290# 290#
291# _CIF_MCCK_PENDING is set, call handler 291# _CIF_MCCK_PENDING is set, call handler
292# 292#
293sysc_mcck_pending: 293.Lsysc_mcck_pending:
294 larl %r14,sysc_return 294 larl %r14,.Lsysc_return
295 jg s390_handle_mcck # TIF bit will be cleared by handler 295 jg s390_handle_mcck # TIF bit will be cleared by handler
296 296
297# 297#
298# _CIF_ASCE is set, load user space asce 298# _CIF_ASCE is set, load user space asce
299# 299#
300sysc_uaccess: 300.Lsysc_uaccess:
301 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 301 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
303 j sysc_return 303 j .Lsysc_return
304 304
305# 305#
306# _TIF_SIGPENDING is set, call do_signal 306# _TIF_SIGPENDING is set, call do_signal
307# 307#
308sysc_sigpending: 308.Lsysc_sigpending:
309 lgr %r2,%r11 # pass pointer to pt_regs 309 lgr %r2,%r11 # pass pointer to pt_regs
310 brasl %r14,do_signal 310 brasl %r14,do_signal
311 tm __PT_FLAGS+7(%r11),_PIF_SYSCALL 311 tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
312 jno sysc_return 312 jno .Lsysc_return
313 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 313 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
314 lg %r10,__TI_sysc_table(%r12) # address of system call table 314 lg %r10,__TI_sysc_table(%r12) # address of system call table
315 lghi %r8,0 # svc 0 returns -ENOSYS 315 lghi %r8,0 # svc 0 returns -ENOSYS
316 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number 316 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
317 cghi %r1,NR_syscalls 317 cghi %r1,NR_syscalls
318 jnl sysc_nr_ok # invalid svc number -> do svc 0 318 jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
319 slag %r8,%r1,2 319 slag %r8,%r1,2
320 j sysc_nr_ok # restart svc 320 j .Lsysc_nr_ok # restart svc
321 321
322# 322#
323# _TIF_NOTIFY_RESUME is set, call do_notify_resume 323# _TIF_NOTIFY_RESUME is set, call do_notify_resume
324# 324#
325sysc_notify_resume: 325.Lsysc_notify_resume:
326 lgr %r2,%r11 # pass pointer to pt_regs 326 lgr %r2,%r11 # pass pointer to pt_regs
327 larl %r14,sysc_return 327 larl %r14,.Lsysc_return
328 jg do_notify_resume 328 jg do_notify_resume
329 329
330# 330#
331# _TIF_UPROBE is set, call uprobe_notify_resume 331# _TIF_UPROBE is set, call uprobe_notify_resume
332# 332#
333#ifdef CONFIG_UPROBES 333#ifdef CONFIG_UPROBES
334sysc_uprobe_notify: 334.Lsysc_uprobe_notify:
335 lgr %r2,%r11 # pass pointer to pt_regs 335 lgr %r2,%r11 # pass pointer to pt_regs
336 larl %r14,sysc_return 336 larl %r14,.Lsysc_return
337 jg uprobe_notify_resume 337 jg uprobe_notify_resume
338#endif 338#endif
339 339
340# 340#
341# _PIF_PER_TRAP is set, call do_per_trap 341# _PIF_PER_TRAP is set, call do_per_trap
342# 342#
343sysc_singlestep: 343.Lsysc_singlestep:
344 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 344 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
345 lgr %r2,%r11 # pass pointer to pt_regs 345 lgr %r2,%r11 # pass pointer to pt_regs
346 larl %r14,sysc_return 346 larl %r14,.Lsysc_return
347 jg do_per_trap 347 jg do_per_trap
348 348
349# 349#
350# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 350# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
351# and after the system call 351# and after the system call
352# 352#
353sysc_tracesys: 353.Lsysc_tracesys:
354 lgr %r2,%r11 # pass pointer to pt_regs 354 lgr %r2,%r11 # pass pointer to pt_regs
355 la %r3,0 355 la %r3,0
356 llgh %r0,__PT_INT_CODE+2(%r11) 356 llgh %r0,__PT_INT_CODE+2(%r11)
@@ -358,20 +358,20 @@ sysc_tracesys:
358 brasl %r14,do_syscall_trace_enter 358 brasl %r14,do_syscall_trace_enter
359 lghi %r0,NR_syscalls 359 lghi %r0,NR_syscalls
360 clgr %r0,%r2 360 clgr %r0,%r2
361 jnh sysc_tracenogo 361 jnh .Lsysc_tracenogo
362 sllg %r8,%r2,2 362 sllg %r8,%r2,2
363 lgf %r9,0(%r8,%r10) 363 lgf %r9,0(%r8,%r10)
364sysc_tracego: 364.Lsysc_tracego:
365 lmg %r3,%r7,__PT_R3(%r11) 365 lmg %r3,%r7,__PT_R3(%r11)
366 stg %r7,STACK_FRAME_OVERHEAD(%r15) 366 stg %r7,STACK_FRAME_OVERHEAD(%r15)
367 lg %r2,__PT_ORIG_GPR2(%r11) 367 lg %r2,__PT_ORIG_GPR2(%r11)
368 basr %r14,%r9 # call sys_xxx 368 basr %r14,%r9 # call sys_xxx
369 stg %r2,__PT_R2(%r11) # store return value 369 stg %r2,__PT_R2(%r11) # store return value
370sysc_tracenogo: 370.Lsysc_tracenogo:
371 tm __TI_flags+7(%r12),_TIF_TRACE 371 tm __TI_flags+7(%r12),_TIF_TRACE
372 jz sysc_return 372 jz .Lsysc_return
373 lgr %r2,%r11 # pass pointer to pt_regs 373 lgr %r2,%r11 # pass pointer to pt_regs
374 larl %r14,sysc_return 374 larl %r14,.Lsysc_return
375 jg do_syscall_trace_exit 375 jg do_syscall_trace_exit
376 376
377# 377#
@@ -384,13 +384,13 @@ ENTRY(ret_from_fork)
384 TRACE_IRQS_ON 384 TRACE_IRQS_ON
385 ssm __LC_SVC_NEW_PSW # reenable interrupts 385 ssm __LC_SVC_NEW_PSW # reenable interrupts
386 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 386 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
387 jne sysc_tracenogo 387 jne .Lsysc_tracenogo
388 # it's a kernel thread 388 # it's a kernel thread
389 lmg %r9,%r10,__PT_R9(%r11) # load gprs 389 lmg %r9,%r10,__PT_R9(%r11) # load gprs
390ENTRY(kernel_thread_starter) 390ENTRY(kernel_thread_starter)
391 la %r2,0(%r10) 391 la %r2,0(%r10)
392 basr %r14,%r9 392 basr %r14,%r9
393 j sysc_tracenogo 393 j .Lsysc_tracenogo
394 394
395/* 395/*
396 * Program check handler routine 396 * Program check handler routine
@@ -409,7 +409,7 @@ ENTRY(pgm_check_handler)
409 tmhh %r8,0x4000 # PER bit set in old PSW ? 409 tmhh %r8,0x4000 # PER bit set in old PSW ?
410 jnz 0f # -> enabled, can't be a double fault 410 jnz 0f # -> enabled, can't be a double fault
411 tm __LC_PGM_ILC+3,0x80 # check for per exception 411 tm __LC_PGM_ILC+3,0x80 # check for per exception
412 jnz pgm_svcper # -> single stepped svc 412 jnz .Lpgm_svcper # -> single stepped svc
4130: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 4130: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
414 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 414 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
415 j 2f 415 j 2f
@@ -432,7 +432,7 @@ ENTRY(pgm_check_handler)
432 tm __LC_PGM_ILC+3,0x80 # check for per exception 432 tm __LC_PGM_ILC+3,0x80 # check for per exception
433 jz 0f 433 jz 0f
434 tmhh %r8,0x0001 # kernel per event ? 434 tmhh %r8,0x0001 # kernel per event ?
435 jz pgm_kprobe 435 jz .Lpgm_kprobe
436 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 436 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
437 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 437 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
438 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 438 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
@@ -443,31 +443,31 @@ ENTRY(pgm_check_handler)
443 llgh %r10,__PT_INT_CODE+2(%r11) 443 llgh %r10,__PT_INT_CODE+2(%r11)
444 nill %r10,0x007f 444 nill %r10,0x007f
445 sll %r10,2 445 sll %r10,2
446 je sysc_return 446 je .Lsysc_return
447 lgf %r1,0(%r10,%r1) # load address of handler routine 447 lgf %r1,0(%r10,%r1) # load address of handler routine
448 lgr %r2,%r11 # pass pointer to pt_regs 448 lgr %r2,%r11 # pass pointer to pt_regs
449 basr %r14,%r1 # branch to interrupt-handler 449 basr %r14,%r1 # branch to interrupt-handler
450 j sysc_return 450 j .Lsysc_return
451 451
452# 452#
453# PER event in supervisor state, must be kprobes 453# PER event in supervisor state, must be kprobes
454# 454#
455pgm_kprobe: 455.Lpgm_kprobe:
456 REENABLE_IRQS 456 REENABLE_IRQS
457 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 457 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
458 lgr %r2,%r11 # pass pointer to pt_regs 458 lgr %r2,%r11 # pass pointer to pt_regs
459 brasl %r14,do_per_trap 459 brasl %r14,do_per_trap
460 j sysc_return 460 j .Lsysc_return
461 461
462# 462#
463# single stepped system call 463# single stepped system call
464# 464#
465pgm_svcper: 465.Lpgm_svcper:
466 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 466 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
467 larl %r14,sysc_per 467 larl %r14,.Lsysc_per
468 stg %r14,__LC_RETURN_PSW+8 468 stg %r14,__LC_RETURN_PSW+8
469 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 469 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
470 lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs 470 lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
471 471
472/* 472/*
473 * IO interrupt handler routine 473 * IO interrupt handler routine
@@ -483,10 +483,10 @@ ENTRY(io_int_handler)
483 HANDLE_SIE_INTERCEPT %r14,2 483 HANDLE_SIE_INTERCEPT %r14,2
484 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 484 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
485 tmhh %r8,0x0001 # interrupting from user? 485 tmhh %r8,0x0001 # interrupting from user?
486 jz io_skip 486 jz .Lio_skip
487 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 487 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
488 LAST_BREAK %r14 488 LAST_BREAK %r14
489io_skip: 489.Lio_skip:
490 stmg %r0,%r7,__PT_R0(%r11) 490 stmg %r0,%r7,__PT_R0(%r11)
491 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 491 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
492 stmg %r8,%r9,__PT_PSW(%r11) 492 stmg %r8,%r9,__PT_PSW(%r11)
@@ -494,29 +494,29 @@ io_skip:
494 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 494 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
495 TRACE_IRQS_OFF 495 TRACE_IRQS_OFF
496 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 496 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
497io_loop: 497.Lio_loop:
498 lgr %r2,%r11 # pass pointer to pt_regs 498 lgr %r2,%r11 # pass pointer to pt_regs
499 lghi %r3,IO_INTERRUPT 499 lghi %r3,IO_INTERRUPT
500 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 500 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
501 jz io_call 501 jz .Lio_call
502 lghi %r3,THIN_INTERRUPT 502 lghi %r3,THIN_INTERRUPT
503io_call: 503.Lio_call:
504 brasl %r14,do_IRQ 504 brasl %r14,do_IRQ
505 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR 505 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
506 jz io_return 506 jz .Lio_return
507 tpi 0 507 tpi 0
508 jz io_return 508 jz .Lio_return
509 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 509 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
510 j io_loop 510 j .Lio_loop
511io_return: 511.Lio_return:
512 LOCKDEP_SYS_EXIT 512 LOCKDEP_SYS_EXIT
513 TRACE_IRQS_ON 513 TRACE_IRQS_ON
514io_tif: 514.Lio_tif:
515 tm __TI_flags+7(%r12),_TIF_WORK 515 tm __TI_flags+7(%r12),_TIF_WORK
516 jnz io_work # there is work to do (signals etc.) 516 jnz .Lio_work # there is work to do (signals etc.)
517 tm __LC_CPU_FLAGS+7,_CIF_WORK 517 tm __LC_CPU_FLAGS+7,_CIF_WORK
518 jnz io_work 518 jnz .Lio_work
519io_restore: 519.Lio_restore:
520 lg %r14,__LC_VDSO_PER_CPU 520 lg %r14,__LC_VDSO_PER_CPU
521 lmg %r0,%r10,__PT_R0(%r11) 521 lmg %r0,%r10,__PT_R0(%r11)
522 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 522 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -524,7 +524,7 @@ io_restore:
524 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 524 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
525 lmg %r11,%r15,__PT_R11(%r11) 525 lmg %r11,%r15,__PT_R11(%r11)
526 lpswe __LC_RETURN_PSW 526 lpswe __LC_RETURN_PSW
527io_done: 527.Lio_done:
528 528
529# 529#
530# There is work to do, find out in which context we have been interrupted: 530# There is work to do, find out in which context we have been interrupted:
@@ -535,15 +535,15 @@ io_done:
535# the preemption counter and if it is zero call preempt_schedule_irq 535# the preemption counter and if it is zero call preempt_schedule_irq
536# Before any work can be done, a switch to the kernel stack is required. 536# Before any work can be done, a switch to the kernel stack is required.
537# 537#
538io_work: 538.Lio_work:
539 tm __PT_PSW+1(%r11),0x01 # returning to user ? 539 tm __PT_PSW+1(%r11),0x01 # returning to user ?
540 jo io_work_user # yes -> do resched & signal 540 jo .Lio_work_user # yes -> do resched & signal
541#ifdef CONFIG_PREEMPT 541#ifdef CONFIG_PREEMPT
542 # check for preemptive scheduling 542 # check for preemptive scheduling
543 icm %r0,15,__TI_precount(%r12) 543 icm %r0,15,__TI_precount(%r12)
544 jnz io_restore # preemption is disabled 544 jnz .Lio_restore # preemption is disabled
545 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 545 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
546 jno io_restore 546 jno .Lio_restore
547 # switch to kernel stack 547 # switch to kernel stack
548 lg %r1,__PT_R15(%r11) 548 lg %r1,__PT_R15(%r11)
549 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 549 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -551,19 +551,19 @@ io_work:
551 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 551 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
552 la %r11,STACK_FRAME_OVERHEAD(%r1) 552 la %r11,STACK_FRAME_OVERHEAD(%r1)
553 lgr %r15,%r1 553 lgr %r15,%r1
554 # TRACE_IRQS_ON already done at io_return, call 554 # TRACE_IRQS_ON already done at .Lio_return, call
555 # TRACE_IRQS_OFF to keep things symmetrical 555 # TRACE_IRQS_OFF to keep things symmetrical
556 TRACE_IRQS_OFF 556 TRACE_IRQS_OFF
557 brasl %r14,preempt_schedule_irq 557 brasl %r14,preempt_schedule_irq
558 j io_return 558 j .Lio_return
559#else 559#else
560 j io_restore 560 j .Lio_restore
561#endif 561#endif
562 562
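The CONFIG_PREEMPT branch above packs the decision into two tests: icm with mask 15 both loads the 32-bit preemption count and sets the condition code from it, so a nonzero count (preemption disabled) falls back to .Lio_restore in a single instruction, and only a zero count with _TIF_NEED_RESCHED set reaches preempt_schedule_irq. The icm idiom on its own, with a hypothetical skip label:

	icm	%r0,15,__TI_precount(%r12)  # load word and set cc in one go
	jnz	.Lskip                      # cc != 0: preemption is disabled
	brasl	%r14,preempt_schedule_irq
.Lskip:                                     # hypothetical continuation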
563# 563#
564# Need to do work before returning to userspace, switch to kernel stack 564# Need to do work before returning to userspace, switch to kernel stack
565# 565#
566io_work_user: 566.Lio_work_user:
567 lg %r1,__LC_KERNEL_STACK 567 lg %r1,__LC_KERNEL_STACK
568 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 568 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
569 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 569 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -573,70 +573,70 @@ io_work_user:
573# 573#
574# One of the work bits is on. Find out which one. 574# One of the work bits is on. Find out which one.
575# 575#
576io_work_tif: 576.Lio_work_tif:
577 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 577 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
578 jo io_mcck_pending 578 jo .Lio_mcck_pending
579 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 579 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
580 jo io_reschedule 580 jo .Lio_reschedule
581 tm __TI_flags+7(%r12),_TIF_SIGPENDING 581 tm __TI_flags+7(%r12),_TIF_SIGPENDING
582 jo io_sigpending 582 jo .Lio_sigpending
583 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 583 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
584 jo io_notify_resume 584 jo .Lio_notify_resume
585 tm __LC_CPU_FLAGS+7,_CIF_ASCE 585 tm __LC_CPU_FLAGS+7,_CIF_ASCE
586 jo io_uaccess 586 jo .Lio_uaccess
587 j io_return # beware of critical section cleanup 587 j .Lio_return # beware of critical section cleanup
588 588
589# 589#
590# _CIF_MCCK_PENDING is set, call handler 590# _CIF_MCCK_PENDING is set, call handler
591# 591#
592io_mcck_pending: 592.Lio_mcck_pending:
593 # TRACE_IRQS_ON already done at io_return 593 # TRACE_IRQS_ON already done at .Lio_return
594 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 594 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
595 TRACE_IRQS_OFF 595 TRACE_IRQS_OFF
596 j io_return 596 j .Lio_return
597 597
598# 598#
599# _CIF_ASCE is set, load user space asce 599# _CIF_ASCE is set, load user space asce
600# 600#
601io_uaccess: 601.Lio_uaccess:
602 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 602 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
603 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 603 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
604 j io_return 604 j .Lio_return
605 605
606# 606#
607# _TIF_NEED_RESCHED is set, call schedule 607# _TIF_NEED_RESCHED is set, call schedule
608# 608#
609io_reschedule: 609.Lio_reschedule:
610 # TRACE_IRQS_ON already done at io_return 610 # TRACE_IRQS_ON already done at .Lio_return
611 ssm __LC_SVC_NEW_PSW # reenable interrupts 611 ssm __LC_SVC_NEW_PSW # reenable interrupts
612 brasl %r14,schedule # call scheduler 612 brasl %r14,schedule # call scheduler
613 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 613 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
614 TRACE_IRQS_OFF 614 TRACE_IRQS_OFF
615 j io_return 615 j .Lio_return
616 616
617# 617#
618# _TIF_SIGPENDING is set, call do_signal 618# _TIF_SIGPENDING is set, call do_signal
619# 619#
620io_sigpending: 620.Lio_sigpending:
621 # TRACE_IRQS_ON already done at io_return 621 # TRACE_IRQS_ON already done at .Lio_return
622 ssm __LC_SVC_NEW_PSW # reenable interrupts 622 ssm __LC_SVC_NEW_PSW # reenable interrupts
623 lgr %r2,%r11 # pass pointer to pt_regs 623 lgr %r2,%r11 # pass pointer to pt_regs
624 brasl %r14,do_signal 624 brasl %r14,do_signal
625 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 625 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
626 TRACE_IRQS_OFF 626 TRACE_IRQS_OFF
627 j io_return 627 j .Lio_return
628 628
629# 629#
630# _TIF_NOTIFY_RESUME is set, call do_notify_resume 630# _TIF_NOTIFY_RESUME is set, call do_notify_resume
631# 631#
632io_notify_resume: 632.Lio_notify_resume:
633 # TRACE_IRQS_ON already done at io_return 633 # TRACE_IRQS_ON already done at .Lio_return
634 ssm __LC_SVC_NEW_PSW # reenable interrupts 634 ssm __LC_SVC_NEW_PSW # reenable interrupts
635 lgr %r2,%r11 # pass pointer to pt_regs 635 lgr %r2,%r11 # pass pointer to pt_regs
636 brasl %r14,do_notify_resume 636 brasl %r14,do_notify_resume
637 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 637 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
638 TRACE_IRQS_OFF 638 TRACE_IRQS_OFF
639 j io_return 639 j .Lio_return
640 640
641/* 641/*
642 * External interrupt handler routine 642 * External interrupt handler routine
@@ -652,10 +652,10 @@ ENTRY(ext_int_handler)
652 HANDLE_SIE_INTERCEPT %r14,3 652 HANDLE_SIE_INTERCEPT %r14,3
653 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 653 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
654 tmhh %r8,0x0001 # interrupting from user ? 654 tmhh %r8,0x0001 # interrupting from user ?
655 jz ext_skip 655 jz .Lext_skip
656 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 656 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
657 LAST_BREAK %r14 657 LAST_BREAK %r14
658ext_skip: 658.Lext_skip:
659 stmg %r0,%r7,__PT_R0(%r11) 659 stmg %r0,%r7,__PT_R0(%r11)
660 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 660 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
661 stmg %r8,%r9,__PT_PSW(%r11) 661 stmg %r8,%r9,__PT_PSW(%r11)
@@ -669,23 +669,23 @@ ext_skip:
669 lgr %r2,%r11 # pass pointer to pt_regs 669 lgr %r2,%r11 # pass pointer to pt_regs
670 lghi %r3,EXT_INTERRUPT 670 lghi %r3,EXT_INTERRUPT
671 brasl %r14,do_IRQ 671 brasl %r14,do_IRQ
672 j io_return 672 j .Lio_return
673 673
674/* 674/*
675 * Load idle PSW. The second "half" of this function is in cleanup_idle. 675 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
676 */ 676 */
677ENTRY(psw_idle) 677ENTRY(psw_idle)
678 stg %r3,__SF_EMPTY(%r15) 678 stg %r3,__SF_EMPTY(%r15)
679 larl %r1,psw_idle_lpsw+4 679 larl %r1,.Lpsw_idle_lpsw+4
680 stg %r1,__SF_EMPTY+8(%r15) 680 stg %r1,__SF_EMPTY+8(%r15)
681 STCK __CLOCK_IDLE_ENTER(%r2) 681 STCK __CLOCK_IDLE_ENTER(%r2)
682 stpt __TIMER_IDLE_ENTER(%r2) 682 stpt __TIMER_IDLE_ENTER(%r2)
683psw_idle_lpsw: 683.Lpsw_idle_lpsw:
684 lpswe __SF_EMPTY(%r15) 684 lpswe __SF_EMPTY(%r15)
685 br %r14 685 br %r14
686psw_idle_end: 686.Lpsw_idle_end:
687 687
688__critical_end: 688.L__critical_end:
689 689
690/* 690/*
691 * Machine check handler routines 691 * Machine check handler routines
@@ -701,7 +701,7 @@ ENTRY(mcck_int_handler)
701 lmg %r8,%r9,__LC_MCK_OLD_PSW 701 lmg %r8,%r9,__LC_MCK_OLD_PSW
702 HANDLE_SIE_INTERCEPT %r14,4 702 HANDLE_SIE_INTERCEPT %r14,4
703 tm __LC_MCCK_CODE,0x80 # system damage? 703 tm __LC_MCCK_CODE,0x80 # system damage?
704 jo mcck_panic # yes -> rest of mcck code invalid 704 jo .Lmcck_panic # yes -> rest of mcck code invalid
705 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 705 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
706 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 706 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
707 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 707 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -719,13 +719,13 @@ ENTRY(mcck_int_handler)
7192: spt 0(%r14) 7192: spt 0(%r14)
720 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 720 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7213: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7213: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
722 jno mcck_panic # no -> skip cleanup critical 722 jno .Lmcck_panic # no -> skip cleanup critical
723 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT 723 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
724 tm %r8,0x0001 # interrupting from user ? 724 tm %r8,0x0001 # interrupting from user ?
725 jz mcck_skip 725 jz .Lmcck_skip
726 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 726 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
727 LAST_BREAK %r14 727 LAST_BREAK %r14
728mcck_skip: 728.Lmcck_skip:
729 lghi %r14,__LC_GPREGS_SAVE_AREA+64 729 lghi %r14,__LC_GPREGS_SAVE_AREA+64
730 stmg %r0,%r7,__PT_R0(%r11) 730 stmg %r0,%r7,__PT_R0(%r11)
731 mvc __PT_R8(64,%r11),0(%r14) 731 mvc __PT_R8(64,%r11),0(%r14)
@@ -735,7 +735,7 @@ mcck_skip:
735 lgr %r2,%r11 # pass pointer to pt_regs 735 lgr %r2,%r11 # pass pointer to pt_regs
736 brasl %r14,s390_do_machine_check 736 brasl %r14,s390_do_machine_check
737 tm __PT_PSW+1(%r11),0x01 # returning to user ? 737 tm __PT_PSW+1(%r11),0x01 # returning to user ?
738 jno mcck_return 738 jno .Lmcck_return
739 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 739 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
740 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 740 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
741 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 741 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -743,11 +743,11 @@ mcck_skip:
743 lgr %r15,%r1 743 lgr %r15,%r1
744 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 744 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
745 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 745 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
746 jno mcck_return 746 jno .Lmcck_return
747 TRACE_IRQS_OFF 747 TRACE_IRQS_OFF
748 brasl %r14,s390_handle_mcck 748 brasl %r14,s390_handle_mcck
749 TRACE_IRQS_ON 749 TRACE_IRQS_ON
750mcck_return: 750.Lmcck_return:
751 lg %r14,__LC_VDSO_PER_CPU 751 lg %r14,__LC_VDSO_PER_CPU
752 lmg %r0,%r10,__PT_R0(%r11) 752 lmg %r0,%r10,__PT_R0(%r11)
753 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 753 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
@@ -758,14 +758,14 @@ mcck_return:
7580: lmg %r11,%r15,__PT_R11(%r11) 7580: lmg %r11,%r15,__PT_R11(%r11)
759 lpswe __LC_RETURN_MCCK_PSW 759 lpswe __LC_RETURN_MCCK_PSW
760 760
761mcck_panic: 761.Lmcck_panic:
762 lg %r14,__LC_PANIC_STACK 762 lg %r14,__LC_PANIC_STACK
763 slgr %r14,%r15 763 slgr %r14,%r15
764 srag %r14,%r14,PAGE_SHIFT 764 srag %r14,%r14,PAGE_SHIFT
765 jz 0f 765 jz 0f
766 lg %r15,__LC_PANIC_STACK 766 lg %r15,__LC_PANIC_STACK
7670: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 7670: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
768 j mcck_skip 768 j .Lmcck_skip
769 769
770# 770#
771# PSW restart interrupt handler 771# PSW restart interrupt handler
@@ -815,69 +815,69 @@ stack_overflow:
815#endif 815#endif
816 816
817 .align 8 817 .align 8
818cleanup_table: 818.Lcleanup_table:
819 .quad system_call 819 .quad system_call
820 .quad sysc_do_svc 820 .quad .Lsysc_do_svc
821 .quad sysc_tif 821 .quad .Lsysc_tif
822 .quad sysc_restore 822 .quad .Lsysc_restore
823 .quad sysc_done 823 .quad .Lsysc_done
824 .quad io_tif 824 .quad .Lio_tif
825 .quad io_restore 825 .quad .Lio_restore
826 .quad io_done 826 .quad .Lio_done
827 .quad psw_idle 827 .quad psw_idle
828 .quad psw_idle_end 828 .quad .Lpsw_idle_end
829 829
830cleanup_critical: 830cleanup_critical:
831 clg %r9,BASED(cleanup_table) # system_call 831 clg %r9,BASED(.Lcleanup_table) # system_call
832 jl 0f 832 jl 0f
833 clg %r9,BASED(cleanup_table+8) # sysc_do_svc 833 clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
834 jl cleanup_system_call 834 jl .Lcleanup_system_call
835 clg %r9,BASED(cleanup_table+16) # sysc_tif 835 clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
836 jl 0f 836 jl 0f
837 clg %r9,BASED(cleanup_table+24) # sysc_restore 837 clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
838 jl cleanup_sysc_tif 838 jl .Lcleanup_sysc_tif
839 clg %r9,BASED(cleanup_table+32) # sysc_done 839 clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
840 jl cleanup_sysc_restore 840 jl .Lcleanup_sysc_restore
841 clg %r9,BASED(cleanup_table+40) # io_tif 841 clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
842 jl 0f 842 jl 0f
843 clg %r9,BASED(cleanup_table+48) # io_restore 843 clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
844 jl cleanup_io_tif 844 jl .Lcleanup_io_tif
845 clg %r9,BASED(cleanup_table+56) # io_done 845 clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
846 jl cleanup_io_restore 846 jl .Lcleanup_io_restore
847 clg %r9,BASED(cleanup_table+64) # psw_idle 847 clg %r9,BASED(.Lcleanup_table+64) # psw_idle
848 jl 0f 848 jl 0f
849 clg %r9,BASED(cleanup_table+72) # psw_idle_end 849 clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
850 jl cleanup_idle 850 jl .Lcleanup_idle
8510: br %r14 8510: br %r14
852 852
853 853
854cleanup_system_call: 854.Lcleanup_system_call:
855 # check if stpt has been executed 855 # check if stpt has been executed
856 clg %r9,BASED(cleanup_system_call_insn) 856 clg %r9,BASED(.Lcleanup_system_call_insn)
857 jh 0f 857 jh 0f
858 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 858 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
859 cghi %r11,__LC_SAVE_AREA_ASYNC 859 cghi %r11,__LC_SAVE_AREA_ASYNC
860 je 0f 860 je 0f
861 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 861 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8620: # check if stmg has been executed 8620: # check if stmg has been executed
863 clg %r9,BASED(cleanup_system_call_insn+8) 863 clg %r9,BASED(.Lcleanup_system_call_insn+8)
864 jh 0f 864 jh 0f
865 mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 865 mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
8660: # check if base register setup + TIF bit load has been done 8660: # check if base register setup + TIF bit load has been done
867 clg %r9,BASED(cleanup_system_call_insn+16) 867 clg %r9,BASED(.Lcleanup_system_call_insn+16)
868 jhe 0f 868 jhe 0f
869 # set up saved registers r10 and r12 869 # set up saved registers r10 and r12
870 stg %r10,16(%r11) # r10 last break 870 stg %r10,16(%r11) # r10 last break
871 stg %r12,32(%r11) # r12 thread-info pointer 871 stg %r12,32(%r11) # r12 thread-info pointer
8720: # check if the user time update has been done 8720: # check if the user time update has been done
873 clg %r9,BASED(cleanup_system_call_insn+24) 873 clg %r9,BASED(.Lcleanup_system_call_insn+24)
874 jh 0f 874 jh 0f
875 lg %r15,__LC_EXIT_TIMER 875 lg %r15,__LC_EXIT_TIMER
876 slg %r15,__LC_SYNC_ENTER_TIMER 876 slg %r15,__LC_SYNC_ENTER_TIMER
877 alg %r15,__LC_USER_TIMER 877 alg %r15,__LC_USER_TIMER
878 stg %r15,__LC_USER_TIMER 878 stg %r15,__LC_USER_TIMER
8790: # check if the system time update has been done 8790: # check if the system time update has been done
880 clg %r9,BASED(cleanup_system_call_insn+32) 880 clg %r9,BASED(.Lcleanup_system_call_insn+32)
881 jh 0f 881 jh 0f
882 lg %r15,__LC_LAST_UPDATE_TIMER 882 lg %r15,__LC_LAST_UPDATE_TIMER
883 slg %r15,__LC_EXIT_TIMER 883 slg %r15,__LC_EXIT_TIMER
@@ -904,21 +904,21 @@ cleanup_system_call:
904 # setup saved register r15 904 # setup saved register r15
905 stg %r15,56(%r11) # r15 stack pointer 905 stg %r15,56(%r11) # r15 stack pointer
906 # set new psw address and exit 906 # set new psw address and exit
907 larl %r9,sysc_do_svc 907 larl %r9,.Lsysc_do_svc
908 br %r14 908 br %r14
909cleanup_system_call_insn: 909.Lcleanup_system_call_insn:
910 .quad system_call 910 .quad system_call
911 .quad sysc_stmg 911 .quad .Lsysc_stmg
912 .quad sysc_per 912 .quad .Lsysc_per
913 .quad sysc_vtime+18 913 .quad .Lsysc_vtime+18
914 .quad sysc_vtime+42 914 .quad .Lsysc_vtime+42
915 915
916cleanup_sysc_tif: 916.Lcleanup_sysc_tif:
917 larl %r9,sysc_tif 917 larl %r9,.Lsysc_tif
918 br %r14 918 br %r14
919 919
920cleanup_sysc_restore: 920.Lcleanup_sysc_restore:
921 clg %r9,BASED(cleanup_sysc_restore_insn) 921 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
922 je 0f 922 je 0f
923 lg %r9,24(%r11) # get saved pointer to pt_regs 923 lg %r9,24(%r11) # get saved pointer to pt_regs
924 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 924 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -926,15 +926,15 @@ cleanup_sysc_restore:
926 lmg %r0,%r7,__PT_R0(%r9) 926 lmg %r0,%r7,__PT_R0(%r9)
9270: lmg %r8,%r9,__LC_RETURN_PSW 9270: lmg %r8,%r9,__LC_RETURN_PSW
928 br %r14 928 br %r14
929cleanup_sysc_restore_insn: 929.Lcleanup_sysc_restore_insn:
930 .quad sysc_done - 4 930 .quad .Lsysc_done - 4
931 931
932cleanup_io_tif: 932.Lcleanup_io_tif:
933 larl %r9,io_tif 933 larl %r9,.Lio_tif
934 br %r14 934 br %r14
935 935
936cleanup_io_restore: 936.Lcleanup_io_restore:
937 clg %r9,BASED(cleanup_io_restore_insn) 937 clg %r9,BASED(.Lcleanup_io_restore_insn)
938 je 0f 938 je 0f
939 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 939 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
940 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 940 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -942,10 +942,10 @@ cleanup_io_restore:
942 lmg %r0,%r7,__PT_R0(%r9) 942 lmg %r0,%r7,__PT_R0(%r9)
9430: lmg %r8,%r9,__LC_RETURN_PSW 9430: lmg %r8,%r9,__LC_RETURN_PSW
944 br %r14 944 br %r14
945cleanup_io_restore_insn: 945.Lcleanup_io_restore_insn:
946 .quad io_done - 4 946 .quad .Lio_done - 4
947 947
948cleanup_idle: 948.Lcleanup_idle:
949 # copy interrupt clock & cpu timer 949 # copy interrupt clock & cpu timer
950 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 950 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
951 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 951 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -954,7 +954,7 @@ cleanup_idle:
954 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 954 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
955 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 955 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
9560: # check if stck & stpt have been executed 9560: # check if stck & stpt have been executed
957 clg %r9,BASED(cleanup_idle_insn) 957 clg %r9,BASED(.Lcleanup_idle_insn)
958 jhe 1f 958 jhe 1f
959 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 959 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
960 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 960 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
@@ -973,17 +973,17 @@ cleanup_idle:
973 nihh %r8,0xfcfd # clear irq & wait state bits 973 nihh %r8,0xfcfd # clear irq & wait state bits
974 lg %r9,48(%r11) # return from psw_idle 974 lg %r9,48(%r11) # return from psw_idle
975 br %r14 975 br %r14
976cleanup_idle_insn: 976.Lcleanup_idle_insn:
977 .quad psw_idle_lpsw 977 .quad .Lpsw_idle_lpsw
978 978
979/* 979/*
980 * Integer constants 980 * Integer constants
981 */ 981 */
982 .align 8 982 .align 8
983.Lcritical_start: 983.Lcritical_start:
984 .quad __critical_start 984 .quad .L__critical_start
985.Lcritical_length: 985.Lcritical_length:
986 .quad __critical_end - __critical_start 986 .quad .L__critical_end - .L__critical_start
987 987
988 988
989#if IS_ENABLED(CONFIG_KVM) 989#if IS_ENABLED(CONFIG_KVM)
@@ -1000,25 +1000,25 @@ ENTRY(sie64a)
1000 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 1000 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
1001 lg %r14,__LC_GMAP # get gmap pointer 1001 lg %r14,__LC_GMAP # get gmap pointer
1002 ltgr %r14,%r14 1002 ltgr %r14,%r14
1003 jz sie_gmap 1003 jz .Lsie_gmap
1004 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 1004 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
1005sie_gmap: 1005.Lsie_gmap:
1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer 1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer
1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
1008 tm __SIE_PROG20+3(%r14),1 # last exit... 1008 tm __SIE_PROG20+3(%r14),1 # last exit...
1009 jnz sie_done 1009 jnz .Lsie_done
1010 LPP __SF_EMPTY(%r15) # set guest id 1010 LPP __SF_EMPTY(%r15) # set guest id
1011 sie 0(%r14) 1011 sie 0(%r14)
1012sie_done: 1012.Lsie_done:
1013 LPP __SF_EMPTY+16(%r15) # set host id 1013 LPP __SF_EMPTY+16(%r15) # set host id
1014 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 1014 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
1015 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1015 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1016# some program checks are suppressing. C code (e.g. do_protection_exception) 1016# some program checks are suppressing. C code (e.g. do_protection_exception)
1017# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 1017# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
1018# instructions between sie64a and sie_done should not cause program 1018# instructions between sie64a and .Lsie_done should not cause program
1019# interrupts. So let's use a nop (47 00 00 00) as a landing pad. 1019# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
1020# See also HANDLE_SIE_INTERCEPT 1020# See also HANDLE_SIE_INTERCEPT
1021rewind_pad: 1021.Lrewind_pad:
1022 nop 0 1022 nop 0
1023 .globl sie_exit 1023 .globl sie_exit
1024sie_exit: 1024sie_exit:
@@ -1027,19 +1027,19 @@ sie_exit:
1027 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 1027 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1028 lg %r2,__SF_EMPTY+24(%r15) # return exit reason code 1028 lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
1029 br %r14 1029 br %r14
1030sie_fault: 1030.Lsie_fault:
1031 lghi %r14,-EFAULT 1031 lghi %r14,-EFAULT
1032 stg %r14,__SF_EMPTY+24(%r15) # set exit reason code 1032 stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
1033 j sie_exit 1033 j sie_exit
1034 1034
1035 .align 8 1035 .align 8
1036.Lsie_critical: 1036.Lsie_critical:
1037 .quad sie_gmap 1037 .quad .Lsie_gmap
1038.Lsie_critical_length: 1038.Lsie_critical_length:
1039 .quad sie_done - sie_gmap 1039 .quad .Lsie_done - .Lsie_gmap
1040 1040
1041 EX_TABLE(rewind_pad,sie_fault) 1041 EX_TABLE(.Lrewind_pad,.Lsie_fault)
1042 EX_TABLE(sie_exit,sie_fault) 1042 EX_TABLE(sie_exit,.Lsie_fault)
1043#endif 1043#endif
1044 1044
1045 .section .rodata, "a" 1045 .section .rodata, "a"
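
The entry64.S hunks above are mostly mechanical: symbols local to the file gain a .L prefix, which the GNU assembler treats as a local label and omits from the symbol table, so these entry-path internals stop cluttering /proc/kallsyms and stack traces. The one routine worth restating is cleanup_critical, which takes the interrupted address in %r9 and walks the boundary table to pick a fixup routine. A conceptual C rendering of that range dispatch, as a sketch with made-up numeric boundaries standing in for the assembler labels:

    /* Half-open interval dispatch mirroring cleanup_critical; the
     * boundary values are hypothetical, only the logic matches. */
    enum {
            SYSTEM_CALL = 0x100, SYSC_DO_SVC = 0x120, SYSC_TIF = 0x180,
            SYSC_RESTORE = 0x1c0, SYSC_DONE = 0x1e0, IO_TIF = 0x240,
            IO_RESTORE = 0x280, IO_DONE = 0x2a0, PSW_IDLE = 0x300,
            PSW_IDLE_END = 0x320,
    };

    static const char *cleanup_critical(unsigned long addr)
    {
            if (addr < SYSTEM_CALL)  return "none"; /* before critical section */
            if (addr < SYSC_DO_SVC)  return "cleanup_system_call";
            if (addr < SYSC_TIF)     return "none";
            if (addr < SYSC_RESTORE) return "cleanup_sysc_tif";
            if (addr < SYSC_DONE)    return "cleanup_sysc_restore";
            if (addr < IO_TIF)       return "none";
            if (addr < IO_RESTORE)   return "cleanup_io_tif";
            if (addr < IO_DONE)      return "cleanup_io_restore";
            if (addr < PSW_IDLE)     return "none";
            if (addr < PSW_IDLE_END) return "cleanup_idle";
            return "none";                          /* past critical section */
    }
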
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index ca1cabb3a96c..b86bb8823f15 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -7,6 +7,7 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/moduleloader.h>
10#include <linux/hardirq.h> 11#include <linux/hardirq.h>
11#include <linux/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/ftrace.h> 13#include <linux/ftrace.h>
@@ -15,60 +16,39 @@
15#include <linux/kprobes.h> 16#include <linux/kprobes.h>
16#include <trace/syscall.h> 17#include <trace/syscall.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/cacheflush.h>
18#include "entry.h" 20#include "entry.h"
19 21
20void mcount_replace_code(void);
21void ftrace_disable_code(void);
22void ftrace_enable_insn(void);
23
24/* 22/*
25 * The mcount code looks like this: 23 * The mcount code looks like this:
26 * stg %r14,8(%r15) # offset 0 24 * stg %r14,8(%r15) # offset 0
27 * larl %r1,<&counter> # offset 6 25 * larl %r1,<&counter> # offset 6
28 * brasl %r14,_mcount # offset 12 26 * brasl %r14,_mcount # offset 12
29 * lg %r14,8(%r15) # offset 18 27 * lg %r14,8(%r15) # offset 18
30 * Total length is 24 bytes. The complete mcount block initially gets replaced 28 * Total length is 24 bytes. Only the first instruction will be patched
31 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop 29 * by ftrace_make_call / ftrace_make_nop.
32 * only patch the jg/lg instruction within the block.
33 * Note: we do not patch the first instruction to an unconditional branch,
34 * since that would break kprobes/jprobes. It is easier to leave the larl
35 * instruction in and only modify the second instruction.
36 * The enabled ftrace code block looks like this: 30 * The enabled ftrace code block looks like this:
37 * larl %r0,.+24 # offset 0 31 * > brasl %r0,ftrace_caller # offset 0
38 * > lg %r1,__LC_FTRACE_FUNC # offset 6 32 * larl %r1,<&counter> # offset 6
39 * br %r1 # offset 12 33 * brasl %r14,_mcount # offset 12
40 * brcl 0,0 # offset 14 34 * lg %r14,8(%r15) # offset 18
41 * brc 0,0 # offset 20
42 * The ftrace function gets called with a non-standard C function call ABI 35 * The ftrace function gets called with a non-standard C function call ABI
43 * where r0 contains the return address. It is also expected that the called 36 * where r0 contains the return address. It is also expected that the called
44 * function only clobbers r0 and r1, but restores r2-r15. 37 * function only clobbers r0 and r1, but restores r2-r15.
38 * For module code we can't directly jump to ftrace caller, but need a
39 * trampoline (ftrace_plt), which also clobbers r1.
45 * The return point of the ftrace function has offset 24, so execution 40 * The return point of the ftrace function has offset 24, so execution
46 * continues just after the mcount block. 41 * continues just after the mcount block.
47 * larl %r0,.+24 # offset 0 42 * The disabled ftrace code block looks like this:
48 * > jg .+18 # offset 6 43 * > jg .+24 # offset 0
49 * br %r1 # offset 12 44 * larl %r1,<&counter> # offset 6
50 * brcl 0,0 # offset 14 45 * brasl %r14,_mcount # offset 12
51 * brc 0,0 # offset 20 46 * lg %r14,8(%r15) # offset 18
52 * The jg instruction branches to offset 24 to skip as many instructions 47 * The jg instruction branches to offset 24 to skip as many instructions
53 * as possible. 48 * as possible.
54 */ 49 */
55asm( 50
56 " .align 4\n" 51unsigned long ftrace_plt;
57 "mcount_replace_code:\n"
58 " larl %r0,0f\n"
59 "ftrace_disable_code:\n"
60 " jg 0f\n"
61 " br %r1\n"
62 " brcl 0,0\n"
63 " brc 0,0\n"
64 "0:\n"
65 " .align 4\n"
66 "ftrace_enable_insn:\n"
67 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
68
69#define MCOUNT_BLOCK_SIZE 24
70#define MCOUNT_INSN_OFFSET 6
71#define FTRACE_INSN_SIZE 6
72 52
73int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 53int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
74 unsigned long addr) 54 unsigned long addr)
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
79int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 59int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
80 unsigned long addr) 60 unsigned long addr)
81{ 61{
82 /* Initial replacement of the whole mcount block */ 62 struct ftrace_insn insn;
83 if (addr == MCOUNT_ADDR) { 63 unsigned short op;
84 if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, 64 void *from, *to;
85 mcount_replace_code, 65 size_t size;
86 MCOUNT_BLOCK_SIZE)) 66
87 return -EPERM; 67 ftrace_generate_nop_insn(&insn);
88 return 0; 68 size = sizeof(insn);
69 from = &insn;
70 to = (void *) rec->ip;
71 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
72 return -EFAULT;
73 /*
74 * If we find a breakpoint instruction, a kprobe has been placed
75 * at the beginning of the function. We write the constant
76 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
77 * instruction so that the kprobes handler can execute a nop if it
78 * reaches this breakpoint.
79 */
80 if (op == BREAKPOINT_INSTRUCTION) {
81 size -= 2;
82 from += 2;
83 to += 2;
84 insn.disp = KPROBE_ON_FTRACE_NOP;
89 } 85 }
90 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, 86 if (probe_kernel_write(to, from, size))
91 MCOUNT_INSN_SIZE))
92 return -EPERM; 87 return -EPERM;
93 return 0; 88 return 0;
94} 89}
95 90
96int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 91int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
97{ 92{
98 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn, 93 struct ftrace_insn insn;
99 FTRACE_INSN_SIZE)) 94 unsigned short op;
95 void *from, *to;
96 size_t size;
97
98 ftrace_generate_call_insn(&insn, rec->ip);
99 size = sizeof(insn);
100 from = &insn;
101 to = (void *) rec->ip;
102 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
103 return -EFAULT;
104 /*
105 * If we find a breakpoint instruction, a kprobe has been placed
106 * at the beginning of the function. We write the constant
107 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
108 * instruction so that the kprobes handler can execute a brasl if it
109 * reaches this breakpoint.
110 */
111 if (op == BREAKPOINT_INSTRUCTION) {
112 size -= 2;
113 from += 2;
114 to += 2;
115 insn.disp = KPROBE_ON_FTRACE_CALL;
116 }
117 if (probe_kernel_write(to, from, size))
100 return -EPERM; 118 return -EPERM;
101 return 0; 119 return 0;
102} 120}
@@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void)
111 return 0; 129 return 0;
112} 130}
113 131
132static int __init ftrace_plt_init(void)
133{
134 unsigned int *ip;
135
136 ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
137 if (!ftrace_plt)
138 panic("cannot allocate ftrace plt\n");
139 ip = (unsigned int *) ftrace_plt;
140 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
141 ip[1] = 0x100a0004;
142 ip[2] = 0x07f10000;
143 ip[3] = FTRACE_ADDR >> 32;
144 ip[4] = FTRACE_ADDR & 0xffffffff;
145 set_memory_ro(ftrace_plt, 1);
146 return 0;
147}
148device_initcall(ftrace_plt_init);
149
114#ifdef CONFIG_FUNCTION_GRAPH_TRACER 150#ifdef CONFIG_FUNCTION_GRAPH_TRACER
115/* 151/*
116 * Hook the return address and push it in the stack of return addresses 152 * Hook the return address and push it in the stack of return addresses
117 * in current thread info. 153 * in current thread info.
118 */ 154 */
119unsigned long __kprobes prepare_ftrace_return(unsigned long parent, 155unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
120 unsigned long ip)
121{ 156{
122 struct ftrace_graph_ent trace; 157 struct ftrace_graph_ent trace;
123 158
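
For completeness, the five words ftrace_plt_init() stores in the hunk above decode into three instructions plus a 64-bit literal pool entry; restated as commented C (a sketch of what the hex already encodes, with the runtime-filled target zeroed):

    static const unsigned int ftrace_plt_words[5] = {
            0x0d10e310,  /* 0d10: basr %r1,%r0, so %r1 = address of offset 2;
                            e310...: first half of lg %r1,10(%r1) */
            0x100a0004,  /* ...100a0004: the lg reads 8 bytes at offset 2+10 */
            0x07f10000,  /* 07f1: br %r1 jumps to the loaded address; 0000 pads */
            0x00000000,  /* FTRACE_ADDR bits 63..32 (ip[3] above) */
            0x00000000,  /* FTRACE_ADDR bits 31..0  (ip[4] above) */
    };
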
@@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
137out: 172out:
138 return parent; 173 return parent;
139} 174}
175NOKPROBE_SYMBOL(prepare_ftrace_return);
140 176
141/* 177/*
142 * Patch the kernel code at ftrace_graph_caller location. The instruction 178 * Patch the kernel code at ftrace_graph_caller location. The instruction
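
ftrace_generate_nop_insn() and ftrace_generate_call_insn(), called in the ftrace_make_nop/ftrace_make_call hunks above, are introduced elsewhere in this series (in asm/ftrace.h). A minimal sketch of what they plausibly look like, assuming the 6-byte opcode-plus-displacement layout described in the comment; s390 relative branches count halfwords, hence the divisions by two:

    struct ftrace_insn {
            u16 opc;     /* 0xc0f4 == jg (brcl 15,...), 0xc005 == brasl %r0,... */
            s32 disp;    /* signed displacement in halfwords */
    } __packed;

    static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
    {
            insn->opc = 0xc0f4;                     /* jg .+24 */
            insn->disp = MCOUNT_INSN_SIZE / 2;      /* skip the mcount block */
    }

    static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
                                                 unsigned long ip)
    {
            insn->opc = 0xc005;                     /* brasl %r0,ftrace_caller */
            insn->disp = (FTRACE_ADDR - ip) / 2;
    }
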
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 7559f1beab29..7a55c29b0b33 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -19,7 +19,7 @@
19 19
20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
21 21
22void __kprobes enabled_wait(void) 22void enabled_wait(void)
23{ 23{
24 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); 24 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
25 unsigned long long idle_time; 25 unsigned long long idle_time;
@@ -35,31 +35,32 @@ void __kprobes enabled_wait(void)
35 /* Call the assembler magic in entry.S */ 35 /* Call the assembler magic in entry.S */
36 psw_idle(idle, psw_mask); 36 psw_idle(idle, psw_mask);
37 37
38 trace_hardirqs_off();
39
38 /* Account time spent with enabled wait psw loaded as idle time. */ 40 /* Account time spent with enabled wait psw loaded as idle time. */
39 idle->sequence++; 41 write_seqcount_begin(&idle->seqcount);
40 smp_wmb();
41 idle_time = idle->clock_idle_exit - idle->clock_idle_enter; 42 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
42 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; 43 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
43 idle->idle_time += idle_time; 44 idle->idle_time += idle_time;
44 idle->idle_count++; 45 idle->idle_count++;
45 account_idle_time(idle_time); 46 account_idle_time(idle_time);
46 smp_wmb(); 47 write_seqcount_end(&idle->seqcount);
47 idle->sequence++;
48} 48}
49NOKPROBE_SYMBOL(enabled_wait);
49 50
50static ssize_t show_idle_count(struct device *dev, 51static ssize_t show_idle_count(struct device *dev,
51 struct device_attribute *attr, char *buf) 52 struct device_attribute *attr, char *buf)
52{ 53{
53 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 54 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
54 unsigned long long idle_count; 55 unsigned long long idle_count;
55 unsigned int sequence; 56 unsigned int seq;
56 57
57 do { 58 do {
58 sequence = ACCESS_ONCE(idle->sequence); 59 seq = read_seqcount_begin(&idle->seqcount);
59 idle_count = ACCESS_ONCE(idle->idle_count); 60 idle_count = ACCESS_ONCE(idle->idle_count);
60 if (ACCESS_ONCE(idle->clock_idle_enter)) 61 if (ACCESS_ONCE(idle->clock_idle_enter))
61 idle_count++; 62 idle_count++;
62 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 63 } while (read_seqcount_retry(&idle->seqcount, seq));
63 return sprintf(buf, "%llu\n", idle_count); 64 return sprintf(buf, "%llu\n", idle_count);
64} 65}
65DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 66DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev,
69{ 70{
70 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 71 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
71 unsigned long long now, idle_time, idle_enter, idle_exit; 72 unsigned long long now, idle_time, idle_enter, idle_exit;
72 unsigned int sequence; 73 unsigned int seq;
73 74
74 do { 75 do {
75 now = get_tod_clock(); 76 now = get_tod_clock();
76 sequence = ACCESS_ONCE(idle->sequence); 77 seq = read_seqcount_begin(&idle->seqcount);
77 idle_time = ACCESS_ONCE(idle->idle_time); 78 idle_time = ACCESS_ONCE(idle->idle_time);
78 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 79 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
79 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 80 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
80 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 81 } while (read_seqcount_retry(&idle->seqcount, seq));
81 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; 82 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
82 return sprintf(buf, "%llu\n", idle_time >> 12); 83 return sprintf(buf, "%llu\n", idle_time >> 12);
83} 84}
@@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu)
87{ 88{
88 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); 89 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
89 unsigned long long now, idle_enter, idle_exit; 90 unsigned long long now, idle_enter, idle_exit;
90 unsigned int sequence; 91 unsigned int seq;
91 92
92 do { 93 do {
93 now = get_tod_clock(); 94 now = get_tod_clock();
94 sequence = ACCESS_ONCE(idle->sequence); 95 seq = read_seqcount_begin(&idle->seqcount);
95 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 96 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
96 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 97 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
97 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 98 } while (read_seqcount_retry(&idle->seqcount, seq));
98 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; 99 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
99} 100}
100 101
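
The idle.c conversion above swaps a hand-rolled odd/even sequence counter (idle->sequence bracketed by smp_wmb() pairs) for the generic seqcount API, which implements the same protocol with the barriers built in: a writer bumps the count to odd, updates, and bumps it back to even, while readers retry until they see an even count that did not change across their reads. The reader side, as a sketch (assuming the seqcount_t member this series adds to struct s390_idle_data):

    static unsigned long long snapshot_idle_time(struct s390_idle_data *idle)
    {
            unsigned long long t;
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&idle->seqcount);  /* even count */
                    t = idle->idle_time;                         /* speculative */
            } while (read_seqcount_retry(&idle->seqcount, seq)); /* writer ran? */
            return t;
    }
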
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1b8a38ab7861..f238720690f3 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v)
127 for_each_online_cpu(cpu) 127 for_each_online_cpu(cpu)
128 seq_printf(p, "CPU%d ", cpu); 128 seq_printf(p, "CPU%d ", cpu);
129 seq_putc(p, '\n'); 129 seq_putc(p, '\n');
130 goto out;
131 } 130 }
132 if (index < NR_IRQS) { 131 if (index < NR_IRQS) {
133 if (index >= NR_IRQS_BASE) 132 if (index >= NR_IRQS_BASE)
134 goto out; 133 goto out;
135 /* Adjust index to process irqclass_main_desc array entries */
136 index--;
137 seq_printf(p, "%s: ", irqclass_main_desc[index].name); 134 seq_printf(p, "%s: ", irqclass_main_desc[index].name);
138 irq = irqclass_main_desc[index].irq; 135 irq = irqclass_main_desc[index].irq;
139 for_each_online_cpu(cpu) 136 for_each_online_cpu(cpu)
@@ -158,7 +155,7 @@ out:
158 155
159unsigned int arch_dynirq_lower_bound(unsigned int from) 156unsigned int arch_dynirq_lower_bound(unsigned int from)
160{ 157{
161 return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; 158 return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
162} 159}
163 160
164/* 161/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 014d4729b134..1e4c710dfb92 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/hardirq.h> 31#include <linux/hardirq.h>
32#include <linux/ftrace.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/sections.h> 34#include <asm/sections.h>
34#include <asm/dis.h> 35#include <asm/dis.h>
@@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
58 .insn_size = MAX_INSN_SIZE, 59 .insn_size = MAX_INSN_SIZE,
59}; 60};
60 61
61static void __kprobes copy_instruction(struct kprobe *p) 62static void copy_instruction(struct kprobe *p)
62{ 63{
64 unsigned long ip = (unsigned long) p->addr;
63 s64 disp, new_disp; 65 s64 disp, new_disp;
64 u64 addr, new_addr; 66 u64 addr, new_addr;
65 67
66 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); 68 if (ftrace_location(ip) == ip) {
69 /*
70 * If kprobes patches the instruction that is morphed by
71 * ftrace, make sure that kprobes always sees the branch
72 * "jg .+24" that skips the mcount block
73 */
74 ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
75 p->ainsn.is_ftrace_insn = 1;
76 } else
77 memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
78 p->opcode = p->ainsn.insn[0];
67 if (!probe_is_insn_relative_long(p->ainsn.insn)) 79 if (!probe_is_insn_relative_long(p->ainsn.insn))
68 return; 80 return;
69 /* 81 /*
@@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p)
79 new_disp = ((addr + (disp * 2)) - new_addr) / 2; 91 new_disp = ((addr + (disp * 2)) - new_addr) / 2;
80 *(s32 *)&p->ainsn.insn[1] = new_disp; 92 *(s32 *)&p->ainsn.insn[1] = new_disp;
81} 93}
94NOKPROBE_SYMBOL(copy_instruction);
82 95
83static inline int is_kernel_addr(void *addr) 96static inline int is_kernel_addr(void *addr)
84{ 97{
85 return addr < (void *)_end; 98 return addr < (void *)_end;
86} 99}
87 100
88static inline int is_module_addr(void *addr) 101static int s390_get_insn_slot(struct kprobe *p)
89{
90#ifdef CONFIG_64BIT
91 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
92 if (addr < (void *)MODULES_VADDR)
93 return 0;
94 if (addr > (void *)MODULES_END)
95 return 0;
96#endif
97 return 1;
98}
99
100static int __kprobes s390_get_insn_slot(struct kprobe *p)
101{ 102{
102 /* 103 /*
103 * Get an insn slot that is within the same 2GB area as the original 104 * Get an insn slot that is within the same 2GB area as the original
@@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
111 p->ainsn.insn = get_insn_slot(); 112 p->ainsn.insn = get_insn_slot();
112 return p->ainsn.insn ? 0 : -ENOMEM; 113 return p->ainsn.insn ? 0 : -ENOMEM;
113} 114}
115NOKPROBE_SYMBOL(s390_get_insn_slot);
114 116
115static void __kprobes s390_free_insn_slot(struct kprobe *p) 117static void s390_free_insn_slot(struct kprobe *p)
116{ 118{
117 if (!p->ainsn.insn) 119 if (!p->ainsn.insn)
118 return; 120 return;
@@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p)
122 free_insn_slot(p->ainsn.insn, 0); 124 free_insn_slot(p->ainsn.insn, 0);
123 p->ainsn.insn = NULL; 125 p->ainsn.insn = NULL;
124} 126}
127NOKPROBE_SYMBOL(s390_free_insn_slot);
125 128
126int __kprobes arch_prepare_kprobe(struct kprobe *p) 129int arch_prepare_kprobe(struct kprobe *p)
127{ 130{
128 if ((unsigned long) p->addr & 0x01) 131 if ((unsigned long) p->addr & 0x01)
129 return -EINVAL; 132 return -EINVAL;
@@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
132 return -EINVAL; 135 return -EINVAL;
133 if (s390_get_insn_slot(p)) 136 if (s390_get_insn_slot(p))
134 return -ENOMEM; 137 return -ENOMEM;
135 p->opcode = *p->addr;
136 copy_instruction(p); 138 copy_instruction(p);
137 return 0; 139 return 0;
138} 140}
141NOKPROBE_SYMBOL(arch_prepare_kprobe);
139 142
140struct ins_replace_args { 143int arch_check_ftrace_location(struct kprobe *p)
141 kprobe_opcode_t *ptr; 144{
142 kprobe_opcode_t opcode; 145 return 0;
146}
147
148struct swap_insn_args {
149 struct kprobe *p;
150 unsigned int arm_kprobe : 1;
143}; 151};
144 152
145static int __kprobes swap_instruction(void *aref) 153static int swap_instruction(void *data)
146{ 154{
147 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 155 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
148 unsigned long status = kcb->kprobe_status; 156 unsigned long status = kcb->kprobe_status;
149 struct ins_replace_args *args = aref; 157 struct swap_insn_args *args = data;
150 158 struct ftrace_insn new_insn, *insn;
159 struct kprobe *p = args->p;
160 size_t len;
161
162 new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
163 len = sizeof(new_insn.opc);
164 if (!p->ainsn.is_ftrace_insn)
165 goto skip_ftrace;
166 len = sizeof(new_insn);
167 insn = (struct ftrace_insn *) p->addr;
168 if (args->arm_kprobe) {
169 if (is_ftrace_nop(insn))
170 new_insn.disp = KPROBE_ON_FTRACE_NOP;
171 else
172 new_insn.disp = KPROBE_ON_FTRACE_CALL;
173 } else {
174 ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
175 if (insn->disp == KPROBE_ON_FTRACE_NOP)
176 ftrace_generate_nop_insn(&new_insn);
177 }
178skip_ftrace:
151 kcb->kprobe_status = KPROBE_SWAP_INST; 179 kcb->kprobe_status = KPROBE_SWAP_INST;
152 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode)); 180 probe_kernel_write(p->addr, &new_insn, len);
153 kcb->kprobe_status = status; 181 kcb->kprobe_status = status;
154 return 0; 182 return 0;
155} 183}
184NOKPROBE_SYMBOL(swap_instruction);
156 185
157void __kprobes arch_arm_kprobe(struct kprobe *p) 186void arch_arm_kprobe(struct kprobe *p)
158{ 187{
159 struct ins_replace_args args; 188 struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
160 189
161 args.ptr = p->addr;
162 args.opcode = BREAKPOINT_INSTRUCTION;
163 stop_machine(swap_instruction, &args, NULL); 190 stop_machine(swap_instruction, &args, NULL);
164} 191}
192NOKPROBE_SYMBOL(arch_arm_kprobe);
165 193
166void __kprobes arch_disarm_kprobe(struct kprobe *p) 194void arch_disarm_kprobe(struct kprobe *p)
167{ 195{
168 struct ins_replace_args args; 196 struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
169 197
170 args.ptr = p->addr;
171 args.opcode = p->opcode;
172 stop_machine(swap_instruction, &args, NULL); 198 stop_machine(swap_instruction, &args, NULL);
173} 199}
200NOKPROBE_SYMBOL(arch_disarm_kprobe);
174 201
175void __kprobes arch_remove_kprobe(struct kprobe *p) 202void arch_remove_kprobe(struct kprobe *p)
176{ 203{
177 s390_free_insn_slot(p); 204 s390_free_insn_slot(p);
178} 205}
206NOKPROBE_SYMBOL(arch_remove_kprobe);
179 207
180static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, 208static void enable_singlestep(struct kprobe_ctlblk *kcb,
181 struct pt_regs *regs, 209 struct pt_regs *regs,
182 unsigned long ip) 210 unsigned long ip)
183{ 211{
184 struct per_regs per_kprobe; 212 struct per_regs per_kprobe;
185 213
@@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
199 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 227 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
200 regs->psw.addr = ip | PSW_ADDR_AMODE; 228 regs->psw.addr = ip | PSW_ADDR_AMODE;
201} 229}
230NOKPROBE_SYMBOL(enable_singlestep);
202 231
203static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, 232static void disable_singlestep(struct kprobe_ctlblk *kcb,
204 struct pt_regs *regs, 233 struct pt_regs *regs,
205 unsigned long ip) 234 unsigned long ip)
206{ 235{
207 /* Restore control regs and psw mask, set new psw address */ 236 /* Restore control regs and psw mask, set new psw address */
208 __ctl_load(kcb->kprobe_saved_ctl, 9, 11); 237 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
@@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
210 regs->psw.mask |= kcb->kprobe_saved_imask; 239 regs->psw.mask |= kcb->kprobe_saved_imask;
211 regs->psw.addr = ip | PSW_ADDR_AMODE; 240 regs->psw.addr = ip | PSW_ADDR_AMODE;
212} 241}
242NOKPROBE_SYMBOL(disable_singlestep);
213 243
214/* 244/*
215 * Activate a kprobe by storing its pointer to current_kprobe. The 245 * Activate a kprobe by storing its pointer to current_kprobe. The
216 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to 246 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
217 * two kprobes can be active, see KPROBE_REENTER. 247 * two kprobes can be active, see KPROBE_REENTER.
218 */ 248 */
219static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) 249static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
220{ 250{
221 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); 251 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
222 kcb->prev_kprobe.status = kcb->kprobe_status; 252 kcb->prev_kprobe.status = kcb->kprobe_status;
223 __this_cpu_write(current_kprobe, p); 253 __this_cpu_write(current_kprobe, p);
224} 254}
255NOKPROBE_SYMBOL(push_kprobe);
225 256
226/* 257/*
227 * Deactivate a kprobe by backing up to the previous state. If the 258 * Deactivate a kprobe by backing up to the previous state. If the
228 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL, 259 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
229 * for any other state prev_kprobe.kp will be NULL. 260 * for any other state prev_kprobe.kp will be NULL.
230 */ 261 */
231static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) 262static void pop_kprobe(struct kprobe_ctlblk *kcb)
232{ 263{
233 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 264 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
234 kcb->kprobe_status = kcb->prev_kprobe.status; 265 kcb->kprobe_status = kcb->prev_kprobe.status;
235} 266}
267NOKPROBE_SYMBOL(pop_kprobe);
236 268
237void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 269void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
238 struct pt_regs *regs)
239{ 270{
240 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; 271 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
241 272
242 /* Replace the return addr with trampoline addr */ 273 /* Replace the return addr with trampoline addr */
243 regs->gprs[14] = (unsigned long) &kretprobe_trampoline; 274 regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
244} 275}
276NOKPROBE_SYMBOL(arch_prepare_kretprobe);
245 277
246static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, 278static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
247 struct kprobe *p)
248{ 279{
249 switch (kcb->kprobe_status) { 280 switch (kcb->kprobe_status) {
250 case KPROBE_HIT_SSDONE: 281 case KPROBE_HIT_SSDONE:
@@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
264 BUG(); 295 BUG();
265 } 296 }
266} 297}
298NOKPROBE_SYMBOL(kprobe_reenter_check);
267 299
268static int __kprobes kprobe_handler(struct pt_regs *regs) 300static int kprobe_handler(struct pt_regs *regs)
269{ 301{
270 struct kprobe_ctlblk *kcb; 302 struct kprobe_ctlblk *kcb;
271 struct kprobe *p; 303 struct kprobe *p;
@@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
339 preempt_enable_no_resched(); 371 preempt_enable_no_resched();
340 return 0; 372 return 0;
341} 373}
374NOKPROBE_SYMBOL(kprobe_handler);
342 375
343/* 376/*
344 * Function return probe trampoline: 377 * Function return probe trampoline:
@@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void)
355/* 388/*
356 * Called when the probe at the kretprobe trampoline is hit 389 * Called when the probe at the kretprobe trampoline is hit
357 */ 390 */
358static int __kprobes trampoline_probe_handler(struct kprobe *p, 391static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
359 struct pt_regs *regs)
360{ 392{
361 struct kretprobe_instance *ri; 393 struct kretprobe_instance *ri;
362 struct hlist_head *head, empty_rp; 394 struct hlist_head *head, empty_rp;
@@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
444 */ 476 */
445 return 1; 477 return 1;
446} 478}
479NOKPROBE_SYMBOL(trampoline_probe_handler);
447 480
448/* 481/*
449 * Called after single-stepping. p->addr is the address of the 482 * Called after single-stepping. p->addr is the address of the
@@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
453 * single-stepped a copy of the instruction. The address of this 486 * single-stepped a copy of the instruction. The address of this
454 * copy is p->ainsn.insn. 487 * copy is p->ainsn.insn.
455 */ 488 */
456static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 489static void resume_execution(struct kprobe *p, struct pt_regs *regs)
457{ 490{
458 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 491 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
459 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; 492 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
460 int fixup = probe_get_fixup_type(p->ainsn.insn); 493 int fixup = probe_get_fixup_type(p->ainsn.insn);
461 494
495 /* Check if the kprobes location is an enabled ftrace caller */
496 if (p->ainsn.is_ftrace_insn) {
497 struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
498 struct ftrace_insn call_insn;
499
500 ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
501 /*
502 * A kprobe on an enabled ftrace call site actually single
503 * stepped an unconditional branch (ftrace nop equivalent).
504 * Now we need to fixup things and pretend that a brasl r0,...
505 * was executed instead.
506 */
507 if (insn->disp == KPROBE_ON_FTRACE_CALL) {
508 ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
509 regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
510 }
511 }
512
462 if (fixup & FIXUP_PSW_NORMAL) 513 if (fixup & FIXUP_PSW_NORMAL)
463 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; 514 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
464 515
@@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
476 527
477 disable_singlestep(kcb, regs, ip); 528 disable_singlestep(kcb, regs, ip);
478} 529}
530NOKPROBE_SYMBOL(resume_execution);
479 531
480static int __kprobes post_kprobe_handler(struct pt_regs *regs) 532static int post_kprobe_handler(struct pt_regs *regs)
481{ 533{
482 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 534 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
483 struct kprobe *p = kprobe_running(); 535 struct kprobe *p = kprobe_running();
@@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
504 556
505 return 1; 557 return 1;
506} 558}
559NOKPROBE_SYMBOL(post_kprobe_handler);
507 560
508static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) 561static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
509{ 562{
510 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 563 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
511 struct kprobe *p = kprobe_running(); 564 struct kprobe *p = kprobe_running();
@@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
567 } 620 }
568 return 0; 621 return 0;
569} 622}
623NOKPROBE_SYMBOL(kprobe_trap_handler);
570 624
571int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) 625int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
572{ 626{
573 int ret; 627 int ret;
574 628
@@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
579 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); 633 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
580 return ret; 634 return ret;
581} 635}
636NOKPROBE_SYMBOL(kprobe_fault_handler);
582 637
583/* 638/*
584 * Wrapper routine to for handling exceptions. 639 * Wrapper routine to for handling exceptions.
585 */ 640 */
586int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 641int kprobe_exceptions_notify(struct notifier_block *self,
587 unsigned long val, void *data) 642 unsigned long val, void *data)
588{ 643{
589 struct die_args *args = (struct die_args *) data; 644 struct die_args *args = (struct die_args *) data;
590 struct pt_regs *regs = args->regs; 645 struct pt_regs *regs = args->regs;
@@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
616 671
617 return ret; 672 return ret;
618} 673}
674NOKPROBE_SYMBOL(kprobe_exceptions_notify);
619 675
620int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 676int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
621{ 677{
622 struct jprobe *jp = container_of(p, struct jprobe, kp); 678 struct jprobe *jp = container_of(p, struct jprobe, kp);
623 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 679 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
635 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); 691 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
636 return 1; 692 return 1;
637} 693}
694NOKPROBE_SYMBOL(setjmp_pre_handler);
638 695
639void __kprobes jprobe_return(void) 696void jprobe_return(void)
640{ 697{
641 asm volatile(".word 0x0002"); 698 asm volatile(".word 0x0002");
642} 699}
700NOKPROBE_SYMBOL(jprobe_return);
643 701
644int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 702int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
645{ 703{
646 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 704 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
647 unsigned long stack; 705 unsigned long stack;
@@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
655 preempt_enable_no_resched(); 713 preempt_enable_no_resched();
656 return 1; 714 return 1;
657} 715}
716NOKPROBE_SYMBOL(longjmp_break_handler);
658 717
659static struct kprobe trampoline = { 718static struct kprobe trampoline = {
660 .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 719 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
@@ -666,7 +725,8 @@ int __init arch_init_kprobes(void)
666 return register_kprobe(&trampoline); 725 return register_kprobe(&trampoline);
667} 726}
668 727
669int __kprobes arch_trampoline_kprobe(struct kprobe *p) 728int arch_trampoline_kprobe(struct kprobe *p)
670{ 729{
671 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; 730 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
672} 731}
732NOKPROBE_SYMBOL(arch_trampoline_kprobe);
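
Two themes run through the kprobes.c diff above. First, the __kprobes section attribute gives way to NOKPROBE_SYMBOL(), which records the function in a blacklist table instead of relocating its code into a special text section. Second, a kprobed ftrace call site keeps its ftrace state in the displacement field behind the 2-byte breakpoint opcode. The four resulting encodings of a function's first six bytes, as an illustrative classifier (is_ftrace_nop() and the constants come from this series; the helper itself is not part of the patch):

    static const char *ftrace_site_state(struct ftrace_insn *insn)
    {
            if (insn->opc == BREAKPOINT_INSTRUCTION)        /* kprobe armed */
                    return insn->disp == KPROBE_ON_FTRACE_CALL ?
                            "kprobed, ftrace enabled" :
                            "kprobed, ftrace disabled";
            if (is_ftrace_nop(insn))                        /* jg .+24 */
                    return "ftrace disabled";
            return "ftrace enabled";                        /* brasl %r0,... */
    }
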
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4300ea374826..b6dfc5bfcb89 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,6 +27,7 @@ ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller 27 .globl ftrace_regs_caller
28 .set ftrace_regs_caller,ftrace_caller 28 .set ftrace_regs_caller,ftrace_caller
29 lgr %r1,%r15 29 lgr %r1,%r15
30 aghi %r0,MCOUNT_RETURN_FIXUP
30 aghi %r15,-STACK_FRAME_SIZE 31 aghi %r15,-STACK_FRAME_SIZE
31 stg %r1,__SF_BACKCHAIN(%r15) 32 stg %r1,__SF_BACKCHAIN(%r15)
32 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) 33 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
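
With the new scheme, ftrace_caller is entered through the brasl at offset 0 of the 24-byte mcount block, so %r0 arrives holding ip+6, the address right after the brasl. The added aghi advances it so the traced function resumes at ip+24, just past the block. Presumably (the constant is defined in asm/ftrace.h; the values here are an assumption):

    #define MCOUNT_INSN_SIZE    24  /* whole mcount block */
    #define MCOUNT_RETURN_FIXUP 18  /* MCOUNT_INSN_SIZE - 6 (brasl length) */

    /* sketch: the return address ftrace_caller hands back */
    static unsigned long mcount_return(unsigned long r0)
    {
            return r0 + MCOUNT_RETURN_FIXUP;        /* ip+6 -> ip+24 */
    }
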
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24ceda50..3f51cf4e8f02 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
54 */ 54 */
55 local_irq_save(flags); 55 local_irq_save(flags);
56 local_mcck_disable(); 56 local_mcck_disable();
57 /* 57 mcck = *this_cpu_ptr(&cpu_mcck);
58 * Ummm... Does this make sense at all? Copying the percpu struct 58 memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
59 * and then zapping it one statement later?
60 */
61 memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
62 memset(&mcck, 0, sizeof(struct mcck_struct));
63 clear_cpu_flag(CIF_MCCK_PENDING); 59 clear_cpu_flag(CIF_MCCK_PENDING);
64 local_mcck_enable(); 60 local_mcck_enable();
65 local_irq_restore(flags); 61 local_irq_restore(flags);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b878f12a9597..c3f8d157cb0d 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
1383 cpuhw->lsctl.ed = 1; 1383 cpuhw->lsctl.ed = 1;
1384 1384
1385 /* Set in_use flag and store event */ 1385 /* Set in_use flag and store event */
1386 event->hw.idx = 0; /* only one sampling event per CPU supported */
1387 cpuhw->event = event; 1386 cpuhw->event = event;
1388 cpuhw->flags |= PMU_F_IN_USE; 1387 cpuhw->flags |= PMU_F_IN_USE;
1389 1388
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ed84cc224899..aa7a83948c7b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64extern void __kprobes kernel_thread_starter(void); 64extern void kernel_thread_starter(void);
65 65
66/* 66/*
67 * Free current thread data structures etc. 67 * Free current thread data structures etc.
@@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
153 save_fp_ctl(&p->thread.fp_regs.fpc); 153 save_fp_ctl(&p->thread.fp_regs.fpc);
154 save_fp_regs(p->thread.fp_regs.fprs); 154 save_fp_regs(p->thread.fp_regs.fprs);
155 p->thread.fp_regs.pad = 0; 155 p->thread.fp_regs.pad = 0;
156 p->thread.vxrs = NULL;
156 /* Set a new TLS ? */ 157 /* Set a new TLS ? */
157 if (clone_flags & CLONE_SETTLS) { 158 if (clone_flags & CLONE_SETTLS) {
158 unsigned long tls = frame->childregs.gprs[6]; 159 unsigned long tls = frame->childregs.gprs[6];
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 99a567b70d16..eabfb4594517 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
248 */ 248 */
249 tmp = 0; 249 tmp = 0;
250 250
251 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
252 /*
253 * floating point control reg. is in the thread structure
254 */
255 tmp = child->thread.fp_regs.fpc;
256 tmp <<= BITS_PER_LONG - 32;
257
251 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 258 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
252 /* 259 /*
253 * floating point regs. are stored in the thread structure 260 * floating point regs. are either in child->thread.fp_regs
261 * or the child->thread.vxrs array
254 */ 262 */
255 offset = addr - (addr_t) &dummy->regs.fp_regs; 263 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
256 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); 264#ifdef CONFIG_64BIT
257 if (addr == (addr_t) &dummy->regs.fp_regs.fpc) 265 if (child->thread.vxrs)
258 tmp <<= BITS_PER_LONG - 32; 266 tmp = *(addr_t *)
267 ((addr_t) child->thread.vxrs + 2*offset);
268 else
269#endif
270 tmp = *(addr_t *)
271 ((addr_t) &child->thread.fp_regs.fprs + offset);
259 272
260 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 273 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
261 /* 274 /*
@@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
383 */ 396 */
384 return 0; 397 return 0;
385 398
399 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
400 /*
401 * floating point control reg. is in the thread structure
402 */
403 if ((unsigned int) data != 0 ||
404 test_fp_ctl(data >> (BITS_PER_LONG - 32)))
405 return -EINVAL;
406 child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);
407
386 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 408 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
387 /* 409 /*
388 * floating point regs. are stored in the thread structure 410 * floating point regs. are either in child->thread.fp_regs
411 * or the child->thread.vxrs array
389 */ 412 */
390 if (addr == (addr_t) &dummy->regs.fp_regs.fpc) 413 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
391 if ((unsigned int) data != 0 || 414#ifdef CONFIG_64BIT
392 test_fp_ctl(data >> (BITS_PER_LONG - 32))) 415 if (child->thread.vxrs)
393 return -EINVAL; 416 *(addr_t *)((addr_t)
394 offset = addr - (addr_t) &dummy->regs.fp_regs; 417 child->thread.vxrs + 2*offset) = data;
395 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; 418 else
419#endif
420 *(addr_t *)((addr_t)
421 &child->thread.fp_regs.fprs + offset) = data;
396 422
397 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 423 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
398 /* 424 /*
@@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
611 */ 637 */
612 tmp = 0; 638 tmp = 0;
613 639
640 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
641 /*
642 * floating point control reg. is in the thread structure
643 */
644 tmp = child->thread.fp_regs.fpc;
645
614 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 646 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
615 /* 647 /*
616 * floating point regs. are stored in the thread structure 648 * floating point regs. are either in child->thread.fp_regs
649 * or the child->thread.vxrs array
617 */ 650 */
618 offset = addr - (addr_t) &dummy32->regs.fp_regs; 651 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
619 tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); 652#ifdef CONFIG_64BIT
653 if (child->thread.vxrs)
654 tmp = *(__u32 *)
655 ((addr_t) child->thread.vxrs + 2*offset);
656 else
657#endif
658 tmp = *(__u32 *)
659 ((addr_t) &child->thread.fp_regs.fprs + offset);
620 660
621 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 661 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
622 /* 662 /*
@@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child,
722 */ 762 */
723 return 0; 763 return 0;
724 764
725 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 765 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
726 /* 766 /*
727 * floating point regs. are stored in the thread structure 767 * floating point control reg. is in the thread structure
728 */ 768 */
729 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && 769 if (test_fp_ctl(tmp))
730 test_fp_ctl(tmp))
731 return -EINVAL; 770 return -EINVAL;
732 offset = addr - (addr_t) &dummy32->regs.fp_regs; 771 child->thread.fp_regs.fpc = data;
733 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; 772
773 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
774 /*
775 * floating point regs. are either in child->thread.fp_regs
776 * or the child->thread.vxrs array
777 */
778 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
779#ifdef CONFIG_64BIT
780 if (child->thread.vxrs)
781 *(__u32 *)((addr_t)
782 child->thread.vxrs + 2*offset) = tmp;
783 else
784#endif
785 *(__u32 *)((addr_t)
786 &child->thread.fp_regs.fprs + offset) = tmp;
734 787
735 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 788 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
736 /* 789 /*
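
The recurring 2*offset in the ptrace hunks above encodes register aliasing: once a task has used vector instructions, floating-point register n lives in the leftmost eight bytes of the 16-byte vector register Vn in thread.vxrs, so an 8-byte stride into the fprs array becomes a 16-byte stride into the vxrs array. The peek side, as a sketch under that assumption:

    static addr_t peek_fpr(struct task_struct *child, unsigned int n)
    {
            addr_t offset = n * sizeof(addr_t);     /* 8-byte FPR stride */

            if (child->thread.vxrs)         /* FPR n overlays start of Vn */
                    return *(addr_t *)((addr_t) child->thread.vxrs + 2 * offset);
            return *(addr_t *)((addr_t) &child->thread.fp_regs.fprs + offset);
    }
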
@@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target,
1038 return 0; 1091 return 0;
1039} 1092}
1040 1093
1041static int s390_vxrs_active(struct task_struct *target,
1042 const struct user_regset *regset)
1043{
1044 return !!target->thread.vxrs;
1045}
1046
1047static int s390_vxrs_low_get(struct task_struct *target, 1094static int s390_vxrs_low_get(struct task_struct *target,
1048 const struct user_regset *regset, 1095 const struct user_regset *regset,
1049 unsigned int pos, unsigned int count, 1096 unsigned int pos, unsigned int count,
@@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target,
1052 __u64 vxrs[__NUM_VXRS_LOW]; 1099 __u64 vxrs[__NUM_VXRS_LOW];
1053 int i; 1100 int i;
1054 1101
1102 if (!MACHINE_HAS_VX)
1103 return -ENODEV;
1055 if (target->thread.vxrs) { 1104 if (target->thread.vxrs) {
1056 if (target == current) 1105 if (target == current)
1057 save_vx_regs(target->thread.vxrs); 1106 save_vx_regs(target->thread.vxrs);
@@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target,
1070 __u64 vxrs[__NUM_VXRS_LOW]; 1119 __u64 vxrs[__NUM_VXRS_LOW];
1071 int i, rc; 1120 int i, rc;
1072 1121
1122 if (!MACHINE_HAS_VX)
1123 return -ENODEV;
1073 if (!target->thread.vxrs) { 1124 if (!target->thread.vxrs) {
1074 rc = alloc_vector_registers(target); 1125 rc = alloc_vector_registers(target);
1075 if (rc) 1126 if (rc)
@@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
1095{ 1146{
1096 __vector128 vxrs[__NUM_VXRS_HIGH]; 1147 __vector128 vxrs[__NUM_VXRS_HIGH];
1097 1148
1149 if (!MACHINE_HAS_VX)
1150 return -ENODEV;
1098 if (target->thread.vxrs) { 1151 if (target->thread.vxrs) {
1099 if (target == current) 1152 if (target == current)
1100 save_vx_regs(target->thread.vxrs); 1153 save_vx_regs(target->thread.vxrs);
@@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target,
1112{ 1165{
1113 int rc; 1166 int rc;
1114 1167
1168 if (!MACHINE_HAS_VX)
1169 return -ENODEV;
1115 if (!target->thread.vxrs) { 1170 if (!target->thread.vxrs) {
1116 rc = alloc_vector_registers(target); 1171 rc = alloc_vector_registers(target);
1117 if (rc) 1172 if (rc)
@@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = {
1196 .n = __NUM_VXRS_LOW, 1251 .n = __NUM_VXRS_LOW,
1197 .size = sizeof(__u64), 1252 .size = sizeof(__u64),
1198 .align = sizeof(__u64), 1253 .align = sizeof(__u64),
1199 .active = s390_vxrs_active,
1200 .get = s390_vxrs_low_get, 1254 .get = s390_vxrs_low_get,
1201 .set = s390_vxrs_low_set, 1255 .set = s390_vxrs_low_set,
1202 }, 1256 },
@@ -1205,7 +1259,6 @@ static const struct user_regset s390_regsets[] = {
1205 .n = __NUM_VXRS_HIGH, 1259 .n = __NUM_VXRS_HIGH,
1206 .size = sizeof(__vector128), 1260 .size = sizeof(__vector128),
1207 .align = sizeof(__vector128), 1261 .align = sizeof(__vector128),
1208 .active = s390_vxrs_active,
1209 .get = s390_vxrs_high_get, 1262 .get = s390_vxrs_high_get,
1210 .set = s390_vxrs_high_set, 1263 .set = s390_vxrs_high_set,
1211 }, 1264 },
@@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = {
1419 .n = __NUM_VXRS_LOW, 1472 .n = __NUM_VXRS_LOW,
1420 .size = sizeof(__u64), 1473 .size = sizeof(__u64),
1421 .align = sizeof(__u64), 1474 .align = sizeof(__u64),
1422 .active = s390_vxrs_active,
1423 .get = s390_vxrs_low_get, 1475 .get = s390_vxrs_low_get,
1424 .set = s390_vxrs_low_set, 1476 .set = s390_vxrs_low_set,
1425 }, 1477 },
@@ -1428,7 +1480,6 @@ static const struct user_regset s390_compat_regsets[] = {
1428 .n = __NUM_VXRS_HIGH, 1480 .n = __NUM_VXRS_HIGH,
1429 .size = sizeof(__vector128), 1481 .size = sizeof(__vector128),
1430 .align = sizeof(__vector128), 1482 .align = sizeof(__vector128),
1431 .active = s390_vxrs_active,
1432 .get = s390_vxrs_high_get, 1483 .get = s390_vxrs_high_get,
1433 .set = s390_vxrs_high_set, 1484 .set = s390_vxrs_high_set,
1434 }, 1485 },
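The 2*offset arithmetic in the ptrace hunks above follows from the register file layout: with the vector facility installed, floating point register n overlays the high (leftmost) doubleword of the 16-byte vector register n, so a byte offset into the 8-byte-per-entry fprs array doubles when the same register is addressed inside the vxrs array. A minimal user-space sketch of that overlay arithmetic, with illustrative types rather than the kernel's own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-byte vector register, modelled on the kernel's __vector128. */
typedef struct { uint64_t high; uint64_t low; } vector128;

int main(void)
{
	vector128 vxrs[16];	/* vector registers v0..v15 */
	uint64_t fprs[16];	/* floating point registers f0..f15 */
	unsigned long offset;
	int n;

	memset(vxrs, 0, sizeof(vxrs));
	for (n = 0; n < 16; n++)
		fprs[n] = 0x1000 + n;

	/*
	 * fpr n is the high doubleword of vxr n: an 8-byte offset
	 * into fprs becomes a 16-byte offset into vxrs, hence the
	 * kernel's "vxrs + 2*offset".
	 */
	for (n = 0; n < 16; n++) {
		offset = n * sizeof(uint64_t);
		*(uint64_t *)((char *)vxrs + 2 * offset) = fprs[n];
	}

	printf("v3 high half = %#lx\n", (unsigned long)vxrs[3].high);
	return 0;
}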
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e80d9ff9a56d..4e532c67832f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -41,7 +41,6 @@
41#include <linux/ctype.h> 41#include <linux/ctype.h>
42#include <linux/reboot.h> 42#include <linux/reboot.h>
43#include <linux/topology.h> 43#include <linux/topology.h>
44#include <linux/ftrace.h>
45#include <linux/kexec.h> 44#include <linux/kexec.h>
46#include <linux/crash_dump.h> 45#include <linux/crash_dump.h>
47#include <linux/memory.h> 46#include <linux/memory.h>
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void)
356 lc->steal_timer = S390_lowcore.steal_timer; 355 lc->steal_timer = S390_lowcore.steal_timer;
357 lc->last_update_timer = S390_lowcore.last_update_timer; 356 lc->last_update_timer = S390_lowcore.last_update_timer;
358 lc->last_update_clock = S390_lowcore.last_update_clock; 357 lc->last_update_clock = S390_lowcore.last_update_clock;
359 lc->ftrace_func = S390_lowcore.ftrace_func;
360 358
361 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); 359 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
362 restart_stack += ASYNC_SIZE; 360 restart_stack += ASYNC_SIZE;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 0c1a0ff0a558..6a2ac257d98f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; 371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
372 } else { 372 } else {
 373 /* Signal frames without vector registers are short! */ 373 /* Signal frames without vector registers are short! */
374 __u16 __user *svc = (void *) frame + frame_size - 2; 374 __u16 __user *svc = (void __user *) frame + frame_size - 2;
375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) 375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
376 return -EFAULT; 376 return -EFAULT;
377 restorer = (unsigned long) svc | PSW_ADDR_AMODE; 377 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
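The one-character change above is a sparse fix: casting the user-space frame pointer through a plain void pointer drops the __user address-space tag, which sparse (make C=1) flags when the result feeds a __user pointer again. A compilable sketch of the annotation at work, with __user defined away outside checker runs and svc_slot as an illustrative helper rather than kernel code:

#ifdef __CHECKER__
#define __user __attribute__((noderef, address_space(1)))
#else
#define __user
#endif

/* Keep the __user tag across the pointer arithmetic, as the hunk
 * above now does. */
static unsigned short __user *svc_slot(void __user *frame,
				       unsigned long frame_size)
{
	return (unsigned short __user *)((char __user *)frame +
					 frame_size - 2);
}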
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fd9e60101f1..0b499f5cbe19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
236 lc->percpu_offset = __per_cpu_offset[cpu]; 236 lc->percpu_offset = __per_cpu_offset[cpu];
237 lc->kernel_asce = S390_lowcore.kernel_asce; 237 lc->kernel_asce = S390_lowcore.kernel_asce;
238 lc->machine_flags = S390_lowcore.machine_flags; 238 lc->machine_flags = S390_lowcore.machine_flags;
239 lc->ftrace_func = S390_lowcore.ftrace_func;
240 lc->user_timer = lc->system_timer = lc->steal_timer = 0; 239 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
241 __ctl_store(lc->cregs_save_area, 0, 15); 240 __ctl_store(lc->cregs_save_area, 0, 15);
242 save_access_regs((unsigned int *) lc->access_regs_save_area); 241 save_access_regs((unsigned int *) lc->access_regs_save_area);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9f7087fd58de..a2987243bc76 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) 360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ 361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) 362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
363SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
364SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
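Table position determines the syscall number: sys_memfd_create carries the /* 350 */ marker, so sys_bpf is 351 and the two new entries become 352 and 353. The sys_ni_syscall in the first column leaves them unimplemented on 31-bit kernels, while the compat wrappers in the third column serve 31-bit binaries running on a 64-bit kernel. Thin user-space wrappers under those assumptions (the __NR_ values are taken from the table, guarded in case the toolchain headers already define them):

#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_s390_pci_mmio_write
#define __NR_s390_pci_mmio_write 352
#endif
#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read 353
#endif

/* mmio_addr is an address inside an mmap()ed PCI resource; see the
 * new pci_mmio.c at the end of this series. */
static long pci_mmio_write(unsigned long mmio_addr, const void *buf,
			   size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
}

static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}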
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 005d665fe4a5..20660dddb2d6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
61/* 61/*
62 * Scheduler clock - returns current time in nanosec units. 62 * Scheduler clock - returns current time in nanosec units.
63 */ 63 */
64unsigned long long notrace __kprobes sched_clock(void) 64unsigned long long notrace sched_clock(void)
65{ 65{
66 return tod_to_ns(get_tod_clock_monotonic()); 66 return tod_to_ns(get_tod_clock_monotonic());
67} 67}
68NOKPROBE_SYMBOL(sched_clock);
68 69
69/* 70/*
70 * Monotonic_clock - returns # of nanoseconds passed since time_init() 71 * Monotonic_clock - returns # of nanoseconds passed since time_init()
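The sched_clock() hunk is one instance of a conversion this series also applies to traps.c and fault.c below: instead of tagging a function __kprobes, which moves its text into the .kprobes.text section, the function is compiled normally and its address is recorded in the kprobe blacklist via NOKPROBE_SYMBOL(). A minimal kernel-style sketch of the two forms, with my_handler as an illustrative name:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Old form: the section attribute is part of the definition. */
void __kprobes my_handler_old(struct pt_regs *regs)
{
}

/* New form: a plain definition plus a blacklist entry next to it;
 * this keeps the annotation out of the signature and also works
 * for static functions. */
void my_handler(struct pt_regs *regs)
{
}
NOKPROBE_SYMBOL(my_handler);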
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 9ff5ecba26ab..f081cf1157c3 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
49 return; 49 return;
50 if (!printk_ratelimit()) 50 if (!printk_ratelimit())
51 return; 51 return;
52 printk("User process fault: interruption code 0x%X ", regs->int_code); 52 printk("User process fault: interruption code %04x ilc:%d ",
53 regs->int_code & 0xffff, regs->int_code >> 17);
53 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 54 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
54 printk("\n"); 55 printk("\n");
55 show_regs(regs); 56 show_regs(regs);
@@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
87 } 88 }
88} 89}
89 90
90static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code, 91static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
91 char *str)
92{ 92{
93 if (notify_die(DIE_TRAP, str, regs, 0, 93 if (notify_die(DIE_TRAP, str, regs, 0,
94 regs->int_code, si_signo) == NOTIFY_STOP) 94 regs->int_code, si_signo) == NOTIFY_STOP)
95 return; 95 return;
96 do_report_trap(regs, si_signo, si_code, str); 96 do_report_trap(regs, si_signo, si_code, str);
97} 97}
98NOKPROBE_SYMBOL(do_trap);
98 99
99void __kprobes do_per_trap(struct pt_regs *regs) 100void do_per_trap(struct pt_regs *regs)
100{ 101{
101 siginfo_t info; 102 siginfo_t info;
102 103
@@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
111 (void __force __user *) current->thread.per_event.address; 112 (void __force __user *) current->thread.per_event.address;
112 force_sig_info(SIGTRAP, &info, current); 113 force_sig_info(SIGTRAP, &info, current);
113} 114}
115NOKPROBE_SYMBOL(do_per_trap);
114 116
115void default_trap_handler(struct pt_regs *regs) 117void default_trap_handler(struct pt_regs *regs)
116{ 118{
@@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
151 "privileged operation") 153 "privileged operation")
152DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 154DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
153 "special operation exception") 155 "special operation exception")
154DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
155 "translation exception")
156 156
157#ifdef CONFIG_64BIT 157#ifdef CONFIG_64BIT
158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
@@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
179 do_trap(regs, SIGFPE, si_code, "floating point exception"); 179 do_trap(regs, SIGFPE, si_code, "floating point exception");
180} 180}
181 181
182void __kprobes illegal_op(struct pt_regs *regs) 182void translation_exception(struct pt_regs *regs)
183{
184 /* May never happen. */
185 die(regs, "Translation exception");
186}
187
188void illegal_op(struct pt_regs *regs)
183{ 189{
184 siginfo_t info; 190 siginfo_t info;
185 __u8 opcode[6]; 191 __u8 opcode[6];
@@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
252 if (signal) 258 if (signal)
253 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 259 do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
254} 260}
255 261NOKPROBE_SYMBOL(illegal_op);
256 262
257#ifdef CONFIG_MATHEMU 263#ifdef CONFIG_MATHEMU
258void specification_exception(struct pt_regs *regs) 264void specification_exception(struct pt_regs *regs)
@@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs)
469 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); 475 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
470} 476}
471 477
472void __kprobes kernel_stack_overflow(struct pt_regs * regs) 478void kernel_stack_overflow(struct pt_regs *regs)
473{ 479{
474 bust_spinlocks(1); 480 bust_spinlocks(1);
475 printk("Kernel stack overflow.\n"); 481 printk("Kernel stack overflow.\n");
@@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
477 bust_spinlocks(0); 483 bust_spinlocks(0);
478 panic("Corrupt kernel stack, can't continue."); 484 panic("Corrupt kernel stack, can't continue.");
479} 485}
486NOKPROBE_SYMBOL(kernel_stack_overflow);
480 487
481void __init trap_init(void) 488void __init trap_init(void)
482{ 489{
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 55aade49b6d1..6b049ee75a56 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -271,7 +271,7 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
271 case KVM_S390_VM_MEM_CLR_CMMA: 271 case KVM_S390_VM_MEM_CLR_CMMA:
272 mutex_lock(&kvm->lock); 272 mutex_lock(&kvm->lock);
273 idx = srcu_read_lock(&kvm->srcu); 273 idx = srcu_read_lock(&kvm->srcu);
274 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); 274 s390_reset_cmma(kvm->arch.gmap->mm);
275 srcu_read_unlock(&kvm->srcu, idx); 275 srcu_read_unlock(&kvm->srcu, idx);
276 mutex_unlock(&kvm->lock); 276 mutex_unlock(&kvm->lock);
277 ret = 0; 277 ret = 0;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 72bb2dd8b9cd..f47cb0c6d906 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -156,21 +156,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
156 return 0; 156 return 0;
157} 157}
158 158
159static void __skey_check_enable(struct kvm_vcpu *vcpu) 159static int __skey_check_enable(struct kvm_vcpu *vcpu)
160{ 160{
161 int rc = 0;
161 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) 162 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
162 return; 163 return rc;
163 164
164 s390_enable_skey(); 165 rc = s390_enable_skey();
165 trace_kvm_s390_skey_related_inst(vcpu); 166 trace_kvm_s390_skey_related_inst(vcpu);
166 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 167 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
168 return rc;
167} 169}
168 170
169 171
170static int handle_skey(struct kvm_vcpu *vcpu) 172static int handle_skey(struct kvm_vcpu *vcpu)
171{ 173{
172 __skey_check_enable(vcpu); 174 int rc = __skey_check_enable(vcpu);
173 175
176 if (rc)
177 return rc;
174 vcpu->stat.instruction_storage_key++; 178 vcpu->stat.instruction_storage_key++;
175 179
176 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -683,7 +687,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
683 } 687 }
684 688
685 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { 689 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
686 __skey_check_enable(vcpu); 690 int rc = __skey_check_enable(vcpu);
691
692 if (rc)
693 return rc;
687 if (set_guest_storage_key(current->mm, useraddr, 694 if (set_guest_storage_key(current->mm, useraddr,
688 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, 695 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
689 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) 696 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a2b81d6ce8a5..811937bb90be 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -261,8 +261,8 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
261 return; 261 return;
262 if (!printk_ratelimit()) 262 if (!printk_ratelimit())
263 return; 263 return;
 264 printk(KERN_ALERT "User process fault: interruption code 0x%X ", 264 printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
265 regs->int_code); 265 regs->int_code & 0xffff, regs->int_code >> 17);
266 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); 266 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
267 printk(KERN_CONT "\n"); 267 printk(KERN_CONT "\n");
268 printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", 268 printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
@@ -548,7 +548,7 @@ out:
548 return fault; 548 return fault;
549} 549}
550 550
551void __kprobes do_protection_exception(struct pt_regs *regs) 551void do_protection_exception(struct pt_regs *regs)
552{ 552{
553 unsigned long trans_exc_code; 553 unsigned long trans_exc_code;
554 int fault; 554 int fault;
@@ -574,8 +574,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
574 if (unlikely(fault)) 574 if (unlikely(fault))
575 do_fault_error(regs, fault); 575 do_fault_error(regs, fault);
576} 576}
577NOKPROBE_SYMBOL(do_protection_exception);
577 578
578void __kprobes do_dat_exception(struct pt_regs *regs) 579void do_dat_exception(struct pt_regs *regs)
579{ 580{
580 int access, fault; 581 int access, fault;
581 582
@@ -584,6 +585,7 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
584 if (unlikely(fault)) 585 if (unlikely(fault))
585 do_fault_error(regs, fault); 586 do_fault_error(regs, fault);
586} 587}
588NOKPROBE_SYMBOL(do_dat_exception);
587 589
588#ifdef CONFIG_PFAULT 590#ifdef CONFIG_PFAULT
589/* 591/*
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 2a2e35416d2f..2eb34bdfc613 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -176,7 +176,7 @@ static int is_swapped(unsigned long addr)
176 * For swapped prefix pages a new buffer is returned that contains a copy of 176 * For swapped prefix pages a new buffer is returned that contains a copy of
 177 * the absolute memory. The buffer size is at most one page. 177 * the absolute memory. The buffer size is at most one page.
178 */ 178 */
179void *xlate_dev_mem_ptr(unsigned long addr) 179void *xlate_dev_mem_ptr(phys_addr_t addr)
180{ 180{
181 void *bounce = (void *) addr; 181 void *bounce = (void *) addr;
182 unsigned long size; 182 unsigned long size;
@@ -197,7 +197,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
197/* 197/*
198 * Free converted buffer for /dev/mem access (if necessary) 198 * Free converted buffer for /dev/mem access (if necessary)
199 */ 199 */
200void unxlate_dev_mem_ptr(unsigned long addr, void *buf) 200void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
201{ 201{
202 if ((void *) addr != buf) 202 if ((void *) addr != buf)
203 free_page((unsigned long) buf); 203 free_page((unsigned long) buf);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 3fef3b299665..426c9d462d1c 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
120 } 120 }
121} 121}
122 122
123void kernel_map_pages(struct page *page, int numpages, int enable) 123void __kernel_map_pages(struct page *page, int numpages, int enable)
124{ 124{
125 unsigned long address; 125 unsigned long address;
126 int nr, i, j; 126 int nr, i, j;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..71c7eff2c89f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/swapops.h> 20#include <linux/swapops.h>
21#include <linux/ksm.h>
22#include <linux/mman.h>
21 23
22#include <asm/pgtable.h> 24#include <asm/pgtable.h>
23#include <asm/pgalloc.h> 25#include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
750 break; 752 break;
751 /* Walk the process page table, lock and get pte pointer */ 753 /* Walk the process page table, lock and get pte pointer */
752 ptep = get_locked_pte(gmap->mm, addr, &ptl); 754 ptep = get_locked_pte(gmap->mm, addr, &ptl);
753 if (unlikely(!ptep)) 755 VM_BUG_ON(!ptep);
754 continue;
755 /* Set notification bit in the pgste of the pte */ 756 /* Set notification bit in the pgste of the pte */
756 entry = *ptep; 757 entry = *ptep;
757 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) { 758 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
761 gaddr += PAGE_SIZE; 762 gaddr += PAGE_SIZE;
762 len -= PAGE_SIZE; 763 len -= PAGE_SIZE;
763 } 764 }
764 spin_unlock(ptl); 765 pte_unmap_unlock(ptep, ptl);
765 } 766 }
766 up_read(&gmap->mm->mmap_sem); 767 up_read(&gmap->mm->mmap_sem);
767 return rc; 768 return rc;
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
834 __free_page(page); 835 __free_page(page);
835} 836}
836 837
837static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
838 unsigned long addr, unsigned long end, bool init_skey)
839{
840 pte_t *start_pte, *pte;
841 spinlock_t *ptl;
842 pgste_t pgste;
843
844 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
845 pte = start_pte;
846 do {
847 pgste = pgste_get_lock(pte);
848 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
849 if (init_skey) {
850 unsigned long address;
851
852 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
853 PGSTE_GR_BIT | PGSTE_GC_BIT);
854
855 /* skip invalid and not writable pages */
856 if (pte_val(*pte) & _PAGE_INVALID ||
857 !(pte_val(*pte) & _PAGE_WRITE)) {
858 pgste_set_unlock(pte, pgste);
859 continue;
860 }
861
862 address = pte_val(*pte) & PAGE_MASK;
863 page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
864 }
865 pgste_set_unlock(pte, pgste);
866 } while (pte++, addr += PAGE_SIZE, addr != end);
867 pte_unmap_unlock(start_pte, ptl);
868
869 return addr;
870}
871
872static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
873 unsigned long addr, unsigned long end, bool init_skey)
874{
875 unsigned long next;
876 pmd_t *pmd;
877
878 pmd = pmd_offset(pud, addr);
879 do {
880 next = pmd_addr_end(addr, end);
881 if (pmd_none_or_clear_bad(pmd))
882 continue;
883 next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
884 } while (pmd++, addr = next, addr != end);
885
886 return addr;
887}
888
889static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
890 unsigned long addr, unsigned long end, bool init_skey)
891{
892 unsigned long next;
893 pud_t *pud;
894
895 pud = pud_offset(pgd, addr);
896 do {
897 next = pud_addr_end(addr, end);
898 if (pud_none_or_clear_bad(pud))
899 continue;
900 next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
901 } while (pud++, addr = next, addr != end);
902
903 return addr;
904}
905
906void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
907 unsigned long end, bool init_skey)
908{
909 unsigned long addr, next;
910 pgd_t *pgd;
911
912 down_write(&mm->mmap_sem);
913 if (init_skey && mm_use_skey(mm))
914 goto out_up;
915 addr = start;
916 pgd = pgd_offset(mm, addr);
917 do {
918 next = pgd_addr_end(addr, end);
919 if (pgd_none_or_clear_bad(pgd))
920 continue;
921 next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
922 } while (pgd++, addr = next, addr != end);
923 if (init_skey)
924 current->mm->context.use_skey = 1;
925out_up:
926 up_write(&mm->mmap_sem);
927}
928EXPORT_SYMBOL(page_table_reset_pgste);
929
930int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 838int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
931 unsigned long key, bool nq) 839 unsigned long key, bool nq)
932{ 840{
@@ -992,11 +900,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
992 return NULL; 900 return NULL;
993} 901}
994 902
995void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
996 unsigned long end, bool init_skey)
997{
998}
999
1000static inline void page_table_free_pgste(unsigned long *table) 903static inline void page_table_free_pgste(unsigned long *table)
1001{ 904{
1002} 905}
@@ -1347,13 +1250,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
1347 * Enable storage key handling from now on and initialize the storage 1250 * Enable storage key handling from now on and initialize the storage
1348 * keys with the default key. 1251 * keys with the default key.
1349 */ 1252 */
1350void s390_enable_skey(void) 1253static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1254 unsigned long next, struct mm_walk *walk)
1351{ 1255{
1352 page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); 1256 unsigned long ptev;
1257 pgste_t pgste;
1258
1259 pgste = pgste_get_lock(pte);
1260 /*
 1261 * Remove all zero page mappings; after establishing a policy
 1262 * to forbid zero page mappings, subsequent faults for such
 1263 * pages will get fresh anonymous pages.
1264 */
1265 if (is_zero_pfn(pte_pfn(*pte))) {
1266 ptep_flush_direct(walk->mm, addr, pte);
1267 pte_val(*pte) = _PAGE_INVALID;
1268 }
1269 /* Clear storage key */
1270 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1271 PGSTE_GR_BIT | PGSTE_GC_BIT);
1272 ptev = pte_val(*pte);
1273 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1274 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1275 pgste_set_unlock(pte, pgste);
1276 return 0;
1277}
1278
1279int s390_enable_skey(void)
1280{
1281 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1282 struct mm_struct *mm = current->mm;
1283 struct vm_area_struct *vma;
1284 int rc = 0;
1285
1286 down_write(&mm->mmap_sem);
1287 if (mm_use_skey(mm))
1288 goto out_up;
1289
1290 mm->context.use_skey = 1;
1291 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1292 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1293 MADV_UNMERGEABLE, &vma->vm_flags)) {
1294 mm->context.use_skey = 0;
1295 rc = -ENOMEM;
1296 goto out_up;
1297 }
1298 }
1299 mm->def_flags &= ~VM_MERGEABLE;
1300
1301 walk.mm = mm;
1302 walk_page_range(0, TASK_SIZE, &walk);
1303
1304out_up:
1305 up_write(&mm->mmap_sem);
1306 return rc;
1353} 1307}
1354EXPORT_SYMBOL_GPL(s390_enable_skey); 1308EXPORT_SYMBOL_GPL(s390_enable_skey);
1355 1309
1356/* 1310/*
1311 * Reset CMMA state, make all pages stable again.
1312 */
1313static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1314 unsigned long next, struct mm_walk *walk)
1315{
1316 pgste_t pgste;
1317
1318 pgste = pgste_get_lock(pte);
1319 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1320 pgste_set_unlock(pte, pgste);
1321 return 0;
1322}
1323
1324void s390_reset_cmma(struct mm_struct *mm)
1325{
1326 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1327
1328 down_write(&mm->mmap_sem);
1329 walk.mm = mm;
1330 walk_page_range(0, TASK_SIZE, &walk);
1331 up_write(&mm->mmap_sem);
1332}
1333EXPORT_SYMBOL_GPL(s390_reset_cmma);
1334
1335/*
1357 * Test and reset if a guest page is dirty 1336 * Test and reset if a guest page is dirty
1358 */ 1337 */
1359bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) 1338bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
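Both __s390_enable_skey() and __s390_reset_cmma() replace the hand-rolled pgd/pud/pmd/pte loops deleted above with the generic page table walker: fill in an mm_walk with a pte_entry callback, set walk.mm, and let walk_page_range() drive the traversal. A minimal sketch of the same pattern under this kernel's API, with count_present_ptes as an illustrative example rather than kernel code:

#include <linux/mm.h>
#include <linux/sched.h>

static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return aborts the walk */
}

/* Count the present ptes of an address space; mmap_sem must be
 * held across walk_page_range(), any locking beyond that is the
 * callback's business. */
static unsigned long count_present_ptes(struct mm_struct *mm)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry = count_pte,
		.mm = mm,
		.private = &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(0, TASK_SIZE, &walk);
	up_read(&mm->mmap_sem);
	return count;
}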
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index a9e1dc4ae442..805d8b29193a 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,4 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \ 5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
6 pci_event.o pci_debug.o pci_insn.o 6 pci_event.o pci_debug.o pci_insn.o pci_mmio.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 2fa7b14b9c08..3290f11ae1d9 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock);
50 50
51static struct irq_chip zpci_irq_chip = { 51static struct irq_chip zpci_irq_chip = {
52 .name = "zPCI", 52 .name = "zPCI",
53 .irq_unmask = unmask_msi_irq, 53 .irq_unmask = pci_msi_unmask_irq,
54 .irq_mask = mask_msi_irq, 54 .irq_mask = pci_msi_mask_irq,
55}; 55};
56 56
57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -369,8 +369,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
369 369
370 if (type == PCI_CAP_ID_MSI && nvec > 1) 370 if (type == PCI_CAP_ID_MSI && nvec > 1)
371 return 1; 371 return 1;
372 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); 372 msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
373 msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
374 373
375 /* Allocate adapter summary indicator bit */ 374 /* Allocate adapter summary indicator bit */
376 rc = -EIO; 375 rc = -EIO;
@@ -403,7 +402,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
403 msg.data = hwirq; 402 msg.data = hwirq;
404 msg.address_lo = zdev->msi_addr & 0xffffffff; 403 msg.address_lo = zdev->msi_addr & 0xffffffff;
405 msg.address_hi = zdev->msi_addr >> 32; 404 msg.address_hi = zdev->msi_addr >> 32;
406 write_msi_msg(irq, &msg); 405 pci_write_msi_msg(irq, &msg);
407 airq_iv_set_data(zdev->aibv, hwirq, irq); 406 airq_iv_set_data(zdev->aibv, hwirq, irq);
408 hwirq++; 407 hwirq++;
409 } 408 }
@@ -448,9 +447,9 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
448 /* Release MSI interrupts */ 447 /* Release MSI interrupts */
449 list_for_each_entry(msi, &pdev->msi_list, list) { 448 list_for_each_entry(msi, &pdev->msi_list, list) {
450 if (msi->msi_attrib.is_msix) 449 if (msi->msi_attrib.is_msix)
451 default_msix_mask_irq(msi, 1); 450 __pci_msix_desc_mask_irq(msi, 1);
452 else 451 else
453 default_msi_mask_irq(msi, 1, 1); 452 __pci_msi_desc_mask_irq(msi, 1, 1);
454 irq_set_msi_desc(msi->irq, NULL); 453 irq_set_msi_desc(msi->irq, NULL);
455 irq_free_desc(msi->irq); 454 irq_free_desc(msi->irq);
456 msi->msg.address_lo = 0; 455 msi->msg.address_lo = 0;
@@ -474,7 +473,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
474 len = pci_resource_len(pdev, i); 473 len = pci_resource_len(pdev, i);
475 if (!len) 474 if (!len)
476 continue; 475 continue;
477 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); 476 pdev->resource[i].start =
477 (resource_size_t __force) pci_iomap(pdev, i, 0);
478 pdev->resource[i].end = pdev->resource[i].start + len - 1; 478 pdev->resource[i].end = pdev->resource[i].start + len - 1;
479 } 479 }
480} 480}
@@ -489,7 +489,8 @@ static void zpci_unmap_resources(struct zpci_dev *zdev)
489 len = pci_resource_len(pdev, i); 489 len = pci_resource_len(pdev, i);
490 if (!len) 490 if (!len)
491 continue; 491 continue;
492 pci_iounmap(pdev, (void *) pdev->resource[i].start); 492 pci_iounmap(pdev, (void __iomem __force *)
493 pdev->resource[i].start);
493 } 494 }
494} 495}
495 496
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 6e22a247de9b..d6e411ed8b1f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -62,6 +62,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
62 zdev->tlb_refresh = response->refresh; 62 zdev->tlb_refresh = response->refresh;
63 zdev->dma_mask = response->dasm; 63 zdev->dma_mask = response->dasm;
64 zdev->msi_addr = response->msia; 64 zdev->msi_addr = response->msia;
65 zdev->max_msi = response->noi;
65 zdev->fmb_update = response->mui; 66 zdev->fmb_update = response->mui;
66 67
67 switch (response->version) { 68 switch (response->version) {
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index eec598c5939f..3229a2e570df 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -158,10 +158,7 @@ int __init zpci_debug_init(void)
158 158
159void zpci_debug_exit(void) 159void zpci_debug_exit(void)
160{ 160{
161 if (pci_debug_msg_id) 161 debug_unregister(pci_debug_msg_id);
162 debug_unregister(pci_debug_msg_id); 162 debug_unregister(pci_debug_err_id);
163 if (pci_debug_err_id)
164 debug_unregister(pci_debug_err_id);
165
166 debugfs_remove(debugfs_root); 163 debugfs_remove(debugfs_root);
167} 164}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
new file mode 100644
index 000000000000..62c5ea6d8682
--- /dev/null
+++ b/arch/s390/pci/pci_mmio.c
@@ -0,0 +1,115 @@
1/*
2 * Access to PCI I/O memory from user space programs.
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
6 */
7#include <linux/kernel.h>
8#include <linux/syscalls.h>
9#include <linux/init.h>
10#include <linux/mm.h>
11#include <linux/errno.h>
12#include <linux/pci.h>
13
14static long get_pfn(unsigned long user_addr, unsigned long access,
15 unsigned long *pfn)
16{
17 struct vm_area_struct *vma;
18 long ret;
19
20 down_read(&current->mm->mmap_sem);
21 ret = -EINVAL;
22 vma = find_vma(current->mm, user_addr);
23 if (!vma)
24 goto out;
25 ret = -EACCES;
26 if (!(vma->vm_flags & access))
27 goto out;
28 ret = follow_pfn(vma, user_addr, pfn);
29out:
30 up_read(&current->mm->mmap_sem);
31 return ret;
32}
33
34SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
35 const void __user *, user_buffer, size_t, length)
36{
37 u8 local_buf[64];
38 void __iomem *io_addr;
39 void *buf;
40 unsigned long pfn;
41 long ret;
42
43 if (!zpci_is_enabled())
44 return -ENODEV;
45
46 if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
47 return -EINVAL;
48 if (length > 64) {
49 buf = kmalloc(length, GFP_KERNEL);
50 if (!buf)
51 return -ENOMEM;
52 } else
53 buf = local_buf;
54
55 ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
56 if (ret)
57 goto out;
58 io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
59
60 ret = -EFAULT;
61 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
62 goto out;
63
64 if (copy_from_user(buf, user_buffer, length))
65 goto out;
66
67 memcpy_toio(io_addr, buf, length);
68 ret = 0;
69out:
70 if (buf != local_buf)
71 kfree(buf);
72 return ret;
73}
74
75SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
76 void __user *, user_buffer, size_t, length)
77{
78 u8 local_buf[64];
79 void __iomem *io_addr;
80 void *buf;
81 unsigned long pfn;
82 long ret;
83
84 if (!zpci_is_enabled())
85 return -ENODEV;
86
87 if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
88 return -EINVAL;
89 if (length > 64) {
90 buf = kmalloc(length, GFP_KERNEL);
91 if (!buf)
92 return -ENOMEM;
93 } else
94 buf = local_buf;
95
96 ret = get_pfn(mmio_addr, VM_READ, &pfn);
97 if (ret)
98 goto out;
99 io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
100
101 ret = -EFAULT;
102 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
103 goto out;
104
105 memcpy_fromio(buf, io_addr, length);
106
107 if (copy_to_user(user_buffer, buf, length))
108 goto out;
109
110 ret = 0;
111out:
112 if (buf != local_buf)
113 kfree(buf);
114 return ret;
115}
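The intended caller of these two syscalls is a user-space program that has mapped a PCI BAR, for example through the sysfs resource file, and passes an address inside that mapping as mmio_addr; get_pfn() then resolves it to the iomap address that is checked against ZPCI_IOMAP_ADDR_BASE. A hypothetical end-to-end sketch, assuming syscall number 353 from the table above and an illustrative device path:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read 353
#endif

int main(void)
{
	/* Illustrative zPCI function; any mmap-able BAR resource
	 * file would do. */
	int fd = open("/sys/bus/pci/devices/0000:00:00.0/resource0",
		      O_RDWR);
	uint32_t value;
	void *bar;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Read four bytes from the start of the BAR; the kernel
	 * side rejects transfers that would cross a page boundary. */
	if (syscall(__NR_s390_pci_mmio_read, (unsigned long)bar,
		    &value, sizeof(value)) < 0)
		perror("s390_pci_mmio_read");
	else
		printf("BAR bytes 0-3 = %#x\n", value);
	munmap(bar, 4096);
	close(fd);
	return 0;
}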