path: root/arch/s390/include/asm
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/cmpxchg.h   | 240
-rw-r--r--  arch/s390/include/asm/cputime.h   |  46
-rw-r--r--  arch/s390/include/asm/debug.h     |  29
-rw-r--r--  arch/s390/include/asm/ftrace.h    |  54
-rw-r--r--  arch/s390/include/asm/idle.h      |   3
-rw-r--r--  arch/s390/include/asm/io.h        |   9
-rw-r--r--  arch/s390/include/asm/irq.h       |  11
-rw-r--r--  arch/s390/include/asm/kprobes.h   |   1
-rw-r--r--  arch/s390/include/asm/lowcore.h   |   4
-rw-r--r--  arch/s390/include/asm/pci.h       |   5
-rw-r--r--  arch/s390/include/asm/pci_io.h    |   6
-rw-r--r--  arch/s390/include/asm/pgalloc.h   |   2
-rw-r--r--  arch/s390/include/asm/pgtable.h   |  33
-rw-r--r--  arch/s390/include/asm/processor.h |   2
-rw-r--r--  arch/s390/include/asm/spinlock.h  |   9
-rw-r--r--  arch/s390/include/asm/tlb.h       |   1
16 files changed, 173 insertions(+), 282 deletions(-)
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..6259895fcd97 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
-{
-	unsigned long addr, old;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%3\n"
-			"0:	cs	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) ptr)
-			: "d" (x), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return old;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	lg	%0,%3\n"
-			"0:	csg	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=m" (*(long *) ptr)
-			: "d" (x), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return old;
-#endif /* CONFIG_64BIT */
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-#define xchg(ptr, x)						\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__ret = (__typeof__(*(ptr)))				\
-		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
-	__ret;							\
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) __o = (o);					\
+	__typeof__(*(ptr)) __n = (n);					\
+	(__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
 })
 
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long addr, prev, tmp;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xff) << shift),
-			  "d" ((new & 0xff) << shift),
-			  "d" (~(0xff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xffff) << shift),
-			  "d" ((new & 0xffff) << shift),
-			  "d" (~(0xffff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 4:
-		asm volatile(
-			"	cs	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(int *) ptr)
-			: "0" (old), "d" (new), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	csg	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(long *) ptr)
-			: "0" (old), "d" (new), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return prev;
-#endif /* CONFIG_64BIT */
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)					\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__ret = (__typeof__(*(ptr)))				\
-		__cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n),\
-			  sizeof(*(ptr)));			\
-	__ret;							\
-})
+#define cmpxchg64	cmpxchg
+#define cmpxchg_local	cmpxchg
+#define cmpxchg64_local	cmpxchg
 
-#ifdef CONFIG_64BIT
-#define cmpxchg64(ptr, o, n)					\
+#define xchg(ptr, x)						\
 ({								\
-	cmpxchg((ptr), (o), (n));				\
+	__typeof__(ptr) __ptr = (ptr);				\
+	__typeof__(*(ptr)) __old;				\
+	do {							\
+		__old = *__ptr;					\
+	} while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
+	__old;							\
 })
-#else /* CONFIG_64BIT */
-static inline unsigned long long __cmpxchg64(void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register_pair rp_old = {.pair = old};
-	register_pair rp_new = {.pair = new};
-	unsigned long long *ullptr = ptr;
 
-	asm volatile(
-		"	cds	%0,%2,%1"
-		: "+d" (rp_old), "+Q" (*ullptr)
-		: "d" (rp_new)
-		: "memory", "cc");
-	return rp_old.pair;
-}
-
-#define cmpxchg64(ptr, o, n)				\
-({							\
-	__typeof__(*(ptr)) __ret;			\
-	__ret = (__typeof__(*(ptr)))			\
-		__cmpxchg64((ptr),			\
-			    (unsigned long long)(o),	\
-			    (unsigned long long)(n));	\
-	__ret;						\
-})
-#endif /* CONFIG_64BIT */
+#define __HAVE_ARCH_CMPXCHG
 
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn)	\
 ({									\
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
 
 #define system_has_cmpxchg_double()	1
 
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-	case 4:
-#ifdef CONFIG_64BIT
-	case 8:
-#endif
-		return __cmpxchg(ptr, old, new, size);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg_local((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	__ret;								\
-})
-
-#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
-
 #endif /* __ASM_CMPXCHG_H */
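For context, this patch drops the hand-written inline assembly and lets the compiler's atomic builtins emit the compare-and-swap instructions. A minimal user-space sketch (not part of the patch; the variable names are illustrative) of how the resulting cmpxchg()/xchg() semantics behave on an ordinary integer:

#include <assert.h>

/* Simplified stand-ins for the kernel macros above, built on the same
 * GCC __sync builtins used in the new header. */
static unsigned int counter = 5;

int main(void)
{
	/* cmpxchg semantics: returns the old value and stores 6 only if
	 * the current value equals the expected value 5. */
	unsigned int old = __sync_val_compare_and_swap(&counter, 5, 6);
	assert(old == 5 && counter == 6);

	/* xchg semantics: retry the compare-and-swap until it succeeds,
	 * then return the previous value, as in the new xchg() macro. */
	unsigned int prev;
	do {
		prev = counter;
	} while (!__sync_bool_compare_and_swap(&counter, prev, 42));
	assert(prev == 6 && counter == 42);
	return 0;
}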
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f8c196984853..b91e960e4045 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -10,6 +10,8 @@
 #include <linux/types.h>
 #include <asm/div64.h>
 
+#define CPUTIME_PER_USEC 4096ULL
+#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
@@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
  */
 static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
 {
-	return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
 }
 
 static inline cputime_t jiffies_to_cputime(const unsigned int jif)
 {
-	return (__force cputime_t)(jif * (4096000000ULL / HZ));
+	return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
 }
 
 static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
 {
 	unsigned long long jif = (__force unsigned long long) cputime;
-	do_div(jif, 4096000000ULL / HZ);
+	do_div(jif, CPUTIME_PER_SEC / HZ);
 	return jif;
 }
 
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
-	return (__force cputime64_t)(jif * (4096000000ULL / HZ));
+	return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
 }
 
 /*
@@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 
 static inline cputime_t usecs_to_cputime(const unsigned int m)
 {
-	return (__force cputime_t)(m * 4096ULL);
+	return (__force cputime_t)(m * CPUTIME_PER_USEC);
 }
 
 #define usecs_to_cputime64(m)		usecs_to_cputime(m)
@@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
  */
 static inline unsigned int cputime_to_secs(const cputime_t cputime)
 {
-	return __div((__force unsigned long long) cputime, 2048000000) >> 1;
+	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
 }
 
 static inline cputime_t secs_to_cputime(const unsigned int s)
 {
-	return (__force cputime_t)(s * 4096000000ULL);
+	return (__force cputime_t)(s * CPUTIME_PER_SEC);
 }
 
 /*
@@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *value)
 {
-	unsigned long long ret = value->tv_sec * 4096000000ULL;
-	return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
+	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+	return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
 }
 
 static inline void cputime_to_timespec(const cputime_t cputime,
@@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime,
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
-	value->tv_nsec = rp.subreg.even * 1000 / 4096;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
+	value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
-	value->tv_sec = __cputime / 4096000000ULL;
+	value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
+	value->tv_sec = __cputime / CPUTIME_PER_SEC;
 #endif
 }
 
@@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
  */
 static inline cputime_t timeval_to_cputime(const struct timeval *value)
 {
-	unsigned long long ret = value->tv_sec * 4096000000ULL;
-	return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
+	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+	return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
 }
 
 static inline void cputime_to_timeval(const cputime_t cputime,
@@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime,
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
-	value->tv_usec = rp.subreg.even / 4096;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
+	value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_usec = (__cputime % 4096000000ULL) / 4096;
-	value->tv_sec = __cputime / 4096000000ULL;
+	value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
+	value->tv_sec = __cputime / CPUTIME_PER_SEC;
 #endif
 }
 
@@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
 static inline clock_t cputime_to_clock_t(cputime_t cputime)
 {
 	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, 4096000000ULL / USER_HZ);
+	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
 	return clock;
 }
 
 static inline cputime_t clock_t_to_cputime(unsigned long x)
 {
-	return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
+	return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
 }
 
 /*
@@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
 static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
 {
 	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, 4096000000ULL / USER_HZ);
+	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
 	return clock;
 }
 
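The named constants make the unit explicit: one cputime tick is 2**-12 microseconds, so CPUTIME_PER_USEC is 4096 and CPUTIME_PER_SEC is 4096 * 1,000,000 = 4,096,000,000, the magic number the old code repeated. A small stand-alone sketch (not part of the patch) checking that arithmetic:

#include <assert.h>

#define USEC_PER_SEC	 1000000ULL
#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_SEC	 (CPUTIME_PER_USEC * USEC_PER_SEC)

int main(void)
{
	/* 1.5 seconds expressed in CPU-timer units (2**-12 microseconds) */
	unsigned long long cputime = 3ULL * CPUTIME_PER_SEC / 2;

	assert(CPUTIME_PER_SEC == 4096000000ULL);
	assert(cputime / CPUTIME_PER_SEC == 1);				/* whole seconds */
	assert((cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC == 500000); /* microseconds */
	return 0;
}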
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
  * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
  */
 extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 	__attribute__ ((format(printf, 3, 4)));
 
+#define debug_sprintf_event(_id, _level, _fmt, ...)			\
+({									\
+	debug_entry_t *__ret;						\
+	debug_info_t *__id = _id;					\
+	int __level = _level;						\
+	if ((!__id) || (__level > __id->level))				\
+		__ret = NULL;						\
+	else								\
+		__ret = __debug_sprintf_event(__id, __level,		\
+					      _fmt, ## __VA_ARGS__);	\
+	__ret;								\
+})
 
 static inline debug_entry_t*
 debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
  * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
  */
 extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 	__attribute__ ((format(printf, 3, 4)));
 
+#define debug_sprintf_exception(_id, _level, _fmt, ...)			\
+({									\
+	debug_entry_t *__ret;						\
+	debug_info_t *__id = _id;					\
+	int __level = _level;						\
+	if ((!__id) || (__level > __id->level))				\
+		__ret = NULL;						\
+	else								\
+		__ret = __debug_sprintf_exception(__id, __level,	\
+						  _fmt, ## __VA_ARGS__);\
+	__ret;								\
+})
+
 int debug_register_view(debug_info_t* id, struct debug_view* view);
 int debug_unregister_view(debug_info_t* id, struct debug_view* view);
 
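The point of wrapping the sprintf-style entry points in macros is that the level check now happens at the call site, before the variadic function is entered, so filtered-out events cost almost nothing. A minimal user-space sketch of the same pattern (not the kernel API; dbg_area, __dbg_event and dbg_event are made-up names, and it relies on GNU C statement expressions just as the kernel macro does):

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical debug area with a current level and a slow formatting
 * path that should only run for enabled levels. */
struct dbg_area { int level; };

static int __dbg_event(struct dbg_area *id, int level, const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);	/* the expensive work happens only here */
	va_end(ap);
	return 1;
}

/* Same shape as debug_sprintf_event(): filter first, format later. */
#define dbg_event(_id, _level, _fmt, ...)				\
({									\
	struct dbg_area *__id = (_id);					\
	int __lvl = (_level);						\
	(!__id || __lvl > __id->level) ?				\
		0 : __dbg_event(__id, __lvl, _fmt, ##__VA_ARGS__);	\
})

int main(void)
{
	struct dbg_area area = { .level = 2 };

	dbg_event(&area, 1, "logged: %d\n", 42);	/* level 1 <= 2: emitted */
	dbg_event(&area, 5, "dropped: %d\n", 43);	/* level 5 > 2: skipped */
	return 0;
}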
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..abb618f1ead2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,69 @@
 #ifndef _ASM_S390_FTRACE_H
 #define _ASM_S390_FTRACE_H
 
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE	24
+#define MCOUNT_RETURN_FIXUP	18
+
 #ifndef __ASSEMBLY__
 
-extern void _mcount(void);
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
 extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
 
 struct dyn_arch_ftrace { };
 
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
 
+#define KPROBE_ON_FTRACE_NOP	0
+#define KPROBE_ON_FTRACE_CALL	1
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	return addr;
 }
 
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	/* jg .+24 */
+	insn->opc = 0xc0f4;
+	insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
 
-#define MCOUNT_INSN_SIZE  18
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	if (insn->disp == MCOUNT_INSN_SIZE / 2)
+		return 1;
+#endif
+	return 0;
+}
 
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+					     unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	unsigned long target;
+
+	/* brasl r0,ftrace_caller */
+	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+	insn->opc = 0xc005;
+	insn->disp = (target - ip) / 2;
+#endif
+}
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
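The new ftrace_insn helpers build one 6-byte s390 instruction: a "jg" branch (opcode 0xc0f4) used as the nop, or a "brasl" call (opcode 0xc005) to the tracer. The displacement field counts halfwords, which is why byte distances are divided by 2. A small host-side sketch (not part of the patch; the addresses are hypothetical) checking that encoding arithmetic:

#include <assert.h>
#include <stdint.h>

/* Mirrors struct ftrace_insn above: 16-bit opcode plus a 32-bit
 * displacement counted in halfwords (2-byte units). */
struct insn {
	uint16_t opc;
	int32_t  disp;
} __attribute__((packed));

#define MCOUNT_INSN_SIZE 24

int main(void)
{
	/* Hypothetical instruction address and a target 24 bytes away. */
	unsigned long ip = 0x1000, target = 0x1000 + MCOUNT_INSN_SIZE;
	struct insn nop, call;

	/* "jg .+24": branch over the whole mcount block. */
	nop.opc = 0xc0f4;
	nop.disp = MCOUNT_INSN_SIZE / 2;

	/* "brasl r0,target": displacement is (target - ip) in halfwords. */
	call.opc = 0xc005;
	call.disp = (target - ip) / 2;

	assert(nop.disp == 12 && call.disp == 12);
	assert(sizeof(struct insn) == 6);	/* one 6-byte instruction */
	return 0;
}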
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6af037f574b8..113cd963dbbe 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -9,9 +9,10 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/seqlock.h>
 
 struct s390_idle_data {
-	unsigned int sequence;
+	seqcount_t seqcount;
 	unsigned long long idle_count;
 	unsigned long long idle_time;
 	unsigned long long clock_idle_enter;
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 6ad9013c67e7..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -39,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
 {
 }
 
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
 /*
  * s390 needs a private implementation of pci_iomap since ioremap with its
  * offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index b0d5f0a97a01..343ea7c987aa 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#define EXT_INTERRUPT	1
-#define IO_INTERRUPT	2
-#define THIN_INTERRUPT	3
+#define EXT_INTERRUPT	0
+#define IO_INTERRUPT	1
+#define THIN_INTERRUPT	2
 
-#define NR_IRQS_BASE	4
+#define NR_IRQS_BASE	3
 
 #ifdef CONFIG_PCI_NR_MSI
 # define NR_IRQS	(NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
 # define NR_IRQS	NR_IRQS_BASE
 #endif
 
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ		0
-
 /* External interruption codes */
 #define EXT_IRQ_INTERRUPT_KEY	0x0040
 #define EXT_IRQ_CLK_COMP	0x1004
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
+	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
 	__u32	softirq_pending;		/* 0x02ec */
 	__u32	percpu_offset;			/* 0x02f0 */
 	__u32	machine_flags;			/* 0x02f4 */
-	__u32	ftrace_func;			/* 0x02f8 */
+	__u8	pad_0x02f8[0x02fc-0x02f8];	/* 0x02f8 */
 	__u32	spinlock_lockval;		/* 0x02fc */
 
 	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u64	ftrace_func;			/* 0x0390 */
+	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..ef803c202d42 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -50,10 +50,6 @@ struct zpci_fmb {
 	atomic64_t unmapped_pages;
 } __packed __aligned(16);
 
-#define ZPCI_MSI_VEC_BITS	11
-#define ZPCI_MSI_VEC_MAX	(1 << ZPCI_MSI_VEC_BITS)
-#define ZPCI_MSI_VEC_MASK	(ZPCI_MSI_VEC_MAX - 1)
-
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
 	ZPCI_FN_STATE_STANDBY,
@@ -90,6 +86,7 @@ struct zpci_dev {
 
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
+	unsigned int	max_msi;	/* maximum number of MSI's */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
 	unsigned int	aisb;		/* number of the summary bit */
 
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..f664e96f48c7 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst,
 	int size, rc = 0;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+		size = zpci_get_max_write_size((u64 __force) src,
+					       (u64) dst, n, 8);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 		rc = zpci_read_single(req, dst, offset, size);
 		if (rc)
@@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 		return -EINVAL;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+		size = zpci_get_max_write_size((u64 __force) dst,
+					       (u64) src, n, 128);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 
 		if (size > 8) /* main path */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d39a31c3cdf2..e510b9460efa 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
-			    bool init_skey);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57c882761dea..5e102422c9ab 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
 #define MODULES_LEN	(1UL << 31)
 #endif
 
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
@@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 	return 0;
 }
 
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
 static inline int mm_use_skey(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	return pmd;
 }
 
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address,
+					    pmd_t *pmdp, int full)
+{
+	pmd_t pmd = *pmdp;
+
+	if (!full)
+		pmdp_flush_lazy(mm, address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
 #define __HAVE_ARCH_PMDP_CLEAR_FLUSH
 static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pmd_t *pmdp)
@@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
-extern void s390_enable_skey(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
 
 /*
  * No page table caches to initialise
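On 64-bit s390 modules live in a dedicated address window between MODULES_VADDR and MODULES_END, so the new is_module_addr() helper is just a range check; the ftrace code above uses it to decide whether a call has to go through the module PLT trampoline. A user-space sketch of the same check (not the kernel helper; the window bounds here are invented):

#include <assert.h>

/* Hypothetical module window; the real MODULES_VADDR/MODULES_END are
 * derived from the kernel mapping at build/run time. */
static unsigned long modules_vaddr = 0x3ff80000000UL;
static unsigned long modules_end   = 0x3ffe0000000UL;

static int in_module_range(void *addr)
{
	/* Same shape as the helper added by the patch: a plain range check. */
	if ((unsigned long)addr < modules_vaddr)
		return 0;
	if ((unsigned long)addr > modules_end)
		return 0;
	return 1;
}

int main(void)
{
	assert(in_module_range((void *)(modules_vaddr + 0x1000)));
	assert(!in_module_range((void *)0x10000));
	return 0;
}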
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d559bdb03d18..bed05ea7ec27 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,8 +217,6 @@ static inline unsigned short stap(void)
  */
 static inline void cpu_relax(void)
 {
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,68");
 	barrier();
 }
 
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d6bdf906caa5..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
 static inline int
 _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
-	unsigned int old_expected = old;
-
-	asm volatile(
-		"	cs	%0,%3,%1"
-		: "=d" (old), "=Q" (*lock)
-		: "0" (old), "d" (new), "Q" (*lock)
-		: "cc", "memory" );
-	return old == old_expected;
+	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
 /*
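With the builtin, _raw_compare_and_swap() directly returns whether the swap succeeded, so a trylock-style operation collapses to a single call. A minimal user-space sketch of that usage (not the kernel spinlock code; the lock word convention and cpu ids here are made up):

#include <assert.h>

/* 0 means the lock is free; a non-zero value identifies the holder. */
static unsigned int lock_word;

static int try_lock(unsigned int *lock, unsigned int cpu)
{
	/* Succeeds only if the lock word was 0 (free). */
	return __sync_bool_compare_and_swap(lock, 0, cpu);
}

int main(void)
{
	assert(try_lock(&lock_word, 1));	/* acquires: 0 -> 1 */
	assert(!try_lock(&lock_word, 2));	/* already held */
	lock_word = 0;				/* unlock */
	assert(try_lock(&lock_word, 2));
	return 0;
}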
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 572c59949004..06d8741ad6f4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	tlb_remove_table(tlb, pmd);
 #endif
 }