author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 15:02:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 15:02:28 -0400
commit		d586c86d50cefa0897a51a2dbc714060ccedae76 (patch)
tree		76a7f454637badb74390047aebca5c071c0988fe /arch/s390
parent		e9f37d3a8d126e73f5737ef548cdf6f618e295e4 (diff)
parent		457f2180951cdcbfb4657ddcc83b486e93497f56 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull second set of s390 patches from Martin Schwidefsky:
 "The second part of Heiko's uaccess rework; the page table walker for
  uaccess is now a thing of the past (yay!).

  The code change to fix the theoretical TLB flush problem allows us to
  add a TLB flush optimization for zEC12: this machine has new
  instructions that allow CPU-local TLB flushes for single pages and
  for all pages of a specific address space.

  Plus the usual bug fixes and some more cleanup."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/uaccess: rework uaccess code - fix locking issues
s390/mm,tlb: optimize TLB flushing for zEC12
s390/mm,tlb: safeguard against speculative TLB creation
s390/irq: Use defines for external interruption codes
s390/irq: Add defines for external interruption codes
s390/sclp: add timeout for queued requests
kvm/s390: also set guest pages back to stable on kexec/kdump
lcs: Add missing destroy_timer_on_stack()
s390/tape: Add missing destroy_timer_on_stack()
s390/tape: Use del_timer_sync()
s390/3270: fix crash with multiple reset device requests
s390/bitops,atomic: add missing memory barriers
s390/zcrypt: add length check for aligned data to avoid overflow in msg-type 6
Diffstat (limited to 'arch/s390')
 36 files changed, 892 insertions(+), 958 deletions(-)
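
The TLB patches in this series all hinge on one test, repeated in ptep_flush_direct(), pmdp_flush_direct() and __tlb_flush_asce() below: a flush may stay CPU-local only if the machine has the local-TLB-clearing facility and no other CPU currently has the mm attached. A rough C rendering of that test (illustration only, with a hypothetical helper name; the patch open-codes it at each call site):

	/*
	 * Sketch, not part of the patch: the attach count is biased by
	 * 0x10000 while a flush is in flight; the low 16 bits count the
	 * attached tasks, so "(count & 0xffff) <= active" means no other
	 * task has the mm attached right now.
	 */
	static inline int tlb_flush_may_be_local(struct mm_struct *mm)
	{
		int active = (mm == current->active_mm) ? 1 : 0;
		int count = atomic_read(&mm->context.attach_count);

		return MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
		       cpumask_equal(mm_cpumask(mm),
				     cpumask_of(smp_processor_id()));
	}
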
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa9aaf7144b7..1d4706114a45 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,23 +15,29 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
+#define __ATOMIC_NO_BARRIER	"\n"
+
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_BARRIER	"bcr 14,0\n"
 
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
 ({ \
	int old_val; \
	\
	typecheck(atomic_t *, ptr); \
	asm volatile( \
+		__barrier \
		op_string " %0,%2,%1\n" \
+		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
@@ -43,8 +49,9 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_BARRIER	"\n"
 
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
 ({ \
	int old_val, new_val; \
	\
@@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
 
 static inline void atomic_add(int i, atomic_t *v)
@@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v)
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
-	} else {
-		atomic_add_return(i, v);
+		return;
	}
-#else
-	atomic_add_return(i, v);
 #endif
+	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
+	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
 }
 
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
+	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
@@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
+#define __ATOMIC64_NO_BARRIER	"\n"
+
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_BARRIER	"bcr 14,0\n"
 
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
 ({ \
	long long old_val; \
	\
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
+		__barrier \
		op_string " %0,%2,%1\n" \
+		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
@@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_BARRIER	"\n"
 
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
 ({ \
	long long old_val, new_val; \
	\
@@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"agsi %0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
+	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
+	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
 }
 
 #define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))
@@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
	} while (atomic64_cmpxchg(v, old, new) != old);
 }
 
-#endif /* CONFIG_64BIT */
-
 static inline void atomic64_add(long long i, atomic64_t *v)
 {
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi %0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
-	} else {
-		atomic64_add_return(i, v);
-	}
-#else
	atomic64_add_return(i, v);
-#endif
 }
 
+#endif /* CONFIG_64BIT */
+
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
	long long c, old;
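
What the new __barrier argument buys, roughly (sketch, not from the patch): on z196 and newer, the interlocked-access instructions do not by themselves provide the ordering the value-returning atomics must guarantee, so those are bracketed with "bcr 14,0" (a serializing branch), while the void-returning ops pass __ATOMIC_NO_BARRIER and stay a single instruction:

	atomic_t v = ATOMIC_INIT(0);

	atomic_add(1, &v);		/* z196+: one "laa", no serialization  */
	atomic_add_return(1, &v);	/* "bcr 14,0; laa ...; bcr 14,0"       */
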
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index ec5ef891db6b..520542477678 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -47,14 +47,18 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#define __BITOPS_NO_BARRIER	"\n"
 
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR	"or"
 #define __BITOPS_AND	"nr"
 #define __BITOPS_XOR	"xr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
 ({ \
	unsigned long __old, __new; \
	\
@@ -67,7 +71,7 @@
		" jl 0b" \
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val) \
-		: "cc"); \
+		: "cc", "memory"); \
	__old; \
 })
 
@@ -78,17 +82,20 @@
 #define __BITOPS_OR	"laog"
 #define __BITOPS_AND	"lang"
 #define __BITOPS_XOR	"laxg"
+#define __BITOPS_BARRIER	"bcr 14,0\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
 ({ \
	unsigned long __old; \
	\
	typecheck(unsigned long *, (__addr)); \
	asm volatile( \
+		__barrier \
		__op_string " %0,%2,%1\n" \
+		__barrier \
		: "=d" (__old), "+Q" (*(__addr)) \
		: "d" (__val) \
-		: "cc"); \
+		: "cc", "memory"); \
	__old; \
 })
 
@@ -97,8 +104,9 @@
 #define __BITOPS_OR	"ogr"
 #define __BITOPS_AND	"ngr"
 #define __BITOPS_XOR	"xgr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
 ({ \
	unsigned long __old, __new; \
	\
@@ -111,7 +119,7 @@
		" jl 0b" \
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val) \
-		: "cc"); \
+		: "cc", "memory"); \
	__old; \
 })
 
@@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
			"oi %0,%b1\n"
			: "+Q" (*caddr)
			: "i" (1 << (nr & 7))
-			: "cc");
+			: "cc", "memory");
		return;
	}
 #endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
			"ni %0,%b1\n"
			: "+Q" (*caddr)
			: "i" (~(1 << (nr & 7)))
-			: "cc");
+			: "cc", "memory");
		return;
	}
 #endif
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
+	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
			"xi %0,%b1\n"
			: "+Q" (*caddr)
			: "i" (1 << (nr & 7))
-			: "cc");
+			: "cc", "memory");
		return;
	}
 #endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
 }
 
 static inline int
@@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;
 
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
	return (old & mask) != 0;
 }
 
@@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;
 
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
	return (old & ~mask) != 0;
 }
 
@@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;
 
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
	return (old & mask) != 0;
 }
 
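
Adding "memory" to the clobber lists makes these ops compiler barriers as well, and __BITOPS_BARRIER gives the test_and_*_bit() family the CPU-level ordering the generic code expects; the old barrier() after the loop only constrained the compiler, not the hardware. A typical pattern that relies on this, in the style of bit_spin_lock() (illustrative sketch, not from the patch; smp_mb__before_clear_bit() is the 3.14-era API):

	static unsigned long lock_word;

	while (test_and_set_bit(0, &lock_word))	/* barriered, acts as acquire */
		cpu_relax();
	/* ... critical section ... */
	smp_mb__before_clear_bit();		/* clear_bit() itself is relaxed */
	clear_bit(0, &lock_word);
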
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99..69cf5b5eddc9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
 #include <asm/errno.h>
 
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+	asm volatile( \
+		"   sacf 256\n" \
+		"0: l   %1,0(%6)\n" \
+		"1:"insn \
+		"2: cs  %1,%2,0(%6)\n" \
+		"3: jl  1b\n" \
+		"   lhi %0,0\n" \
+		"4: sacf 768\n" \
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+		  "=m" (*uaddr) \
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+		  "m" (*uaddr) : "cc");
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
-	int oldval, ret;
+	int oldval = 0, newval, ret;
 
+	update_primary_asce(current);
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;
 
	pagefault_disable();
-	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
	pagefault_enable();
 
	if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	return ret;
 }
 
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret;
+
+	update_primary_asce(current);
+	asm volatile(
+		"   sacf 256\n"
+		"0: cs   %1,%4,0(%5)\n"
+		"1: la   %0,0\n"
+		"2: sacf 768\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory");
+	*uval = oldval;
+	return ret;
+}
+
 #endif /* _ASM_S390_FUTEX_H */
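
The futex ops used to go through the page table walker; they now run against user memory directly. Roughly: "sacf 256" switches the CPU to secondary-space mode (CR7 holds the user ASCE), "sacf 768" switches back to home-space mode, and update_primary_asce() flags TIF_ASCE so the entry code restores the user ASCE into CR1 before returning to user space. In C terms, the __futex_atomic_op() loop is approximately (sketch; op() stands for the per-case insn string):

	do {
		oldval = *uaddr;		/* "0: l  %1,0(%6)"      */
		newval = op(oldval, oparg);	/* insn, e.g. "ar %2,%5" */
	} while (cmpxchg(uaddr, oldval, newval) != oldval); /* "cs" + "jl 1b" */
	ret = 0;	/* "lhi %0,0"; faults yield -EFAULT via EX_TABLE */
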
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 35f0faab5361..c4dd400a2791 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -16,6 +16,20 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ		0
 
+/* External interruption codes */
+#define EXT_IRQ_INTERRUPT_KEY	0x0040
+#define EXT_IRQ_CLK_COMP	0x1004
+#define EXT_IRQ_CPU_TIMER	0x1005
+#define EXT_IRQ_WARNING_TRACK	0x1007
+#define EXT_IRQ_MALFUNC_ALERT	0x1200
+#define EXT_IRQ_EMERGENCY_SIG	0x1201
+#define EXT_IRQ_EXTERNAL_CALL	0x1202
+#define EXT_IRQ_TIMING_ALERT	0x1406
+#define EXT_IRQ_MEASURE_ALERT	0x1407
+#define EXT_IRQ_SERVICE_SIG	0x2401
+#define EXT_IRQ_CP_SERVICE	0x2603
+#define EXT_IRQ_IUCV		0x4000
+
 #ifndef __ASSEMBLY__
 
 #include <linux/hardirq.h>
@@ -77,8 +91,8 @@ struct ext_code {
 
 typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
 
-int register_external_interrupt(u16 code, ext_int_handler_t handler);
-int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+int register_external_irq(u16 code, ext_int_handler_t handler);
+int unregister_external_irq(u16 code, ext_int_handler_t handler);
 
 enum irq_subclass {
	IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
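
With the renamed registration functions and the new EXT_IRQ_* constants, callers name the interruption code instead of passing a magic number. A minimal usage sketch (hypothetical handler name, matching the ext_int_handler_t signature above):

	static void my_service_irq(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
	{
		/* handle the 0x2401 service-signal external interruption */
	}

	/* was: register_external_interrupt(0x2401, my_service_irq); */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, my_service_irq);
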
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index ff132ac64ddd..f77695a82f64 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,9 +1,11 @@
 #ifndef __MMU_H
 #define __MMU_H
 
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 
 typedef struct {
+	cpumask_t cpu_attach_mask;
	atomic_t attach_count;
	unsigned int flush_mm;
	spinlock_t list_lock;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 38149b63dc44..71be346d0e3c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
 {
+	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
@@ -29,41 +30,61 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)	do { } while (0)
 
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
 {
	pgd_t *pgd = mm->pgd;
 
	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	/* Load primary space page table origin. */
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
	set_fs(current->thread.mm_segment);
 }
 
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+{
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+	unsigned long asce;
+
+	__ctl_store(asce, 1, 1);
+	if (asce != S390_lowcore.kernel_asce)
+		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	set_tsk_thread_flag(tsk, TIF_ASCE);
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
 {
	int cpu = smp_processor_id();
 
+	update_primary_asce(tsk);
	if (prev == next)
		return;
+	if (MACHINE_HAS_TLB_LC)
+		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_mm until all TLB flushes are done. */
+		/* Delay update_user_asce until all TLB flushes are done. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+		/* Clear old ASCE by loading the kernel ASCE. */
+		clear_user_asce(next, 0);
	} else {
		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_mm(next, tsk);
+		update_user_asce(next, 0);
		if (next->context.flush_mm)
			/* Flush pending TLBs */
			__tlb_flush_mm(next);
	}
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+	if (MACHINE_HAS_TLB_LC)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -80,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
		cpu_relax();
 
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_mm(mm, tsk);
+	update_user_asce(mm, 0);
	if (mm->context.flush_mm)
		__tlb_flush_mm(mm);
	preempt_enable();
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 50a75d96f939..12f75313e086 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1070,12 +1070,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
 }
 
+static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+{
+	unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+	/* pto in ESA mode must point to the start of the segment table */
+	pto &= 0x7ffffc00;
+#endif
+	/* Invalidation + local TLB flush for the pte */
+	asm volatile(
+		"	.insn rrf,0xb2210000,%2,%3,0,1"
+		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
 static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
 {
+	int active, count;
+
	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
-	__ptep_ipte(address, ptep);
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__ptep_ipte_local(address, ptep);
+	else
+		__ptep_ipte(address, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
@@ -1384,35 +1407,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
 
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
-	unsigned long sto = (unsigned long) pmdp -
-		pmd_index(address) * sizeof(pmd_t);
-
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
-		asm volatile(
-			"	.insn rrf,0xb98e0000,%2,%3,0,0"
-			: "=m" (*pmdp)
-			: "m" (*pmdp), "a" (sto),
-			  "a" ((address & HPAGE_MASK))
-			: "cc"
-		);
-	}
-}
-
-static inline void __pmd_csp(pmd_t *pmdp)
-{
-	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-					       _SEGMENT_ENTRY_INVALID;
-	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-	asm volatile(
-		"	csp %1,%3"
-		: "=m" (*pmdp)
-		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-}
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
@@ -1481,18 +1475,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
 
+static inline void __pmdp_csp(pmd_t *pmdp)
+{
+	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+					       _SEGMENT_ENTRY_INVALID;
+	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+	asm volatile(
+		"	csp %1,%3"
+		: "=m" (*pmdp)
+		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto;
+
+	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+	asm volatile(
+		"	.insn rrf,0xb98e0000,%2,%3,0,0"
+		: "=m" (*pmdp)
+		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+		: "cc" );
+}
+
+static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto;
+
+	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+	asm volatile(
+		"	.insn rrf,0xb98e0000,%2,%3,0,1"
+		: "=m" (*pmdp)
+		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+		: "cc" );
+}
+
+static inline void pmdp_flush_direct(struct mm_struct *mm,
+				     unsigned long address, pmd_t *pmdp)
+{
+	int active, count;
+
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+		return;
+	if (!MACHINE_HAS_IDTE) {
+		__pmdp_csp(pmdp);
+		return;
+	}
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__pmdp_idte_local(address, pmdp);
+	else
+		__pmdp_idte(address, pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
+}
+
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long address, pmd_t *pmdp)
 {
	int active, count;
 
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
-	} else
-		__pmd_idte(address, pmdp);
+	} else if (MACHINE_HAS_IDTE)
+		__pmdp_idte(address, pmdp);
+	else
+		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
@@ -1545,7 +1601,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
	pmd_t pmd;
 
	pmd = *pmdp;
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
 }
@@ -1556,7 +1612,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 {
	pmd_t pmd = *pmdp;
 
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
 }
@@ -1572,7 +1628,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp)
 {
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(vma->vm_mm, address, pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1582,7 +1638,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
	pmd_t pmd = *pmdp;
 
	if (pmd_write(pmd)) {
-		__pmd_idte(address, pmdp);
+		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
 }
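
The only difference between the global and local variants above is the last operand of the IPTE/IDTE .insn encoding: a trailing ",1" selects the local-clearing control, restricting the TLB flush to the issuing CPU (available when facility 51 is installed; see the early.c hunk below). The selection logic, condensed (sketch mirroring ptep_flush_direct() above, with the attach-count bias omitted):

	if (MACHINE_HAS_TLB_LC &&	/* local-TLB-clearing facility */
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);	/* this CPU only */
	else
		__ptep_ipte(address, ptep);		/* broadcast */
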
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 406f3a1e63ef..b31b22dba948 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
 #define MACHINE_FLAG_RRBM	(1UL << 16)
+#define MACHINE_FLAG_TLB_LC	(1UL << 17)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_TOPOLOGY	(0)
 #define MACHINE_HAS_TE		(0)
 #define MACHINE_HAS_RRBM	(0)
+#define MACHINE_HAS_TLB_LC	(0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
+#define MACHINE_HAS_TLB_LC	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
 #endif /* CONFIG_64BIT */
 
 /*
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..e759181357fc 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
		update_cr_regs(next); \
	} \
	prev = __switch_to(prev,next); \
+	update_primary_asce(current); \
 } while (0)
 
 #define finish_arch_switch(prev) do { \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b90345..50630e6a35de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
+#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
+#define _TIF_ASCE		(1<<TIF_ASCE)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2cb846c4b37f..c544b6f05d95 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
-	if (tlb->fullmm)
-		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
 {
-	if (!tlb->fullmm)
-		return page_table_free_rcu(tlb, (unsigned long *) pte);
-	page_table_free(tlb->mm, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte);
 }
 
 /*
@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pmd);
-	crst_table_free(tlb->mm, (unsigned long *) pmd);
+	tlb_remove_table(tlb, pmd);
 #endif
 }
 
@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pud);
-	crst_table_free(tlb->mm, (unsigned long *) pud);
+	tlb_remove_table(tlb, pud);
 #endif
 }
 
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index f9fef0425fee..16c9c88658c8 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -7,19 +7,41 @@
 #include <asm/pgalloc.h>
 
 /*
- * Flush all tlb entries on the local cpu.
+ * Flush all TLB entries on the local CPU.
  */
 static inline void __tlb_flush_local(void)
 {
	asm volatile("ptlb" : : : "memory");
 }
 
-#ifdef CONFIG_SMP
 /*
- * Flush all tlb entries on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs
  */
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+	/* Global TLB flush for the mm */
+	asm volatile(
+		"	.insn rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+	/* Local TLB flush for the mm */
+	asm volatile(
+		"	.insn rrf,0xb98e0000,0,%0,%1,1"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
 void smp_ptlb_all(void);
 
+/*
+ * Flush all TLB entries on all CPUs.
+ */
 static inline void __tlb_flush_global(void)
 {
	register unsigned long reg2 asm("2");
@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 }
 
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
 static inline void __tlb_flush_full(struct mm_struct *mm)
 {
-	cpumask_t local_cpumask;
-
	preempt_disable();
-	/*
-	 * If the process only ran on the local cpu, do a local flush.
-	 */
-	cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
-	if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
+	atomic_add(0x10000, &mm->context.attach_count);
+	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+		/* Local TLB flush */
		__tlb_flush_local();
-	else
+	} else {
+		/* Global TLB flush */
		__tlb_flush_global();
+		/* Reset TLB flush mask */
+		if (MACHINE_HAS_TLB_LC)
+			cpumask_copy(mm_cpumask(mm),
+				     &mm->context.cpu_attach_mask);
+	}
+	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
 }
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+	int active, count;
+
+	preempt_disable();
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+		__tlb_flush_idte_local(asce);
+	} else {
+		if (MACHINE_HAS_IDTE)
+			__tlb_flush_idte(asce);
+		else
+			__tlb_flush_global();
+		/* Reset TLB flush mask */
+		if (MACHINE_HAS_TLB_LC)
+			cpumask_copy(mm_cpumask(mm),
+				     &mm->context.cpu_attach_mask);
+	}
+	atomic_sub(0x10000, &mm->context.attach_count);
+	preempt_enable();
+}
+
+static inline void __tlb_flush_kernel(void)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte((unsigned long) init_mm.pgd |
+				 init_mm.context.asce_bits);
+	else
+		__tlb_flush_global();
+}
 #else
-#define __tlb_flush_full(mm)	__tlb_flush_local()
 #define __tlb_flush_global()	__tlb_flush_local()
-#endif
+#define __tlb_flush_full(mm)	__tlb_flush_local()
 
 /*
- * Flush all tlb entries of a page table on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_idte(unsigned long asce)
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 {
-	asm volatile(
-		"	.insn rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (2048), "a" (asce) : "cc" );
+	if (MACHINE_HAS_TLB_LC)
+		__tlb_flush_idte_local(asce);
+	else
+		__tlb_flush_local();
 }
 
+static inline void __tlb_flush_kernel(void)
+{
+	if (MACHINE_HAS_TLB_LC)
+		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
+				       init_mm.context.asce_bits);
+	else
+		__tlb_flush_local();
+}
+#endif
+
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
	/*
@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_idte((unsigned long) mm->pgd |
+		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 static inline void flush_tlb_kernel_range(unsigned long start,
					   unsigned long end)
 {
-	__tlb_flush_mm(&init_mm);
+	__tlb_flush_kernel();
 }
 
 #endif /* _S390_TLBFLUSH_H */
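
Taken together, the flush policy after this header's changes looks like this (condensed sketch, not a function in the patch):

	static void tlb_flush_mm_sketch(struct mm_struct *mm)
	{
		unsigned long asce = (unsigned long) mm->pgd |
				     mm->context.asce_bits;

		if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
			__tlb_flush_asce(mm, asce);	/* idte, maybe local */
		else
			__tlb_flush_full(mm);		/* ptlb, maybe local */
	}
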
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb0..1be64a1506d0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-int __handle_fault(unsigned long, unsigned long, int);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e4c99a183651..cc10cdd4d6a2 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -136,6 +136,7 @@ int main(void)
	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
	DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
	DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
+	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6b594439cca5..a734f3585ceb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -386,6 +386,8 @@ static __init void detect_machine_facilities(void)
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
	if (test_facility(66))
		S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
+	if (test_facility(51))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 #endif
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 526d3735ed29..1662038516c0 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -38,9 +38,9 @@ __PT_R14 = __PT_GPRS + 56 | |||
38 | __PT_R15 = __PT_GPRS + 60 | 38 | __PT_R15 = __PT_GPRS + 60 |
39 | 39 | ||
40 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 40 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
41 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) | 41 | _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE) |
42 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 42 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
43 | _TIF_MCCK_PENDING) | 43 | _TIF_MCCK_PENDING | _TIF_ASCE) |
44 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 44 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
45 | _TIF_SYSCALL_TRACEPOINT) | 45 | _TIF_SYSCALL_TRACEPOINT) |
46 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) | 46 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) |
@@ -241,6 +241,8 @@ sysc_work: | |||
241 | jo sysc_sigpending | 241 | jo sysc_sigpending |
242 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 242 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
243 | jo sysc_notify_resume | 243 | jo sysc_notify_resume |
244 | tm __TI_flags+3(%r12),_TIF_ASCE | ||
245 | jo sysc_uaccess | ||
244 | j sysc_return # beware of critical section cleanup | 246 | j sysc_return # beware of critical section cleanup |
245 | 247 | ||
246 | # | 248 | # |
@@ -260,6 +262,14 @@ sysc_mcck_pending: | |||
260 | br %r1 # TIF bit will be cleared by handler | 262 | br %r1 # TIF bit will be cleared by handler |
261 | 263 | ||
262 | # | 264 | # |
265 | # _TIF_ASCE is set, load user space asce | ||
266 | # | ||
267 | sysc_uaccess: | ||
268 | ni __TI_flags+3(%r12),255-_TIF_ASCE | ||
269 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | ||
270 | j sysc_return | ||
271 | |||
272 | # | ||
263 | # _TIF_SIGPENDING is set, call do_signal | 273 | # _TIF_SIGPENDING is set, call do_signal |
264 | # | 274 | # |
265 | sysc_sigpending: | 275 | sysc_sigpending: |
@@ -522,6 +532,8 @@ io_work_tif: | |||
522 | jo io_sigpending | 532 | jo io_sigpending |
523 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 533 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
524 | jo io_notify_resume | 534 | jo io_notify_resume |
535 | tm __TI_flags+3(%r12),_TIF_ASCE | ||
536 | jo io_uaccess | ||
525 | j io_return # beware of critical section cleanup | 537 | j io_return # beware of critical section cleanup |
526 | 538 | ||
527 | # | 539 | # |
@@ -535,6 +547,14 @@ io_mcck_pending: | |||
535 | j io_return | 547 | j io_return |
536 | 548 | ||
537 | # | 549 | # |
550 | # _TIF_ASCE is set, load user space asce | ||
551 | # | ||
552 | io_uaccess: | ||
553 | ni __TI_flags+3(%r12),255-_TIF_ASCE | ||
554 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | ||
555 | j io_return | ||
556 | |||
557 | # | ||
538 | # _TIF_NEED_RESCHED is set, call schedule | 558 | # _TIF_NEED_RESCHED is set, call schedule |
539 | # | 559 | # |
540 | io_reschedule: | 560 | io_reschedule: |
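The new sysc_uaccess/io_uaccess stubs implement a deferred control-register update: when _TIF_ASCE is found set on the way back to user space, the flag is cleared and cr1 is reloaded with the user ASCE saved in the lowcore. In C the two stubs amount to roughly the following (an illustrative restatement of the assembly above, not code from this patch):

    if (test_and_clear_thread_flag(TIF_ASCE))
            __ctl_load(S390_lowcore.user_asce, 1, 1);  /* reload cr1 */

This lets another context change a task's ASCE without an immediate broadcast; the task picks up the new value at its next exit to user space.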
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index e09dbe5f2901..5963e43618bb 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -43,9 +43,9 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE |
44 | 44 | ||
45 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 45 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
46 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) | 46 | _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE) |
47 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 47 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
48 | _TIF_MCCK_PENDING) | 48 | _TIF_MCCK_PENDING | _TIF_ASCE) |
49 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 49 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
50 | _TIF_SYSCALL_TRACEPOINT) | 50 | _TIF_SYSCALL_TRACEPOINT) |
51 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) | 51 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) |
@@ -275,6 +275,8 @@ sysc_work: | |||
275 | jo sysc_sigpending | 275 | jo sysc_sigpending |
276 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 276 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
277 | jo sysc_notify_resume | 277 | jo sysc_notify_resume |
278 | tm __TI_flags+7(%r12),_TIF_ASCE | ||
279 | jo sysc_uaccess | ||
278 | j sysc_return # beware of critical section cleanup | 280 | j sysc_return # beware of critical section cleanup |
279 | 281 | ||
280 | # | 282 | # |
@@ -292,6 +294,14 @@ sysc_mcck_pending: | |||
292 | jg s390_handle_mcck # TIF bit will be cleared by handler | 294 | jg s390_handle_mcck # TIF bit will be cleared by handler |
293 | 295 | ||
294 | # | 296 | # |
297 | # _TIF_ASCE is set, load user space asce | ||
298 | # | ||
299 | sysc_uaccess: | ||
300 | ni __TI_flags+7(%r12),255-_TIF_ASCE | ||
301 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
302 | j sysc_return | ||
303 | |||
304 | # | ||
295 | # _TIF_SIGPENDING is set, call do_signal | 305 | # _TIF_SIGPENDING is set, call do_signal |
296 | # | 306 | # |
297 | sysc_sigpending: | 307 | sysc_sigpending: |
@@ -559,6 +569,8 @@ io_work_tif: | |||
559 | jo io_sigpending | 569 | jo io_sigpending |
560 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 570 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
561 | jo io_notify_resume | 571 | jo io_notify_resume |
572 | tm __TI_flags+7(%r12),_TIF_ASCE | ||
573 | jo io_uaccess | ||
562 | j io_return # beware of critical section cleanup | 574 | j io_return # beware of critical section cleanup |
563 | 575 | ||
564 | # | 576 | # |
@@ -571,6 +583,14 @@ io_mcck_pending: | |||
571 | j io_return | 583 | j io_return |
572 | 584 | ||
573 | # | 585 | # |
586 | # _TIF_ASCE is set, load user space asce | ||
587 | # | ||
588 | io_uaccess: | ||
589 | ni __TI_flags+7(%r12),255-_TIF_ASCE | ||
590 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
591 | j io_return | ||
592 | |||
593 | # | ||
574 | # _TIF_NEED_RESCHED is set, call schedule | 594 | # _TIF_NEED_RESCHED is set, call schedule |
575 | # | 595 | # |
576 | io_reschedule: | 596 | io_reschedule: |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index d42b14cc72a4..c7463aa0014b 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -207,7 +207,7 @@ static inline int ext_hash(u16 code) | |||
207 | return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1); | 207 | return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1); |
208 | } | 208 | } |
209 | 209 | ||
210 | int register_external_interrupt(u16 code, ext_int_handler_t handler) | 210 | int register_external_irq(u16 code, ext_int_handler_t handler) |
211 | { | 211 | { |
212 | struct ext_int_info *p; | 212 | struct ext_int_info *p; |
213 | unsigned long flags; | 213 | unsigned long flags; |
@@ -225,9 +225,9 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler) | |||
225 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); | 225 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); |
226 | return 0; | 226 | return 0; |
227 | } | 227 | } |
228 | EXPORT_SYMBOL(register_external_interrupt); | 228 | EXPORT_SYMBOL(register_external_irq); |
229 | 229 | ||
230 | int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | 230 | int unregister_external_irq(u16 code, ext_int_handler_t handler) |
231 | { | 231 | { |
232 | struct ext_int_info *p; | 232 | struct ext_int_info *p; |
233 | unsigned long flags; | 233 | unsigned long flags; |
@@ -243,7 +243,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | |||
243 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); | 243 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); |
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | EXPORT_SYMBOL(unregister_external_interrupt); | 246 | EXPORT_SYMBOL(unregister_external_irq); |
247 | 247 | ||
248 | static irqreturn_t do_ext_interrupt(int irq, void *dummy) | 248 | static irqreturn_t do_ext_interrupt(int irq, void *dummy) |
249 | { | 249 | { |
@@ -253,7 +253,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy) | |||
253 | int index; | 253 | int index; |
254 | 254 | ||
255 | ext_code = *(struct ext_code *) ®s->int_code; | 255 | ext_code = *(struct ext_code *) ®s->int_code; |
256 | if (ext_code.code != 0x1004) | 256 | if (ext_code.code != EXT_IRQ_CLK_COMP) |
257 | __get_cpu_var(s390_idle).nohz_delay = 1; | 257 | __get_cpu_var(s390_idle).nohz_delay = 1; |
258 | 258 | ||
259 | index = ext_hash(ext_code.code); | 259 | index = ext_hash(ext_code.code); |
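Besides the rename from register_external_interrupt() to register_external_irq(), the callers below switch from magic interruption codes to the new EXT_IRQ_* defines. Typical registration under the renamed API (the handler body is hypothetical; the ext_int_handler_t signature matches this era of the s390 code):

    static void example_alert(struct ext_code ext_code,
                              unsigned int param32, unsigned long param64)
    {
            /* react to the interruption; body is illustrative only */
    }

    if (register_external_irq(EXT_IRQ_MEASURE_ALERT, example_alert))
            pr_err("could not register measurement-alert handler\n");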
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index f51214c04858..ea75d011a6fc 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -673,7 +673,8 @@ static int __init cpumf_pmu_init(void) | |||
673 | ctl_clear_bit(0, 48); | 673 | ctl_clear_bit(0, 48); |
674 | 674 | ||
675 | /* register handler for measurement-alert interruptions */ | 675 | /* register handler for measurement-alert interruptions */ |
676 | rc = register_external_interrupt(0x1407, cpumf_measurement_alert); | 676 | rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, |
677 | cpumf_measurement_alert); | ||
677 | if (rc) { | 678 | if (rc) { |
678 | pr_err("Registering for CPU-measurement alerts " | 679 | pr_err("Registering for CPU-measurement alerts " |
679 | "failed with rc=%i\n", rc); | 680 | "failed with rc=%i\n", rc); |
@@ -684,7 +685,8 @@ static int __init cpumf_pmu_init(void) | |||
684 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); | 685 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); |
685 | if (rc) { | 686 | if (rc) { |
686 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); | 687 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); |
687 | unregister_external_interrupt(0x1407, cpumf_measurement_alert); | 688 | unregister_external_irq(EXT_IRQ_MEASURE_ALERT, |
689 | cpumf_measurement_alert); | ||
688 | goto out; | 690 | goto out; |
689 | } | 691 | } |
690 | perf_cpu_notifier(cpumf_pmu_notifier); | 692 | perf_cpu_notifier(cpumf_pmu_notifier); |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 6c0d29827cb6..ea0c7b2ef030 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -1621,7 +1621,8 @@ static int __init init_cpum_sampling_pmu(void) | |||
1621 | pr_err("Registering for s390dbf failed\n"); | 1621 | pr_err("Registering for s390dbf failed\n"); |
1622 | debug_register_view(sfdbg, &debug_sprintf_view); | 1622 | debug_register_view(sfdbg, &debug_sprintf_view); |
1623 | 1623 | ||
1624 | err = register_external_interrupt(0x1407, cpumf_measurement_alert); | 1624 | err = register_external_irq(EXT_IRQ_MEASURE_ALERT, |
1625 | cpumf_measurement_alert); | ||
1625 | if (err) { | 1626 | if (err) { |
1626 | pr_cpumsf_err(RS_INIT_FAILURE_ALRT); | 1627 | pr_cpumsf_err(RS_INIT_FAILURE_ALRT); |
1627 | goto out; | 1628 | goto out; |
@@ -1630,7 +1631,8 @@ static int __init init_cpum_sampling_pmu(void) | |||
1630 | err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); | 1631 | err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); |
1631 | if (err) { | 1632 | if (err) { |
1632 | pr_cpumsf_err(RS_INIT_FAILURE_PERF); | 1633 | pr_cpumsf_err(RS_INIT_FAILURE_PERF); |
1633 | unregister_external_interrupt(0x1407, cpumf_measurement_alert); | 1634 | unregister_external_irq(EXT_IRQ_MEASURE_ALERT, |
1635 | cpumf_measurement_alert); | ||
1634 | goto out; | 1636 | goto out; |
1635 | } | 1637 | } |
1636 | perf_cpu_notifier(cpumf_pmu_notifier); | 1638 | perf_cpu_notifier(cpumf_pmu_notifier); |
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index d817cce7e72d..26b4ae96fdd7 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c | |||
@@ -138,7 +138,8 @@ static int __init runtime_instr_init(void) | |||
138 | return 0; | 138 | return 0; |
139 | 139 | ||
140 | irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); | 140 | irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); |
141 | rc = register_external_interrupt(0x1407, runtime_instr_int_handler); | 141 | rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, |
142 | runtime_instr_int_handler); | ||
142 | if (rc) | 143 | if (rc) |
143 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); | 144 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); |
144 | else | 145 | else |
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 29bd7bec4176..a41f2c99dcc8 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <asm/irq.h> | ||
12 | 13 | ||
13 | LC_EXT_NEW_PSW = 0x58 # addr of ext int handler | 14 | LC_EXT_NEW_PSW = 0x58 # addr of ext int handler |
14 | LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit | 15 | LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit |
@@ -73,9 +74,9 @@ _sclp_wait_int: | |||
73 | lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt | 74 | lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt |
74 | .LwaitS1: | 75 | .LwaitS1: |
75 | lh %r7,LC_EXT_INT_CODE | 76 | lh %r7,LC_EXT_INT_CODE |
76 | chi %r7,0x1004 # timeout? | 77 | chi %r7,EXT_IRQ_CLK_COMP # timeout? |
77 | je .LtimeoutS1 | 78 | je .LtimeoutS1 |
78 | chi %r7,0x2401 # service int? | 79 | chi %r7,EXT_IRQ_SERVICE_SIG # service int? |
79 | jne .LloopS1 | 80 | jne .LloopS1 |
80 | sr %r2,%r2 | 81 | sr %r2,%r2 |
81 | l %r3,LC_EXT_INT_PARAM | 82 | l %r3,LC_EXT_INT_PARAM |
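The replaced immediates pin down the values behind two of the new defines pulled in via asm/irq.h (read directly off this hunk):

    #define EXT_IRQ_CLK_COMP        0x1004  /* clock comparator (timeout) */
    #define EXT_IRQ_SERVICE_SIG     0x2401  /* service signal (SCLP) */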
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 5a640b395bd4..512ce1cde2a4 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -236,6 +236,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) | |||
236 | { | 236 | { |
237 | struct _lowcore *lc = pcpu->lowcore; | 237 | struct _lowcore *lc = pcpu->lowcore; |
238 | 238 | ||
239 | if (MACHINE_HAS_TLB_LC) | ||
240 | cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); | ||
241 | cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); | ||
239 | atomic_inc(&init_mm.context.attach_count); | 242 | atomic_inc(&init_mm.context.attach_count); |
240 | lc->cpu_nr = cpu; | 243 | lc->cpu_nr = cpu; |
241 | lc->percpu_offset = __per_cpu_offset[cpu]; | 244 | lc->percpu_offset = __per_cpu_offset[cpu]; |
@@ -760,6 +763,9 @@ void __cpu_die(unsigned int cpu) | |||
760 | cpu_relax(); | 763 | cpu_relax(); |
761 | pcpu_free_lowcore(pcpu); | 764 | pcpu_free_lowcore(pcpu); |
762 | atomic_dec(&init_mm.context.attach_count); | 765 | atomic_dec(&init_mm.context.attach_count); |
766 | cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); | ||
767 | if (MACHINE_HAS_TLB_LC) | ||
768 | cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); | ||
763 | } | 769 | } |
764 | 770 | ||
765 | void __noreturn cpu_die(void) | 771 | void __noreturn cpu_die(void) |
@@ -785,10 +791,10 @@ void __init smp_fill_possible_mask(void) | |||
785 | void __init smp_prepare_cpus(unsigned int max_cpus) | 791 | void __init smp_prepare_cpus(unsigned int max_cpus) |
786 | { | 792 | { |
787 | /* request the 0x1201 emergency signal external interrupt */ | 793 | /* request the 0x1201 emergency signal external interrupt */ |
788 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 794 | if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt)) |
789 | panic("Couldn't request external interrupt 0x1201"); | 795 | panic("Couldn't request external interrupt 0x1201"); |
790 | /* request the 0x1202 external call external interrupt */ | 796 | /* request the 0x1202 external call external interrupt */ |
791 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | 797 | if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt)) |
792 | panic("Couldn't request external interrupt 0x1202"); | 798 | panic("Couldn't request external interrupt 0x1202"); |
793 | smp_detect_cpus(); | 799 | smp_detect_cpus(); |
794 | } | 800 | } |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index dd95f1631621..386d37a228bb 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -262,11 +262,11 @@ void __init time_init(void) | |||
262 | stp_reset(); | 262 | stp_reset(); |
263 | 263 | ||
264 | /* request the clock comparator external interrupt */ | 264 | /* request the clock comparator external interrupt */ |
265 | if (register_external_interrupt(0x1004, clock_comparator_interrupt)) | 265 | if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt)) |
266 | panic("Couldn't request external interrupt 0x1004"); | 266 | panic("Couldn't request external interrupt 0x1004"); |
267 | 267 | ||
268 | /* request the timing alert external interrupt */ | 268 | /* request the timing alert external interrupt */ |
269 | if (register_external_interrupt(0x1406, timing_alert_interrupt)) | 269 | if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt)) |
270 | panic("Couldn't request external interrupt 0x1406"); | 270 | panic("Couldn't request external interrupt 0x1406"); |
271 | 271 | ||
272 | if (clocksource_register(&clocksource_tod) != 0) | 272 | if (clocksource_register(&clocksource_tod) != 0) |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 03a05ffb662f..08dfc839a6cf 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -167,6 +167,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
167 | 167 | ||
168 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); | 168 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); |
169 | switch (subcode) { | 169 | switch (subcode) { |
170 | case 0: | ||
171 | case 1: | ||
172 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
173 | return -EOPNOTSUPP; | ||
170 | case 3: | 174 | case 3: |
171 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; | 175 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; |
172 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | 176 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); |
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index e3fffe1dff51..c6d752e8bf28 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for s390-specific library files.. | 2 | # Makefile for s390-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o | 5 | lib-y += delay.o string.o uaccess.o find.o |
6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o | 6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o |
7 | obj-$(CONFIG_64BIT) += mem64.o | 7 | obj-$(CONFIG_64BIT) += mem64.o |
8 | lib-$(CONFIG_SMP) += spinlock.o | 8 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c new file mode 100644 index 000000000000..23f866b4c7f1 --- /dev/null +++ b/arch/s390/lib/uaccess.c | |||
@@ -0,0 +1,407 @@ | |||
1 | /* | ||
2 | * Standard user space access functions based on mvcp/mvcs and doing | ||
3 | * interesting things in the secondary space mode. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2006,2014 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Gerald Schaefer (gerald.schaefer@de.ibm.com) | ||
8 | */ | ||
9 | |||
10 | #include <linux/jump_label.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/export.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/facility.h> | ||
17 | |||
18 | #ifndef CONFIG_64BIT | ||
19 | #define AHI "ahi" | ||
20 | #define ALR "alr" | ||
21 | #define CLR "clr" | ||
22 | #define LHI "lhi" | ||
23 | #define SLR "slr" | ||
24 | #else | ||
25 | #define AHI "aghi" | ||
26 | #define ALR "algr" | ||
27 | #define CLR "clgr" | ||
28 | #define LHI "lghi" | ||
29 | #define SLR "slgr" | ||
30 | #endif | ||
31 | |||
32 | static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; | ||
33 | |||
34 | static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, | ||
35 | unsigned long size) | ||
36 | { | ||
37 | register unsigned long reg0 asm("0") = 0x81UL; | ||
38 | unsigned long tmp1, tmp2; | ||
39 | |||
40 | tmp1 = -4096UL; | ||
41 | asm volatile( | ||
42 | "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" | ||
43 | "9: jz 7f\n" | ||
44 | "1:"ALR" %0,%3\n" | ||
45 | " "SLR" %1,%3\n" | ||
46 | " "SLR" %2,%3\n" | ||
47 | " j 0b\n" | ||
48 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | ||
49 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | ||
50 | " "SLR" %4,%1\n" | ||
51 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
52 | " jnh 4f\n" | ||
53 | "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" | ||
54 | "10:"SLR" %0,%4\n" | ||
55 | " "ALR" %2,%4\n" | ||
56 | "4:"LHI" %4,-1\n" | ||
57 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | ||
58 | " bras %3,6f\n" /* memset loop */ | ||
59 | " xc 0(1,%2),0(%2)\n" | ||
60 | "5: xc 0(256,%2),0(%2)\n" | ||
61 | " la %2,256(%2)\n" | ||
62 | "6:"AHI" %4,-256\n" | ||
63 | " jnm 5b\n" | ||
64 | " ex %4,0(%3)\n" | ||
65 | " j 8f\n" | ||
66 | "7:"SLR" %0,%0\n" | ||
67 | "8:\n" | ||
68 | EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) | ||
69 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
70 | : "d" (reg0) : "cc", "memory"); | ||
71 | return size; | ||
72 | } | ||
73 | |||
74 | static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, | ||
75 | unsigned long size) | ||
76 | { | ||
77 | unsigned long tmp1, tmp2; | ||
78 | |||
79 | update_primary_asce(current); | ||
80 | tmp1 = -256UL; | ||
81 | asm volatile( | ||
82 | " sacf 0\n" | ||
83 | "0: mvcp 0(%0,%2),0(%1),%3\n" | ||
84 | "10:jz 8f\n" | ||
85 | "1:"ALR" %0,%3\n" | ||
86 | " la %1,256(%1)\n" | ||
87 | " la %2,256(%2)\n" | ||
88 | "2: mvcp 0(%0,%2),0(%1),%3\n" | ||
89 | "11:jnz 1b\n" | ||
90 | " j 8f\n" | ||
91 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ | ||
92 | " "LHI" %3,-4096\n" | ||
93 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ | ||
94 | " "SLR" %4,%1\n" | ||
95 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
96 | " jnh 5f\n" | ||
97 | "4: mvcp 0(%4,%2),0(%1),%3\n" | ||
98 | "12:"SLR" %0,%4\n" | ||
99 | " "ALR" %2,%4\n" | ||
100 | "5:"LHI" %4,-1\n" | ||
101 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | ||
102 | " bras %3,7f\n" /* memset loop */ | ||
103 | " xc 0(1,%2),0(%2)\n" | ||
104 | "6: xc 0(256,%2),0(%2)\n" | ||
105 | " la %2,256(%2)\n" | ||
106 | "7:"AHI" %4,-256\n" | ||
107 | " jnm 6b\n" | ||
108 | " ex %4,0(%3)\n" | ||
109 | " j 9f\n" | ||
110 | "8:"SLR" %0,%0\n" | ||
111 | "9: sacf 768\n" | ||
112 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) | ||
113 | EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) | ||
114 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
115 | : : "cc", "memory"); | ||
116 | return size; | ||
117 | } | ||
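The mvcp variant brackets the copy with sacf to switch the address-space-control mode, then restores the kernel's home mode afterwards. For reference, the SAC codes used throughout this file (architecture-defined values, listed here as an aid):

    /* sacf 0   -> primary-space mode   (for the mvcp/mvcs copies)      */
    /* sacf 256 -> secondary-space mode (for mvc/xc/srst on user memory)*/
    /* sacf 768 -> home-space mode      (normal kernel execution mode)  */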
118 | |||
119 | unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
120 | { | ||
121 | if (static_key_false(&have_mvcos)) | ||
122 | return copy_from_user_mvcos(to, from, n); | ||
123 | return copy_from_user_mvcp(to, from, n); | ||
124 | } | ||
125 | EXPORT_SYMBOL(__copy_from_user); | ||
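The have_mvcos static key starts out false and is flipped at most once during boot (see uaccess_init() at the end of the file), so the hot path carries no runtime conditional once the branch sites are patched. The jump-label idiom in isolation (hypothetical names, sketch only):

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    int fast_op(void);                      /* hypothetical */
    int slow_op(void);                      /* hypothetical */

    static int __init my_init(void)
    {
            if (machine_has_fast_path())    /* assumption: boot-time probe */
                    static_key_slow_inc(&my_key);   /* patches branch sites */
            return 0;
    }

    int my_op(void)
    {
            if (static_key_false(&my_key))  /* becomes a patched nop/jump */
                    return fast_op();
            return slow_op();
    }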
126 | |||
127 | static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, | ||
128 | unsigned long size) | ||
129 | { | ||
130 | register unsigned long reg0 asm("0") = 0x810000UL; | ||
131 | unsigned long tmp1, tmp2; | ||
132 | |||
133 | tmp1 = -4096UL; | ||
134 | asm volatile( | ||
135 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | ||
136 | "6: jz 4f\n" | ||
137 | "1:"ALR" %0,%3\n" | ||
138 | " "SLR" %1,%3\n" | ||
139 | " "SLR" %2,%3\n" | ||
140 | " j 0b\n" | ||
141 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | ||
142 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | ||
143 | " "SLR" %4,%1\n" | ||
144 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
145 | " jnh 5f\n" | ||
146 | "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" | ||
147 | "7:"SLR" %0,%4\n" | ||
148 | " j 5f\n" | ||
149 | "4:"SLR" %0,%0\n" | ||
150 | "5:\n" | ||
151 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) | ||
152 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
153 | : "d" (reg0) : "cc", "memory"); | ||
154 | return size; | ||
155 | } | ||
156 | |||
157 | static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, | ||
158 | unsigned long size) | ||
159 | { | ||
160 | unsigned long tmp1, tmp2; | ||
161 | |||
162 | update_primary_asce(current); | ||
163 | tmp1 = -256UL; | ||
164 | asm volatile( | ||
165 | " sacf 0\n" | ||
166 | "0: mvcs 0(%0,%1),0(%2),%3\n" | ||
167 | "7: jz 5f\n" | ||
168 | "1:"ALR" %0,%3\n" | ||
169 | " la %1,256(%1)\n" | ||
170 | " la %2,256(%2)\n" | ||
171 | "2: mvcs 0(%0,%1),0(%2),%3\n" | ||
172 | "8: jnz 1b\n" | ||
173 | " j 5f\n" | ||
174 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ | ||
175 | " "LHI" %3,-4096\n" | ||
176 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ | ||
177 | " "SLR" %4,%1\n" | ||
178 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
179 | " jnh 6f\n" | ||
180 | "4: mvcs 0(%4,%1),0(%2),%3\n" | ||
181 | "9:"SLR" %0,%4\n" | ||
182 | " j 6f\n" | ||
183 | "5:"SLR" %0,%0\n" | ||
184 | "6: sacf 768\n" | ||
185 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) | ||
186 | EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) | ||
187 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
188 | : : "cc", "memory"); | ||
189 | return size; | ||
190 | } | ||
191 | |||
192 | unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
193 | { | ||
194 | if (static_key_false(&have_mvcos)) | ||
195 | return copy_to_user_mvcos(to, from, n); | ||
196 | return copy_to_user_mvcs(to, from, n); | ||
197 | } | ||
198 | EXPORT_SYMBOL(__copy_to_user); | ||
199 | |||
200 | static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, | ||
201 | unsigned long size) | ||
202 | { | ||
203 | register unsigned long reg0 asm("0") = 0x810081UL; | ||
204 | unsigned long tmp1, tmp2; | ||
205 | |||
206 | tmp1 = -4096UL; | ||
207 | /* FIXME: copy with reduced length. */ | ||
208 | asm volatile( | ||
209 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | ||
210 | " jz 2f\n" | ||
211 | "1:"ALR" %0,%3\n" | ||
212 | " "SLR" %1,%3\n" | ||
213 | " "SLR" %2,%3\n" | ||
214 | " j 0b\n" | ||
215 | "2:"SLR" %0,%0\n" | ||
216 | "3: \n" | ||
217 | EX_TABLE(0b,3b) | ||
218 | : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) | ||
219 | : "d" (reg0) : "cc", "memory"); | ||
220 | return size; | ||
221 | } | ||
222 | |||
223 | static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from, | ||
224 | unsigned long size) | ||
225 | { | ||
226 | unsigned long tmp1; | ||
227 | |||
228 | update_primary_asce(current); | ||
229 | asm volatile( | ||
230 | " sacf 256\n" | ||
231 | " "AHI" %0,-1\n" | ||
232 | " jo 5f\n" | ||
233 | " bras %3,3f\n" | ||
234 | "0:"AHI" %0,257\n" | ||
235 | "1: mvc 0(1,%1),0(%2)\n" | ||
236 | " la %1,1(%1)\n" | ||
237 | " la %2,1(%2)\n" | ||
238 | " "AHI" %0,-1\n" | ||
239 | " jnz 1b\n" | ||
240 | " j 5f\n" | ||
241 | "2: mvc 0(256,%1),0(%2)\n" | ||
242 | " la %1,256(%1)\n" | ||
243 | " la %2,256(%2)\n" | ||
244 | "3:"AHI" %0,-256\n" | ||
245 | " jnm 2b\n" | ||
246 | "4: ex %0,1b-0b(%3)\n" | ||
247 | "5: "SLR" %0,%0\n" | ||
248 | "6: sacf 768\n" | ||
249 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | ||
250 | : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) | ||
251 | : : "cc", "memory"); | ||
252 | return size; | ||
253 | } | ||
254 | |||
255 | unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
256 | { | ||
257 | if (static_key_false(&have_mvcos)) | ||
258 | return copy_in_user_mvcos(to, from, n); | ||
259 | return copy_in_user_mvc(to, from, n); | ||
260 | } | ||
261 | EXPORT_SYMBOL(__copy_in_user); | ||
262 | |||
263 | static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) | ||
264 | { | ||
265 | register unsigned long reg0 asm("0") = 0x810000UL; | ||
266 | unsigned long tmp1, tmp2; | ||
267 | |||
268 | tmp1 = -4096UL; | ||
269 | asm volatile( | ||
270 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" | ||
271 | " jz 4f\n" | ||
272 | "1:"ALR" %0,%2\n" | ||
273 | " "SLR" %1,%2\n" | ||
274 | " j 0b\n" | ||
275 | "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ | ||
276 | " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ | ||
277 | " "SLR" %3,%1\n" | ||
278 | " "CLR" %0,%3\n" /* copy crosses next page boundary? */ | ||
279 | " jnh 5f\n" | ||
280 | "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" | ||
281 | " "SLR" %0,%3\n" | ||
282 | " j 5f\n" | ||
283 | "4:"SLR" %0,%0\n" | ||
284 | "5:\n" | ||
285 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) | ||
286 | : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) | ||
287 | : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); | ||
288 | return size; | ||
289 | } | ||
290 | |||
291 | static inline unsigned long clear_user_xc(void __user *to, unsigned long size) | ||
292 | { | ||
293 | unsigned long tmp1, tmp2; | ||
294 | |||
295 | update_primary_asce(current); | ||
296 | asm volatile( | ||
297 | " sacf 256\n" | ||
298 | " "AHI" %0,-1\n" | ||
299 | " jo 5f\n" | ||
300 | " bras %3,3f\n" | ||
301 | " xc 0(1,%1),0(%1)\n" | ||
302 | "0:"AHI" %0,257\n" | ||
303 | " la %2,255(%1)\n" /* %2 = ptr + 255 */ | ||
304 | " srl %2,12\n" | ||
305 | " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ | ||
306 | " "SLR" %2,%1\n" | ||
307 | " "CLR" %0,%2\n" /* clear crosses next page boundary? */ | ||
308 | " jnh 5f\n" | ||
309 | " "AHI" %2,-1\n" | ||
310 | "1: ex %2,0(%3)\n" | ||
311 | " "AHI" %2,1\n" | ||
312 | " "SLR" %0,%2\n" | ||
313 | " j 5f\n" | ||
314 | "2: xc 0(256,%1),0(%1)\n" | ||
315 | " la %1,256(%1)\n" | ||
316 | "3:"AHI" %0,-256\n" | ||
317 | " jnm 2b\n" | ||
318 | "4: ex %0,0(%3)\n" | ||
319 | "5: "SLR" %0,%0\n" | ||
320 | "6: sacf 768\n" | ||
321 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | ||
322 | : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) | ||
323 | : : "cc", "memory"); | ||
324 | return size; | ||
325 | } | ||
326 | |||
327 | unsigned long __clear_user(void __user *to, unsigned long size) | ||
328 | { | ||
329 | if (static_key_false(&have_mvcos)) | ||
330 | return clear_user_mvcos(to, size); | ||
331 | return clear_user_xc(to, size); | ||
332 | } | ||
333 | EXPORT_SYMBOL(__clear_user); | ||
334 | |||
335 | static inline unsigned long strnlen_user_srst(const char __user *src, | ||
336 | unsigned long size) | ||
337 | { | ||
338 | register unsigned long reg0 asm("0") = 0; | ||
339 | unsigned long tmp1, tmp2; | ||
340 | |||
341 | if (unlikely(!size)) | ||
342 | return 0; | ||
343 | update_primary_asce(current); | ||
344 | asm volatile( | ||
345 | " la %2,0(%1)\n" | ||
346 | " la %3,0(%0,%1)\n" | ||
347 | " "SLR" %0,%0\n" | ||
348 | " sacf 256\n" | ||
349 | "0: srst %3,%2\n" | ||
350 | " jo 0b\n" | ||
351 | " la %0,1(%3)\n" /* strnlen_user results includes \0 */ | ||
352 | " "SLR" %0,%1\n" | ||
353 | "1: sacf 768\n" | ||
354 | EX_TABLE(0b,1b) | ||
355 | : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) | ||
356 | : "d" (reg0) : "cc", "memory"); | ||
357 | return size; | ||
358 | } | ||
359 | |||
360 | unsigned long __strnlen_user(const char __user *src, unsigned long size) | ||
361 | { | ||
362 | update_primary_asce(current); | ||
363 | return strnlen_user_srst(src, size); | ||
364 | } | ||
365 | EXPORT_SYMBOL(__strnlen_user); | ||
366 | |||
367 | long __strncpy_from_user(char *dst, const char __user *src, long size) | ||
368 | { | ||
369 | size_t done, len, offset, len_str; | ||
370 | |||
371 | if (unlikely(size <= 0)) | ||
372 | return 0; | ||
373 | done = 0; | ||
374 | do { | ||
375 | offset = (size_t)src & ~PAGE_MASK; | ||
376 | len = min(size - done, PAGE_SIZE - offset); | ||
377 | if (copy_from_user(dst, src, len)) | ||
378 | return -EFAULT; | ||
379 | len_str = strnlen(dst, len); | ||
380 | done += len_str; | ||
381 | src += len_str; | ||
382 | dst += len_str; | ||
383 | } while ((len_str == len) && (done < size)); | ||
384 | return done; | ||
385 | } | ||
386 | EXPORT_SYMBOL(__strncpy_from_user); | ||
387 | |||
388 | /* | ||
389 | * The "old" uaccess variant without mvcos can be enforced with the | ||
390 | * uaccess_primary kernel parameter. This is mainly for debugging purposes. | ||
391 | */ | ||
392 | static int uaccess_primary __initdata; | ||
393 | |||
394 | static int __init parse_uaccess_pt(char *__unused) | ||
395 | { | ||
396 | uaccess_primary = 1; | ||
397 | return 0; | ||
398 | } | ||
399 | early_param("uaccess_primary", parse_uaccess_pt); | ||
400 | |||
401 | static int __init uaccess_init(void) | ||
402 | { | ||
403 | if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27)) | ||
404 | static_key_slow_inc(&have_mvcos); | ||
405 | return 0; | ||
406 | } | ||
407 | early_initcall(uaccess_init); | ||
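For debugging, the mvcp/mvcs fallback can be forced by appending uaccess_primary to the kernel command line; otherwise the gate, restated from uaccess_init() above, is simply:

    /* have_mvcos is enabled iff all three conditions hold: */
    bool use_mvcos = IS_ENABLED(CONFIG_64BIT) && !uaccess_primary &&
                     test_facility(27);     /* facility 27 = MVCOS */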
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h deleted file mode 100644 index c7e0e81f4b4e..000000000000 --- a/arch/s390/lib/uaccess.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2007 | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #ifndef __ARCH_S390_LIB_UACCESS_H | ||
7 | #define __ARCH_S390_LIB_UACCESS_H | ||
8 | |||
9 | unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n); | ||
10 | unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n); | ||
11 | unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n); | ||
12 | unsigned long clear_user_pt(void __user *to, unsigned long n); | ||
13 | unsigned long strnlen_user_pt(const char __user *src, unsigned long count); | ||
14 | long strncpy_from_user_pt(char *dst, const char __user *src, long count); | ||
15 | |||
16 | #endif /* __ARCH_S390_LIB_UACCESS_H */ | ||
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c deleted file mode 100644 index ae97b8df11aa..000000000000 --- a/arch/s390/lib/uaccess_mvcos.c +++ /dev/null | |||
@@ -1,263 +0,0 @@ | |||
1 | /* | ||
2 | * Optimized user space access functions based on mvcos. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
6 | * Gerald Schaefer (gerald.schaefer@de.ibm.com) | ||
7 | */ | ||
8 | |||
9 | #include <linux/jump_label.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/facility.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | #include <asm/futex.h> | ||
16 | #include "uaccess.h" | ||
17 | |||
18 | #ifndef CONFIG_64BIT | ||
19 | #define AHI "ahi" | ||
20 | #define ALR "alr" | ||
21 | #define CLR "clr" | ||
22 | #define LHI "lhi" | ||
23 | #define SLR "slr" | ||
24 | #else | ||
25 | #define AHI "aghi" | ||
26 | #define ALR "algr" | ||
27 | #define CLR "clgr" | ||
28 | #define LHI "lghi" | ||
29 | #define SLR "slgr" | ||
30 | #endif | ||
31 | |||
32 | static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE; | ||
33 | |||
34 | static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, | ||
35 | unsigned long size) | ||
36 | { | ||
37 | register unsigned long reg0 asm("0") = 0x81UL; | ||
38 | unsigned long tmp1, tmp2; | ||
39 | |||
40 | tmp1 = -4096UL; | ||
41 | asm volatile( | ||
42 | "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" | ||
43 | "9: jz 7f\n" | ||
44 | "1:"ALR" %0,%3\n" | ||
45 | " "SLR" %1,%3\n" | ||
46 | " "SLR" %2,%3\n" | ||
47 | " j 0b\n" | ||
48 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | ||
49 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | ||
50 | " "SLR" %4,%1\n" | ||
51 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
52 | " jnh 4f\n" | ||
53 | "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" | ||
54 | "10:"SLR" %0,%4\n" | ||
55 | " "ALR" %2,%4\n" | ||
56 | "4:"LHI" %4,-1\n" | ||
57 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | ||
58 | " bras %3,6f\n" /* memset loop */ | ||
59 | " xc 0(1,%2),0(%2)\n" | ||
60 | "5: xc 0(256,%2),0(%2)\n" | ||
61 | " la %2,256(%2)\n" | ||
62 | "6:"AHI" %4,-256\n" | ||
63 | " jnm 5b\n" | ||
64 | " ex %4,0(%3)\n" | ||
65 | " j 8f\n" | ||
66 | "7:"SLR" %0,%0\n" | ||
67 | "8: \n" | ||
68 | EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) | ||
69 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
70 | : "d" (reg0) : "cc", "memory"); | ||
71 | return size; | ||
72 | } | ||
73 | |||
74 | unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
75 | { | ||
76 | if (static_key_true(&have_mvcos)) | ||
77 | return copy_from_user_mvcos(to, from, n); | ||
78 | return copy_from_user_pt(to, from, n); | ||
79 | } | ||
80 | EXPORT_SYMBOL(__copy_from_user); | ||
81 | |||
82 | static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, | ||
83 | unsigned long size) | ||
84 | { | ||
85 | register unsigned long reg0 asm("0") = 0x810000UL; | ||
86 | unsigned long tmp1, tmp2; | ||
87 | |||
88 | tmp1 = -4096UL; | ||
89 | asm volatile( | ||
90 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | ||
91 | "6: jz 4f\n" | ||
92 | "1:"ALR" %0,%3\n" | ||
93 | " "SLR" %1,%3\n" | ||
94 | " "SLR" %2,%3\n" | ||
95 | " j 0b\n" | ||
96 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | ||
97 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | ||
98 | " "SLR" %4,%1\n" | ||
99 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
100 | " jnh 5f\n" | ||
101 | "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" | ||
102 | "7:"SLR" %0,%4\n" | ||
103 | " j 5f\n" | ||
104 | "4:"SLR" %0,%0\n" | ||
105 | "5: \n" | ||
106 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) | ||
107 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
108 | : "d" (reg0) : "cc", "memory"); | ||
109 | return size; | ||
110 | } | ||
111 | |||
112 | unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
113 | { | ||
114 | if (static_key_true(&have_mvcos)) | ||
115 | return copy_to_user_mvcos(to, from, n); | ||
116 | return copy_to_user_pt(to, from, n); | ||
117 | } | ||
118 | EXPORT_SYMBOL(__copy_to_user); | ||
119 | |||
120 | static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, | ||
121 | unsigned long size) | ||
122 | { | ||
123 | register unsigned long reg0 asm("0") = 0x810081UL; | ||
124 | unsigned long tmp1, tmp2; | ||
125 | |||
126 | tmp1 = -4096UL; | ||
127 | /* FIXME: copy with reduced length. */ | ||
128 | asm volatile( | ||
129 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | ||
130 | " jz 2f\n" | ||
131 | "1:"ALR" %0,%3\n" | ||
132 | " "SLR" %1,%3\n" | ||
133 | " "SLR" %2,%3\n" | ||
134 | " j 0b\n" | ||
135 | "2:"SLR" %0,%0\n" | ||
136 | "3: \n" | ||
137 | EX_TABLE(0b,3b) | ||
138 | : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) | ||
139 | : "d" (reg0) : "cc", "memory"); | ||
140 | return size; | ||
141 | } | ||
142 | |||
143 | unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
144 | { | ||
145 | if (static_key_true(&have_mvcos)) | ||
146 | return copy_in_user_mvcos(to, from, n); | ||
147 | return copy_in_user_pt(to, from, n); | ||
148 | } | ||
149 | EXPORT_SYMBOL(__copy_in_user); | ||
150 | |||
151 | static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) | ||
152 | { | ||
153 | register unsigned long reg0 asm("0") = 0x810000UL; | ||
154 | unsigned long tmp1, tmp2; | ||
155 | |||
156 | tmp1 = -4096UL; | ||
157 | asm volatile( | ||
158 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" | ||
159 | " jz 4f\n" | ||
160 | "1:"ALR" %0,%2\n" | ||
161 | " "SLR" %1,%2\n" | ||
162 | " j 0b\n" | ||
163 | "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ | ||
164 | " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ | ||
165 | " "SLR" %3,%1\n" | ||
166 | " "CLR" %0,%3\n" /* copy crosses next page boundary? */ | ||
167 | " jnh 5f\n" | ||
168 | "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" | ||
169 | " "SLR" %0,%3\n" | ||
170 | " j 5f\n" | ||
171 | "4:"SLR" %0,%0\n" | ||
172 | "5: \n" | ||
173 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) | ||
174 | : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) | ||
175 | : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); | ||
176 | return size; | ||
177 | } | ||
178 | |||
179 | unsigned long __clear_user(void __user *to, unsigned long size) | ||
180 | { | ||
181 | if (static_key_true(&have_mvcos)) | ||
182 | return clear_user_mvcos(to, size); | ||
183 | return clear_user_pt(to, size); | ||
184 | } | ||
185 | EXPORT_SYMBOL(__clear_user); | ||
186 | |||
187 | static inline unsigned long strnlen_user_mvcos(const char __user *src, | ||
188 | unsigned long count) | ||
189 | { | ||
190 | unsigned long done, len, offset, len_str; | ||
191 | char buf[256]; | ||
192 | |||
193 | done = 0; | ||
194 | do { | ||
195 | offset = (unsigned long)src & ~PAGE_MASK; | ||
196 | len = min(256UL, PAGE_SIZE - offset); | ||
197 | len = min(count - done, len); | ||
198 | if (copy_from_user_mvcos(buf, src, len)) | ||
199 | return 0; | ||
200 | len_str = strnlen(buf, len); | ||
201 | done += len_str; | ||
202 | src += len_str; | ||
203 | } while ((len_str == len) && (done < count)); | ||
204 | return done + 1; | ||
205 | } | ||
206 | |||
207 | unsigned long __strnlen_user(const char __user *src, unsigned long count) | ||
208 | { | ||
209 | if (static_key_true(&have_mvcos)) | ||
210 | return strnlen_user_mvcos(src, count); | ||
211 | return strnlen_user_pt(src, count); | ||
212 | } | ||
213 | EXPORT_SYMBOL(__strnlen_user); | ||
214 | |||
215 | static inline long strncpy_from_user_mvcos(char *dst, const char __user *src, | ||
216 | long count) | ||
217 | { | ||
218 | unsigned long done, len, offset, len_str; | ||
219 | |||
220 | if (unlikely(count <= 0)) | ||
221 | return 0; | ||
222 | done = 0; | ||
223 | do { | ||
224 | offset = (unsigned long)src & ~PAGE_MASK; | ||
225 | len = min(count - done, PAGE_SIZE - offset); | ||
226 | if (copy_from_user_mvcos(dst, src, len)) | ||
227 | return -EFAULT; | ||
228 | len_str = strnlen(dst, len); | ||
229 | done += len_str; | ||
230 | src += len_str; | ||
231 | dst += len_str; | ||
232 | } while ((len_str == len) && (done < count)); | ||
233 | return done; | ||
234 | } | ||
235 | |||
236 | long __strncpy_from_user(char *dst, const char __user *src, long count) | ||
237 | { | ||
238 | if (static_key_true(&have_mvcos)) | ||
239 | return strncpy_from_user_mvcos(dst, src, count); | ||
240 | return strncpy_from_user_pt(dst, src, count); | ||
241 | } | ||
242 | EXPORT_SYMBOL(__strncpy_from_user); | ||
243 | |||
244 | /* | ||
245 | * The uaccess page table walk variant can be enforced with the "uaccesspt" | ||
246 | * kernel parameter. This is mainly for debugging purposes. | ||
247 | */ | ||
248 | static int force_uaccess_pt __initdata; | ||
249 | |||
250 | static int __init parse_uaccess_pt(char *__unused) | ||
251 | { | ||
252 | force_uaccess_pt = 1; | ||
253 | return 0; | ||
254 | } | ||
255 | early_param("uaccesspt", parse_uaccess_pt); | ||
256 | |||
257 | static int __init uaccess_init(void) | ||
258 | { | ||
259 | if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27)) | ||
260 | static_key_slow_dec(&have_mvcos); | ||
261 | return 0; | ||
262 | } | ||
263 | early_initcall(uaccess_init); | ||
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c deleted file mode 100644 index 8d39760bae68..000000000000 --- a/arch/s390/lib/uaccess_pt.c +++ /dev/null | |||
@@ -1,471 +0,0 @@ | |||
1 | /* | ||
2 | * User access functions based on page table walks for enhanced | ||
3 | * system layout without hardware support. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2006, 2012 | ||
6 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) | ||
7 | */ | ||
8 | |||
9 | #include <linux/errno.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/hugetlb.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/futex.h> | ||
15 | #include "uaccess.h" | ||
16 | |||
17 | #ifndef CONFIG_64BIT | ||
18 | #define AHI "ahi" | ||
19 | #define SLR "slr" | ||
20 | #else | ||
21 | #define AHI "aghi" | ||
22 | #define SLR "slgr" | ||
23 | #endif | ||
24 | |||
25 | static unsigned long strnlen_kernel(const char __user *src, unsigned long count) | ||
26 | { | ||
27 | register unsigned long reg0 asm("0") = 0UL; | ||
28 | unsigned long tmp1, tmp2; | ||
29 | |||
30 | asm volatile( | ||
31 | " la %2,0(%1)\n" | ||
32 | " la %3,0(%0,%1)\n" | ||
33 | " "SLR" %0,%0\n" | ||
34 | "0: srst %3,%2\n" | ||
35 | " jo 0b\n" | ||
36 | " la %0,1(%3)\n" /* strnlen_kernel results includes \0 */ | ||
37 | " "SLR" %0,%1\n" | ||
38 | "1:\n" | ||
39 | EX_TABLE(0b,1b) | ||
40 | : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2) | ||
41 | : "d" (reg0) : "cc", "memory"); | ||
42 | return count; | ||
43 | } | ||
44 | |||
45 | static unsigned long copy_in_kernel(void __user *to, const void __user *from, | ||
46 | unsigned long count) | ||
47 | { | ||
48 | unsigned long tmp1; | ||
49 | |||
50 | asm volatile( | ||
51 | " "AHI" %0,-1\n" | ||
52 | " jo 5f\n" | ||
53 | " bras %3,3f\n" | ||
54 | "0:"AHI" %0,257\n" | ||
55 | "1: mvc 0(1,%1),0(%2)\n" | ||
56 | " la %1,1(%1)\n" | ||
57 | " la %2,1(%2)\n" | ||
58 | " "AHI" %0,-1\n" | ||
59 | " jnz 1b\n" | ||
60 | " j 5f\n" | ||
61 | "2: mvc 0(256,%1),0(%2)\n" | ||
62 | " la %1,256(%1)\n" | ||
63 | " la %2,256(%2)\n" | ||
64 | "3:"AHI" %0,-256\n" | ||
65 | " jnm 2b\n" | ||
66 | "4: ex %0,1b-0b(%3)\n" | ||
67 | "5:"SLR" %0,%0\n" | ||
68 | "6:\n" | ||
69 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | ||
70 | : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1) | ||
71 | : : "cc", "memory"); | ||
72 | return count; | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * Returns kernel address for user virtual address. If the returned address is | ||
77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the | ||
78 | * address contains the (negative) exception code. | ||
79 | */ | ||
80 | #ifdef CONFIG_64BIT | ||
81 | |||
82 | static unsigned long follow_table(struct mm_struct *mm, | ||
83 | unsigned long address, int write) | ||
84 | { | ||
85 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
86 | |||
87 | if (unlikely(address > mm->context.asce_limit - 1)) | ||
88 | return -0x38UL; | ||
89 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { | ||
90 | case _ASCE_TYPE_REGION1: | ||
91 | table = table + ((address >> 53) & 0x7ff); | ||
92 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
93 | return -0x39UL; | ||
94 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
95 | /* fallthrough */ | ||
96 | case _ASCE_TYPE_REGION2: | ||
97 | table = table + ((address >> 42) & 0x7ff); | ||
98 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
99 | return -0x3aUL; | ||
100 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
101 | /* fallthrough */ | ||
102 | case _ASCE_TYPE_REGION3: | ||
103 | table = table + ((address >> 31) & 0x7ff); | ||
104 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
105 | return -0x3bUL; | ||
106 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
107 | /* fallthrough */ | ||
108 | case _ASCE_TYPE_SEGMENT: | ||
109 | table = table + ((address >> 20) & 0x7ff); | ||
110 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) | ||
111 | return -0x10UL; | ||
112 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { | ||
113 | if (write && (*table & _SEGMENT_ENTRY_PROTECT)) | ||
114 | return -0x04UL; | ||
115 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + | ||
116 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); | ||
117 | } | ||
118 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
119 | } | ||
120 | table = table + ((address >> 12) & 0xff); | ||
121 | if (unlikely(*table & _PAGE_INVALID)) | ||
122 | return -0x11UL; | ||
123 | if (write && (*table & _PAGE_PROTECT)) | ||
124 | return -0x04UL; | ||
125 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
126 | } | ||
127 | |||
128 | #else /* CONFIG_64BIT */ | ||
129 | |||
130 | static unsigned long follow_table(struct mm_struct *mm, | ||
131 | unsigned long address, int write) | ||
132 | { | ||
133 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
134 | |||
135 | table = table + ((address >> 20) & 0x7ff); | ||
136 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) | ||
137 | return -0x10UL; | ||
138 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
139 | table = table + ((address >> 12) & 0xff); | ||
140 | if (unlikely(*table & _PAGE_INVALID)) | ||
141 | return -0x11UL; | ||
142 | if (write && (*table & _PAGE_PROTECT)) | ||
143 | return -0x04UL; | ||
144 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
145 | } | ||
146 | |||
147 | #endif /* CONFIG_64BIT */ | ||
148 | |||
149 | static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr, | ||
150 | unsigned long n, int write_user) | ||
151 | { | ||
152 | struct mm_struct *mm = current->mm; | ||
153 | unsigned long offset, done, size, kaddr; | ||
154 | void *from, *to; | ||
155 | |||
156 | if (!mm) | ||
157 | return n; | ||
158 | done = 0; | ||
159 | retry: | ||
160 | spin_lock(&mm->page_table_lock); | ||
161 | do { | ||
162 | kaddr = follow_table(mm, uaddr, write_user); | ||
163 | if (IS_ERR_VALUE(kaddr)) | ||
164 | goto fault; | ||
165 | |||
166 | offset = uaddr & ~PAGE_MASK; | ||
167 | size = min(n - done, PAGE_SIZE - offset); | ||
168 | if (write_user) { | ||
169 | to = (void *) kaddr; | ||
170 | from = kptr + done; | ||
171 | } else { | ||
172 | from = (void *) kaddr; | ||
173 | to = kptr + done; | ||
174 | } | ||
175 | memcpy(to, from, size); | ||
176 | done += size; | ||
177 | uaddr += size; | ||
178 | } while (done < n); | ||
179 | spin_unlock(&mm->page_table_lock); | ||
180 | return n - done; | ||
181 | fault: | ||
182 | spin_unlock(&mm->page_table_lock); | ||
183 | if (__handle_fault(uaddr, -kaddr, write_user)) | ||
184 | return n - done; | ||
185 | goto retry; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Do DAT for user address by page table walk, return kernel address. | ||
190 | * This function needs to be called with current->mm->page_table_lock held. | ||
191 | */ | ||
192 | static inline unsigned long __dat_user_addr(unsigned long uaddr, int write) | ||
193 | { | ||
194 | struct mm_struct *mm = current->mm; | ||
195 | unsigned long kaddr; | ||
196 | int rc; | ||
197 | |||
198 | retry: | ||
199 | kaddr = follow_table(mm, uaddr, write); | ||
200 | if (IS_ERR_VALUE(kaddr)) | ||
201 | goto fault; | ||
202 | |||
203 | return kaddr; | ||
204 | fault: | ||
205 | spin_unlock(&mm->page_table_lock); | ||
206 | rc = __handle_fault(uaddr, -kaddr, write); | ||
207 | spin_lock(&mm->page_table_lock); | ||
208 | if (!rc) | ||
209 | goto retry; | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n) | ||
214 | { | ||
215 | unsigned long rc; | ||
216 | |||
217 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
218 | return copy_in_kernel((void __user *) to, from, n); | ||
219 | rc = __user_copy_pt((unsigned long) from, to, n, 0); | ||
220 | if (unlikely(rc)) | ||
221 | memset(to + n - rc, 0, rc); | ||
222 | return rc; | ||
223 | } | ||
224 | |||
225 | unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n) | ||
226 | { | ||
227 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
228 | return copy_in_kernel(to, (void __user *) from, n); | ||
229 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); | ||
230 | } | ||
231 | |||
232 | unsigned long clear_user_pt(void __user *to, unsigned long n) | ||
233 | { | ||
234 | void *zpage = (void *) empty_zero_page; | ||
235 | unsigned long done, size, ret; | ||
236 | |||
237 | done = 0; | ||
238 | do { | ||
239 | if (n - done > PAGE_SIZE) | ||
240 | size = PAGE_SIZE; | ||
241 | else | ||
242 | size = n - done; | ||
243 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
244 | ret = copy_in_kernel(to, (void __user *) zpage, n); | ||
245 | else | ||
246 | ret = __user_copy_pt((unsigned long) to, zpage, size, 1); | ||
247 | done += size; | ||
248 | to += size; | ||
249 | if (ret) | ||
250 | return ret + n - done; | ||
251 | } while (done < n); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | unsigned long strnlen_user_pt(const char __user *src, unsigned long count) | ||
256 | { | ||
257 | unsigned long uaddr = (unsigned long) src; | ||
258 | struct mm_struct *mm = current->mm; | ||
259 | unsigned long offset, done, len, kaddr; | ||
260 | unsigned long len_str; | ||
261 | |||
262 | if (unlikely(!count)) | ||
263 | return 0; | ||
264 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
265 | return strnlen_kernel(src, count); | ||
266 | if (!mm) | ||
267 | return 0; | ||
268 | done = 0; | ||
269 | retry: | ||
270 | spin_lock(&mm->page_table_lock); | ||
271 | do { | ||
272 | kaddr = follow_table(mm, uaddr, 0); | ||
273 | if (IS_ERR_VALUE(kaddr)) | ||
274 | goto fault; | ||
275 | |||
276 | offset = uaddr & ~PAGE_MASK; | ||
277 | len = min(count - done, PAGE_SIZE - offset); | ||
278 | len_str = strnlen((char *) kaddr, len); | ||
279 | done += len_str; | ||
280 | uaddr += len_str; | ||
281 | } while ((len_str == len) && (done < count)); | ||
282 | spin_unlock(&mm->page_table_lock); | ||
283 | return done + 1; | ||
284 | fault: | ||
285 | spin_unlock(&mm->page_table_lock); | ||
286 | if (__handle_fault(uaddr, -kaddr, 0)) | ||
287 | return 0; | ||
288 | goto retry; | ||
289 | } | ||
290 | |||
291 | long strncpy_from_user_pt(char *dst, const char __user *src, long count) | ||
292 | { | ||
293 | unsigned long done, len, offset, len_str; | ||
294 | |||
295 | if (unlikely(count <= 0)) | ||
296 | return 0; | ||
297 | done = 0; | ||
298 | do { | ||
299 | offset = (unsigned long)src & ~PAGE_MASK; | ||
300 | len = min(count - done, PAGE_SIZE - offset); | ||
301 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
302 | if (copy_in_kernel((void __user *) dst, src, len)) | ||
303 | return -EFAULT; | ||
304 | } else { | ||
305 | if (__user_copy_pt((unsigned long) src, dst, len, 0)) | ||
306 | return -EFAULT; | ||
307 | } | ||
308 | len_str = strnlen(dst, len); | ||
309 | done += len_str; | ||
310 | src += len_str; | ||
311 | dst += len_str; | ||
312 | } while ((len_str == len) && (done < count)); | ||
313 | return done; | ||
314 | } | ||
315 | |||
316 | unsigned long copy_in_user_pt(void __user *to, const void __user *from, | ||
317 | unsigned long n) | ||
318 | { | ||
319 | struct mm_struct *mm = current->mm; | ||
320 | unsigned long offset_max, uaddr, done, size, error_code; | ||
321 | unsigned long uaddr_from = (unsigned long) from; | ||
322 | unsigned long uaddr_to = (unsigned long) to; | ||
323 | unsigned long kaddr_to, kaddr_from; | ||
324 | int write_user; | ||
325 | |||
326 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
327 | return copy_in_kernel(to, from, n); | ||
328 | if (!mm) | ||
329 | return n; | ||
330 | done = 0; | ||
331 | retry: | ||
332 | spin_lock(&mm->page_table_lock); | ||
333 | do { | ||
334 | write_user = 0; | ||
335 | uaddr = uaddr_from; | ||
336 | kaddr_from = follow_table(mm, uaddr_from, 0); | ||
337 | error_code = kaddr_from; | ||
338 | if (IS_ERR_VALUE(error_code)) | ||
339 | goto fault; | ||
340 | |||
341 | write_user = 1; | ||
342 | uaddr = uaddr_to; | ||
343 | kaddr_to = follow_table(mm, uaddr_to, 1); | ||
344 | error_code = (unsigned long) kaddr_to; | ||
345 | if (IS_ERR_VALUE(error_code)) | ||
346 | goto fault; | ||
347 | |||
348 | offset_max = max(uaddr_from & ~PAGE_MASK, | ||
349 | uaddr_to & ~PAGE_MASK); | ||
350 | size = min(n - done, PAGE_SIZE - offset_max); | ||
351 | |||
352 | memcpy((void *) kaddr_to, (void *) kaddr_from, size); | ||
353 | done += size; | ||
354 | uaddr_from += size; | ||
355 | uaddr_to += size; | ||
356 | } while (done < n); | ||
357 | spin_unlock(&mm->page_table_lock); | ||
358 | return n - done; | ||
359 | fault: | ||
360 | spin_unlock(&mm->page_table_lock); | ||
361 | if (__handle_fault(uaddr, -error_code, write_user)) | ||
362 | return n - done; | ||
363 | goto retry; | ||
364 | } | ||
365 | |||
366 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ | ||
367 | asm volatile("0: l %1,0(%6)\n" \ | ||
368 | "1: " insn \ | ||
369 | "2: cs %1,%2,0(%6)\n" \ | ||
370 | "3: jl 1b\n" \ | ||
371 | " lhi %0,0\n" \ | ||
372 | "4:\n" \ | ||
373 | EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ | ||
374 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ | ||
375 | "=m" (*uaddr) \ | ||
376 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | ||
377 | "m" (*uaddr) : "cc" ); | ||
378 | |||
379 | static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | ||
380 | { | ||
381 | int oldval = 0, newval, ret; | ||
382 | |||
383 | switch (op) { | ||
384 | case FUTEX_OP_SET: | ||
385 | __futex_atomic_op("lr %2,%5\n", | ||
386 | ret, oldval, newval, uaddr, oparg); | ||
387 | break; | ||
388 | case FUTEX_OP_ADD: | ||
389 | __futex_atomic_op("lr %2,%1\nar %2,%5\n", | ||
390 | ret, oldval, newval, uaddr, oparg); | ||
391 | break; | ||
392 | case FUTEX_OP_OR: | ||
393 | __futex_atomic_op("lr %2,%1\nor %2,%5\n", | ||
394 | ret, oldval, newval, uaddr, oparg); | ||
395 | break; | ||
396 | case FUTEX_OP_ANDN: | ||
397 | __futex_atomic_op("lr %2,%1\nnr %2,%5\n", | ||
398 | ret, oldval, newval, uaddr, oparg); | ||
399 | break; | ||
400 | case FUTEX_OP_XOR: | ||
401 | __futex_atomic_op("lr %2,%1\nxr %2,%5\n", | ||
402 | ret, oldval, newval, uaddr, oparg); | ||
403 | break; | ||
404 | default: | ||
405 | ret = -ENOSYS; | ||
406 | } | ||
407 | if (ret == 0) | ||
408 | *old = oldval; | ||
409 | return ret; | ||
410 | } | ||
411 | |||
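[Editor's note] Each case above differs only in how newval is derived from oldval (%1) and oparg (%5). In C the dispatch reduces to the table below; note that nr is a plain AND, so the ANDN case relies on the caller having prepared the operand accordingly. A sketch, using the FUTEX_OP_* constants from the UAPI header:

#include <linux/futex.h>	/* FUTEX_OP_SET etc. */

/* C equivalent of the per-op newval computation above. */
static int futex_newval(int op, int oldval, int oparg)
{
	switch (op) {
	case FUTEX_OP_SET:	return oparg;		/* lr %2,%5 */
	case FUTEX_OP_ADD:	return oldval + oparg;	/* ar %2,%5 */
	case FUTEX_OP_OR:	return oldval | oparg;	/* or %2,%5 */
	case FUTEX_OP_ANDN:	return oldval & oparg;	/* nr %2,%5 (plain AND) */
	case FUTEX_OP_XOR:	return oldval ^ oparg;	/* xr %2,%5 */
	default:		return oldval;		/* -ENOSYS path */
	}
}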
412 | int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old) | ||
413 | { | ||
414 | int ret; | ||
415 | |||
416 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
417 | return __futex_atomic_op_pt(op, uaddr, oparg, old); | ||
418 | if (unlikely(!current->mm)) | ||
419 | return -EFAULT; | ||
420 | spin_lock(&current->mm->page_table_lock); | ||
421 | uaddr = (u32 __force __user *) | ||
422 | __dat_user_addr((__force unsigned long) uaddr, 1); | ||
423 | if (!uaddr) { | ||
424 | spin_unlock(&current->mm->page_table_lock); | ||
425 | return -EFAULT; | ||
426 | } | ||
427 | get_page(virt_to_page(uaddr)); | ||
428 | spin_unlock(&current->mm->page_table_lock); | ||
429 | ret = __futex_atomic_op_pt(op, uaddr, oparg, old); | ||
430 | put_page(virt_to_page(uaddr)); | ||
431 | return ret; | ||
432 | } | ||
433 | |||
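[Editor's note] The wrapper above shows the pattern that makes page-table-walker uaccess safe without holding a lock across the operation: the translated kernel address is guaranteed valid only while page_table_lock is held, so the page is pinned with get_page() before the lock is dropped and released only after the futex operation completes. A skeleton of the pattern; translate() and operate() are hypothetical stand-ins for __dat_user_addr() and the futex op:

/* Sketch of the pin-then-operate pattern used above. */
static int pinned_op(struct mm_struct *mm, unsigned long uaddr)
{
	unsigned long kaddr;
	int ret;

	spin_lock(&mm->page_table_lock);
	kaddr = translate(uaddr);		/* valid only under the lock */
	if (!kaddr) {
		spin_unlock(&mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(kaddr));		/* pin: page cannot be freed */
	spin_unlock(&mm->page_table_lock);
	ret = operate(kaddr);			/* safe against unmap/reclaim */
	put_page(virt_to_page(kaddr));		/* unpin */
	return ret;
}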
434 | static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | ||
435 | u32 oldval, u32 newval) | ||
436 | { | ||
437 | int ret; | ||
438 | |||
439 | asm volatile("0: cs %1,%4,0(%5)\n" | ||
440 | "1: la %0,0\n" | ||
441 | "2:\n" | ||
442 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | ||
443 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | ||
444 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | ||
445 | : "cc", "memory" ); | ||
446 | *uval = oldval; | ||
447 | return ret; | ||
448 | } | ||
449 | |||
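[Editor's note] __futex_atomic_cmpxchg_pt() is a single cs: the store happens only if *uaddr still equals oldval, and in either case the value found in memory ends up in oldval, which is reported back through *uval; the EX_TABLE again converts a fault into the preloaded -EFAULT. Modeled with a C11 strong compare-exchange (without the fault handling):

#include <stdatomic.h>

/* User-space model of the single-cs cmpxchg above. */
static int futex_cmpxchg_model(unsigned int *uval,
			       _Atomic unsigned int *uaddr,
			       unsigned int oldval, unsigned int newval)
{
	/* On mismatch, compare_exchange writes the current value into
	 * oldval, just as cs updates its first operand. */
	atomic_compare_exchange_strong(uaddr, &oldval, newval);
	*uval = oldval;
	return 0;
}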
450 | int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | ||
451 | u32 oldval, u32 newval) | ||
452 | { | ||
453 | int ret; | ||
454 | |||
455 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
456 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | ||
457 | if (unlikely(!current->mm)) | ||
458 | return -EFAULT; | ||
459 | spin_lock(&current->mm->page_table_lock); | ||
460 | uaddr = (u32 __force __user *) | ||
461 | __dat_user_addr((__force unsigned long) uaddr, 1); | ||
462 | if (!uaddr) { | ||
463 | spin_unlock(&current->mm->page_table_lock); | ||
464 | return -EFAULT; | ||
465 | } | ||
466 | get_page(virt_to_page(uaddr)); | ||
467 | spin_unlock(&current->mm->page_table_lock); | ||
468 | ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | ||
469 | put_page(virt_to_page(uaddr)); | ||
470 | return ret; | ||
471 | } | ||
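[Editor's note] All entry points in this file share the same two guards: under set_fs(KERNEL_DS) the "user" pointer is really a kernel address, so the call is redirected to a plain kernel access, and without current->mm there are no page tables to walk, so the access must fail. As a sketch; kernel_variant() is a placeholder for the respective non-_pt helper:

/* Common fast-path guards used by the functions above (sketch). */
if (segment_eq(get_fs(), KERNEL_DS))
	return kernel_variant(uaddr);	/* address lies in kernel space */
if (unlikely(!current->mm))
	return -EFAULT;			/* kernel thread: nothing to walk */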
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 88cef505453b..19f623f1f21c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -106,21 +106,24 @@ void bust_spinlocks(int yes) | |||
106 | * Returns the address space associated with the fault. | 106 | * Returns the address space associated with the fault. |
107 | * Returns 0 for kernel space and 1 for user space. | 107 | * Returns 0 for kernel space and 1 for user space. |
108 | */ | 108 | */ |
109 | static inline int user_space_fault(unsigned long trans_exc_code) | 109 | static inline int user_space_fault(struct pt_regs *regs) |
110 | { | 110 | { |
111 | unsigned long trans_exc_code; | ||
112 | |||
111 | /* | 113 | /* |
112 | * The lowest two bits of the translation exception | 114 | * The lowest two bits of the translation exception |
113 | * identification indicate which paging table was used. | 115 | * identification indicate which paging table was used. |
114 | */ | 116 | */ |
115 | trans_exc_code &= 3; | 117 | trans_exc_code = regs->int_parm_long & 3; |
116 | if (trans_exc_code == 2) | 118 | if (trans_exc_code == 3) /* home space -> kernel */ |
117 | /* Access via secondary space, set_fs setting decides */ | 119 | return 0; |
120 | if (user_mode(regs)) | ||
121 | return 1; | ||
122 | if (trans_exc_code == 2) /* secondary space -> set_fs */ | ||
118 | return current->thread.mm_segment.ar4; | 123 | return current->thread.mm_segment.ar4; |
119 | /* | 124 | if (current->flags & PF_VCPU) |
120 | * Access via primary space or access register is from user space | 125 | return 1; |
121 | * and access via home space is from the kernel. | 126 | return 0; |
122 | */ | ||
123 | return trans_exc_code != 3; | ||
124 | } | 127 | } |
125 | 128 | ||
126 | static inline void report_user_fault(struct pt_regs *regs, long signr) | 129 | static inline void report_user_fault(struct pt_regs *regs, long signr) |
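[Editor's note] The reworked user_space_fault() takes the pt_regs and folds the PSW state into the decision: translation-exception bits of 3 (home space) always mean kernel, a fault raised while in user mode is always a user-space fault, secondary space follows the current set_fs() setting, and otherwise only a KVM guest access (PF_VCPU) counts as user space. The same decision as a self-contained model, with the inputs made explicit as plain flags:

enum space { SPACE_KERNEL = 0, SPACE_USER = 1 };

/* Model of the decision; teid_bits are the two low bits of the
 * translation exception identification. */
static enum space fault_space(unsigned int teid_bits, int in_user_mode,
			      int fs_is_user, int is_vcpu)
{
	if (teid_bits == 3)			/* home space */
		return SPACE_KERNEL;
	if (in_user_mode)
		return SPACE_USER;
	if (teid_bits == 2)			/* secondary space */
		return fs_is_user ? SPACE_USER : SPACE_KERNEL;
	return is_vcpu ? SPACE_USER : SPACE_KERNEL;
}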
@@ -172,7 +175,7 @@ static noinline void do_no_context(struct pt_regs *regs) | |||
172 | * terminate things with extreme prejudice. | 175 | * terminate things with extreme prejudice. |
173 | */ | 176 | */ |
174 | address = regs->int_parm_long & __FAIL_ADDR_MASK; | 177 | address = regs->int_parm_long & __FAIL_ADDR_MASK; |
175 | if (!user_space_fault(regs->int_parm_long)) | 178 | if (!user_space_fault(regs)) |
176 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" | 179 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" |
177 | " at virtual kernel address %p\n", (void *)address); | 180 | " at virtual kernel address %p\n", (void *)address); |
178 | else | 181 | else |
@@ -296,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access) | |||
296 | * user context. | 299 | * user context. |
297 | */ | 300 | */ |
298 | fault = VM_FAULT_BADCONTEXT; | 301 | fault = VM_FAULT_BADCONTEXT; |
299 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) | 302 | if (unlikely(!user_space_fault(regs) || in_atomic() || !mm)) |
300 | goto out; | 303 | goto out; |
301 | 304 | ||
302 | address = trans_exc_code & __FAIL_ADDR_MASK; | 305 | address = trans_exc_code & __FAIL_ADDR_MASK; |
@@ -441,30 +444,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs) | |||
441 | do_fault_error(regs, fault); | 444 | do_fault_error(regs, fault); |
442 | } | 445 | } |
443 | 446 | ||
444 | int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | ||
445 | { | ||
446 | struct pt_regs regs; | ||
447 | int access, fault; | ||
448 | |||
449 | /* Emulate a uaccess fault from kernel mode. */ | ||
450 | regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
451 | if (!irqs_disabled()) | ||
452 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | ||
453 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | ||
454 | regs.psw.addr |= PSW_ADDR_AMODE; | ||
455 | regs.int_code = pgm_int_code; | ||
456 | regs.int_parm_long = (uaddr & PAGE_MASK) | 2; | ||
457 | access = write ? VM_WRITE : VM_READ; | ||
458 | fault = do_exception(®s, access); | ||
459 | /* | ||
460 | * Since the fault happened in kernel mode while performing a uaccess | ||
461 | * all we need to do now is emulating a fixup in case "fault" is not | ||
462 | * zero. | ||
463 | * For the calling uaccess functions this results always in -EFAULT. | ||
464 | */ | ||
465 | return fault ? -EFAULT : 0; | ||
466 | } | ||
467 | |||
468 | #ifdef CONFIG_PFAULT | 447 | #ifdef CONFIG_PFAULT |
469 | /* | 448 | /* |
470 | * 'pfault' pseudo page faults routines. | 449 | * 'pfault' pseudo page faults routines. |
@@ -645,7 +624,7 @@ static int __init pfault_irq_init(void) | |||
645 | { | 624 | { |
646 | int rc; | 625 | int rc; |
647 | 626 | ||
648 | rc = register_external_interrupt(0x2603, pfault_interrupt); | 627 | rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); |
649 | if (rc) | 628 | if (rc) |
650 | goto out_extint; | 629 | goto out_extint; |
651 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; | 630 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; |
@@ -656,7 +635,7 @@ static int __init pfault_irq_init(void) | |||
656 | return 0; | 635 | return 0; |
657 | 636 | ||
658 | out_pfault: | 637 | out_pfault: |
659 | unregister_external_interrupt(0x2603, pfault_interrupt); | 638 | unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); |
660 | out_extint: | 639 | out_extint: |
661 | pfault_disable = 1; | 640 | pfault_disable = 1; |
662 | return rc; | 641 | return rc; |
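[Editor's note] The magic external-interruption subcodes give way to named defines here ("s390/irq: Add defines for external interruption codes" in this series). Inferring from the replaced literals, 0x2603 above and 0x1407 in the hwsampler hunk further down, the relevant new defines pair the names with exactly these values:

/* Values inferred from the replaced literals in this diff: */
#define EXT_IRQ_CP_SERVICE	0x2603	/* service signal (pfault/sclp) */
#define EXT_IRQ_MEASURE_ALERT	0x1407	/* CPU-measurement alert */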
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d261c62e40a6..0727a55d87d9 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
123 | pmd_t *pmdp = (pmd_t *) ptep; | 123 | pmd_t *pmdp = (pmd_t *) ptep; |
124 | pte_t pte = huge_ptep_get(ptep); | 124 | pte_t pte = huge_ptep_get(ptep); |
125 | 125 | ||
126 | if (MACHINE_HAS_IDTE) | 126 | pmdp_flush_direct(mm, addr, pmdp); |
127 | __pmd_idte(addr, pmdp); | ||
128 | else | ||
129 | __pmd_csp(pmdp); | ||
130 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; | 127 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; |
131 | return pte; | 128 | return pte; |
132 | } | 129 | } |
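[Editor's note] The caller-side choice between IDTE and CSP moves into a single helper, so huge_ptep_get_and_clear() no longer needs to know how the flush is done. A hedged sketch of the helper's basic shape; the real pmdp_flush_direct() also hooks into the new zEC12 local-flush bookkeeping:

/* Sketch only: the minimal equivalent of the removed open-coded branch. */
static inline void pmdp_flush_direct_sketch(unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_IDTE)
		__pmd_idte(addr, pmdp);	/* invalidate entry + purge TLB */
	else
		__pmd_csp(pmdp);	/* compare-and-swap-and-purge */
}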
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ad446b0c55b6..0c1073ed1e84 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -124,8 +124,6 @@ void __init paging_init(void) | |||
124 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); | 124 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); |
125 | arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); | 125 | arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); |
126 | 126 | ||
127 | atomic_set(&init_mm.context.attach_count, 1); | ||
128 | |||
129 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 127 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
130 | sparse_init(); | 128 | sparse_init(); |
131 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 129 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
@@ -136,6 +134,11 @@ void __init paging_init(void) | |||
136 | 134 | ||
137 | void __init mem_init(void) | 135 | void __init mem_init(void) |
138 | { | 136 | { |
137 | if (MACHINE_HAS_TLB_LC) | ||
138 | cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); | ||
139 | cpumask_set_cpu(0, mm_cpumask(&init_mm)); | ||
140 | atomic_set(&init_mm.context.attach_count, 1); | ||
141 | |||
139 | max_mapnr = max_low_pfn; | 142 | max_mapnr = max_low_pfn; |
140 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 143 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
141 | 144 | ||
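[Editor's note] The init_mm attach bookkeeping moves from paging_init() to mem_init() and, on machines with local TLB flush support (MACHINE_HAS_TLB_LC, i.e. zEC12), CPU 0 is also recorded in the new cpu_attach_mask. This bookkeeping is what lets the flush code prove an address space only ever ran on the current CPU and pick the cheaper CPU-local flush. A much simplified sketch of the decision this enables; the real logic in asm/tlbflush.h is more careful and also accounts for attach counts and gmaps:

/* Simplified sketch: flush locally iff this mm is attached here only. */
static inline void tlb_flush_mm_sketch(struct mm_struct *mm)
{
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__tlb_flush_local();	/* new zEC12 local-flush path */
	else
		__tlb_flush_global();
}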
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d8324cd866b..d7cfd57815fb 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg) | |||
54 | struct mm_struct *mm = arg; | 54 | struct mm_struct *mm = arg; |
55 | 55 | ||
56 | if (current->active_mm == mm) | 56 | if (current->active_mm == mm) |
57 | update_mm(mm, current); | 57 | update_user_asce(mm, 1); |
58 | __tlb_flush_local(); | 58 | __tlb_flush_local(); |
59 | } | 59 | } |
60 | 60 | ||
@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
107 | { | 107 | { |
108 | pgd_t *pgd; | 108 | pgd_t *pgd; |
109 | 109 | ||
110 | if (current->active_mm == mm) | 110 | if (current->active_mm == mm) { |
111 | clear_user_asce(mm, 1); | ||
111 | __tlb_flush_mm(mm); | 112 | __tlb_flush_mm(mm); |
113 | } | ||
112 | while (mm->context.asce_limit > limit) { | 114 | while (mm->context.asce_limit > limit) { |
113 | pgd = mm->pgd; | 115 | pgd = mm->pgd; |
114 | switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { | 116 | switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { |
@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
132 | crst_table_free(mm, (unsigned long *) pgd); | 134 | crst_table_free(mm, (unsigned long *) pgd); |
133 | } | 135 | } |
134 | if (current->active_mm == mm) | 136 | if (current->active_mm == mm) |
135 | update_mm(mm, current); | 137 | update_user_asce(mm, 1); |
136 | } | 138 | } |
137 | #endif | 139 | #endif |
138 | 140 | ||
@@ -198,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) | |||
198 | static void gmap_flush_tlb(struct gmap *gmap) | 200 | static void gmap_flush_tlb(struct gmap *gmap) |
199 | { | 201 | { |
200 | if (MACHINE_HAS_IDTE) | 202 | if (MACHINE_HAS_IDTE) |
201 | __tlb_flush_idte((unsigned long) gmap->table | | 203 | __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table | |
202 | _ASCE_TYPE_REGION1); | 204 | _ASCE_TYPE_REGION1); |
203 | else | 205 | else |
204 | __tlb_flush_global(); | 206 | __tlb_flush_global(); |
@@ -217,7 +219,7 @@ void gmap_free(struct gmap *gmap) | |||
217 | 219 | ||
218 | /* Flush tlb. */ | 220 | /* Flush tlb. */ |
219 | if (MACHINE_HAS_IDTE) | 221 | if (MACHINE_HAS_IDTE) |
220 | __tlb_flush_idte((unsigned long) gmap->table | | 222 | __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table | |
221 | _ASCE_TYPE_REGION1); | 223 | _ASCE_TYPE_REGION1); |
222 | else | 224 | else |
223 | __tlb_flush_global(); | 225 | __tlb_flush_global(); |
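[Editor's note] The crst_table_downgrade() change encodes the ordering that guards against speculative TLB creation: a CPU may create TLB entries from any currently attached ASCE at any time, so the user ASCE is detached with clear_user_asce() before the flush and the table rebuild, and reattached via update_user_asce() only once the new topmost table is in place. The pattern as a numbered sketch:

/* Sketch of the safe downgrade ordering established above. */
if (current->active_mm == mm) {
	clear_user_asce(mm, 1);		/* 1. detach: no new TLB entries */
	__tlb_flush_mm(mm);		/* 2. purge what is cached */
}
/* 3. ...free/replace the topmost region table... */
if (current->active_mm == mm)
	update_user_asce(mm, 1);	/* 4. reattach the new ASCE */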
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bcfb70b60be6..72b04de18283 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
138 | } | 138 | } |
139 | ret = 0; | 139 | ret = 0; |
140 | out: | 140 | out: |
141 | flush_tlb_kernel_range(start, end); | ||
142 | return ret; | 141 | return ret; |
143 | } | 142 | } |
144 | 143 | ||
@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
265 | memset((void *)start, 0, end - start); | 264 | memset((void *)start, 0, end - start); |
266 | ret = 0; | 265 | ret = 0; |
267 | out: | 266 | out: |
268 | flush_tlb_kernel_range(start, end); | ||
269 | return ret; | 267 | return ret; |
270 | } | 268 | } |
271 | 269 | ||
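[Editor's note] Dropping flush_tlb_kernel_range() here is sound because vmem_add_mem() and vmemmap_populate() only ever turn invalid entries into valid ones, and with the speculative-TLB safeguard in this series in place the TLB can never hold an entry derived from an invalid table entry, so there is nothing stale to purge. The pattern in miniature (a sketch, not the exact vmem.c assignment):

/* Sketch: populating a previously invalid kernel PTE needs no flush. */
pte_val(*pt_dir) = __pa(new_page) | pgprot_val(PAGE_KERNEL);
/* The old entry was invalid, hence never cached in the TLB; the next
 * access simply walks the tables and finds the new entry. */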
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index a32c96761eab..276f2e26c761 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1033,7 +1033,7 @@ int hwsampler_setup(void) | |||
1033 | max_sampler_rate = cb->qsi.max_sampl_rate; | 1033 | max_sampler_rate = cb->qsi.max_sampl_rate; |
1034 | } | 1034 | } |
1035 | } | 1035 | } |
1036 | register_external_interrupt(0x1407, hws_ext_handler); | 1036 | register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler); |
1037 | 1037 | ||
1038 | hws_state = HWS_DEALLOCATED; | 1038 | hws_state = HWS_DEALLOCATED; |
1039 | rc = 0; | 1039 | rc = 0; |
@@ -1068,7 +1068,7 @@ int hwsampler_shutdown(void) | |||
1068 | hws_wq = NULL; | 1068 | hws_wq = NULL; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | unregister_external_interrupt(0x1407, hws_ext_handler); | 1071 | unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler); |
1072 | hws_state = HWS_INIT; | 1072 | hws_state = HWS_INIT; |
1073 | rc = 0; | 1073 | rc = 0; |
1074 | } | 1074 | } |