author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 15:02:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 15:02:28 -0400
commit    d586c86d50cefa0897a51a2dbc714060ccedae76
tree      76a7f454637badb74390047aebca5c071c0988fe  /arch/s390/include
parent    e9f37d3a8d126e73f5737ef548cdf6f618e295e4
parent    457f2180951cdcbfb4657ddcc83b486e93497f56
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull second set of s390 patches from Martin Schwidefsky:
"The second part of Heikos uaccess rework, the page table walker for
uaccess is now a thing of the past (yay!)
The code change to fix the theoretical TLB flush problem allows us to
add a TLB flush optimization for zEC12, this machine has new
instructions that allow to do CPU local TLB flushes for single pages
and for all pages of a specific address space.
Plus the usual bug fixing and some more cleanup"
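The zEC12 optimization turns on a single question: can any other CPU hold TLB entries for this address space? When the answer is provably no, the new local-clearing variants of IDTE/IPTE flush only the executing CPU. A simplified sketch of that decision, based on the __tlb_flush_asce() added in the tlbflush.h hunk below (the complete version also falls back to __tlb_flush_global() on machines without IDTE and resets the flush mask afterwards):

	/* Simplified sketch of the zEC12 local-flush decision; see the
	 * tlbflush.h hunk below for the real code. */
	static inline void tlb_flush_asce_sketch(struct mm_struct *mm, unsigned long asce)
	{
		int active, count;

		preempt_disable();
		active = (mm == current->active_mm) ? 1 : 0;
		/* The high 16 bits of attach_count mark a flush in progress. */
		count = atomic_add_return(0x10000, &mm->context.attach_count);
		if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
		    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
			__tlb_flush_idte_local(asce);	/* IDTE, local-clearing bit set */
		else
			__tlb_flush_idte(asce);		/* broadcast flush on all CPUs */
		atomic_sub(0x10000, &mm->context.attach_count);
		preempt_enable();
	}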
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/uaccess: rework uaccess code - fix locking issues
s390/mm,tlb: optimize TLB flushing for zEC12
s390/mm,tlb: safeguard against speculative TLB creation
s390/irq: Use defines for external interruption codes
s390/irq: Add defines for external interruption codes
s390/sclp: add timeout for queued requests
kvm/s390: also set guest pages back to stable on kexec/kdump
lcs: Add missing destroy_timer_on_stack()
s390/tape: Add missing destroy_timer_on_stack()
s390/tape: Use del_timer_sync()
s390/3270: fix crash with multiple reset device requests
s390/bitops,atomic: add missing memory barriers
s390/zcrypt: add length check for aligned data to avoid overflow in msg-type 6
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/atomic.h        70
-rw-r--r--  arch/s390/include/asm/bitops.h        41
-rw-r--r--  arch/s390/include/asm/futex.h         66
-rw-r--r--  arch/s390/include/asm/irq.h           18
-rw-r--r--  arch/s390/include/asm/mmu.h            2
-rw-r--r--  arch/s390/include/asm/mmu_context.h   45
-rw-r--r--  arch/s390/include/asm/pgtable.h      128
-rw-r--r--  arch/s390/include/asm/setup.h          3
-rw-r--r--  arch/s390/include/asm/switch_to.h      1
-rw-r--r--  arch/s390/include/asm/thread_info.h    2
-rw-r--r--  arch/s390/include/asm/tlb.h           14
-rw-r--r--  arch/s390/include/asm/tlbflush.h     115
-rw-r--r--  arch/s390/include/asm/uaccess.h        2
13 files changed, 373 insertions(+), 134 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa9aaf7144b7..1d4706114a45 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,23 +15,29 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
+#define __ATOMIC_NO_BARRIER	"\n"
+
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_BARRIER "bcr	14,0\n"
 
-#define __ATOMIC_LOOP(ptr, op_val, op_string)			\
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
 ({								\
 	int old_val;						\
 								\
 	typecheck(atomic_t *, ptr);				\
 	asm volatile(						\
+		__barrier					\
 		op_string "	%0,%2,%1\n"			\
+		__barrier					\
 		: "=d" (old_val), "+Q" ((ptr)->counter)		\
 		: "d" (op_val)					\
 		: "cc", "memory");				\
@@ -43,8 +49,9 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_BARRIER "\n"
 
-#define __ATOMIC_LOOP(ptr, op_val, op_string)			\
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
 ({								\
 	int old_val, new_val;					\
 								\
@@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
 
 static inline void atomic_add(int i, atomic_t *v)
@@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v)
 			: "+Q" (v->counter)
 			: "i" (i)
 			: "cc", "memory");
-	} else {
-		atomic_add_return(i, v);
+		return;
 	}
-#else
-	atomic_add_return(i, v);
 #endif
+	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
+	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
 }
 
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
+	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
+#define __ATOMIC64_NO_BARRIER	"\n"
+
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_BARRIER "bcr	14,0\n"
 
-#define __ATOMIC64_LOOP(ptr, op_val, op_string)			\
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
 ({								\
 	long long old_val;					\
 								\
 	typecheck(atomic64_t *, ptr);				\
 	asm volatile(						\
+		__barrier					\
 		op_string "	%0,%2,%1\n"			\
+		__barrier					\
 		: "=d" (old_val), "+Q" ((ptr)->counter)		\
 		: "d" (op_val)					\
 		: "cc", "memory");				\
@@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_BARRIER "\n"
 
-#define __ATOMIC64_LOOP(ptr, op_val, op_string)			\
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
 ({								\
 	long long old_val, new_val;				\
 								\
@@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"agsi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
+	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
+	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 	} while (atomic64_cmpxchg(v, old, new) != old);
 }
 
-#endif /* CONFIG_64BIT */
-
 static inline void atomic64_add(long long i, atomic64_t *v)
 {
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
-	} else {
-		atomic64_add_return(i, v);
-	}
-#else
 	atomic64_add_return(i, v);
-#endif
 }
 
+#endif /* CONFIG_64BIT */
+
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
 	long long c, old;
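The point of the new __barrier parameter: the z196 interlocked-access instructions (laa, lan, lao and their 64-bit forms) update storage atomically but do not serialize the CPU, so only the value-returning operations, which the kernel memory model requires to be full barriers, pay for the "bcr 14,0" serialization; the void operations pass __ATOMIC_NO_BARRIER. Hand-expanding the macro for a z196+ build makes the effect visible (illustrative expansion, not kernel source):

	/* What atomic_add_return() boils down to on z196+ after this patch;
	 * the bcr 14,0 before and after laa is the new full barrier. */
	static inline int atomic_add_return_expanded(int i, atomic_t *v)
	{
		int old_val;

		asm volatile(
			"	bcr	14,0\n"		/* __ATOMIC_BARRIER */
			"	laa	%0,%2,%1\n"	/* load-and-add, no implied fence */
			"	bcr	14,0\n"		/* __ATOMIC_BARRIER */
			: "=d" (old_val), "+Q" (v->counter)
			: "d" (i)
			: "cc", "memory");
		return old_val + i;
	}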
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index ec5ef891db6b..520542477678 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -47,14 +47,18 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#define __BITOPS_NO_BARRIER	"\n"
 
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR		"or"
 #define __BITOPS_AND		"nr"
 #define __BITOPS_XOR		"xr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string)		\
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
 ({								\
 	unsigned long __old, __new;				\
 								\
@@ -67,7 +71,7 @@
 		"	jl	0b"				\
 		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
 		: "d" (__val)					\
-		: "cc");					\
+		: "cc", "memory");				\
 	__old;							\
 })
 
@@ -78,17 +82,20 @@
 #define __BITOPS_OR		"laog"
 #define __BITOPS_AND		"lang"
 #define __BITOPS_XOR		"laxg"
+#define __BITOPS_BARRIER	"bcr	14,0\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string)		\
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
 ({								\
 	unsigned long __old;					\
 								\
 	typecheck(unsigned long *, (__addr));			\
 	asm volatile(						\
+		__barrier					\
 		__op_string "	%0,%2,%1\n"			\
+		__barrier					\
 		: "=d" (__old),	"+Q" (*(__addr))		\
 		: "d" (__val)					\
-		: "cc");					\
+		: "cc", "memory");				\
 	__old;							\
 })
 
@@ -97,8 +104,9 @@
 #define __BITOPS_OR		"ogr"
 #define __BITOPS_AND		"ngr"
 #define __BITOPS_XOR		"xgr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__addr, __val, __op_string)		\
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
 ({								\
 	unsigned long __old, __new;				\
 								\
@@ -111,7 +119,7 @@
 		"	jl	0b"				\
 		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
 		: "d" (__val)					\
-		: "cc");					\
+		: "cc", "memory");				\
 	__old;							\
 })
 
@@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 			"oi	%0,%b1\n"
 			: "+Q" (*caddr)
 			: "i" (1 << (nr & 7))
-			: "cc");
+			: "cc", "memory");
 		return;
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 			"ni	%0,%b1\n"
 			: "+Q" (*caddr)
 			: "i" (~(1 << (nr & 7)))
-			: "cc");
+			: "cc", "memory");
 		return;
 	}
 #endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
+	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 			"xi	%0,%b1\n"
 			: "+Q" (*caddr)
 			: "i" (1 << (nr & 7))
-			: "cc");
+			: "cc", "memory");
 		return;
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
 }
 
 static inline int
@@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
 	return (old & mask) != 0;
 }
 
@@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
 	return (old & ~mask) != 0;
 }
 
@@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
 	return (old & mask) != 0;
 }
 
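Two related fixes in this file: every __BITOPS_LOOP user now clobbers "memory", so the compiler can no longer cache loads and stores across a bitop, and the test_and_*_bit() variants get a real hardware barrier inside the asm instead of the compiler-only barrier() that followed the old loop. The difference matters whenever a bit acts as a lock; a minimal, purely illustrative hand-rolled lock shows the semantics the kernel relies on:

	/* Why test_and_set_bit() must be a full barrier: a hand-rolled
	 * bit lock (illustrative only; real code uses proper spinlocks). */
	static unsigned long flags;

	static void enter(void)
	{
		while (test_and_set_bit(0, &flags))	/* acquire: full barrier */
			cpu_relax();
		/* critical section: must not leak before the bit is set */
	}

	static void leave(void)
	{
		smp_mb__before_clear_bit();
		clear_bit(0, &flags);			/* release */
	}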
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99..69cf5b5eddc9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
 #include <asm/errno.h>
 
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile(							\
+		"   sacf 256\n"						\
+		"0: l   %1,0(%6)\n"					\
+		"1:"insn						\
+		"2: cs  %1,%2,0(%6)\n"					\
+		"3: jl  1b\n"						\
+		"   lhi %0,0\n"						\
+		"4: sacf 768\n"						\
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
+		  "=m" (*uaddr)						\
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		  "m" (*uaddr) : "cc");
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval, ret;
+	int oldval = 0, newval, ret;
 
+	update_primary_asce(current);
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
 	pagefault_enable();
 
 	if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret;
+
+	update_primary_asce(current);
+	asm volatile(
+		"   sacf 256\n"
+		"0: cs   %1,%4,0(%5)\n"
+		"1: la   %0,0\n"
+		"2: sacf 768\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory");
+	*uval = oldval;
+	return ret;
+}
+
 #endif /* _ASM_S390_FUTEX_H */
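With the page-table walker gone, the futex primitives are inlined here: "sacf 256" puts the CPU into the secondary-space mode in which user memory is addressable under the reworked uaccess, "sacf 768" restores the kernel's addressing mode, and the l / <insn> / cs / jl sequence is a classic compare-and-swap retry loop. A C-level rendering of the FUTEX_OP_ADD case (illustrative only; the real code runs between the sacf pair and relies on the EX_TABLE entries to turn faults into -EFAULT):

	/* C-level equivalent of __futex_atomic_op("lr %2,%1\nar %2,%5\n", ...)
	 * for FUTEX_OP_ADD; sketch only, user-space access elided. */
	static int futex_add_sketch(u32 *uaddr, int oparg, int *old)
	{
		u32 oldval, newval;

		do {
			oldval = ACCESS_ONCE(*uaddr);	/* "0: l   %1,0(%6)" */
			newval = oldval + oparg;	/* "1:" insn slot    */
		} while (cmpxchg(uaddr, oldval, newval) != oldval); /* "2: cs ... 3: jl 1b" */
		*old = oldval;
		return 0;				/* "lhi %0,0" on success */
	}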
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 35f0faab5361..c4dd400a2791 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -16,6 +16,20 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ		0
 
+/* External interruption codes */
+#define EXT_IRQ_INTERRUPT_KEY	0x0040
+#define EXT_IRQ_CLK_COMP	0x1004
+#define EXT_IRQ_CPU_TIMER	0x1005
+#define EXT_IRQ_WARNING_TRACK	0x1007
+#define EXT_IRQ_MALFUNC_ALERT	0x1200
+#define EXT_IRQ_EMERGENCY_SIG	0x1201
+#define EXT_IRQ_EXTERNAL_CALL	0x1202
+#define EXT_IRQ_TIMING_ALERT	0x1406
+#define EXT_IRQ_MEASURE_ALERT	0x1407
+#define EXT_IRQ_SERVICE_SIG	0x2401
+#define EXT_IRQ_CP_SERVICE	0x2603
+#define EXT_IRQ_IUCV		0x4000
+
 #ifndef __ASSEMBLY__
 
 #include <linux/hardirq.h>
@@ -77,8 +91,8 @@ struct ext_code {
 
 typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
 
-int register_external_interrupt(u16 code, ext_int_handler_t handler);
-int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+int register_external_irq(u16 code, ext_int_handler_t handler);
+int unregister_external_irq(u16 code, ext_int_handler_t handler);
 
 enum irq_subclass {
 	IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
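The rename from register_external_interrupt() to register_external_irq() lands together with symbolic names for the interruption codes, so callers no longer hard-code magic numbers. A hypothetical caller after this merge (not taken from the patch):

	/* Hypothetical driver fragment: register a handler for the
	 * service-signal external interruption, formerly a bare 0x2401. */
	static void service_sig_handler(struct ext_code ext_code,
					unsigned int param32, unsigned long param64)
	{
		/* react to the service signal */
	}

	static int __init service_sig_init(void)
	{
		return register_external_irq(EXT_IRQ_SERVICE_SIG, service_sig_handler);
	}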
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index ff132ac64ddd..f77695a82f64 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,9 +1,11 @@
 #ifndef __MMU_H
 #define __MMU_H
 
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 
 typedef struct {
+	cpumask_t cpu_attach_mask;
 	atomic_t attach_count;
 	unsigned int flush_mm;
 	spinlock_t list_lock;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 38149b63dc44..71be346d0e3c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
 	mm->context.flush_mm = 0;
 	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
@@ -29,41 +30,61 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	/* Load primary space page table origin. */
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
 }
 
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+{
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+	unsigned long asce;
+
+	__ctl_store(asce, 1, 1);
+	if (asce != S390_lowcore.kernel_asce)
+		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	set_tsk_thread_flag(tsk, TIF_ASCE);
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	int cpu = smp_processor_id();
 
+	update_primary_asce(tsk);
 	if (prev == next)
 		return;
+	if (MACHINE_HAS_TLB_LC)
+		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_mm until all TLB flushes are done. */
+		/* Delay update_user_asce until all TLB flushes are done. */
 		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+		/* Clear old ASCE by loading the kernel ASCE. */
+		clear_user_asce(next, 0);
 	} else {
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_mm(next, tsk);
+		update_user_asce(next, 0);
 		if (next->context.flush_mm)
 			/* Flush pending TLBs */
 			__tlb_flush_mm(next);
 	}
 	atomic_dec(&prev->context.attach_count);
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+	if (MACHINE_HAS_TLB_LC)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -80,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
 			cpu_relax();
 
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		update_mm(mm, tsk);
+		update_user_asce(mm, 0);
 		if (mm->context.flush_mm)
 			__tlb_flush_mm(mm);
 		preempt_enable();
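Several files in this merge poke mm->context.attach_count with the magic constant 0x10000; the counter is really two 16-bit fields packed into one atomic_t, which is why switch_mm() above tests atomic_inc_return(...) >> 16 to detect a flush in flight. Spelled out (the decoding macros below are illustrative names, not kernel API):

	/* Layout of mm->context.attach_count as used by this series:
	 *   bits  0-15: number of CPUs currently attached to the mm
	 *   bits 16-31: number of TLB flush operations in progress
	 */
	#define MM_ATTACH_COUNT(c)	((c) & 0xffff)
	#define MM_FLUSH_COUNT(c)	((unsigned int)(c) >> 16)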
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 50a75d96f939..12f75313e086 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1070,12 +1070,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
 }
 
+static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+{
+	unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+	/* pto in ESA mode must point to the start of the segment table */
+	pto &= 0x7ffffc00;
+#endif
+	/* Invalidation + local TLB flush for the pte */
+	asm volatile(
+		"	.insn rrf,0xb2210000,%2,%3,0,1"
+		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
 static inline void ptep_flush_direct(struct mm_struct *mm,
 				     unsigned long address, pte_t *ptep)
 {
+	int active, count;
+
 	if (pte_val(*ptep) & _PAGE_INVALID)
 		return;
-	__ptep_ipte(address, ptep);
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__ptep_ipte_local(address, ptep);
+	else
+		__ptep_ipte(address, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
@@ -1384,35 +1407,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
 
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
-	unsigned long sto = (unsigned long) pmdp -
-			    pmd_index(address) * sizeof(pmd_t);
-
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
-		asm volatile(
-			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
-			: "=m" (*pmdp)
-			: "m" (*pmdp), "a" (sto),
-			  "a" ((address & HPAGE_MASK))
-			: "cc"
-		);
-	}
-}
-
-static inline void __pmd_csp(pmd_t *pmdp)
-{
-	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-					       _SEGMENT_ENTRY_INVALID;
-	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-	asm volatile(
-		"	csp %1,%3"
-		: "=m" (*pmdp)
-		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-}
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
@@ -1481,18 +1475,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
 
+static inline void __pmdp_csp(pmd_t *pmdp)
+{
+	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+					       _SEGMENT_ENTRY_INVALID;
+	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+	asm volatile(
+		"	csp %1,%3"
+		: "=m" (*pmdp)
+		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto;
+
+	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
+		: "=m" (*pmdp)
+		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+		: "cc" );
+}
+
+static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto;
+
+	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
+		: "=m" (*pmdp)
+		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+		: "cc" );
+}
+
+static inline void pmdp_flush_direct(struct mm_struct *mm,
+				     unsigned long address, pmd_t *pmdp)
+{
+	int active, count;
+
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+		return;
+	if (!MACHINE_HAS_IDTE) {
+		__pmdp_csp(pmdp);
+		return;
+	}
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__pmdp_idte_local(address, pmdp);
+	else
+		__pmdp_idte(address, pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
+}
+
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
 				   unsigned long address, pmd_t *pmdp)
 {
 	int active, count;
 
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+		return;
 	active = (mm == current->active_mm) ? 1 : 0;
 	count = atomic_add_return(0x10000, &mm->context.attach_count);
 	if ((count & 0xffff) <= active) {
 		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
-	} else
-		__pmd_idte(address, pmdp);
+	} else if (MACHINE_HAS_IDTE)
+		__pmdp_idte(address, pmdp);
+	else
+		__pmdp_csp(pmdp);
 	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
@@ -1545,7 +1601,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 	pmd_t pmd;
 
 	pmd = *pmdp;
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(vma->vm_mm, address, pmdp);
 	*pmdp = pmd_mkold(pmd);
 	return pmd_young(pmd);
 }
@@ -1556,7 +1612,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 {
 	pmd_t pmd = *pmdp;
 
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(mm, address, pmdp);
 	pmd_clear(pmdp);
 	return pmd;
 }
@@ -1572,7 +1628,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
 				   unsigned long address, pmd_t *pmdp)
 {
-	__pmd_idte(address, pmdp);
+	pmdp_flush_direct(vma->vm_mm, address, pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1582,7 +1638,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 	pmd_t pmd = *pmdp;
 
 	if (pmd_write(pmd)) {
-		__pmd_idte(address, pmdp);
+		pmdp_flush_direct(mm, address, pmdp);
 		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
 	}
 }
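The pte and pmd flush paths now share the same shape, which is easiest to see as a decision tree (summary sketch, condensed from the two flush_direct functions above):

	/*
	 * Flush decision shared by ptep_flush_direct()/pmdp_flush_direct():
	 *
	 *   entry already invalid          -> nothing to do
	 *   !MACHINE_HAS_IDTE (pmd only)   -> __pmdp_csp()  (compare-and-swap-and-purge)
	 *   MACHINE_HAS_TLB_LC and only
	 *   this CPU attached to the mm    -> __ptep_ipte_local() / __pmdp_idte_local()
	 *   otherwise                      -> __ptep_ipte() / __pmdp_idte() (broadcast)
	 */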
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 406f3a1e63ef..b31b22dba948 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
 #define MACHINE_FLAG_RRBM	(1UL << 16)
+#define MACHINE_FLAG_TLB_LC	(1UL << 17)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_TOPOLOGY	(0)
 #define MACHINE_HAS_TE		(0)
 #define MACHINE_HAS_RRBM	(0)
+#define MACHINE_HAS_TLB_LC	(0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
+#define MACHINE_HAS_TLB_LC	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
 #endif /* CONFIG_64BIT */
 
 /*
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..e759181357fc 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 		update_cr_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
+	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					\
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b90345..50630e6a35de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
+#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
+#define _TIF_ASCE		(1<<TIF_ASCE)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2cb846c4b37f..c544b6f05d95 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->end = end;
 	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
-	if (tlb->fullmm)
-		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm)
-		return page_table_free_rcu(tlb, (unsigned long *) pte);
-	page_table_free(tlb->mm, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte);
 }
 
 /*
@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pmd);
-	crst_table_free(tlb->mm, (unsigned long *) pmd);
+	tlb_remove_table(tlb, pmd);
 #endif
 }
 
@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pud);
-	crst_table_free(tlb->mm, (unsigned long *) pud);
+	tlb_remove_table(tlb, pud);
 #endif
 }
 
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index f9fef0425fee..16c9c88658c8 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -7,19 +7,41 @@
 #include <asm/pgalloc.h>
 
 /*
- * Flush all tlb entries on the local cpu.
+ * Flush all TLB entries on the local CPU.
  */
 static inline void __tlb_flush_local(void)
 {
 	asm volatile("ptlb" : : : "memory");
 }
 
-#ifdef CONFIG_SMP
 /*
- * Flush all tlb entries on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs
  */
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+	/* Global TLB flush for the mm */
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+	/* Local TLB flush for the mm */
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
 void smp_ptlb_all(void);
 
+/*
+ * Flush all TLB entries on all CPUs.
+ */
 static inline void __tlb_flush_global(void)
 {
 	register unsigned long reg2 asm("2");
@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
 		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 }
 
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
 static inline void __tlb_flush_full(struct mm_struct *mm)
 {
-	cpumask_t local_cpumask;
-
 	preempt_disable();
-	/*
-	 * If the process only ran on the local cpu, do a local flush.
-	 */
-	cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
-	if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
+	atomic_add(0x10000, &mm->context.attach_count);
+	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+		/* Local TLB flush */
 		__tlb_flush_local();
-	else
+	} else {
+		/* Global TLB flush */
 		__tlb_flush_global();
+		/* Reset TLB flush mask */
+		if (MACHINE_HAS_TLB_LC)
+			cpumask_copy(mm_cpumask(mm),
+				     &mm->context.cpu_attach_mask);
+	}
+	atomic_sub(0x10000, &mm->context.attach_count);
 	preempt_enable();
 }
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+	int active, count;
+
+	preempt_disable();
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+		__tlb_flush_idte_local(asce);
+	} else {
+		if (MACHINE_HAS_IDTE)
+			__tlb_flush_idte(asce);
+		else
+			__tlb_flush_global();
+		/* Reset TLB flush mask */
+		if (MACHINE_HAS_TLB_LC)
+			cpumask_copy(mm_cpumask(mm),
+				     &mm->context.cpu_attach_mask);
+	}
+	atomic_sub(0x10000, &mm->context.attach_count);
+	preempt_enable();
+}
+
+static inline void __tlb_flush_kernel(void)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte((unsigned long) init_mm.pgd |
+				 init_mm.context.asce_bits);
+	else
+		__tlb_flush_global();
+}
 #else
-#define __tlb_flush_full(mm)	__tlb_flush_local()
 #define __tlb_flush_global()	__tlb_flush_local()
-#endif
+#define __tlb_flush_full(mm)	__tlb_flush_local()
 
 /*
- * Flush all tlb entries of a page table on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_idte(unsigned long asce)
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 {
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (2048), "a" (asce) : "cc" );
+	if (MACHINE_HAS_TLB_LC)
+		__tlb_flush_idte_local(asce);
+	else
+		__tlb_flush_local();
 }
 
+static inline void __tlb_flush_kernel(void)
+{
+	if (MACHINE_HAS_TLB_LC)
+		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
+				       init_mm.context.asce_bits);
+	else
+		__tlb_flush_local();
+}
+#endif
+
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
 	/*
@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_idte((unsigned long) mm->pgd |
+		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
 				 mm->context.asce_bits);
 	else
 		__tlb_flush_full(mm);
@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	__tlb_flush_mm(&init_mm);
+	__tlb_flush_kernel();
 }
 
 #endif /* _S390_TLBFLUSH_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb0..1be64a1506d0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-int __handle_fault(unsigned long, unsigned long, int);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.