path: root/arch/s390/include
author		Tony Lindgren <tony@atomide.com>	2010-03-01 17:19:05 -0500
committer	Tony Lindgren <tony@atomide.com>	2010-03-01 17:19:05 -0500
commit	d702d12167a2c05a346f49aac7a311d597762495 (patch)
tree	baae42c299cce34d6df24b5d01f8b1d0b481bd9a /arch/s390/include
parent	9418c65f9bd861d0f7e39aab9cfb3aa6f2275d11 (diff)
parent	ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (diff)
Merge with mainline to remove plat-omap/Kconfig conflict
Conflicts:
	arch/arm/plat-omap/Kconfig
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/atomic.h       |  86
-rw-r--r--  arch/s390/include/asm/bitops.h       |  83
-rw-r--r--  arch/s390/include/asm/bug.h          |  10
-rw-r--r--  arch/s390/include/asm/crw.h          |   1
-rw-r--r--  arch/s390/include/asm/etr.h          |  12
-rw-r--r--  arch/s390/include/asm/irqflags.h     |  36
-rw-r--r--  arch/s390/include/asm/lowcore.h      | 250
-rw-r--r--  arch/s390/include/asm/page.h         |   3
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   2
-rw-r--r--  arch/s390/include/asm/processor.h    |  18
-rw-r--r--  arch/s390/include/asm/ptrace.h       |  13
-rw-r--r--  arch/s390/include/asm/qdio.h         |   3
-rw-r--r--  arch/s390/include/asm/rwsem.h        | 147
-rw-r--r--  arch/s390/include/asm/setup.h        |   9
-rw-r--r--  arch/s390/include/asm/sigp.h         | 142
-rw-r--r--  arch/s390/include/asm/smp.h          |  38
-rw-r--r--  arch/s390/include/asm/spinlock.h     |  18
-rw-r--r--  arch/s390/include/asm/swab.h         |  16
-rw-r--r--  arch/s390/include/asm/syscall.h      |   7
-rw-r--r--  arch/s390/include/asm/sysinfo.h      |   3
-rw-r--r--  arch/s390/include/asm/system.h       | 168
-rw-r--r--  arch/s390/include/asm/thread_info.h  |   2
-rw-r--r--  arch/s390/include/asm/timex.h        |  22
-rw-r--r--  arch/s390/include/asm/uaccess.h      |  12
-rw-r--r--  arch/s390/include/asm/vdso.h         |   2
25 files changed, 429 insertions(+), 674 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..451bfbb9db3d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
 	int old_val, new_val;					\
 	asm volatile(						\
@@ -35,26 +33,6 @@
 	new_val;						\
 })
 
-#else /* __GNUC__ */
-
-#define __CS_LOOP(ptr, op_val, op_string) ({			\
-	int old_val, new_val;					\
-	asm volatile(						\
-		"	l	%0,0(%3)\n"			\
-		"0:	lr	%1,%0\n"			\
-		op_string "	%1,%4\n"			\
-		"	cs	%0,%1,0(%3)\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=m" (((atomic_t *)(ptr))->counter)		\
-		: "a" (ptr), "d" (op_val),			\
-		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory");				\
-	new_val;						\
-})
-
-#endif /* __GNUC__ */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	barrier();
@@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
 		"	cs	%0,%2,%1"
 		: "+d" (old), "=Q" (v->counter)
 		: "d" (new), "Q" (v->counter)
 		: "cc", "memory");
-#else /* __GNUC__ */
-	asm volatile(
-		"	cs	%0,%3,0(%2)"
-		: "+d" (old), "=m" (v->counter)
-		: "a" (v), "d" (new), "m" (v->counter)
-		: "cc", "memory");
-#endif /* __GNUC__ */
 	return old;
 }
 
@@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __CSG_LOOP(ptr, op_val, op_string) ({			\
 	long long old_val, new_val;				\
 	asm volatile(						\
@@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 	new_val;						\
 })
 
-#else /* __GNUC__ */
-
-#define __CSG_LOOP(ptr, op_val, op_string) ({			\
-	long long old_val, new_val;				\
-	asm volatile(						\
-		"	lg	%0,0(%3)\n"			\
-		"0:	lgr	%1,%0\n"			\
-		op_string "	%1,%4\n"			\
-		"	csg	%0,%1,0(%3)\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=m" (((atomic_t *)(ptr))->counter)		\
-		: "a" (ptr), "d" (op_val),			\
-		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory");				\
-	new_val;						\
-})
-
-#endif /* __GNUC__ */
-
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	barrier();
@@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 static inline long long atomic64_cmpxchg(atomic64_t *v,
 					 long long old, long long new)
 {
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
 		"	csg	%0,%2,%1"
 		: "+d" (old), "=Q" (v->counter)
 		: "d" (new), "Q" (v->counter)
 		: "cc", "memory");
-#else /* __GNUC__ */
-	asm volatile(
-		"	csg	%0,%3,0(%2)"
-		: "+d" (old), "=m" (v->counter)
-		: "a" (v), "d" (new), "m" (v->counter)
-		: "cc", "memory");
-#endif /* __GNUC__ */
 	return old;
 }
 
@@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v)
 	register_pair rp;
 
 	asm volatile(
-		"	lm	%0,%N0,0(%1)"
-		: "=&d" (rp)
-		: "a" (&v->counter), "m" (v->counter)
-		);
+		"	lm	%0,%N0,%1"
+		: "=&d" (rp) : "Q" (v->counter) );
 	return rp.pair;
 }
 
@@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	register_pair rp = {.pair = i};
 
 	asm volatile(
-		"	stm	%1,%N1,0(%2)"
-		: "=m" (v->counter)
-		: "d" (rp), "a" (&v->counter)
-		);
+		"	stm	%1,%N1,%0"
+		: "=Q" (v->counter) : "d" (rp) );
 }
 
 static inline long long atomic64_xchg(atomic64_t *v, long long new)
@@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
 	register_pair rp_old;
 
 	asm volatile(
-		"	lm	%0,%N0,0(%2)\n"
-		"0:	cds	%0,%3,0(%2)\n"
+		"	lm	%0,%N0,%1\n"
+		"0:	cds	%0,%2,%1\n"
 		"	jl	0b\n"
-		: "=&d" (rp_old), "+m" (v->counter)
-		: "a" (&v->counter), "d" (rp_new)
+		: "=&d" (rp_old), "=Q" (v->counter)
+		: "d" (rp_new), "Q" (v->counter)
 		: "cc");
 	return rp_old.pair;
 }
@@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	register_pair rp_new = {.pair = new};
 
 	asm volatile(
-		"	cds	%0,%3,0(%2)"
-		: "+&d" (rp_old), "+m" (v->counter)
-		: "a" (&v->counter), "d" (rp_new)
+		"	cds	%0,%2,%1"
+		: "+&d" (rp_old), "=Q" (v->counter)
+		: "d" (rp_new), "Q" (v->counter)
 		: "cc");
 	return rp_old.pair;
}
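
Note: the pattern in these hunks recurs throughout the headers merged here: the old pre-gcc-3.3.3 fallback asm, which addressed memory as "0(%n)" through an "a" (address register) constraint plus a separate "m" operand, is deleted, and the remaining asm uses the s390 "Q" constraint, letting gcc emit a base+displacement memory operand directly. A minimal standalone sketch of the resulting compare-and-swap loop (illustrative only, not the kernel macro itself; assumes gcc >= 3.3.3 targeting s390):

	static inline int cs_add(int *ptr, int val)
	{
		int old_val, new_val;

		asm volatile(
			"	l	%0,%2\n"	/* load current value */
			"0:	lr	%1,%0\n"	/* copy into work register */
			"	ar	%1,%3\n"	/* apply the operation */
			"	cs	%0,%1,%2\n"	/* compare and swap back */
			"	jl	0b"		/* lost the race: retry */
			: "=&d" (old_val), "=&d" (new_val), "=Q" (*ptr)
			: "d" (val), "Q" (*ptr)
			: "cc", "memory");
		return new_val;
	}
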
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b30606f6d523..2e05972c5085 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -71,8 +71,6 @@ extern const char _sb_findmap[];
 #define __BITOPS_AND		"nr"
 #define __BITOPS_XOR		"xr"
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
 	asm volatile(						\
 		"	l	%0,%2\n"			\
@@ -85,22 +83,6 @@ extern const char _sb_findmap[];
 		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
 		: "cc");
 
-#else /* __GNUC__ */
-
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
-	asm volatile(						\
-		"	l	%0,0(%4)\n"			\
-		"0:	lr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	cs	%0,%1,0(%4)\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new),			\
-		  "=m" (*(unsigned long *) __addr)		\
-		: "d" (__val), "a" (__addr),			\
-		  "m" (*(unsigned long *) __addr) : "cc");
-
-#endif /* __GNUC__ */
-
 #else /* __s390x__ */
 
 #define __BITOPS_ALIGN		7
@@ -109,8 +91,6 @@ extern const char _sb_findmap[];
 #define __BITOPS_AND		"ngr"
 #define __BITOPS_XOR		"xgr"
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
 	asm volatile(						\
 		"	lg	%0,%2\n"			\
@@ -123,23 +103,6 @@ extern const char _sb_findmap[];
 		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
 		: "cc");
 
-#else /* __GNUC__ */
-
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
-	asm volatile(						\
-		"	lg	%0,0(%4)\n"			\
-		"0:	lgr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	csg	%0,%1,0(%4)\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new),			\
-		  "=m" (*(unsigned long *) __addr)		\
-		: "d" (__val), "a" (__addr),			\
-		  "m" (*(unsigned long *) __addr) : "cc");
-
-
-#endif /* __GNUC__ */
-
 #endif /* __s390x__ */
 
 #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
@@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
 
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	asm volatile(
-		"	oc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr) : "a" (addr),
-		  "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
+		"	oc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
 }
 
 static inline void
@@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
 
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	asm volatile(
-		"	nc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr) : "a" (addr),
-		  "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
+		"	nc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
 }
 
 static inline void
@@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	asm volatile(
-		"	xc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr) : "a" (addr),
-		  "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
+		"	xc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
 }
 
 static inline void
@@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	ch = *(unsigned char *) addr;
 	asm volatile(
-		"	oc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr)
-		: "a" (addr), "a" (_oi_bitmap + (nr & 7)),
-		  "m" (*(char *) addr) : "cc", "memory");
+		"	oc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)
@@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	ch = *(unsigned char *) addr;
 	asm volatile(
-		"	nc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr)
-		: "a" (addr), "a" (_ni_bitmap + (nr & 7)),
-		  "m" (*(char *) addr) : "cc", "memory");
+		"	nc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
+		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)
@@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
 	ch = *(unsigned char *) addr;
 	asm volatile(
-		"	xc	0(1,%1),0(%2)"
-		: "=m" (*(char *) addr)
-		: "a" (addr), "a" (_oi_bitmap + (nr & 7)),
-		  "m" (*(char *) addr) : "cc", "memory");
+		"	xc	%O0(1,%R0),%1"
+		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)
@@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
 	p = (unsigned long *)((unsigned long) p + offset);
 #ifndef __s390x__
 	asm volatile(
-		"	ic	%0,0(%1)\n"
-		"	icm	%0,2,1(%1)\n"
-		"	icm	%0,4,2(%1)\n"
-		"	icm	%0,8,3(%1)"
-		: "=&d" (word) : "a" (p), "m" (*p) : "cc");
+		"	ic	%0,%O1(%R1)\n"
+		"	icm	%0,2,%O1+1(%R1)\n"
+		"	icm	%0,4,%O1+2(%R1)\n"
+		"	icm	%0,8,%O1+3(%R1)"
+		: "=&d" (word) : "Q" (*p) : "cc");
 #else
 	asm volatile(
 		"	lrvg	%0,%1"
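
Note: the byte-wise hunks above also rely on the s390 operand modifiers %O and %R: for a memory operand %n, "%On" prints its displacement and "%Rn" its base register, so an SS-format instruction such as "oc %O0(1,%R0),%1" can address a single byte without a dedicated "a" (address register) input. A hedged, self-contained sketch of the same idiom (hypothetical helper, not taken from the kernel):

	static inline void set_low_bit(unsigned char *byte)
	{
		static const unsigned char one = 1;

		asm volatile(
			"	oc	%O0(1,%R0),%1"	/* *byte |= one */
			: "=Q" (*byte)
			: "Q" (one)
			: "cc");
	}
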
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index b1066b9fb5f8..9beeb9db9b23 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -5,12 +5,6 @@
 
 #ifdef CONFIG_BUG
 
-#ifdef CONFIG_64BIT
-#define S390_LONG ".quad"
-#else
-#define S390_LONG ".long"
-#endif
-
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 
 #define __EMIT_BUG(x) do {					\
@@ -21,7 +15,7 @@
 		"2:	.asciz	\""__FILE__"\"\n"		\
 		".previous\n"					\
 		".section __bug_table,\"a\"\n"			\
-		"3:\t" S390_LONG "\t1b,2b\n"			\
+		"3:	.long	1b-3b,2b-3b\n"			\
 		"	.short	%0,%1\n"			\
 		"	.org	3b+%2\n"			\
 		".previous\n"					\
@@ -37,7 +31,7 @@
 		"0:	j	0b+2\n"				\
 		"1:\n"						\
 		".section __bug_table,\"a\"\n"			\
-		"2:\t" S390_LONG "\t1b\n"			\
+		"2:	.long	1b-2b\n"			\
 		"	.short	%0\n"				\
 		"	.org	2b+%1\n"			\
 		".previous\n"					\
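
Note: the bug-table entries switch from absolute addresses (".quad" on 64-bit, ".long" on 31-bit, hence the dropped S390_LONG macro) to 32-bit self-relative offsets such as "1b-3b". Every entry now has the same size on both targets and shrinks on 64-bit; the trap handler presumably recovers the absolute address by adding the entry's own location, along the lines of this sketch (assuming the generic relative-pointer scheme, CONFIG_GENERIC_BUG_RELATIVE_POINTERS):

	static inline unsigned long bug_addr(const signed int *entry)
	{
		/* stored value is "target - entry", so add the entry address back */
		return (unsigned long) entry + *entry;
	}
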
diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h
index 2185a6d619d3..749a97e61bea 100644
--- a/arch/s390/include/asm/crw.h
+++ b/arch/s390/include/asm/crw.h
@@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
 extern int crw_register_handler(int rsc, crw_handler_t handler);
 extern void crw_unregister_handler(int rsc);
 extern void crw_handle_channel_report(void);
+void crw_wait_for_channel_report(void);
 
 #define NR_RSCS 16
 
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 80ef58c61970..538e1b36a726 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl)
 	int rc = -ENOSYS;
 
 	asm volatile(
-		"	.insn	s,0xb2160000,0(%2)\n"
+		"	.insn	s,0xb2160000,%1\n"
 		"0:	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
-		: "+d" (rc) : "m" (*ctrl), "a" (ctrl));
+		: "+d" (rc) : "Q" (*ctrl));
 	return rc;
 }
 
@@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib)
 	int rc = -ENOSYS;
 
 	asm volatile(
-		"	.insn	s,0xb2170000,0(%2)\n"
+		"	.insn	s,0xb2170000,%1\n"
 		"0:	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
-		: "+d" (rc) : "m" (*aib), "a" (aib));
+		: "+d" (rc) : "Q" (*aib));
 	return rc;
 }
 
@@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func)
 	int rc = -ENOSYS;
 
 	asm volatile(
-		"	.insn	s,0xb2b30000,0(%2)\n"
+		"	.insn	s,0xb2b30000,%1\n"
 		"0:	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
-		: "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0));
+		: "+d" (rc) : "Q" (*aib), "d" (reg0));
 	return rc;
 }
 
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index c2fb432f576a..15b3ac253898 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,6 @@
 
 #include <linux/types.h>
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 /* store then or system mask. */
 #define __raw_local_irq_stosm(__or)					\
 ({									\
@@ -36,40 +34,6 @@
 	asm volatile("ssm   %0" : : "Q" (__mask) : "memory");		\
 })
 
-#else /* __GNUC__ */
-
-/* store then or system mask. */
-#define __raw_local_irq_stosm(__or)					\
-({									\
-	unsigned long __mask;						\
-	asm volatile(							\
-		"	stosm	0(%1),%2"				\
-		: "=m" (__mask)						\
-		: "a" (&__mask), "i" (__or) : "memory");		\
-	__mask;								\
-})
-
-/* store then and system mask. */
-#define __raw_local_irq_stnsm(__and)					\
-({									\
-	unsigned long __mask;						\
-	asm volatile(							\
-		"	stnsm	0(%1),%2"				\
-		: "=m" (__mask)						\
-		: "a" (&__mask), "i" (__and) : "memory");		\
-	__mask;								\
-})
-
-/* set system mask. */
-#define __raw_local_irq_ssm(__mask)					\
-({									\
-	asm volatile(							\
-		"	ssm	0(%0)"					\
-		: : "a" (&__mask), "m" (__mask) : "memory");		\
-})
-
-#endif /* __GNUC__ */
-
 /* interrupt control.. */
 static inline unsigned long raw_local_irq_enable(void)
 {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index c25dfac7dd76..05527c040b7a 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,141 +1,16 @@
 /*
- * include/asm-s390/lowcore.h
- *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Hartmut Penner (hp@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com),
- *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *	Copyright IBM Corp. 1999,2010
+ *	Author(s): Hartmut Penner <hp@de.ibm.com>,
+ *		   Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *		   Denis Joseph Barrow,
  */
 
 #ifndef _ASM_S390_LOWCORE_H
 #define _ASM_S390_LOWCORE_H
 
-#define __LC_IPL_PARMBLOCK_PTR		0x0014
-#define __LC_EXT_PARAMS			0x0080
-#define __LC_CPU_ADDRESS		0x0084
-#define __LC_EXT_INT_CODE		0x0086
-
-#define __LC_SVC_ILC			0x0088
-#define __LC_SVC_INT_CODE		0x008a
-#define __LC_PGM_ILC			0x008c
-#define __LC_PGM_INT_CODE		0x008e
-
-#define __LC_PER_ATMID			0x0096
-#define __LC_PER_ADDRESS		0x0098
-#define __LC_PER_ACCESS_ID		0x00a1
-#define __LC_AR_MODE_ID			0x00a3
-
-#define __LC_SUBCHANNEL_ID		0x00b8
-#define __LC_SUBCHANNEL_NR		0x00ba
-#define __LC_IO_INT_PARM		0x00bc
-#define __LC_IO_INT_WORD		0x00c0
-#define __LC_STFL_FAC_LIST		0x00c8
-#define __LC_MCCK_CODE			0x00e8
-
-#define __LC_DUMP_REIPL			0x0e00
-
-#ifndef __s390x__
-#define __LC_EXT_OLD_PSW		0x0018
-#define __LC_SVC_OLD_PSW		0x0020
-#define __LC_PGM_OLD_PSW		0x0028
-#define __LC_MCK_OLD_PSW		0x0030
-#define __LC_IO_OLD_PSW			0x0038
-#define __LC_EXT_NEW_PSW		0x0058
-#define __LC_SVC_NEW_PSW		0x0060
-#define __LC_PGM_NEW_PSW		0x0068
-#define __LC_MCK_NEW_PSW		0x0070
-#define __LC_IO_NEW_PSW			0x0078
-#define __LC_SAVE_AREA			0x0200
-#define __LC_RETURN_PSW			0x0240
-#define __LC_RETURN_MCCK_PSW		0x0248
-#define __LC_SYNC_ENTER_TIMER		0x0250
-#define __LC_ASYNC_ENTER_TIMER		0x0258
-#define __LC_EXIT_TIMER			0x0260
-#define __LC_USER_TIMER			0x0268
-#define __LC_SYSTEM_TIMER		0x0270
-#define __LC_STEAL_TIMER		0x0278
-#define __LC_LAST_UPDATE_TIMER		0x0280
-#define __LC_LAST_UPDATE_CLOCK		0x0288
-#define __LC_CURRENT			0x0290
-#define __LC_THREAD_INFO		0x0294
-#define __LC_KERNEL_STACK		0x0298
-#define __LC_ASYNC_STACK		0x029c
-#define __LC_PANIC_STACK		0x02a0
-#define __LC_KERNEL_ASCE		0x02a4
-#define __LC_USER_ASCE			0x02a8
-#define __LC_USER_EXEC_ASCE		0x02ac
-#define __LC_CPUID			0x02b0
-#define __LC_INT_CLOCK			0x02c8
-#define __LC_MACHINE_FLAGS		0x02d8
-#define __LC_FTRACE_FUNC		0x02dc
-#define __LC_IRB			0x0300
-#define __LC_PFAULT_INTPARM		0x0080
-#define __LC_CPU_TIMER_SAVE_AREA	0x00d8
-#define __LC_CLOCK_COMP_SAVE_AREA	0x00e0
-#define __LC_PSW_SAVE_AREA		0x0100
-#define __LC_PREFIX_SAVE_AREA		0x0108
-#define __LC_AREGS_SAVE_AREA		0x0120
-#define __LC_FPREGS_SAVE_AREA		0x0160
-#define __LC_GPREGS_SAVE_AREA		0x0180
-#define __LC_CREGS_SAVE_AREA		0x01c0
-#else /* __s390x__ */
-#define __LC_LAST_BREAK			0x0110
-#define __LC_EXT_OLD_PSW		0x0130
-#define __LC_SVC_OLD_PSW		0x0140
-#define __LC_PGM_OLD_PSW		0x0150
-#define __LC_MCK_OLD_PSW		0x0160
-#define __LC_IO_OLD_PSW			0x0170
-#define __LC_RESTART_PSW		0x01a0
-#define __LC_EXT_NEW_PSW		0x01b0
-#define __LC_SVC_NEW_PSW		0x01c0
-#define __LC_PGM_NEW_PSW		0x01d0
-#define __LC_MCK_NEW_PSW		0x01e0
-#define __LC_IO_NEW_PSW			0x01f0
-#define __LC_SAVE_AREA			0x0200
-#define __LC_RETURN_PSW			0x0280
-#define __LC_RETURN_MCCK_PSW		0x0290
-#define __LC_SYNC_ENTER_TIMER		0x02a0
-#define __LC_ASYNC_ENTER_TIMER		0x02a8
-#define __LC_EXIT_TIMER			0x02b0
-#define __LC_USER_TIMER			0x02b8
-#define __LC_SYSTEM_TIMER		0x02c0
-#define __LC_STEAL_TIMER		0x02c8
-#define __LC_LAST_UPDATE_TIMER		0x02d0
-#define __LC_LAST_UPDATE_CLOCK		0x02d8
-#define __LC_CURRENT			0x02e0
-#define __LC_THREAD_INFO		0x02e8
-#define __LC_KERNEL_STACK		0x02f0
-#define __LC_ASYNC_STACK		0x02f8
-#define __LC_PANIC_STACK		0x0300
-#define __LC_KERNEL_ASCE		0x0308
-#define __LC_USER_ASCE			0x0310
-#define __LC_USER_EXEC_ASCE		0x0318
-#define __LC_CPUID			0x0320
-#define __LC_INT_CLOCK			0x0340
-#define __LC_VDSO_PER_CPU		0x0350
-#define __LC_MACHINE_FLAGS		0x0358
-#define __LC_FTRACE_FUNC		0x0360
-#define __LC_IRB			0x0380
-#define __LC_PASTE			0x03c0
-#define __LC_PFAULT_INTPARM		0x11b8
-#define __LC_FPREGS_SAVE_AREA		0x1200
-#define __LC_GPREGS_SAVE_AREA		0x1280
-#define __LC_PSW_SAVE_AREA		0x1300
-#define __LC_PREFIX_SAVE_AREA		0x1318
-#define __LC_FP_CREG_SAVE_AREA		0x131c
-#define __LC_TODREG_SAVE_AREA		0x1324
-#define __LC_CPU_TIMER_SAVE_AREA	0x1328
-#define __LC_CLOCK_COMP_SAVE_AREA	0x1331
-#define __LC_AREGS_SAVE_AREA		0x1340
-#define __LC_CREGS_SAVE_AREA		0x1380
-#endif /* __s390x__ */
-
-#ifndef __ASSEMBLY__
-
-#include <asm/cpu.h>
-#include <asm/ptrace.h>
 #include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/cpu.h>
 
 void restart_int_handler(void);
 void ext_int_handler(void);
@@ -144,7 +19,12 @@ void pgm_check_handler(void);
 void mcck_int_handler(void);
 void io_int_handler(void);
 
-struct save_area_s390 {
+#ifdef CONFIG_32BIT
+
+#define LC_ORDER 0
+#define LC_PAGES 1
+
+struct save_area {
 	u32	ext_save;
 	u64	timer;
 	u64	clk_cmp;
@@ -156,54 +36,13 @@ struct save_area_s390 {
 	u64	fp_regs[4];
 	u32	gp_regs[16];
 	u32	ctrl_regs[16];
-} __attribute__((packed));
+} __packed;
 
-struct save_area_s390x {
-	u64	fp_regs[16];
-	u64	gp_regs[16];
-	u8	psw[16];
-	u8	pad1[8];
-	u32	pref_reg;
-	u32	fp_ctrl_reg;
-	u8	pad2[4];
-	u32	tod_reg;
-	u64	timer;
-	u64	clk_cmp;
-	u8	pad3[8];
-	u32	acc_regs[16];
-	u64	ctrl_regs[16];
-} __attribute__((packed));
-
-union save_area {
-	struct save_area_s390	s390;
-	struct save_area_s390x	s390x;
-};
-
-#define SAVE_AREA_BASE_S390	0xd4
-#define SAVE_AREA_BASE_S390X	0x1200
-
-#ifndef __s390x__
-#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
-#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
-#else
-#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
-#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
-#endif
-
-#ifndef __s390x__
-#define LC_ORDER 0
-#else
-#define LC_ORDER 1
-#endif
-
-#define LC_PAGES (1UL << LC_ORDER)
-
-struct _lowcore
-{
-#ifndef __s390x__
-	/* 0x0000 - 0x01ff: defined by architecture */
+struct _lowcore {
 	psw_t	restart_psw;			/* 0x0000 */
-	__u32	ccw2[4];			/* 0x0008 */
+	psw_t	restart_old_psw;		/* 0x0008 */
+	__u8	pad_0x0010[0x0014-0x0010];	/* 0x0010 */
+	__u32	ipl_parmblock_ptr;		/* 0x0014 */
 	psw_t	external_old_psw;		/* 0x0018 */
 	psw_t	svc_old_psw;			/* 0x0020 */
 	psw_t	program_old_psw;		/* 0x0028 */
@@ -229,7 +68,9 @@ struct _lowcore
 	__u32	monitor_code;			/* 0x009c */
 	__u8	exc_access_id;			/* 0x00a0 */
 	__u8	per_access_id;			/* 0x00a1 */
-	__u8	pad_0x00a2[0x00b8-0x00a2];	/* 0x00a2 */
+	__u8	op_access_id;			/* 0x00a2 */
+	__u8	ar_access_id;			/* 0x00a3 */
+	__u8	pad_0x00a4[0x00b8-0x00a4];	/* 0x00a4 */
 	__u16	subchannel_id;			/* 0x00b8 */
 	__u16	subchannel_nr;			/* 0x00ba */
 	__u32	io_int_parm;			/* 0x00bc */
@@ -245,8 +86,9 @@ struct _lowcore
 	__u32	external_damage_code;		/* 0x00f4 */
 	__u32	failing_storage_address;	/* 0x00f8 */
 	__u8	pad_0x00fc[0x0100-0x00fc];	/* 0x00fc */
-	__u32	st_status_fixed_logout[4];	/* 0x0100 */
-	__u8	pad_0x0110[0x0120-0x0110];	/* 0x0110 */
+	psw_t	psw_save_area;			/* 0x0100 */
+	__u32	prefixreg_save_area;		/* 0x0108 */
+	__u8	pad_0x010c[0x0120-0x010c];	/* 0x010c */
 
 	/* CPU register save area: defined by architecture */
 	__u32	access_regs_save_area[16];	/* 0x0120 */
@@ -310,10 +152,32 @@ struct _lowcore
 
 	/* Align to the top 1k of prefix area */
 	__u8	pad_0x0e08[0x1000-0x0e08];	/* 0x0e08 */
-#else /* !__s390x__ */
-	/* 0x0000 - 0x01ff: defined by architecture */
-	__u32	ccw1[2];			/* 0x0000 */
-	__u32	ccw2[4];			/* 0x0008 */
+} __packed;
+
+#else /* CONFIG_32BIT */
+
+#define LC_ORDER 1
+#define LC_PAGES 2
+
+struct save_area {
+	u64	fp_regs[16];
+	u64	gp_regs[16];
+	u8	psw[16];
+	u8	pad1[8];
+	u32	pref_reg;
+	u32	fp_ctrl_reg;
+	u8	pad2[4];
+	u32	tod_reg;
+	u64	timer;
+	u64	clk_cmp;
+	u8	pad3[8];
+	u32	acc_regs[16];
+	u64	ctrl_regs[16];
+} __packed;
+
+struct _lowcore {
+	__u8	pad_0x0000[0x0014-0x0000];	/* 0x0000 */
+	__u32	ipl_parmblock_ptr;		/* 0x0014 */
 	__u8	pad_0x0018[0x0080-0x0018];	/* 0x0018 */
 	__u32	ext_params;			/* 0x0080 */
 	__u16	cpu_addr;			/* 0x0084 */
@@ -344,7 +208,9 @@ struct _lowcore
 	__u8	pad_0x00f0[0x00f4-0x00f0];	/* 0x00f0 */
 	__u32	external_damage_code;		/* 0x00f4 */
 	addr_t	failing_storage_address;	/* 0x00f8 */
-	__u8	pad_0x0100[0x0120-0x0100];	/* 0x0100 */
+	__u8	pad_0x0100[0x0110-0x0100];	/* 0x0100 */
+	__u64	breaking_event_addr;		/* 0x0110 */
+	__u8	pad_0x0118[0x0120-0x0118];	/* 0x0118 */
 	psw_t	restart_old_psw;		/* 0x0120 */
 	psw_t	external_old_psw;		/* 0x0130 */
 	psw_t	svc_old_psw;			/* 0x0140 */
@@ -425,7 +291,7 @@ struct _lowcore
 	/* CPU register save area: defined by architecture */
 	__u64	floating_pt_save_area[16];	/* 0x1200 */
 	__u64	gpregs_save_area[16];		/* 0x1280 */
-	__u32	st_status_fixed_logout[4];	/* 0x1300 */
+	psw_t	psw_save_area;			/* 0x1300 */
 	__u8	pad_0x1310[0x1318-0x1310];	/* 0x1310 */
 	__u32	prefixreg_save_area;		/* 0x1318 */
 	__u32	fpt_creg_save_area;		/* 0x131c */
@@ -439,10 +305,12 @@ struct _lowcore
 
 	/* align to the top of the prefix area */
 	__u8	pad_0x1400[0x2000-0x1400];	/* 0x1400 */
-#endif /* !__s390x__ */
-} __attribute__((packed)); /* End structure*/
+} __packed;
+
+#endif /* CONFIG_32BIT */
 
 #define S390_lowcore (*((struct _lowcore *) 0))
+
 extern struct _lowcore *lowcore_ptr[];
 
 static inline void set_prefix(__u32 address)
@@ -458,6 +326,4 @@ static inline __u32 store_prefix(void)
 	return address;
 }
 
-#endif
-
-#endif
+#endif /* _ASM_S390_LOWCORE_H */
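
Note: this rewrite drops the long, hand-maintained list of __LC_* offset constants in favour of one struct _lowcore per architecture mode, with explicit pad_0xNNNN members pinning every field to its architected offset. Offsets needed by assembly can then be generated from the struct instead of being duplicated by hand, roughly like this (a sketch in the style of arch/s390/kernel/asm-offsets.c; the exact entries there may differ):

	#include <linux/kbuild.h>
	#include <asm/lowcore.h>

	int main(void)
	{
		DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
		DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
		return 0;
	}

Generated this way, an assembly offset can no longer drift away from the C structure layout.
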
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5e9daf5d7f22..af650fb47206 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,9 +107,6 @@ typedef pte_t *pgtable_t;
 #define __pgd(x)        ((pgd_t) { (x) } )
 #define __pgprot(x)     ((pgprot_t) { (x) } )
 
-/* default storage key used for all pages */
-extern unsigned int default_storage_key;
-
 static inline void
 page_set_storage_key(unsigned long addr, unsigned int skey)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2fa79cf0614..9b5b9189c15e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -43,7 +43,7 @@ extern void vmem_map_init(void);
  * The S390 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma, address, pte)     do { } while (0)
+#define update_mmu_cache(vma, address, ptep)    do { } while (0)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b42715458312..73e259834e10 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -28,7 +28,7 @@
 
 static inline void get_cpu_id(struct cpuid *ptr)
 {
-	asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr));
+	asm volatile("stidp %0" : "=Q" (*ptr));
 }
 
 extern void s390_adjust_jiffies(void);
@@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key)
 static inline void __load_psw(psw_t psw)
 {
 #ifndef __s390x__
-	asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
+	asm volatile("lpsw %0" : : "Q" (psw) : "cc");
 #else
-	asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
+	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
 #endif
 }
 
@@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask)
 	asm volatile(
 		"	basr	%0,0\n"
 		"0:	ahi	%0,1f-0b\n"
-		"	st	%0,4(%1)\n"
-		"	lpsw	0(%1)\n"
+		"	st	%0,%O1+4(%R1)\n"
+		"	lpsw	%1\n"
 		"1:"
-		: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
+		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
 #else /* __s390x__ */
 	asm volatile(
 		"	larl	%0,1f\n"
-		"	stg	%0,8(%1)\n"
-		"	lpswe	0(%1)\n"
+		"	stg	%0,%O1+8(%R1)\n"
+		"	lpswe	%1\n"
 		"1:"
-		: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
+		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
 #endif /* __s390x__ */
 }
 
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 95dcf183a28d..dd2d913afcae 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -492,13 +492,24 @@ struct user_regs_struct
 struct task_struct;
 extern void user_enable_single_step(struct task_struct *);
 extern void user_disable_single_step(struct task_struct *);
+extern void show_regs(struct pt_regs * regs);
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
 #define user_stack_pointer(regs)((regs)->gprs[15])
 #define regs_return_value(regs)((regs)->gprs[2])
 #define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs * regs);
+
+int regs_query_register_offset(const char *name);
+const char *regs_query_register_name(unsigned int offset);
+unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->gprs[15] & PSW_ADDR_INSN;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
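
Note: the four regs_* declarations added above form the generic register-accessor API consumed by kprobes and similar tracers. A hypothetical caller (the register name "gpr2" is an assumption about the s390 naming table; the lookup returns a negative errno for an unknown name):

	static unsigned long read_second_gpr(struct pt_regs *regs)
	{
		int offset = regs_query_register_offset("gpr2");

		return offset < 0 ? 0 : regs_get_register(regs, offset);
	}
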
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 79d849f014f0..c666bfe5e984 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -13,7 +13,8 @@
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
 
-#define QDIO_MAX_QUEUES_PER_IRQ		32
+/* only use 4 queues to save some cachelines */
+#define QDIO_MAX_QUEUES_PER_IRQ		4
 #define QDIO_MAX_BUFFERS_PER_Q		128
 #define QDIO_MAX_BUFFERS_MASK		(QDIO_MAX_BUFFERS_PER_Q - 1)
 #define QDIO_MAX_ELEMENTS_PER_BUFFER	16
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 9d2a17971805..423fdda2322d 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	ahi	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	ahi	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	aghi	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	aghi	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count),
-		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+		: "cc", "memory");
 	if (old < 0)
 		rwsem_down_read_failed(sem);
 }
@@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	ltr	%1,%0\n"
 		"	jm	1f\n"
-		"	ahi	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	ahi	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b\n"
 		"1:"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	ltgr	%1,%0\n"
 		"	jm	1f\n"
-		"	aghi	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	aghi	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b\n"
 		"1:"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count),
-		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+		: "cc", "memory");
 	return old >= 0 ? 1 : 0;
 }
 
@@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	a	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	a	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	ag	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	ag	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
 	if (old != 0)
 		rwsem_down_write_failed(sem);
@@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%2)\n"
+		"	l	%0,%1\n"
 		"0:	ltr	%0,%0\n"
 		"	jnz	1f\n"
-		"	cs	%0,%4,0(%2)\n"
+		"	cs	%0,%3,%1\n"
 		"	jl	0b\n"
 #else /* __s390x__ */
-		"	lg	%0,0(%2)\n"
+		"	lg	%0,%1\n"
 		"0:	ltgr	%0,%0\n"
 		"	jnz	1f\n"
-		"	csg	%0,%4,0(%2)\n"
+		"	csg	%0,%3,%1\n"
 		"	jl	0b\n"
 #endif /* __s390x__ */
 		"1:"
-		: "=&d" (old), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count),
-		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
+		: "=&d" (old), "=Q" (sem->count)
+		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
+		: "cc", "memory");
 	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
 }
 
@@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	ahi	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	ahi	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	aghi	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	aghi	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count),
-		  "i" (-RWSEM_ACTIVE_READ_BIAS)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
 		: "cc", "memory");
 	if (new < 0)
 		if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem)
 	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	a	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	a	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	ag	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	ag	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
 	if (new < 0)
 		if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	tmp = -RWSEM_WAITING_BIAS;
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	a	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	a	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	ag	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	ag	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
 	if (new > 1)
 		rwsem_downgrade_wake(sem);
@@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	ar	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	ar	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	agr	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	agr	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count), "d" (delta)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "d" (delta)
 		: "cc", "memory");
 }
 
@@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 
 	asm volatile(
 #ifndef __s390x__
-		"	l	%0,0(%3)\n"
+		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
-		"	ar	%1,%5\n"
-		"	cs	%0,%1,0(%3)\n"
+		"	ar	%1,%4\n"
+		"	cs	%0,%1,%2\n"
 		"	jl	0b"
 #else /* __s390x__ */
-		"	lg	%0,0(%3)\n"
+		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	agr	%1,%5\n"
-		"	csg	%0,%1,0(%3)\n"
+		"	agr	%1,%4\n"
+		"	csg	%0,%1,%2\n"
 		"	jl	0b"
 #endif /* __s390x__ */
-		: "=&d" (old), "=&d" (new), "=m" (sem->count)
-		: "a" (&sem->count), "m" (sem->count), "d" (delta)
+		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
+		: "Q" (sem->count), "d" (delta)
 		: "cc", "memory");
 	return new;
 }
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 52a779c337e8..9ab6bd3a65d1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -14,14 +14,14 @@
 
 #ifdef __KERNEL__
 
-#include <asm/lowcore.h>
-#include <asm/types.h>
-
 #define PARMAREA		0x10400
 #define MEMORY_CHUNKS		256
 
 #ifndef __ASSEMBLY__
 
+#include <asm/lowcore.h>
+#include <asm/types.h>
+
 #ifndef __s390x__
 #define IPL_DEVICE        (*(unsigned long *)  (0x10404))
 #define INITRD_START      (*(unsigned long *)  (0x1040C))
@@ -71,9 +71,12 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_KVM	(1UL << 9)
 #define MACHINE_FLAG_HPAGE	(1UL << 10)
 #define MACHINE_FLAG_PFMF	(1UL << 11)
+#define MACHINE_FLAG_LPAR	(1UL << 12)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_LPAR		(S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
+
 #define MACHINE_HAS_DIAG9C	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 
 #ifndef __s390x__
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index f72d611f7e13..e3bffd4e2d66 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -1,24 +1,19 @@
 /*
- * include/asm-s390/sigp.h
+ * Routines and structures for signalling other processors.
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *               Heiko Carstens (heiko.carstens@de.ibm.com)
- *
- *  sigp.h by D.J. Barrow (c) IBM 1999
- *  contains routines / structures for signalling other S/390 processors in an
- *  SMP configuration.
+ * Copyright IBM Corp. 1999,2010
+ * Author(s): Denis Joseph Barrow,
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
-#ifndef __SIGP__
-#define __SIGP__
+#ifndef __ASM_SIGP_H
+#define __ASM_SIGP_H
 
 #include <asm/system.h>
 
-/* get real cpu address from logical cpu number */
-extern int __cpu_logical_map[];
+/* Get real cpu address from logical cpu number. */
+extern unsigned short __cpu_logical_map[];
 
 static inline int cpu_logical_map(int cpu)
 {
@@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu)
 #endif
 }
 
-typedef enum
-{
-	sigp_unassigned=0x0,
-	sigp_sense,
-	sigp_external_call,
-	sigp_emergency_signal,
-	sigp_start,
-	sigp_stop,
-	sigp_restart,
-	sigp_unassigned1,
-	sigp_unassigned2,
-	sigp_stop_and_store_status,
-	sigp_unassigned3,
-	sigp_initial_cpu_reset,
-	sigp_cpu_reset,
-	sigp_set_prefix,
-	sigp_store_status_at_address,
-	sigp_store_extended_status_at_address
-} sigp_order_code;
-
-typedef __u32 sigp_status_word;
-
-typedef enum
-{
-	sigp_order_code_accepted=0,
-	sigp_status_stored,
-	sigp_busy,
-	sigp_not_operational
-} sigp_ccode;
-
+enum {
+	sigp_sense = 1,
+	sigp_external_call = 2,
+	sigp_emergency_signal = 3,
+	sigp_start = 4,
+	sigp_stop = 5,
+	sigp_restart = 6,
+	sigp_stop_and_store_status = 9,
+	sigp_initial_cpu_reset = 11,
+	sigp_cpu_reset = 12,
+	sigp_set_prefix = 13,
+	sigp_store_status_at_address = 14,
+	sigp_store_extended_status_at_address = 15,
+	sigp_set_architecture = 18,
+	sigp_conditional_emergency_signal = 19,
+	sigp_sense_running = 21,
+};
+
+enum {
+	sigp_order_code_accepted = 0,
+	sigp_status_stored = 1,
+	sigp_busy = 2,
+	sigp_not_operational = 3,
+};
 
 /*
- * Definitions for the external call
+ * Definitions for external call.
  */
-
-/* 'Bit' signals, asynchronous */
-typedef enum
-{
-	ec_schedule=0,
+enum {
+	ec_schedule = 0,
 	ec_call_function,
 	ec_call_function_single,
-	ec_bit_last
-} ec_bit_sig;
+};
 
 /*
- * Signal processor
+ * Signal processor.
  */
-static inline sigp_ccode
-signal_processor(__u16 cpu_addr, sigp_order_code order_code)
+static inline int raw_sigp(u16 cpu, int order)
 {
 	register unsigned long reg1 asm ("1") = 0;
-	sigp_ccode ccode;
+	int ccode;
 
 	asm volatile(
 		"	sigp	%1,%2,0(%3)\n"
 		"	ipm	%0\n"
 		"	srl	%0,28\n"
 		: "=d" (ccode)
-		: "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
-		  "a" (order_code) : "cc" , "memory");
+		: "d" (reg1), "d" (cpu),
+		  "a" (order) : "cc" , "memory");
 	return ccode;
 }
 
 /*
- * Signal processor with parameter
+ * Signal processor with parameter.
  */
-static inline sigp_ccode
-signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
+static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
 {
 	register unsigned int reg1 asm ("1") = parameter;
-	sigp_ccode ccode;
+	int ccode;
 
 	asm volatile(
 		"	sigp	%1,%2,0(%3)\n"
 		"	ipm	%0\n"
 		"	srl	%0,28\n"
 		: "=d" (ccode)
-		: "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
-		  "a" (order_code) : "cc" , "memory");
+		: "d" (reg1), "d" (cpu),
+		  "a" (order) : "cc" , "memory");
 	return ccode;
 }
 
 /*
- * Signal processor with parameter and return status
+ * Signal processor with parameter and return status.
  */
-static inline sigp_ccode
-signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
-		    sigp_order_code order_code)
+static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
 {
-	register unsigned int reg1 asm ("1") = parameter;
-	sigp_ccode ccode;
+	register unsigned int reg1 asm ("1") = parm;
+	int ccode;
 
 	asm volatile(
 		"	sigp	%1,%2,0(%3)\n"
 		"	ipm	%0\n"
 		"	srl	%0,28\n"
 		: "=d" (ccode), "+d" (reg1)
-		: "d" (cpu_logical_map(cpu_addr)), "a" (order_code)
+		: "d" (cpu), "a" (order)
 		: "cc" , "memory");
-	*statusptr = reg1;
+	*status = reg1;
 	return ccode;
 }
 
-#endif /* __SIGP__ */
+static inline int sigp(int cpu, int order)
+{
+	return raw_sigp(cpu_logical_map(cpu), order);
+}
+
+static inline int sigp_p(u32 parameter, int cpu, int order)
+{
+	return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
+}
+
+static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
+{
+	return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
+}
+
+#endif /* __ASM_SIGP_H */
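
Note: the rework replaces the sigp_order_code/sigp_ccode typedef enums with plain ints carrying the architected numeric values, and splits the interface into two layers: raw_sigp*() take a real cpu address, while sigp*() accept a logical cpu number and translate it through cpu_logical_map(). A hedged usage sketch (illustrative only; the error value is chosen arbitrarily):

	static int stop_cpu(int cpu)
	{
		int ccode;

		do {
			ccode = sigp(cpu, sigp_stop);	/* sigp_busy: retry */
		} while (ccode == sigp_busy);

		return ccode == sigp_order_code_accepted ? 0 : -EIO;
	}
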
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 2ab1141eeb50..edc03cb9cd79 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,43 @@ extern int smp_cpu_polarization[];
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
+extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
+			      int from, int to);
+extern void smp_restart_cpu(void);
+
+/*
+ * returns 1 if (virtual) cpu is scheduled
+ * returns 0 otherwise
+ */
+static inline int smp_vcpu_scheduled(int cpu)
+{
+	u32 status;
+
+	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
+	case sigp_status_stored:
+		/* Check for running status */
+		if (status & 0x400)
+			return 0;
+		break;
+	case sigp_not_operational:
+		return 0;
+	default:
+		break;
+	}
+	return 1;
+}
+
+#else /* CONFIG_SMP */
+
+static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+{
+	func(data);
+}
+
+#define smp_vcpu_scheduled	(1)
 
 #endif /* CONFIG_SMP */
 
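
Note: smp_vcpu_scheduled() is built on the new sigp_sense_running order: condition code sigp_status_stored with bit 0x400 set in the status word means the target (virtual) cpu is not currently backed by a running processor. A plausible consumer is a spinlock retry path that only yields its timeslice when the lock holder is preempted; smp_yield_cpu() here is an assumed helper, not part of this header:

	static inline void spin_retry_relax(int owner_cpu)
	{
		if (!smp_vcpu_scheduled(owner_cpu))
			smp_yield_cpu(owner_cpu);	/* let the lock holder run */
	}
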
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index a587907d77f3..56612fc8186e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,8 +13,6 @@
13 13
14#include <linux/smp.h> 14#include <linux/smp.h>
15 15
16#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
17
18static inline int 16static inline int
19_raw_compare_and_swap(volatile unsigned int *lock, 17_raw_compare_and_swap(volatile unsigned int *lock,
20 unsigned int old, unsigned int new) 18 unsigned int old, unsigned int new)
@@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock,
27 return old; 25 return old;
28} 26}
29 27
30#else /* __GNUC__ */
31
32static inline int
33_raw_compare_and_swap(volatile unsigned int *lock,
34 unsigned int old, unsigned int new)
35{
36 asm volatile(
37 " cs %0,%3,0(%4)"
38 : "=d" (old), "=m" (*lock)
39 : "0" (old), "d" (new), "a" (lock), "m" (*lock)
40 : "cc", "memory" );
41 return old;
42}
43
44#endif /* __GNUC__ */
45
46/* 28/*
47 * Simple spin lock operations. There are two variants, one clears IRQ's 29 * Simple spin lock operations. There are two variants, one clears IRQ's
48 * on the local processor, one does not. 30 * on the local processor, one does not.
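
_raw_compare_and_swap() returns the previous content of the lock word, so success means the return value equals the expected old value. A trylock sketch built on it (the convention of a free lock word being 0 and storing the complemented processor id is an assumption borrowed from arch/s390/lib/spinlock.c):

        static inline int spin_trylock_sketch(volatile unsigned int *lock)
        {
                unsigned int self = ~smp_processor_id();

                /* 0 = free; nonzero return value = lock acquired */
                return _raw_compare_and_swap(lock, 0, self) == 0;
        }
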
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index eb18dc1f327b..6bdee21c077e 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x)
47 47
48 asm volatile( 48 asm volatile(
49#ifndef __s390x__ 49#ifndef __s390x__
50 " icm %0,8,3(%1)\n" 50 " icm %0,8,%O1+3(%R1)\n"
51 " icm %0,4,2(%1)\n" 51 " icm %0,4,%O1+2(%R1)\n"
52 " icm %0,2,1(%1)\n" 52 " icm %0,2,%O1+1(%R1)\n"
53 " ic %0,0(%1)" 53 " ic %0,%1"
54 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 54 : "=&d" (result) : "Q" (*x) : "cc");
55#else /* __s390x__ */ 55#else /* __s390x__ */
56 " lrv %0,%1" 56 " lrv %0,%1"
57 : "=d" (result) : "m" (*x)); 57 : "=d" (result) : "m" (*x));
@@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x)
77 77
78 asm volatile( 78 asm volatile(
79#ifndef __s390x__ 79#ifndef __s390x__
80 " icm %0,2,1(%1)\n" 80 " icm %0,2,%O+1(%R1)\n"
81 " ic %0,0(%1)\n" 81 " ic %0,%1\n"
82 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 82 : "=&d" (result) : "Q" (*x) : "cc");
83#else /* __s390x__ */ 83#else /* __s390x__ */
84 " lrvh %0,%1" 84 " lrvh %0,%1"
85 : "=d" (result) : "m" (*x)); 85 : "=d" (result) : "m" (*x));
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index e0a73d3eb837..8429686951f9 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -15,6 +15,13 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17 17
18/*
19 * The syscall table always contains 32 bit pointers since we know that the
20 * address of the function to be called is (way) below 4GB. So the "int"
 21 * type here is what we need for both 32 bit and 64 bit systems.
22 */
23extern const unsigned int sys_call_table[];
24
18static inline long syscall_get_nr(struct task_struct *task, 25static inline long syscall_get_nr(struct task_struct *task,
19 struct pt_regs *regs) 26 struct pt_regs *regs)
20{ 27{
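
Turning a 32-bit table entry back into a callable pointer is just a zero-extension plus a cast; the real dispatch does this in the assembler entry code, but a hypothetical C equivalent (helper name and prototype are illustrative only) would look like:

        typedef long (*syscall_fn_t)(unsigned long, unsigned long,
                                     unsigned long, unsigned long,
                                     unsigned long, unsigned long);

        static inline syscall_fn_t syscall_fn(unsigned int nr)
        {
                return (syscall_fn_t)(unsigned long) sys_call_table[nr];
        }
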
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 9d70057d828c..22bdb2a0ee5f 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -87,7 +87,8 @@ struct sysinfo_2_2_2 {
87 87
88struct sysinfo_3_2_2 { 88struct sysinfo_3_2_2 {
89 char reserved_0[31]; 89 char reserved_0[31];
90 unsigned char count; 90 unsigned char :4;
91 unsigned char count:4;
91 struct { 92 struct {
92 char reserved_0[4]; 93 char reserved_0[4];
93 unsigned short cpus_total; 94 unsigned short cpus_total;
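
The anonymous 4-bit field keeps the high nibble of that byte reserved, so only the low nibble at offset 31 carries the count. Decoding it by hand, for illustration (assuming the same struct layout):

        static inline int sysinfo_3_2_2_count(const void *info)
        {
                /* byte 31 follows reserved_0[31]; high nibble reserved */
                return ((const unsigned char *) info)[31] & 0x0f;
        }
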
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 379661d2f81a..67ee6c3c6bb3 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *);
24static inline void save_fp_regs(s390_fp_regs *fpregs) 24static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 25{
26 asm volatile( 26 asm volatile(
27 " std 0,8(%1)\n" 27 " std 0,%O0+8(%R0)\n"
28 " std 2,24(%1)\n" 28 " std 2,%O0+24(%R0)\n"
29 " std 4,40(%1)\n" 29 " std 4,%O0+40(%R0)\n"
30 " std 6,56(%1)" 30 " std 6,%O0+56(%R0)"
31 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 31 : "=Q" (*fpregs) : "Q" (*fpregs));
32 if (!MACHINE_HAS_IEEE) 32 if (!MACHINE_HAS_IEEE)
33 return; 33 return;
34 asm volatile( 34 asm volatile(
35 " stfpc 0(%1)\n" 35 " stfpc %0\n"
36 " std 1,16(%1)\n" 36 " std 1,%O0+16(%R0)\n"
37 " std 3,32(%1)\n" 37 " std 3,%O0+32(%R0)\n"
38 " std 5,48(%1)\n" 38 " std 5,%O0+48(%R0)\n"
39 " std 7,64(%1)\n" 39 " std 7,%O0+64(%R0)\n"
40 " std 8,72(%1)\n" 40 " std 8,%O0+72(%R0)\n"
41 " std 9,80(%1)\n" 41 " std 9,%O0+80(%R0)\n"
42 " std 10,88(%1)\n" 42 " std 10,%O0+88(%R0)\n"
43 " std 11,96(%1)\n" 43 " std 11,%O0+96(%R0)\n"
44 " std 12,104(%1)\n" 44 " std 12,%O0+104(%R0)\n"
45 " std 13,112(%1)\n" 45 " std 13,%O0+112(%R0)\n"
46 " std 14,120(%1)\n" 46 " std 14,%O0+120(%R0)\n"
47 " std 15,128(%1)\n" 47 " std 15,%O0+128(%R0)\n"
48 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 48 : "=Q" (*fpregs) : "Q" (*fpregs));
49} 49}
50 50
51static inline void restore_fp_regs(s390_fp_regs *fpregs) 51static inline void restore_fp_regs(s390_fp_regs *fpregs)
52{ 52{
53 asm volatile( 53 asm volatile(
54 " ld 0,8(%0)\n" 54 " ld 0,%O0+8(%R0)\n"
55 " ld 2,24(%0)\n" 55 " ld 2,%O0+24(%R0)\n"
56 " ld 4,40(%0)\n" 56 " ld 4,%O0+40(%R0)\n"
57 " ld 6,56(%0)" 57 " ld 6,%O0+56(%R0)"
58 : : "a" (fpregs), "m" (*fpregs)); 58 : : "Q" (*fpregs));
59 if (!MACHINE_HAS_IEEE) 59 if (!MACHINE_HAS_IEEE)
60 return; 60 return;
61 asm volatile( 61 asm volatile(
62 " lfpc 0(%0)\n" 62 " lfpc %0\n"
63 " ld 1,16(%0)\n" 63 " ld 1,%O0+16(%R0)\n"
64 " ld 3,32(%0)\n" 64 " ld 3,%O0+32(%R0)\n"
65 " ld 5,48(%0)\n" 65 " ld 5,%O0+48(%R0)\n"
66 " ld 7,64(%0)\n" 66 " ld 7,%O0+64(%R0)\n"
67 " ld 8,72(%0)\n" 67 " ld 8,%O0+72(%R0)\n"
68 " ld 9,80(%0)\n" 68 " ld 9,%O0+80(%R0)\n"
69 " ld 10,88(%0)\n" 69 " ld 10,%O0+88(%R0)\n"
70 " ld 11,96(%0)\n" 70 " ld 11,%O0+96(%R0)\n"
71 " ld 12,104(%0)\n" 71 " ld 12,%O0+104(%R0)\n"
72 " ld 13,112(%0)\n" 72 " ld 13,%O0+112(%R0)\n"
73 " ld 14,120(%0)\n" 73 " ld 14,%O0+120(%R0)\n"
74 " ld 15,128(%0)\n" 74 " ld 15,%O0+128(%R0)\n"
75 : : "a" (fpregs), "m" (*fpregs)); 75 : : "Q" (*fpregs));
76} 76}
77 77
78static inline void save_access_regs(unsigned int *acrs) 78static inline void save_access_regs(unsigned int *acrs)
79{ 79{
80 asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); 80 asm volatile("stam 0,15,%0" : "=Q" (*acrs));
81} 81}
82 82
83static inline void restore_access_regs(unsigned int *acrs) 83static inline void restore_access_regs(unsigned int *acrs)
84{ 84{
85 asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); 85 asm volatile("lam 0,15,%0" : : "Q" (*acrs));
86} 86}
87 87
88#define switch_to(prev,next,last) do { \ 88#define switch_to(prev,next,last) do { \
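
One subtlety in the access-register helpers above: "=Q" (*acrs) nominally describes a single unsigned int, while stam 0,15 stores 64 bytes. A stricter variant would size the operand with the same struct-cast trick the __ctl_store() macro uses further down; a sketch, not the patched code:

        static inline void save_access_regs_sized(unsigned int *acrs)
        {
                typedef struct { unsigned int _[16]; } acrstype;

                /* tell the compiler all 16 words are written */
                asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *) acrs));
        }
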
@@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
139 shift = (3 ^ (addr & 3)) << 3; 139 shift = (3 ^ (addr & 3)) << 3;
140 addr ^= addr & 3; 140 addr ^= addr & 3;
141 asm volatile( 141 asm volatile(
142 " l %0,0(%4)\n" 142 " l %0,%4\n"
143 "0: lr 0,%0\n" 143 "0: lr 0,%0\n"
144 " nr 0,%3\n" 144 " nr 0,%3\n"
145 " or 0,%2\n" 145 " or 0,%2\n"
146 " cs %0,0,0(%4)\n" 146 " cs %0,0,%4\n"
147 " jl 0b\n" 147 " jl 0b\n"
148 : "=&d" (old), "=m" (*(int *) addr) 148 : "=&d" (old), "=Q" (*(int *) addr)
149 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 149 : "d" (x << shift), "d" (~(255 << shift)),
150 "m" (*(int *) addr) : "memory", "cc", "0"); 150 "Q" (*(int *) addr) : "memory", "cc", "0");
151 return old >> shift; 151 return old >> shift;
152 case 2: 152 case 2:
153 addr = (unsigned long) ptr; 153 addr = (unsigned long) ptr;
154 shift = (2 ^ (addr & 2)) << 3; 154 shift = (2 ^ (addr & 2)) << 3;
155 addr ^= addr & 2; 155 addr ^= addr & 2;
156 asm volatile( 156 asm volatile(
157 " l %0,0(%4)\n" 157 " l %0,%4\n"
158 "0: lr 0,%0\n" 158 "0: lr 0,%0\n"
159 " nr 0,%3\n" 159 " nr 0,%3\n"
160 " or 0,%2\n" 160 " or 0,%2\n"
161 " cs %0,0,0(%4)\n" 161 " cs %0,0,%4\n"
162 " jl 0b\n" 162 " jl 0b\n"
163 : "=&d" (old), "=m" (*(int *) addr) 163 : "=&d" (old), "=Q" (*(int *) addr)
164 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 164 : "d" (x << shift), "d" (~(65535 << shift)),
165 "m" (*(int *) addr) : "memory", "cc", "0"); 165 "Q" (*(int *) addr) : "memory", "cc", "0");
166 return old >> shift; 166 return old >> shift;
167 case 4: 167 case 4:
168 asm volatile( 168 asm volatile(
169 " l %0,0(%3)\n" 169 " l %0,%3\n"
170 "0: cs %0,%2,0(%3)\n" 170 "0: cs %0,%2,%3\n"
171 " jl 0b\n" 171 " jl 0b\n"
172 : "=&d" (old), "=m" (*(int *) ptr) 172 : "=&d" (old), "=Q" (*(int *) ptr)
173 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 173 : "d" (x), "Q" (*(int *) ptr)
174 : "memory", "cc"); 174 : "memory", "cc");
175 return old; 175 return old;
176#ifdef __s390x__ 176#ifdef __s390x__
177 case 8: 177 case 8:
178 asm volatile( 178 asm volatile(
179 " lg %0,0(%3)\n" 179 " lg %0,%3\n"
180 "0: csg %0,%2,0(%3)\n" 180 "0: csg %0,%2,%3\n"
181 " jl 0b\n" 181 " jl 0b\n"
182 : "=&d" (old), "=m" (*(long *) ptr) 182 : "=&d" (old), "=m" (*(long *) ptr)
183 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 183 : "d" (x), "Q" (*(long *) ptr)
184 : "memory", "cc"); 184 : "memory", "cc");
185 return old; 185 return old;
186#endif /* __s390x__ */ 186#endif /* __s390x__ */
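
For the sub-word sizes, __xchg() rounds the address down to the containing aligned word and places the byte or halfword by shift and mask; on big-endian s390 byte 0 is the most significant, hence shift = (3 ^ (addr & 3)) << 3. The size==1 path in portable C, with a GCC __sync builtin standing in for the cs loop:

        static inline unsigned char xchg_byte_model(unsigned char *ptr,
                                                    unsigned char x)
        {
                unsigned long addr = (unsigned long) ptr;
                int shift = (3 ^ (addr & 3)) << 3;      /* big-endian */
                unsigned int *word = (unsigned int *) (addr & ~3UL);
                unsigned int old, new;

                do {
                        old = *word;
                        new = (old & ~(255u << shift)) |
                              ((unsigned int) x << shift);
                } while (!__sync_bool_compare_and_swap(word, old, new));
                return (old >> shift) & 0xff;
        }
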
@@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
215 shift = (3 ^ (addr & 3)) << 3; 215 shift = (3 ^ (addr & 3)) << 3;
216 addr ^= addr & 3; 216 addr ^= addr & 3;
217 asm volatile( 217 asm volatile(
218 " l %0,0(%4)\n" 218 " l %0,%2\n"
219 "0: nr %0,%5\n" 219 "0: nr %0,%5\n"
220 " lr %1,%0\n" 220 " lr %1,%0\n"
221 " or %0,%2\n" 221 " or %0,%2\n"
222 " or %1,%3\n" 222 " or %1,%3\n"
223 " cs %0,%1,0(%4)\n" 223 " cs %0,%1,%2\n"
224 " jnl 1f\n" 224 " jnl 1f\n"
225 " xr %1,%0\n" 225 " xr %1,%0\n"
226 " nr %1,%5\n" 226 " nr %1,%5\n"
227 " jnz 0b\n" 227 " jnz 0b\n"
228 "1:" 228 "1:"
229 : "=&d" (prev), "=&d" (tmp) 229 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
230 : "d" (old << shift), "d" (new << shift), "a" (ptr), 230 : "d" (old << shift), "d" (new << shift),
231 "d" (~(255 << shift)) 231 "d" (~(255 << shift)), "Q" (*(int *) ptr)
232 : "memory", "cc"); 232 : "memory", "cc");
233 return prev >> shift; 233 return prev >> shift;
234 case 2: 234 case 2:
@@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
236 shift = (2 ^ (addr & 2)) << 3; 236 shift = (2 ^ (addr & 2)) << 3;
237 addr ^= addr & 2; 237 addr ^= addr & 2;
238 asm volatile( 238 asm volatile(
239 " l %0,0(%4)\n" 239 " l %0,%2\n"
240 "0: nr %0,%5\n" 240 "0: nr %0,%5\n"
241 " lr %1,%0\n" 241 " lr %1,%0\n"
242 " or %0,%2\n" 242 " or %0,%2\n"
243 " or %1,%3\n" 243 " or %1,%3\n"
244 " cs %0,%1,0(%4)\n" 244 " cs %0,%1,%2\n"
245 " jnl 1f\n" 245 " jnl 1f\n"
246 " xr %1,%0\n" 246 " xr %1,%0\n"
247 " nr %1,%5\n" 247 " nr %1,%5\n"
248 " jnz 0b\n" 248 " jnz 0b\n"
249 "1:" 249 "1:"
250 : "=&d" (prev), "=&d" (tmp) 250 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
251 : "d" (old << shift), "d" (new << shift), "a" (ptr), 251 : "d" (old << shift), "d" (new << shift),
252 "d" (~(65535 << shift)) 252 "d" (~(65535 << shift)), "Q" (*(int *) ptr)
253 : "memory", "cc"); 253 : "memory", "cc");
254 return prev >> shift; 254 return prev >> shift;
255 case 4: 255 case 4:
256 asm volatile( 256 asm volatile(
257 " cs %0,%2,0(%3)\n" 257 " cs %0,%3,%1\n"
258 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 258 : "=&d" (prev), "=Q" (*(int *) ptr)
259 : "0" (old), "d" (new), "Q" (*(int *) ptr)
259 : "memory", "cc"); 260 : "memory", "cc");
260 return prev; 261 return prev;
261#ifdef __s390x__ 262#ifdef __s390x__
262 case 8: 263 case 8:
263 asm volatile( 264 asm volatile(
264 " csg %0,%2,0(%3)\n" 265 " csg %0,%3,%1\n"
265 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 266 : "=&d" (prev), "=Q" (*(long *) ptr)
267 : "0" (old), "d" (new), "Q" (*(long *) ptr)
266 : "memory", "cc"); 268 : "memory", "cc");
267 return prev; 269 return prev;
268#endif /* __s390x__ */ 270#endif /* __s390x__ */
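
The jnl/xr/nr/jnz tail in the sub-word __cmpxchg() cases separates two failure modes: when only a neighbouring byte inside the word changed, the loop rebuilds the expected word and retries; when the target byte itself no longer matches, it returns the current value. The same logic in C, again modelling cs with a __sync builtin:

        static inline unsigned char cmpxchg_byte_model(unsigned char *p,
                                                       unsigned char old_v,
                                                       unsigned char new_v)
        {
                unsigned long addr = (unsigned long) p;
                int shift = (3 ^ (addr & 3)) << 3;
                unsigned int *word = (unsigned int *) (addr & ~3UL);
                unsigned int mask = 255u << shift;
                unsigned int cur = *word;

                for (;;) {
                        unsigned int expected = (cur & ~mask) |
                                        ((unsigned int) old_v << shift);
                        unsigned int desired = (cur & ~mask) |
                                        ((unsigned int) new_v << shift);

                        cur = __sync_val_compare_and_swap(word, expected,
                                                          desired);
                        if (cur == expected)
                                return old_v;                   /* swapped */
                        if (((cur >> shift) & 0xff) != old_v)
                                return (cur >> shift) & 0xff;   /* mismatch */
                        /* only a neighbouring byte changed: retry */
                }
        }
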
@@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
302#define __ctl_load(array, low, high) ({ \ 304#define __ctl_load(array, low, high) ({ \
303 typedef struct { char _[sizeof(array)]; } addrtype; \ 305 typedef struct { char _[sizeof(array)]; } addrtype; \
304 asm volatile( \ 306 asm volatile( \
305 " lctlg %1,%2,0(%0)\n" \ 307 " lctlg %1,%2,%0\n" \
306 : : "a" (&array), "i" (low), "i" (high), \ 308 : : "Q" (*(addrtype *)(&array)), \
307 "m" (*(addrtype *)(&array))); \ 309 "i" (low), "i" (high)); \
308 }) 310 })
309 311
310#define __ctl_store(array, low, high) ({ \ 312#define __ctl_store(array, low, high) ({ \
311 typedef struct { char _[sizeof(array)]; } addrtype; \ 313 typedef struct { char _[sizeof(array)]; } addrtype; \
312 asm volatile( \ 314 asm volatile( \
313 " stctg %2,%3,0(%1)\n" \ 315 " stctg %1,%2,%0\n" \
314 : "=m" (*(addrtype *)(&array)) \ 316 : "=Q" (*(addrtype *)(&array)) \
315 : "a" (&array), "i" (low), "i" (high)); \ 317 : "i" (low), "i" (high)); \
316 }) 318 })
317 319
318#else /* __s390x__ */ 320#else /* __s390x__ */
@@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
320#define __ctl_load(array, low, high) ({ \ 322#define __ctl_load(array, low, high) ({ \
321 typedef struct { char _[sizeof(array)]; } addrtype; \ 323 typedef struct { char _[sizeof(array)]; } addrtype; \
322 asm volatile( \ 324 asm volatile( \
323 " lctl %1,%2,0(%0)\n" \ 325 " lctl %1,%2,%0\n" \
324 : : "a" (&array), "i" (low), "i" (high), \ 326 : : "Q" (*(addrtype *)(&array)), \
325 "m" (*(addrtype *)(&array))); \ 327 "i" (low), "i" (high)); \
326}) 328})
327 329
328#define __ctl_store(array, low, high) ({ \ 330#define __ctl_store(array, low, high) ({ \
329 typedef struct { char _[sizeof(array)]; } addrtype; \ 331 typedef struct { char _[sizeof(array)]; } addrtype; \
330 asm volatile( \ 332 asm volatile( \
331 " stctl %2,%3,0(%1)\n" \ 333 " stctl %1,%2,%0\n" \
332 : "=m" (*(addrtype *)(&array)) \ 334 : "=Q" (*(addrtype *)(&array)) \
333 : "a" (&array), "i" (low), "i" (high)); \ 335 : "i" (low), "i" (high)); \
334 }) 336 })
335 337
336#endif /* __s390x__ */ 338#endif /* __s390x__ */
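
The addrtype cast in these macros exists purely to size the memory operand: "=Q" on the bare array would describe only its first element, letting the compiler assume the remaining control-register slots are untouched. Typical use (the modified bit is hypothetical):

        static void set_cr0_bit_sketch(void)
        {
                unsigned long cregs[16];

                __ctl_store(cregs, 0, 15);      /* capture CR0..CR15 */
                cregs[0] |= 1UL << 10;          /* hypothetical enable bit */
                __ctl_load(cregs, 0, 15);
        }
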
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 66069e736842..34f0873d6525 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -73,7 +73,7 @@ struct thread_info {
73/* how to get the thread information struct from C */ 73/* how to get the thread information struct from C */
74static inline struct thread_info *current_thread_info(void) 74static inline struct thread_info *current_thread_info(void)
75{ 75{
76 return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); 76 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
77} 77}
78 78
79#define THREAD_SIZE_ORDER THREAD_ORDER 79#define THREAD_SIZE_ORDER THREAD_ORDER
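
Both forms read the same lowcore slot; S390_lowcore.kernel_stack is simply the typed view of the field at offset __LC_KERNEL_STACK. The subtraction works because thread_info sits at the bottom of the THREAD_SIZE-aligned kernel stack:

        /*
         * kernel_stack ----------------> top of stack (grows down)
         *   ...
         * kernel_stack - THREAD_SIZE --> struct thread_info
         */
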
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 68d9fea34b4b..f174bdaa6b59 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -20,10 +20,10 @@ static inline int set_clock(__u64 time)
20 int cc; 20 int cc;
21 21
22 asm volatile( 22 asm volatile(
23 " sck 0(%2)\n" 23 " sck %1\n"
24 " ipm %0\n" 24 " ipm %0\n"
25 " srl %0,28\n" 25 " srl %0,28\n"
26 : "=d" (cc) : "m" (time), "a" (&time) : "cc"); 26 : "=d" (cc) : "Q" (time) : "cc");
27 return cc; 27 return cc;
28} 28}
29 29
@@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time)
32 int cc; 32 int cc;
33 33
34 asm volatile( 34 asm volatile(
35 " stck 0(%2)\n" 35 " stck %1\n"
36 " ipm %0\n" 36 " ipm %0\n"
37 " srl %0,28\n" 37 " srl %0,28\n"
38 : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); 38 : "=d" (cc), "=Q" (*time) : : "cc");
39 return cc; 39 return cc;
40} 40}
41 41
42static inline void set_clock_comparator(__u64 time) 42static inline void set_clock_comparator(__u64 time)
43{ 43{
44 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); 44 asm volatile("sckc %0" : : "Q" (time));
45} 45}
46 46
47static inline void store_clock_comparator(__u64 *time) 47static inline void store_clock_comparator(__u64 *time)
48{ 48{
49 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); 49 asm volatile("stckc %0" : "=Q" (*time));
50} 50}
51 51
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
@@ -57,11 +57,7 @@ static inline unsigned long long get_clock (void)
57{ 57{
58 unsigned long long clk; 58 unsigned long long clk;
59 59
60#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
61 asm volatile("stck %0" : "=Q" (clk) : : "cc"); 60 asm volatile("stck %0" : "=Q" (clk) : : "cc");
62#else /* __GNUC__ */
63 asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
64#endif /* __GNUC__ */
65 return clk; 61 return clk;
66} 62}
67 63
@@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void)
69{ 65{
70 unsigned char clk[16]; 66 unsigned char clk[16];
71 67
72#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
73 asm volatile("stcke %0" : "=Q" (clk) : : "cc"); 68 asm volatile("stcke %0" : "=Q" (clk) : : "cc");
74#else /* __GNUC__ */
75 asm volatile("stcke 0(%1)" : "=m" (clk)
76 : "a" (clk) : "cc");
77#endif /* __GNUC__ */
78
79 return *((unsigned long long *)&clk[1]); 69 return *((unsigned long long *)&clk[1]);
80} 70}
81 71
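
get_clock() returns the raw TOD value, in which bit 51 ticks once per microsecond, i.e. one TOD unit is 2^-12 microseconds. Converting to nanoseconds is therefore a multiply by 125/512; a sketch (the kernel's own conversion also splits the multiply to avoid 64-bit overflow):

        static inline unsigned long long tod_to_ns_sketch(unsigned long long clk)
        {
                /* ns = clk * 1000 / 4096 = clk * 125 / 512 */
                return (clk * 125) >> 9;
        }
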
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cbf0a8745bf4..d6b1ed0ec52b 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
265 return uaccess.copy_from_user(n, from, to); 265 return uaccess.copy_from_user(n, from, to);
266} 266}
267 267
268extern void copy_from_user_overflow(void)
269#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
270__compiletime_warning("copy_from_user() buffer size is not provably correct")
271#endif
272;
273
268/** 274/**
269 * copy_from_user: - Copy a block of data from user space. 275 * copy_from_user: - Copy a block of data from user space.
270 * @to: Destination address, in kernel space. 276 * @to: Destination address, in kernel space.
@@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
284static inline unsigned long __must_check 290static inline unsigned long __must_check
285copy_from_user(void *to, const void __user *from, unsigned long n) 291copy_from_user(void *to, const void __user *from, unsigned long n)
286{ 292{
293 unsigned int sz = __compiletime_object_size(to);
294
287 might_fault(); 295 might_fault();
296 if (unlikely(sz != -1 && sz < n)) {
297 copy_from_user_overflow();
298 return n;
299 }
288 if (access_ok(VERIFY_READ, from, n)) 300 if (access_ok(VERIFY_READ, from, n))
289 n = __copy_from_user(to, from, n); 301 n = __copy_from_user(to, from, n);
290 else 302 else
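
With CONFIG_DEBUG_STRICT_USER_COPY_CHECKS enabled, any copy_from_user() whose destination the compiler can prove smaller than n keeps a live call to copy_from_user_overflow(), so the __compiletime_warning attribute fires at build time; at run time the copy is refused and the full n is reported as uncopied. A hypothetical offender:

        static long ioctl_sketch(void __user *uarg)
        {
                char buf[16];

                /* sz = 16 is known at compile time, n = 32 exceeds it:
                 * warns at build time, returns 32 at run time */
                if (copy_from_user(buf, uarg, 32))
                        return -EFAULT;
                return 0;
        }
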
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 7bdd7c8ebc91..4a76d9480cce 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -7,7 +7,7 @@
7#define VDSO32_LBASE 0 7#define VDSO32_LBASE 0
8#define VDSO64_LBASE 0 8#define VDSO64_LBASE 0
9 9
10#define VDSO_VERSION_STRING LINUX_2.6.26 10#define VDSO_VERSION_STRING LINUX_2.6.29
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13